/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/swapops.h>
#include <linux/dax.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/pfn_t.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

enum scan_result {
	SCAN_FAIL,
	SCAN_SUCCEED,
	SCAN_PMD_NULL,
	SCAN_EXCEED_NONE_PTE,
	SCAN_PTE_NON_PRESENT,
	SCAN_PAGE_RO,
	SCAN_NO_REFERENCED_PAGE,
	SCAN_PAGE_NULL,
	SCAN_SCAN_ABORT,
	SCAN_PAGE_COUNT,
	SCAN_PAGE_LRU,
	SCAN_PAGE_LOCK,
	SCAN_PAGE_ANON,
	SCAN_PAGE_COMPOUND,
	SCAN_ANY_PROCESS,
	SCAN_VMA_NULL,
	SCAN_VMA_CHECK,
	SCAN_ADDRESS_RANGE,
	SCAN_SWAP_CACHE_PAGE,
	SCAN_DEL_PAGE_LRU,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL
};

#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * enabled for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse hugepages if there is at least one pte mapped,
 * just as would have happened if the vma had been large enough during
 * the page fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
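/*
 * Illustrative arithmetic, assuming the common 4K page / 2M PMD case
 * (HPAGE_PMD_NR == 512): the default of HPAGE_PMD_NR-1 == 511 allows a
 * collapse when as few as one pte in the range is actually mapped,
 * whereas max_ptes_none == 0 would require all 512 ptes to be present.
 */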

static int khugepaged(void *none);
static int khugepaged_slab_init(void);
static void khugepaged_slab_exit(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only the one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

static DEFINE_SPINLOCK(split_queue_lock);
static LIST_HEAD(split_queue);
static unsigned long split_queue_len;
static struct shrinker deferred_split_shrinker;

static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* don't ever allow to reserve more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);
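
	/*
	 * Worked example with assumed values (not taken from this code):
	 * with 4K pages, pageblock_nr_pages == 512 and a single populated
	 * zone, the two terms above give 512 * 1 * 2 == 1024 pages plus
	 * 512 * 1 * 3 * 3 == 4608 pages, i.e. 5632 pages (~22 MB), before
	 * the 5%-of-lowmem cap and the final shift from pages to kilobytes.
	 */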

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu "
				"to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}

static int start_stop_khugepaged(void)
{
	int err = 0;
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	return err;
}

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return READ_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take additional reference here. It will be put back by shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return READ_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * Counter should never go to zero here. Only shrinker can put
	 * last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}
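
/*
 * A sketch of the lifecycle implemented above: right after allocation the
 * refcount is 2 (one reference for the cached huge_zero_page pointer, one
 * for the caller); get_huge_zero_page() bumps it and put_huge_zero_page()
 * drops it, while only the shrinker below may drop the final cached
 * reference (1 -> 0) and actually free the page.
 */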

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free zero page only if last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_stop_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);
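
/*
 * Example usage from userspace (assuming sysfs is mounted at /sys):
 *
 *   echo always  > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *   echo never   > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * Reading the file reports the current selection with the active value
 * in brackets, e.g. "always [madvise] never".
 */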

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		goto err_sysfs;

	err = khugepaged_slab_init();
	if (err)
		goto err_slab;

	err = register_shrinker(&huge_zero_page_shrinker);
	if (err)
		goto err_hzp_shrinker;
	err = register_shrinker(&deferred_split_shrinker);
	if (err)
		goto err_split_shrinker;

	/*
	 * By default disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
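	/*
	 * For example, assuming 4K pages (PAGE_SHIFT == 12), the threshold
	 * below is 512 << 8 == 131072 pages, i.e. 512 MB of RAM.
	 */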
	if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
		transparent_hugepage_flags = 0;
		return 0;
	}

	err = start_stop_khugepaged();
	if (err)
		goto err_khugepaged;

	return 0;
err_khugepaged:
	unregister_shrinker(&deferred_split_shrinker);
err_split_shrinker:
	unregister_shrinker(&huge_zero_page_shrinker);
err_hzp_shrinker:
	khugepaged_slab_exit();
err_slab:
	hugepage_exit_sysfs(hugepage_kobj);
err_sysfs:
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * ->lru in the tail pages is occupied by compound_head.
	 * Let's use ->mapping + ->index in the second tail page as list_head.
	 */
	return (struct list_head *)&page[2].mapping;
}

void prep_transhuge_page(struct page *page)
{
	/*
	 * we use page->mapping and page->index in the second tail page
	 * as list_head: assuming THP order >= 2
	 */
	BUILD_BUG_ON(HPAGE_PMD_ORDER < 2);

	INIT_LIST_HEAD(page_deferred_list(page));
	set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR);
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					struct page *page, gfp_t gfp,
					unsigned int flags)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, gfp, &memcg, true)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg, true);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;

		/* Deliver the page fault to userland */
		if (userfaultfd_missing(vma)) {
			int ret;

			spin_unlock(ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			put_page(page);
			pte_free(mm, pgtable);
			ret = handle_userfault(vma, address, flags,
					       VM_UFFD_MISSING);
			VM_BUG_ON(ret & VM_FAULT_FALLBACK);
			return ret;
		}

		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
		count_vm_event(THP_FAULT_ALLOC);
	}

	return 0;
}

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
}
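
/*
 * In other words: with defrag enabled the full GFP_TRANSHUGE mask is used,
 * so the huge page allocation may enter direct reclaim/compaction; with
 * defrag disabled the __GFP_RECLAIM bits are cleared, so the allocation
 * fails quickly instead of stalling the fault and the callers fall back
 * to small pages.
 */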
| 852 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 853 | /* Caller must hold page table lock. */ |
Kirill A. Shutemov | d295e34 | 2015-09-08 14:59:34 -0700 | [diff] [blame] | 854 | static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 855 | struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 856 | struct page *zero_page) |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 857 | { |
| 858 | pmd_t entry; |
Andrew Morton | 7c41416 | 2015-09-08 14:58:43 -0700 | [diff] [blame] | 859 | if (!pmd_none(*pmd)) |
| 860 | return false; |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 861 | entry = mk_pmd(zero_page, vma->vm_page_prot); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 862 | entry = pmd_mkhuge(entry); |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 863 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 864 | set_pmd_at(mm, haddr, pmd, entry); |
Kirill A. Shutemov | e1f56c8 | 2013-11-14 14:30:48 -0800 | [diff] [blame] | 865 | atomic_long_inc(&mm->nr_ptes); |
Andrew Morton | 7c41416 | 2015-09-08 14:58:43 -0700 | [diff] [blame] | 866 | return true; |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 867 | } |
| 868 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 869 | int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 870 | unsigned long address, pmd_t *pmd, |
| 871 | unsigned int flags) |
| 872 | { |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 873 | gfp_t gfp; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 874 | struct page *page; |
| 875 | unsigned long haddr = address & HPAGE_PMD_MASK; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 876 | |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 877 | if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 878 | return VM_FAULT_FALLBACK; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 879 | if (unlikely(anon_vma_prepare(vma))) |
| 880 | return VM_FAULT_OOM; |
David Rientjes | 6d50e60 | 2014-10-29 14:50:31 -0700 | [diff] [blame] | 881 | if (unlikely(khugepaged_enter(vma, vma->vm_flags))) |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 882 | return VM_FAULT_OOM; |
Dominik Dingel | 593befa | 2014-10-23 12:07:44 +0200 | [diff] [blame] | 883 | if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) && |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 884 | transparent_hugepage_use_zero_page()) { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 885 | spinlock_t *ptl; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 886 | pgtable_t pgtable; |
| 887 | struct page *zero_page; |
| 888 | bool set; |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 889 | int ret; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 890 | pgtable = pte_alloc_one(mm, haddr); |
| 891 | if (unlikely(!pgtable)) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 892 | return VM_FAULT_OOM; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 893 | zero_page = get_huge_zero_page(); |
| 894 | if (unlikely(!zero_page)) { |
| 895 | pte_free(mm, pgtable); |
Andi Kleen | 81ab420 | 2011-04-14 15:22:06 -0700 | [diff] [blame] | 896 | count_vm_event(THP_FAULT_FALLBACK); |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 897 | return VM_FAULT_FALLBACK; |
Andi Kleen | 81ab420 | 2011-04-14 15:22:06 -0700 | [diff] [blame] | 898 | } |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 899 | ptl = pmd_lock(mm, pmd); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 900 | ret = 0; |
| 901 | set = false; |
| 902 | if (pmd_none(*pmd)) { |
| 903 | if (userfaultfd_missing(vma)) { |
| 904 | spin_unlock(ptl); |
Andrea Arcangeli | 230c92a | 2015-09-04 15:47:20 -0700 | [diff] [blame] | 905 | ret = handle_userfault(vma, address, flags, |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 906 | VM_UFFD_MISSING); |
| 907 | VM_BUG_ON(ret & VM_FAULT_FALLBACK); |
| 908 | } else { |
| 909 | set_huge_zero_page(pgtable, mm, vma, |
| 910 | haddr, pmd, |
| 911 | zero_page); |
| 912 | spin_unlock(ptl); |
| 913 | set = true; |
| 914 | } |
| 915 | } else |
| 916 | spin_unlock(ptl); |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 917 | if (!set) { |
| 918 | pte_free(mm, pgtable); |
| 919 | put_huge_zero_page(); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 920 | } |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 921 | return ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 922 | } |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 923 | gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); |
| 924 | page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 925 | if (unlikely(!page)) { |
| 926 | count_vm_event(THP_FAULT_FALLBACK); |
Kirill A. Shutemov | c029255 | 2013-09-12 15:14:05 -0700 | [diff] [blame] | 927 | return VM_FAULT_FALLBACK; |
Kirill A. Shutemov | 128ec03 | 2013-09-12 15:14:03 -0700 | [diff] [blame] | 928 | } |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 929 | prep_transhuge_page(page); |
Andrea Arcangeli | 230c92a | 2015-09-04 15:47:20 -0700 | [diff] [blame] | 930 | return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp, |
| 931 | flags); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 932 | } |
| 933 | |
Matthew Wilcox | ae18d6d | 2015-09-08 14:59:14 -0700 | [diff] [blame] | 934 | static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 935 | pmd_t *pmd, pfn_t pfn, pgprot_t prot, bool write) |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 936 | { |
| 937 | struct mm_struct *mm = vma->vm_mm; |
| 938 | pmd_t entry; |
| 939 | spinlock_t *ptl; |
| 940 | |
| 941 | ptl = pmd_lock(mm, pmd); |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 942 | entry = pmd_mkhuge(pfn_t_pmd(pfn, prot)); |
| 943 | if (pfn_t_devmap(pfn)) |
| 944 | entry = pmd_mkdevmap(entry); |
Ross Zwisler | 01871e5 | 2016-01-15 16:56:02 -0800 | [diff] [blame] | 945 | if (write) { |
| 946 | entry = pmd_mkyoung(pmd_mkdirty(entry)); |
| 947 | entry = maybe_pmd_mkwrite(entry, vma); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 948 | } |
Ross Zwisler | 01871e5 | 2016-01-15 16:56:02 -0800 | [diff] [blame] | 949 | set_pmd_at(mm, addr, pmd, entry); |
| 950 | update_mmu_cache_pmd(vma, addr, pmd); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 951 | spin_unlock(ptl); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 952 | } |
| 953 | |
| 954 | int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 955 | pmd_t *pmd, pfn_t pfn, bool write) |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 956 | { |
| 957 | pgprot_t pgprot = vma->vm_page_prot; |
| 958 | /* |
| 959 | * If we had pmd_special, we could avoid all these restrictions, |
| 960 | * but we need to be consistent with PTEs and architectures that |
| 961 | * can't support a 'special' bit. |
| 962 | */ |
| 963 | BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); |
| 964 | BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == |
| 965 | (VM_PFNMAP|VM_MIXEDMAP)); |
| 966 | BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); |
Dan Williams | f25748e3 | 2016-01-15 16:56:43 -0800 | [diff] [blame] | 967 | BUG_ON(!pfn_t_devmap(pfn)); |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 968 | |
| 969 | if (addr < vma->vm_start || addr >= vma->vm_end) |
| 970 | return VM_FAULT_SIGBUS; |
| 971 | if (track_pfn_insert(vma, &pgprot, pfn)) |
| 972 | return VM_FAULT_SIGBUS; |
Matthew Wilcox | ae18d6d | 2015-09-08 14:59:14 -0700 | [diff] [blame] | 973 | insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write); |
| 974 | return VM_FAULT_NOPAGE; |
Matthew Wilcox | 5cad465 | 2015-09-08 14:58:54 -0700 | [diff] [blame] | 975 | } |
| 976 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 977 | int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, |
| 978 | pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, |
| 979 | struct vm_area_struct *vma) |
| 980 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 981 | spinlock_t *dst_ptl, *src_ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 982 | struct page *src_page; |
| 983 | pmd_t pmd; |
| 984 | pgtable_t pgtable; |
| 985 | int ret; |
| 986 | |
| 987 | ret = -ENOMEM; |
| 988 | pgtable = pte_alloc_one(dst_mm, addr); |
| 989 | if (unlikely(!pgtable)) |
| 990 | goto out; |
| 991 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 992 | dst_ptl = pmd_lock(dst_mm, dst_pmd); |
| 993 | src_ptl = pmd_lockptr(src_mm, src_pmd); |
| 994 | spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 995 | |
| 996 | ret = -EAGAIN; |
| 997 | pmd = *src_pmd; |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 998 | if (unlikely(!pmd_trans_huge(pmd) && !pmd_devmap(pmd))) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 999 | pte_free(dst_mm, pgtable); |
| 1000 | goto out_unlock; |
| 1001 | } |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1002 | /* |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1003 | * When page table lock is held, the huge zero pmd should not be |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1004 | * under splitting since we don't split the page itself, only pmd to |
| 1005 | * a page table. |
| 1006 | */ |
| 1007 | if (is_huge_zero_pmd(pmd)) { |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 1008 | struct page *zero_page; |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1009 | /* |
| 1010 | * get_huge_zero_page() will never allocate a new page here, |
| 1011 | * since we already have a zero page to copy. It just takes a |
| 1012 | * reference. |
| 1013 | */ |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 1014 | zero_page = get_huge_zero_page(); |
Andrea Arcangeli | 6b251fc | 2015-09-04 15:46:20 -0700 | [diff] [blame] | 1015 | set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, |
Kirill A. Shutemov | 5918d10 | 2013-04-29 15:08:44 -0700 | [diff] [blame] | 1016 | zero_page); |
Kirill A. Shutemov | fc9fe82 | 2012-12-12 13:50:51 -0800 | [diff] [blame] | 1017 | ret = 0; |
| 1018 | goto out_unlock; |
| 1019 | } |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1020 | |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 1021 | if (pmd_trans_huge(pmd)) { |
| 1022 | /* thp accounting separate from pmd_devmap accounting */ |
| 1023 | src_page = pmd_page(pmd); |
| 1024 | VM_BUG_ON_PAGE(!PageHead(src_page), src_page); |
| 1025 | get_page(src_page); |
| 1026 | page_dup_rmap(src_page, true); |
| 1027 | add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); |
| 1028 | atomic_long_inc(&dst_mm->nr_ptes); |
| 1029 | pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable); |
| 1030 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1031 | |
| 1032 | pmdp_set_wrprotect(src_mm, addr, src_pmd); |
| 1033 | pmd = pmd_mkold(pmd_wrprotect(pmd)); |
| 1034 | set_pmd_at(dst_mm, addr, dst_pmd, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1035 | |
| 1036 | ret = 0; |
| 1037 | out_unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1038 | spin_unlock(src_ptl); |
| 1039 | spin_unlock(dst_ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1040 | out: |
| 1041 | return ret; |
| 1042 | } |
| 1043 | |
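/*
 * Mark a huge pmd young (and dirty, if requested) after an access fault,
 * re-validating the entry under the pmd lock before touching it.
 */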
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1044 | void huge_pmd_set_accessed(struct mm_struct *mm, |
| 1045 | struct vm_area_struct *vma, |
| 1046 | unsigned long address, |
| 1047 | pmd_t *pmd, pmd_t orig_pmd, |
| 1048 | int dirty) |
| 1049 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1050 | spinlock_t *ptl; |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1051 | pmd_t entry; |
| 1052 | unsigned long haddr; |
| 1053 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1054 | ptl = pmd_lock(mm, pmd); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1055 | if (unlikely(!pmd_same(*pmd, orig_pmd))) |
| 1056 | goto unlock; |
| 1057 | |
| 1058 | entry = pmd_mkyoung(orig_pmd); |
| 1059 | haddr = address & HPAGE_PMD_MASK; |
| 1060 | if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty)) |
| 1061 | update_mmu_cache_pmd(vma, address, pmd); |
| 1062 | |
| 1063 | unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1064 | spin_unlock(ptl); |
Will Deacon | a1dd450 | 2012-12-11 16:01:27 -0800 | [diff] [blame] | 1065 | } |
| 1066 | |
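/*
 * Write-protect fault fallback when no new huge page can be used: copy
 * the THP into HPAGE_PMD_NR small pages and remap the range with regular
 * ptes, leaving the original huge page to be released.
 */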
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1067 | static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, |
| 1068 | struct vm_area_struct *vma, |
| 1069 | unsigned long address, |
| 1070 | pmd_t *pmd, pmd_t orig_pmd, |
| 1071 | struct page *page, |
| 1072 | unsigned long haddr) |
| 1073 | { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1074 | struct mem_cgroup *memcg; |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1075 | spinlock_t *ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1076 | pgtable_t pgtable; |
| 1077 | pmd_t _pmd; |
| 1078 | int ret = 0, i; |
| 1079 | struct page **pages; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1080 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 1081 | unsigned long mmun_end; /* For mmu_notifiers */ |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1082 | |
| 1083 | pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR, |
| 1084 | GFP_KERNEL); |
| 1085 | if (unlikely(!pages)) { |
| 1086 | ret |= VM_FAULT_OOM; |
| 1087 | goto out; |
| 1088 | } |
| 1089 | |
| 1090 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
Andi Kleen | cc5d462 | 2011-03-22 16:33:13 -0700 | [diff] [blame] | 1091 | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE | |
| 1092 | __GFP_OTHER_NODE, |
Andi Kleen | 19ee151 | 2011-03-04 17:36:31 -0800 | [diff] [blame] | 1093 | vma, address, page_to_nid(page)); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1094 | if (unlikely(!pages[i] || |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1095 | mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL, |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1096 | &memcg, false))) { |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1097 | if (pages[i]) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1098 | put_page(pages[i]); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1099 | while (--i >= 0) { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1100 | memcg = (void *)page_private(pages[i]); |
| 1101 | set_page_private(pages[i], 0); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1102 | mem_cgroup_cancel_charge(pages[i], memcg, |
| 1103 | false); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1104 | put_page(pages[i]); |
| 1105 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1106 | kfree(pages); |
| 1107 | ret |= VM_FAULT_OOM; |
| 1108 | goto out; |
| 1109 | } |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1110 | set_page_private(pages[i], (unsigned long)memcg); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1111 | } |
| 1112 | |
| 1113 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 1114 | copy_user_highpage(pages[i], page + i, |
Hillf Danton | 0089e48 | 2011-10-31 17:09:38 -0700 | [diff] [blame] | 1115 | haddr + PAGE_SIZE * i, vma); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1116 | __SetPageUptodate(pages[i]); |
| 1117 | cond_resched(); |
| 1118 | } |
| 1119 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1120 | mmun_start = haddr; |
| 1121 | mmun_end = haddr + HPAGE_PMD_SIZE; |
| 1122 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
| 1123 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1124 | ptl = pmd_lock(mm, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1125 | if (unlikely(!pmd_same(*pmd, orig_pmd))) |
| 1126 | goto out_free_pages; |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1127 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1128 | |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1129 | pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1130 | /* leave pmd empty until pte is filled */ |
| 1131 | |
Aneesh Kumar K.V | 6b0b50b | 2013-06-05 17:14:02 -0700 | [diff] [blame] | 1132 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1133 | pmd_populate(mm, &_pmd, pgtable); |
| 1134 | |
| 1135 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
| 1136 | pte_t *pte, entry; |
| 1137 | entry = mk_pte(pages[i], vma->vm_page_prot); |
| 1138 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1139 | memcg = (void *)page_private(pages[i]); |
| 1140 | set_page_private(pages[i], 0); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1141 | page_add_new_anon_rmap(pages[i], vma, haddr, false); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1142 | mem_cgroup_commit_charge(pages[i], memcg, false, false); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1143 | lru_cache_add_active_or_unevictable(pages[i], vma); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1144 | pte = pte_offset_map(&_pmd, haddr); |
| 1145 | VM_BUG_ON(!pte_none(*pte)); |
| 1146 | set_pte_at(mm, haddr, pte, entry); |
| 1147 | pte_unmap(pte); |
| 1148 | } |
| 1149 | kfree(pages); |
| 1150 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1151 | smp_wmb(); /* make pte visible before pmd */ |
| 1152 | pmd_populate(mm, pmd, pgtable); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1153 | page_remove_rmap(page, true); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1154 | spin_unlock(ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1155 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1156 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
| 1157 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1158 | ret |= VM_FAULT_WRITE; |
| 1159 | put_page(page); |
| 1160 | |
| 1161 | out: |
| 1162 | return ret; |
| 1163 | |
| 1164 | out_free_pages: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1165 | spin_unlock(ptl); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1166 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1167 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1168 | memcg = (void *)page_private(pages[i]); |
| 1169 | set_page_private(pages[i], 0); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1170 | mem_cgroup_cancel_charge(pages[i], memcg, false); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1171 | put_page(pages[i]); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1172 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1173 | kfree(pages); |
| 1174 | goto out; |
| 1175 | } |
| 1176 | |
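/*
 * Handle a write fault on a huge pmd. If we are the only mapper the page
 * is reused in place; otherwise we try to COW into a newly allocated THP,
 * and on allocation or charge failure fall back to splitting the pmd (or
 * to the small-page copy above).
 */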
| 1177 | int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 1178 | unsigned long address, pmd_t *pmd, pmd_t orig_pmd) |
| 1179 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1180 | spinlock_t *ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1181 | int ret = 0; |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1182 | struct page *page = NULL, *new_page; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1183 | struct mem_cgroup *memcg; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1184 | unsigned long haddr; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1185 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 1186 | unsigned long mmun_end; /* For mmu_notifiers */ |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 1187 | gfp_t huge_gfp; /* for allocation and charge */ |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1188 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1189 | ptl = pmd_lockptr(mm, pmd); |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 1190 | VM_BUG_ON_VMA(!vma->anon_vma, vma); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1191 | haddr = address & HPAGE_PMD_MASK; |
| 1192 | if (is_huge_zero_pmd(orig_pmd)) |
| 1193 | goto alloc; |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1194 | spin_lock(ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1195 | if (unlikely(!pmd_same(*pmd, orig_pmd))) |
| 1196 | goto out_unlock; |
| 1197 | |
| 1198 | page = pmd_page(orig_pmd); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1199 | VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page); |
Kirill A. Shutemov | 1f25fe2 | 2016-01-15 16:52:24 -0800 | [diff] [blame] | 1200 | /* |
| 1201 | * We can only reuse the page if nobody else maps the huge page or its |
| 1202 | * subpages. We could check page_mapcount() on each sub-page, but that |
| 1203 | * is expensive. |
| 1204 | * The cheaper way is to check that page_count() equals 1: every |
| 1205 | * mapcount takes a page reference, so this way we can guarantee |
| 1206 | * that the PMD is the only mapping. |
| 1207 | * This can give a false negative if somebody pinned the page, but that's |
| 1208 | * fine. |
| 1209 | */ |
| 1210 | if (page_mapcount(page) == 1 && page_count(page) == 1) { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1211 | pmd_t entry; |
| 1212 | entry = pmd_mkyoung(orig_pmd); |
| 1213 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
| 1214 | if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1)) |
David Miller | b113da6 | 2012-10-08 16:34:25 -0700 | [diff] [blame] | 1215 | update_mmu_cache_pmd(vma, address, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1216 | ret |= VM_FAULT_WRITE; |
| 1217 | goto out_unlock; |
| 1218 | } |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1219 | get_page(page); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1220 | spin_unlock(ptl); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1221 | alloc: |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1222 | if (transparent_hugepage_enabled(vma) && |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 1223 | !transparent_hugepage_debug_cow()) { |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 1224 | huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0); |
| 1225 | new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); |
Aneesh Kumar K.V | 077fcf1 | 2015-02-11 15:27:12 -0800 | [diff] [blame] | 1226 | } else |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1227 | new_page = NULL; |
| 1228 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 1229 | if (likely(new_page)) { |
| 1230 | prep_transhuge_page(new_page); |
| 1231 | } else { |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1232 | if (!page) { |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 1233 | split_huge_pmd(vma, pmd, address); |
Kirill A. Shutemov | e9b71ca | 2014-04-03 14:48:17 -0700 | [diff] [blame] | 1234 | ret |= VM_FAULT_FALLBACK; |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1235 | } else { |
| 1236 | ret = do_huge_pmd_wp_page_fallback(mm, vma, address, |
| 1237 | pmd, orig_pmd, page, haddr); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1238 | if (ret & VM_FAULT_OOM) { |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 1239 | split_huge_pmd(vma, pmd, address); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1240 | ret |= VM_FAULT_FALLBACK; |
| 1241 | } |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1242 | put_page(page); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1243 | } |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1244 | count_vm_event(THP_FAULT_FALLBACK); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1245 | goto out; |
| 1246 | } |
| 1247 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1248 | if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg, |
| 1249 | true))) { |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1250 | put_page(new_page); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1251 | if (page) { |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 1252 | split_huge_pmd(vma, pmd, address); |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1253 | put_page(page); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1254 | } else |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 1255 | split_huge_pmd(vma, pmd, address); |
Kirill A. Shutemov | 9845cbb | 2014-02-25 15:01:42 -0800 | [diff] [blame] | 1256 | ret |= VM_FAULT_FALLBACK; |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1257 | count_vm_event(THP_FAULT_FALLBACK); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1258 | goto out; |
| 1259 | } |
| 1260 | |
David Rientjes | 17766dd | 2013-09-12 15:14:06 -0700 | [diff] [blame] | 1261 | count_vm_event(THP_FAULT_ALLOC); |
| 1262 | |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1263 | if (!page) |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1264 | clear_huge_page(new_page, haddr, HPAGE_PMD_NR); |
| 1265 | else |
| 1266 | copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1267 | __SetPageUptodate(new_page); |
| 1268 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1269 | mmun_start = haddr; |
| 1270 | mmun_end = haddr + HPAGE_PMD_SIZE; |
| 1271 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
| 1272 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1273 | spin_lock(ptl); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1274 | if (page) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1275 | put_page(page); |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1276 | if (unlikely(!pmd_same(*pmd, orig_pmd))) { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1277 | spin_unlock(ptl); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1278 | mem_cgroup_cancel_charge(new_page, memcg, true); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1279 | put_page(new_page); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1280 | goto out_mn; |
Andrea Arcangeli | b9bbfbe | 2011-01-13 15:46:57 -0800 | [diff] [blame] | 1281 | } else { |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1282 | pmd_t entry; |
Kirill A. Shutemov | 3122359 | 2013-09-12 15:14:01 -0700 | [diff] [blame] | 1283 | entry = mk_huge_pmd(new_page, vma->vm_page_prot); |
| 1284 | entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1285 | pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1286 | page_add_new_anon_rmap(new_page, vma, haddr, true); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 1287 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 1288 | lru_cache_add_active_or_unevictable(new_page, vma); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1289 | set_pmd_at(mm, haddr, pmd, entry); |
David Miller | b113da6 | 2012-10-08 16:34:25 -0700 | [diff] [blame] | 1290 | update_mmu_cache_pmd(vma, address, pmd); |
Hugh Dickins | eecc1e4 | 2014-01-12 01:25:21 -0800 | [diff] [blame] | 1291 | if (!page) { |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1292 | add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1293 | put_huge_zero_page(); |
| 1294 | } else { |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1295 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1296 | page_remove_rmap(page, true); |
Kirill A. Shutemov | 93b4796 | 2012-12-12 13:50:54 -0800 | [diff] [blame] | 1297 | put_page(page); |
| 1298 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1299 | ret |= VM_FAULT_WRITE; |
| 1300 | } |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1301 | spin_unlock(ptl); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 1302 | out_mn: |
| 1303 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
| 1304 | out: |
| 1305 | return ret; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1306 | out_unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1307 | spin_unlock(ptl); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1308 | return ret; |
| 1309 | } |
| 1310 | |
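/*
 * follow_page()/GUP helper for a huge pmd: return the sub-page backing
 * @addr, honouring FOLL_WRITE, FOLL_DUMP, FOLL_NUMA, FOLL_TOUCH,
 * FOLL_MLOCK and FOLL_GET. Expects the pmd lock to be held.
 */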
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1311 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1312 | unsigned long addr, |
| 1313 | pmd_t *pmd, |
| 1314 | unsigned int flags) |
| 1315 | { |
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1316 | struct mm_struct *mm = vma->vm_mm; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1317 | struct page *page = NULL; |
| 1318 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1319 | assert_spin_locked(pmd_lockptr(mm, pmd)); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1320 | |
| 1321 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
| 1322 | goto out; |
| 1323 | |
Kirill A. Shutemov | 85facf2 | 2013-02-04 14:28:42 -0800 | [diff] [blame] | 1324 | /* Avoid dumping huge zero page */ |
| 1325 | if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd)) |
| 1326 | return ERR_PTR(-EFAULT); |
| 1327 | |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1328 | /* Full NUMA hinting faults to serialise migration in fault paths */ |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1329 | if ((flags & FOLL_NUMA) && pmd_protnone(*pmd)) |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1330 | goto out; |
| 1331 | |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1332 | page = pmd_page(*pmd); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1333 | VM_BUG_ON_PAGE(!PageHead(page), page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1334 | if (flags & FOLL_TOUCH) { |
| 1335 | pmd_t _pmd; |
| 1336 | /* |
| 1337 | * We should set the dirty bit only for FOLL_WRITE but |
| 1338 | * for now the dirty bit in the pmd is meaningless. |
| 1339 | * And if the dirty bit ever becomes meaningful and |
| 1340 | * we only set it with FOLL_WRITE, an atomic |
| 1341 | * set_bit will be required on the pmd to set the |
| 1342 | * young bit, instead of the current set_pmd_at. |
| 1343 | */ |
| 1344 | _pmd = pmd_mkyoung(pmd_mkdirty(*pmd)); |
Aneesh Kumar K.V | 8663890a | 2013-06-06 00:20:34 -0700 | [diff] [blame] | 1345 | if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, |
| 1346 | pmd, _pmd, 1)) |
| 1347 | update_mmu_cache_pmd(vma, addr, pmd); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1348 | } |
Eric B Munson | de60f5f | 2015-11-05 18:51:36 -0800 | [diff] [blame] | 1349 | if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 1350 | /* |
| 1351 | * We don't mlock() pte-mapped THPs. This way we can avoid |
| 1352 | * leaking mlocked pages into non-VM_LOCKED VMAs. |
| 1353 | * |
| 1354 | * In most cases the pmd is the only mapping of the page as we |
| 1355 | * break COW for the mlock() -- see gup_flags |= FOLL_WRITE for |
| 1356 | * writable private mappings in populate_vma_page_range(). |
| 1357 | * |
| 1358 | * The only scenario in which the page is shared here is when we are |
| 1359 | * mlocking a read-only mapping shared over fork(). We skip |
| 1360 | * mlocking such pages. |
| 1361 | */ |
| 1362 | if (compound_mapcount(page) == 1 && !PageDoubleMap(page) && |
| 1363 | page->mapping && trylock_page(page)) { |
David Rientjes | b676b29 | 2012-10-08 16:34:03 -0700 | [diff] [blame] | 1364 | lru_add_drain(); |
| 1365 | if (page->mapping) |
| 1366 | mlock_vma_page(page); |
| 1367 | unlock_page(page); |
| 1368 | } |
| 1369 | } |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1370 | page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT; |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1371 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1372 | if (flags & FOLL_GET) |
Kirill A. Shutemov | ddc58f2 | 2016-01-15 16:52:56 -0800 | [diff] [blame] | 1373 | get_page(page); |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1374 | |
| 1375 | out: |
| 1376 | return page; |
| 1377 | } |
| 1378 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1379 | /* NUMA hinting page fault entry point for trans huge pmds */ |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1380 | int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, |
| 1381 | unsigned long addr, pmd_t pmd, pmd_t *pmdp) |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1382 | { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1383 | spinlock_t *ptl; |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1384 | struct anon_vma *anon_vma = NULL; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1385 | struct page *page; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1386 | unsigned long haddr = addr & HPAGE_PMD_MASK; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1387 | int page_nid = -1, this_nid = numa_node_id(); |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 1388 | int target_nid, last_cpupid = -1; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1389 | bool page_locked; |
| 1390 | bool migrated = false; |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1391 | bool was_writable; |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1392 | int flags = 0; |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1393 | |
Mel Gorman | c0e7cad | 2015-02-12 14:58:41 -0800 | [diff] [blame] | 1394 | /* A PROT_NONE fault should not end up here */ |
| 1395 | BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); |
| 1396 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1397 | ptl = pmd_lock(mm, pmdp); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1398 | if (unlikely(!pmd_same(pmd, *pmdp))) |
| 1399 | goto out_unlock; |
| 1400 | |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1401 | /* |
| 1402 | * If there are potential migrations, wait for completion and retry |
| 1403 | * without disrupting NUMA hinting information. Do not relock and |
| 1404 | * check_same as the page may no longer be mapped. |
| 1405 | */ |
| 1406 | if (unlikely(pmd_trans_migrating(*pmdp))) { |
Mel Gorman | 5d83306 | 2015-02-12 14:58:16 -0800 | [diff] [blame] | 1407 | page = pmd_page(*pmdp); |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1408 | spin_unlock(ptl); |
Mel Gorman | 5d83306 | 2015-02-12 14:58:16 -0800 | [diff] [blame] | 1409 | wait_on_page_locked(page); |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1410 | goto out; |
| 1411 | } |
| 1412 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1413 | page = pmd_page(pmd); |
Mel Gorman | a1a4618 | 2013-10-07 11:28:50 +0100 | [diff] [blame] | 1414 | BUG_ON(is_huge_zero_page(page)); |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1415 | page_nid = page_to_nid(page); |
Peter Zijlstra | 9057289 | 2013-10-07 11:29:20 +0100 | [diff] [blame] | 1416 | last_cpupid = page_cpupid_last(page); |
Mel Gorman | 03c5a6e | 2012-11-02 14:52:48 +0000 | [diff] [blame] | 1417 | count_vm_numa_event(NUMA_HINT_FAULTS); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1418 | if (page_nid == this_nid) { |
Mel Gorman | 03c5a6e | 2012-11-02 14:52:48 +0000 | [diff] [blame] | 1419 | count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL); |
Rik van Riel | 04bb2f9 | 2013-10-07 11:29:36 +0100 | [diff] [blame] | 1420 | flags |= TNF_FAULT_LOCAL; |
| 1421 | } |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1422 | |
Mel Gorman | bea66fb | 2015-03-25 15:55:37 -0700 | [diff] [blame] | 1423 | /* See similar comment in do_numa_page for explanation */ |
| 1424 | if (!(vma->vm_flags & VM_WRITE)) |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1425 | flags |= TNF_NO_GROUP; |
| 1426 | |
| 1427 | /* |
Mel Gorman | ff9042b | 2013-10-07 11:28:43 +0100 | [diff] [blame] | 1428 | * Acquire the page lock to serialise THP migrations but avoid dropping |
| 1429 | * page_table_lock if at all possible |
| 1430 | */ |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1431 | page_locked = trylock_page(page); |
| 1432 | target_nid = mpol_misplaced(page, vma, haddr); |
| 1433 | if (target_nid == -1) { |
| 1434 | /* If the page was locked, there are no parallel migrations */ |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1435 | if (page_locked) |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1436 | goto clear_pmdnuma; |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1437 | } |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1438 | |
Mel Gorman | de466bd | 2013-12-18 17:08:42 -0800 | [diff] [blame] | 1439 | /* Migration could have started since the pmd_trans_migrating check */ |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1440 | if (!page_locked) { |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1441 | spin_unlock(ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1442 | wait_on_page_locked(page); |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1443 | page_nid = -1; |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1444 | goto out; |
| 1445 | } |
| 1446 | |
Mel Gorman | 2b4847e | 2013-12-18 17:08:32 -0800 | [diff] [blame] | 1447 | /* |
| 1448 | * Page is misplaced. Page lock serialises migrations. Acquire anon_vma |
| 1449 | * to serialise splits. |
| 1450 | */ |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1451 | get_page(page); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1452 | spin_unlock(ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1453 | anon_vma = page_lock_anon_vma_read(page); |
Peter Zijlstra | cbee9f8 | 2012-10-25 14:16:43 +0200 | [diff] [blame] | 1454 | |
Peter Zijlstra | c69307d | 2013-10-07 11:28:41 +0100 | [diff] [blame] | 1455 | /* Confirm the PMD did not change while page_table_lock was released */ |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1456 | spin_lock(ptl); |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1457 | if (unlikely(!pmd_same(pmd, *pmdp))) { |
| 1458 | unlock_page(page); |
| 1459 | put_page(page); |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1460 | page_nid = -1; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1461 | goto out_unlock; |
| 1462 | } |
Mel Gorman | ff9042b | 2013-10-07 11:28:43 +0100 | [diff] [blame] | 1463 | |
Mel Gorman | c3a489c | 2013-12-18 17:08:38 -0800 | [diff] [blame] | 1464 | /* Bail if we fail to protect against THP splits for any reason */ |
| 1465 | if (unlikely(!anon_vma)) { |
| 1466 | put_page(page); |
| 1467 | page_nid = -1; |
| 1468 | goto clear_pmdnuma; |
| 1469 | } |
| 1470 | |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1471 | /* |
| 1472 | * Migrate the THP to the requested node; this returns with the page unlocked |
Mel Gorman | 8a0516e | 2015-02-12 14:58:22 -0800 | [diff] [blame] | 1473 | * and access rights restored. |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1474 | */ |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1475 | spin_unlock(ptl); |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1476 | migrated = migrate_misplaced_transhuge_page(mm, vma, |
Hugh Dickins | 340ef39 | 2013-02-22 16:34:33 -0800 | [diff] [blame] | 1477 | pmdp, pmd, addr, page, target_nid); |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1478 | if (migrated) { |
| 1479 | flags |= TNF_MIGRATED; |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1480 | page_nid = target_nid; |
Mel Gorman | 074c238 | 2015-03-25 15:55:42 -0700 | [diff] [blame] | 1481 | } else |
| 1482 | flags |= TNF_MIGRATE_FAIL; |
Mel Gorman | b32967f | 2012-11-19 12:35:47 +0000 | [diff] [blame] | 1483 | |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1484 | goto out; |
Mel Gorman | 4daae3b | 2012-11-02 11:33:45 +0000 | [diff] [blame] | 1485 | clear_pmdnuma: |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1486 | BUG_ON(!PageLocked(page)); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1487 | was_writable = pmd_write(pmd); |
Mel Gorman | 4d94246 | 2015-02-12 14:58:28 -0800 | [diff] [blame] | 1488 | pmd = pmd_modify(pmd, vma->vm_page_prot); |
Mel Gorman | b7b0400 | 2015-03-25 15:55:45 -0700 | [diff] [blame] | 1489 | pmd = pmd_mkyoung(pmd); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1490 | if (was_writable) |
| 1491 | pmd = pmd_mkwrite(pmd); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1492 | set_pmd_at(mm, haddr, pmdp, pmd); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1493 | update_mmu_cache_pmd(vma, addr, pmdp); |
Mel Gorman | a54a407 | 2013-10-07 11:28:46 +0100 | [diff] [blame] | 1494 | unlock_page(page); |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1495 | out_unlock: |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 1496 | spin_unlock(ptl); |
Mel Gorman | b891663 | 2013-10-07 11:28:44 +0100 | [diff] [blame] | 1497 | |
| 1498 | out: |
| 1499 | if (anon_vma) |
| 1500 | page_unlock_anon_vma_read(anon_vma); |
| 1501 | |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1502 | if (page_nid != -1) |
Peter Zijlstra | 6688cc0 | 2013-10-07 11:29:24 +0100 | [diff] [blame] | 1503 | task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags); |
Mel Gorman | 8191acb | 2013-10-07 11:28:45 +0100 | [diff] [blame] | 1504 | |
Mel Gorman | d10e63f | 2012-10-25 14:16:31 +0200 | [diff] [blame] | 1505 | return 0; |
| 1506 | } |
| 1507 | |
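/*
 * MADV_FREE on a pmd-mapped THP: bail out for shared or huge-zero pmds,
 * split the page when only part of it is covered, otherwise clear the
 * dirty and young bits so reclaim can discard the page lazily. Returns 1
 * when the pmd was handled here, 0 otherwise.
 */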
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 1508 | int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
| 1509 | pmd_t *pmd, unsigned long addr, unsigned long next) |
| 1510 | |
| 1511 | { |
| 1512 | spinlock_t *ptl; |
| 1513 | pmd_t orig_pmd; |
| 1514 | struct page *page; |
| 1515 | struct mm_struct *mm = tlb->mm; |
| 1516 | int ret = 0; |
| 1517 | |
| 1518 | if (!pmd_trans_huge_lock(pmd, vma, &ptl)) |
| 1519 | goto out; |
| 1520 | |
| 1521 | orig_pmd = *pmd; |
| 1522 | if (is_huge_zero_pmd(orig_pmd)) { |
| 1523 | ret = 1; |
| 1524 | goto out; |
| 1525 | } |
| 1526 | |
| 1527 | page = pmd_page(orig_pmd); |
| 1528 | /* |
| 1529 | * If other processes are mapping this page, we can't discard |
| 1530 | * the page unless they all do MADV_FREE, so let's skip the page. |
| 1531 | */ |
| 1532 | if (page_mapcount(page) != 1) |
| 1533 | goto out; |
| 1534 | |
| 1535 | if (!trylock_page(page)) |
| 1536 | goto out; |
| 1537 | |
| 1538 | /* |
| 1539 | * If the user wants to discard only part of the THP, split it so |
| 1540 | * MADV_FREE will deactivate just those pages. |
| 1541 | */ |
| 1542 | if (next - addr != HPAGE_PMD_SIZE) { |
| 1543 | get_page(page); |
| 1544 | spin_unlock(ptl); |
| 1545 | if (split_huge_page(page)) { |
| 1546 | put_page(page); |
| 1547 | unlock_page(page); |
| 1548 | goto out_unlocked; |
| 1549 | } |
| 1550 | put_page(page); |
| 1551 | unlock_page(page); |
| 1552 | ret = 1; |
| 1553 | goto out_unlocked; |
| 1554 | } |
| 1555 | |
| 1556 | if (PageDirty(page)) |
| 1557 | ClearPageDirty(page); |
| 1558 | unlock_page(page); |
| 1559 | |
| 1560 | if (PageActive(page)) |
| 1561 | deactivate_page(page); |
| 1562 | |
| 1563 | if (pmd_young(orig_pmd) || pmd_dirty(orig_pmd)) { |
| 1564 | orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, |
| 1565 | tlb->fullmm); |
| 1566 | orig_pmd = pmd_mkold(orig_pmd); |
| 1567 | orig_pmd = pmd_mkclean(orig_pmd); |
| 1568 | |
| 1569 | set_pmd_at(mm, addr, pmd, orig_pmd); |
| 1570 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
| 1571 | } |
| 1572 | ret = 1; |
| 1573 | out: |
| 1574 | spin_unlock(ptl); |
| 1575 | out_unlocked: |
| 1576 | return ret; |
| 1577 | } |
| 1578 | |
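/*
 * Tear down one huge pmd during unmap. DAX pmds have no struct page to
 * free, the huge zero page only drops its reference, and a real THP has
 * its rmap removed and is queued on the mmu_gather for freeing.
 */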
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1579 | int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, |
Shaohua Li | f21760b | 2012-01-12 17:19:16 -0800 | [diff] [blame] | 1580 | pmd_t *pmd, unsigned long addr) |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1581 | { |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1582 | pmd_t orig_pmd; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1583 | spinlock_t *ptl; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1584 | |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1585 | if (!__pmd_trans_huge_lock(pmd, vma, &ptl)) |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1586 | return 0; |
| 1587 | /* |
| 1588 | * For architectures like ppc64 we look at the deposited pgtable |
| 1589 | * when calling pmdp_huge_get_and_clear. So do the |
| 1590 | * pgtable_trans_huge_withdraw after finishing pmdp-related |
| 1591 | * operations. |
| 1592 | */ |
| 1593 | orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd, |
| 1594 | tlb->fullmm); |
| 1595 | tlb_remove_pmd_tlb_entry(tlb, pmd, addr); |
| 1596 | if (vma_is_dax(vma)) { |
| 1597 | spin_unlock(ptl); |
| 1598 | if (is_huge_zero_pmd(orig_pmd)) |
Kirill A. Shutemov | 97ae174 | 2012-12-12 13:51:06 -0800 | [diff] [blame] | 1599 | put_huge_zero_page(); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1600 | } else if (is_huge_zero_pmd(orig_pmd)) { |
| 1601 | pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); |
| 1602 | atomic_long_dec(&tlb->mm->nr_ptes); |
| 1603 | spin_unlock(ptl); |
| 1604 | put_huge_zero_page(); |
| 1605 | } else { |
| 1606 | struct page *page = pmd_page(orig_pmd); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 1607 | page_remove_rmap(page, true); |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1608 | VM_BUG_ON_PAGE(page_mapcount(page) < 0, page); |
| 1609 | add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); |
| 1610 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 1611 | pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd)); |
| 1612 | atomic_long_dec(&tlb->mm->nr_ptes); |
| 1613 | spin_unlock(ptl); |
| 1614 | tlb_remove_page(tlb, page); |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1615 | } |
Kirill A. Shutemov | da14676 | 2015-09-08 14:59:31 -0700 | [diff] [blame] | 1616 | return 1; |
Andrea Arcangeli | 71e3aac | 2011-01-13 15:46:52 -0800 | [diff] [blame] | 1617 | } |
| 1618 | |
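/*
 * mremap() support: move a huge pmd from @old_addr to @new_addr without
 * splitting it. Returns false when alignment, the destination vma or the
 * pmd state rule the move out, so the caller can fall back to moving
 * individual ptes.
 */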
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1619 | bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma, |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1620 | unsigned long old_addr, |
| 1621 | unsigned long new_addr, unsigned long old_end, |
| 1622 | pmd_t *old_pmd, pmd_t *new_pmd) |
| 1623 | { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1624 | spinlock_t *old_ptl, *new_ptl; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1625 | pmd_t pmd; |
| 1626 | |
| 1627 | struct mm_struct *mm = vma->vm_mm; |
| 1628 | |
| 1629 | if ((old_addr & ~HPAGE_PMD_MASK) || |
| 1630 | (new_addr & ~HPAGE_PMD_MASK) || |
| 1631 | old_end - old_addr < HPAGE_PMD_SIZE || |
| 1632 | (new_vma->vm_flags & VM_NOHUGEPAGE)) |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1633 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1634 | |
| 1635 | /* |
| 1636 | * The destination pmd shouldn't be established; free_pgtables() |
| 1637 | * should have released it. |
| 1638 | */ |
| 1639 | if (WARN_ON(!pmd_none(*new_pmd))) { |
| 1640 | VM_BUG_ON(pmd_trans_huge(*new_pmd)); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1641 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1642 | } |
| 1643 | |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1644 | /* |
| 1645 | * We don't have to worry about the ordering of src and dst |
| 1646 | * ptlocks because exclusive mmap_sem prevents deadlock. |
| 1647 | */ |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1648 | if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1649 | new_ptl = pmd_lockptr(mm, new_pmd); |
| 1650 | if (new_ptl != old_ptl) |
| 1651 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1652 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1653 | VM_BUG_ON(!pmd_none(*new_pmd)); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1654 | |
Aneesh Kumar K.V | b3084f4 | 2014-01-13 11:34:24 +0530 | [diff] [blame] | 1655 | if (pmd_move_must_withdraw(new_ptl, old_ptl)) { |
| 1656 | pgtable_t pgtable; |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1657 | pgtable = pgtable_trans_huge_withdraw(mm, old_pmd); |
| 1658 | pgtable_trans_huge_deposit(mm, new_pmd, pgtable); |
Kirill A. Shutemov | 3592806 | 2013-12-12 17:12:33 -0800 | [diff] [blame] | 1659 | } |
Aneesh Kumar K.V | b3084f4 | 2014-01-13 11:34:24 +0530 | [diff] [blame] | 1660 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
| 1661 | if (new_ptl != old_ptl) |
| 1662 | spin_unlock(new_ptl); |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1663 | spin_unlock(old_ptl); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1664 | return true; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1665 | } |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1666 | return false; |
Andrea Arcangeli | 37a1c49 | 2011-10-31 17:08:30 -0700 | [diff] [blame] | 1667 | } |
| 1668 | |
Mel Gorman | f123d74 | 2013-10-07 11:28:49 +0100 | [diff] [blame] | 1669 | /* |
| 1670 | * Returns |
| 1671 | * - 0 if PMD could not be locked |
| 1672 | * - 1 if PMD was locked but protections unchanged and TLB flush unnecessary |
| 1673 | * - HPAGE_PMD_NR if protections changed and TLB flush necessary |
| 1674 | */ |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1675 | int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1676 | unsigned long addr, pgprot_t newprot, int prot_numa) |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1677 | { |
| 1678 | struct mm_struct *mm = vma->vm_mm; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1679 | spinlock_t *ptl; |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1680 | int ret = 0; |
| 1681 | |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1682 | if (__pmd_trans_huge_lock(pmd, vma, &ptl)) { |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1683 | pmd_t entry; |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1684 | bool preserve_write = prot_numa && pmd_write(*pmd); |
Mel Gorman | ba68bc01 | 2015-03-07 15:20:48 +0000 | [diff] [blame] | 1685 | ret = 1; |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1686 | |
| 1687 | /* |
| 1688 | * Avoid trapping faults against the zero page. The read-only |
| 1689 | * data is likely to be read-cached on the local CPU and |
| 1690 | * local/remote hits to the zero page are not interesting. |
| 1691 | */ |
| 1692 | if (prot_numa && is_huge_zero_pmd(*pmd)) { |
| 1693 | spin_unlock(ptl); |
Mel Gorman | ba68bc01 | 2015-03-07 15:20:48 +0000 | [diff] [blame] | 1694 | return ret; |
Mel Gorman | e944fd6 | 2015-02-12 14:58:35 -0800 | [diff] [blame] | 1695 | } |
| 1696 | |
Mel Gorman | 10c1045 | 2015-02-12 14:58:44 -0800 | [diff] [blame] | 1697 | if (!prot_numa || !pmd_protnone(*pmd)) { |
Aneesh Kumar K.V | 8809aa2 | 2015-06-24 16:57:44 -0700 | [diff] [blame] | 1698 | entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd); |
Mel Gorman | 10c1045 | 2015-02-12 14:58:44 -0800 | [diff] [blame] | 1699 | entry = pmd_modify(entry, newprot); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1700 | if (preserve_write) |
| 1701 | entry = pmd_mkwrite(entry); |
Mel Gorman | 10c1045 | 2015-02-12 14:58:44 -0800 | [diff] [blame] | 1702 | ret = HPAGE_PMD_NR; |
| 1703 | set_pmd_at(mm, addr, pmd, entry); |
Mel Gorman | b191f9b | 2015-03-25 15:55:40 -0700 | [diff] [blame] | 1704 | BUG_ON(!preserve_write && pmd_write(entry)); |
Mel Gorman | 10c1045 | 2015-02-12 14:58:44 -0800 | [diff] [blame] | 1705 | } |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1706 | spin_unlock(ptl); |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1707 | } |
Johannes Weiner | cd7548a | 2011-01-13 15:47:04 -0800 | [diff] [blame] | 1708 | |
| 1709 | return ret; |
| 1710 | } |
| 1711 | |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1712 | /* |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1713 | * Returns true if a given pmd maps a thp (or a devmap huge page), false otherwise. |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1714 | * |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1715 | * Note that if it returns true, this routine returns without unlocking the page |
| 1716 | * table lock, so callers must unlock it. |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1717 | */ |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1718 | bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1719 | spinlock_t **ptl) |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1720 | { |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1721 | *ptl = pmd_lock(vma->vm_mm, pmd); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 1722 | if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd))) |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1723 | return true; |
Kirill A. Shutemov | bf92915 | 2013-11-14 14:30:54 -0800 | [diff] [blame] | 1724 | spin_unlock(*ptl); |
Kirill A. Shutemov | 4b471e8 | 2016-01-15 16:53:39 -0800 | [diff] [blame] | 1725 | return false; |
Naoya Horiguchi | 025c5b2 | 2012-03-21 16:33:57 -0700 | [diff] [blame] | 1726 | } |
| 1727 | |
Vlastimil Babka | 9050d7e | 2014-03-03 15:38:27 -0800 | [diff] [blame] | 1728 | #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) |
Andrea Arcangeli | 78f11a2 | 2011-04-27 15:26:45 -0700 | [diff] [blame] | 1729 | |
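/*
 * madvise(MADV_HUGEPAGE / MADV_NOHUGEPAGE) handler: flip the vma flags
 * and, for MADV_HUGEPAGE, register the mm with khugepaged immediately
 * rather than waiting for the next page fault.
 */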
Andrea Arcangeli | 60ab324 | 2011-01-13 15:47:18 -0800 | [diff] [blame] | 1730 | int hugepage_madvise(struct vm_area_struct *vma, |
| 1731 | unsigned long *vm_flags, int advice) |
Andrea Arcangeli | 0af4e98 | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1732 | { |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1733 | switch (advice) { |
| 1734 | case MADV_HUGEPAGE: |
Alex Thorlton | 1e1836e | 2014-04-07 15:37:09 -0700 | [diff] [blame] | 1735 | #ifdef CONFIG_S390 |
| 1736 | /* |
| 1737 | * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390 |
| 1738 | * can't handle this properly after s390_enable_sie, so we simply |
| 1739 | * ignore the madvise to prevent qemu from causing a SIGSEGV. |
| 1740 | */ |
| 1741 | if (mm_has_pgste(vma->vm_mm)) |
| 1742 | return 0; |
| 1743 | #endif |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1744 | /* |
| 1745 | * Be somewhat over-protective like KSM for now! |
| 1746 | */ |
Jason J. Herne | 1a76361 | 2015-11-20 15:57:04 -0800 | [diff] [blame] | 1747 | if (*vm_flags & VM_NO_THP) |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1748 | return -EINVAL; |
| 1749 | *vm_flags &= ~VM_NOHUGEPAGE; |
| 1750 | *vm_flags |= VM_HUGEPAGE; |
Andrea Arcangeli | 60ab324 | 2011-01-13 15:47:18 -0800 | [diff] [blame] | 1751 | /* |
| 1752 | * If the vma becomes good for khugepaged to scan, |
| 1753 | * register it here without waiting for a page fault that |
| 1754 | * may not happen any time soon. |
| 1755 | */ |
David Rientjes | 6d50e60 | 2014-10-29 14:50:31 -0700 | [diff] [blame] | 1756 | if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags))) |
Andrea Arcangeli | 60ab324 | 2011-01-13 15:47:18 -0800 | [diff] [blame] | 1757 | return -ENOMEM; |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1758 | break; |
| 1759 | case MADV_NOHUGEPAGE: |
| 1760 | /* |
| 1761 | * Be somewhat over-protective like KSM for now! |
| 1762 | */ |
Jason J. Herne | 1a76361 | 2015-11-20 15:57:04 -0800 | [diff] [blame] | 1763 | if (*vm_flags & VM_NO_THP) |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1764 | return -EINVAL; |
| 1765 | *vm_flags &= ~VM_HUGEPAGE; |
| 1766 | *vm_flags |= VM_NOHUGEPAGE; |
Andrea Arcangeli | 60ab324 | 2011-01-13 15:47:18 -0800 | [diff] [blame] | 1767 | /* |
| 1768 | * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning |
| 1769 | * this vma, even if the mm stays registered with khugepaged because |
| 1770 | * it got registered before VM_NOHUGEPAGE was set. |
| 1771 | */ |
Andrea Arcangeli | a664b2d | 2011-01-13 15:47:17 -0800 | [diff] [blame] | 1772 | break; |
| 1773 | } |
Andrea Arcangeli | 0af4e98 | 2011-01-13 15:46:55 -0800 | [diff] [blame] | 1774 | |
| 1775 | return 0; |
| 1776 | } |
| 1777 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1778 | static int __init khugepaged_slab_init(void) |
| 1779 | { |
| 1780 | mm_slot_cache = kmem_cache_create("khugepaged_mm_slot", |
| 1781 | sizeof(struct mm_slot), |
| 1782 | __alignof__(struct mm_slot), 0, NULL); |
| 1783 | if (!mm_slot_cache) |
| 1784 | return -ENOMEM; |
| 1785 | |
| 1786 | return 0; |
| 1787 | } |
| 1788 | |
Kirill A. Shutemov | 65ebb64 | 2015-04-15 16:14:20 -0700 | [diff] [blame] | 1789 | static void __init khugepaged_slab_exit(void) |
| 1790 | { |
| 1791 | kmem_cache_destroy(mm_slot_cache); |
| 1792 | } |
| 1793 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1794 | static inline struct mm_slot *alloc_mm_slot(void) |
| 1795 | { |
| 1796 | if (!mm_slot_cache) /* initialization failed */ |
| 1797 | return NULL; |
| 1798 | return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); |
| 1799 | } |
| 1800 | |
| 1801 | static inline void free_mm_slot(struct mm_slot *mm_slot) |
| 1802 | { |
| 1803 | kmem_cache_free(mm_slot_cache, mm_slot); |
| 1804 | } |
| 1805 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1806 | static struct mm_slot *get_mm_slot(struct mm_struct *mm) |
| 1807 | { |
| 1808 | struct mm_slot *mm_slot; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1809 | |
Sasha Levin | b67bfe0 | 2013-02-27 17:06:00 -0800 | [diff] [blame] | 1810 | hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1811 | if (mm == mm_slot->mm) |
| 1812 | return mm_slot; |
Sasha Levin | 43b5fbb | 2013-02-22 16:32:27 -0800 | [diff] [blame] | 1813 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1814 | return NULL; |
| 1815 | } |
| 1816 | |
| 1817 | static void insert_to_mm_slots_hash(struct mm_struct *mm, |
| 1818 | struct mm_slot *mm_slot) |
| 1819 | { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1820 | mm_slot->mm = mm; |
Sasha Levin | 43b5fbb | 2013-02-22 16:32:27 -0800 | [diff] [blame] | 1821 | hash_add(mm_slots_hash, &mm_slot->hash, (long)mm); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1822 | } |
| 1823 | |
| 1824 | static inline int khugepaged_test_exit(struct mm_struct *mm) |
| 1825 | { |
| 1826 | return atomic_read(&mm->mm_users) == 0; |
| 1827 | } |
| 1828 | |
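/*
 * Register an mm with khugepaged: allocate and hash an mm_slot, queue it
 * behind the scanning cursor, take a reference on the mm and wake the
 * khugepaged thread if its scan list was empty.
 */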
| 1829 | int __khugepaged_enter(struct mm_struct *mm) |
| 1830 | { |
| 1831 | struct mm_slot *mm_slot; |
| 1832 | int wakeup; |
| 1833 | |
| 1834 | mm_slot = alloc_mm_slot(); |
| 1835 | if (!mm_slot) |
| 1836 | return -ENOMEM; |
| 1837 | |
| 1838 | /* __khugepaged_exit() must not run from under us */ |
Sasha Levin | 96dad67 | 2014-10-09 15:28:39 -0700 | [diff] [blame] | 1839 | VM_BUG_ON_MM(khugepaged_test_exit(mm), mm); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1840 | if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { |
| 1841 | free_mm_slot(mm_slot); |
| 1842 | return 0; |
| 1843 | } |
| 1844 | |
| 1845 | spin_lock(&khugepaged_mm_lock); |
| 1846 | insert_to_mm_slots_hash(mm, mm_slot); |
| 1847 | /* |
| 1848 | * Insert just behind the scanning cursor, to let the area settle |
| 1849 | * down a little. |
| 1850 | */ |
| 1851 | wakeup = list_empty(&khugepaged_scan.mm_head); |
| 1852 | list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head); |
| 1853 | spin_unlock(&khugepaged_mm_lock); |
| 1854 | |
| 1855 | atomic_inc(&mm->mm_count); |
| 1856 | if (wakeup) |
| 1857 | wake_up_interruptible(&khugepaged_wait); |
| 1858 | |
| 1859 | return 0; |
| 1860 | } |
| 1861 | |
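/*
 * Register the mm with khugepaged when a vma becomes eligible (for
 * example after a merge or a flag change): the vma must be anonymous,
 * already faulted in, and large enough for at least one aligned huge
 * page.
 */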
David Rientjes | 6d50e60 | 2014-10-29 14:50:31 -0700 | [diff] [blame] | 1862 | int khugepaged_enter_vma_merge(struct vm_area_struct *vma, |
| 1863 | unsigned long vm_flags) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1864 | { |
| 1865 | unsigned long hstart, hend; |
| 1866 | if (!vma->anon_vma) |
| 1867 | /* |
| 1868 | * Not yet faulted in so we will register later in the |
| 1869 | * page fault if needed. |
| 1870 | */ |
| 1871 | return 0; |
Andrea Arcangeli | 78f11a2 | 2011-04-27 15:26:45 -0700 | [diff] [blame] | 1872 | if (vma->vm_ops) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1873 | /* khugepaged not yet working on file or special mappings */ |
| 1874 | return 0; |
David Rientjes | 6d50e60 | 2014-10-29 14:50:31 -0700 | [diff] [blame] | 1875 | VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1876 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
| 1877 | hend = vma->vm_end & HPAGE_PMD_MASK; |
| 1878 | if (hstart < hend) |
David Rientjes | 6d50e60 | 2014-10-29 14:50:31 -0700 | [diff] [blame] | 1879 | return khugepaged_enter(vma, vm_flags); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1880 | return 0; |
| 1881 | } |
| 1882 | |
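| | /*
| |  * Unregister an exiting mm.  If khugepaged is not currently scanning
| |  * this mm, free the slot and drop the mm reference right away;
| |  * otherwise take and release mmap_sem for writing so we only return
| |  * once khugepaged is done with this mm's pagetables.
| |  */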
| 1883 | void __khugepaged_exit(struct mm_struct *mm) |
| 1884 | { |
| 1885 | struct mm_slot *mm_slot; |
| 1886 | int free = 0; |
| 1887 | |
| 1888 | spin_lock(&khugepaged_mm_lock); |
| 1889 | mm_slot = get_mm_slot(mm); |
| 1890 | if (mm_slot && khugepaged_scan.mm_slot != mm_slot) { |
Sasha Levin | 43b5fbb | 2013-02-22 16:32:27 -0800 | [diff] [blame] | 1891 | hash_del(&mm_slot->hash); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1892 | list_del(&mm_slot->mm_node); |
| 1893 | free = 1; |
| 1894 | } |
Chris Wright | d788e80 | 2011-07-25 17:12:14 -0700 | [diff] [blame] | 1895 | spin_unlock(&khugepaged_mm_lock); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1896 | |
| 1897 | if (free) { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1898 | clear_bit(MMF_VM_HUGEPAGE, &mm->flags); |
| 1899 | free_mm_slot(mm_slot); |
| 1900 | mmdrop(mm); |
| 1901 | } else if (mm_slot) { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1902 | /* |
| 1903 | * This is required to serialize against |
| 1904 | * khugepaged_test_exit() (which is guaranteed to run |
| 1905 | * under mmap sem read mode). Stop here (once we
| 1906 | * return, all pagetables will be destroyed) until
| 1907 | * khugepaged has finished working on the pagetables |
| 1908 | * under the mmap_sem. |
| 1909 | */ |
| 1910 | down_write(&mm->mmap_sem); |
| 1911 | up_write(&mm->mmap_sem); |
Chris Wright | d788e80 | 2011-07-25 17:12:14 -0700 | [diff] [blame] | 1912 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1913 | } |
| 1914 | |
| 1915 | static void release_pte_page(struct page *page) |
| 1916 | { |
| 1917 | /* 0 stands for page_is_file_cache(page) == false */ |
| 1918 | dec_zone_page_state(page, NR_ISOLATED_ANON + 0); |
| 1919 | unlock_page(page); |
| 1920 | putback_lru_page(page); |
| 1921 | } |
| 1922 | |
| 1923 | static void release_pte_pages(pte_t *pte, pte_t *_pte) |
| 1924 | { |
| 1925 | while (--_pte >= pte) { |
| 1926 | pte_t pteval = *_pte; |
Ebru Akagunduz | ca0984c | 2015-04-14 15:45:24 -0700 | [diff] [blame] | 1927 | if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval))) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1928 | release_pte_page(pte_page(pteval)); |
| 1929 | } |
| 1930 | } |
| 1931 | |
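| | /*
| |  * Check and isolate the small pages mapped by the pte range about to
| |  * be collapsed.  Every present page must be anonymous, unpinned and
| |  * lockable; on success all pages are locked and taken off the LRU and
| |  * 1 is returned, otherwise everything is rolled back and 0 is returned.
| |  */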
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1932 | static int __collapse_huge_page_isolate(struct vm_area_struct *vma, |
| 1933 | unsigned long address, |
| 1934 | pte_t *pte) |
| 1935 | { |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1936 | struct page *page = NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1937 | pte_t *_pte; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1938 | int none_or_zero = 0, result = 0; |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 1939 | bool referenced = false, writable = false; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1940 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1941 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; |
| 1942 | _pte++, address += PAGE_SIZE) { |
| 1943 | pte_t pteval = *_pte; |
Minchan Kim | 47aee4d | 2015-10-22 13:32:19 -0700 | [diff] [blame] | 1944 | if (pte_none(pteval) || (pte_present(pteval) && |
| 1945 | is_zero_pfn(pte_pfn(pteval)))) { |
Andrea Arcangeli | c1294d0 | 2015-09-04 15:46:27 -0700 | [diff] [blame] | 1946 | if (!userfaultfd_armed(vma) && |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1947 | ++none_or_zero <= khugepaged_max_ptes_none) { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1948 | continue; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1949 | } else { |
| 1950 | result = SCAN_EXCEED_NONE_PTE; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1951 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1952 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1953 | } |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1954 | if (!pte_present(pteval)) { |
| 1955 | result = SCAN_PTE_NON_PRESENT; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1956 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1957 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1958 | page = vm_normal_page(vma, address, pteval); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1959 | if (unlikely(!page)) { |
| 1960 | result = SCAN_PAGE_NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1961 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1962 | } |
Bob Liu | 344aa35 | 2012-12-11 16:00:34 -0800 | [diff] [blame] | 1963 | |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 1964 | VM_BUG_ON_PAGE(PageCompound(page), page); |
| 1965 | VM_BUG_ON_PAGE(!PageAnon(page), page); |
| 1966 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1967 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1968 | /* |
| 1969 | * We can do it before isolate_lru_page because the |
| 1970 | * page can't be freed from under us. NOTE: PG_lock |
| 1971 | * is needed to serialize against split_huge_page |
| 1972 | * when invoked from the VM. |
| 1973 | */ |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1974 | if (!trylock_page(page)) { |
| 1975 | result = SCAN_PAGE_LOCK; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 1976 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1977 | } |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 1978 | |
| 1979 | /* |
| 1980 | * cannot use mapcount: can't collapse if there's a gup pin. |
| 1981 | * The page must only be referenced by the scanned process |
| 1982 | * and page swap cache. |
| 1983 | */ |
| 1984 | if (page_count(page) != 1 + !!PageSwapCache(page)) { |
| 1985 | unlock_page(page); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1986 | result = SCAN_PAGE_COUNT; |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 1987 | goto out; |
| 1988 | } |
| 1989 | if (pte_write(pteval)) { |
| 1990 | writable = true; |
| 1991 | } else { |
| 1992 | if (PageSwapCache(page) && !reuse_swap_page(page)) { |
| 1993 | unlock_page(page); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 1994 | result = SCAN_SWAP_CACHE_PAGE; |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 1995 | goto out; |
| 1996 | } |
| 1997 | /* |
| 1998 | * Page is not in the swap cache. It can be collapsed |
| 1999 | * into a THP. |
| 2000 | */ |
| 2001 | } |
| 2002 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2003 | /* |
| 2004 | * Isolate the page to avoid collapsing a hugepage
| 2005 | * currently in use by the VM. |
| 2006 | */ |
| 2007 | if (isolate_lru_page(page)) { |
| 2008 | unlock_page(page); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2009 | result = SCAN_DEL_PAGE_LRU; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2010 | goto out; |
| 2011 | } |
| 2012 | /* 0 stands for page_is_file_cache(page) == false */ |
| 2013 | inc_zone_page_state(page, NR_ISOLATED_ANON + 0); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 2014 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 2015 | VM_BUG_ON_PAGE(PageLRU(page), page); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2016 | |
| 2017 | /* If there is no mapped young pte, don't collapse the page */
Vladimir Davydov | 33c3fc7 | 2015-09-09 15:35:45 -0700 | [diff] [blame] | 2018 | if (pte_young(pteval) || |
| 2019 | page_is_young(page) || PageReferenced(page) || |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2020 | mmu_notifier_test_young(vma->vm_mm, address)) |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 2021 | referenced = true; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2022 | } |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2023 | if (likely(writable)) { |
| 2024 | if (likely(referenced)) { |
| 2025 | result = SCAN_SUCCEED; |
| 2026 | trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero, |
| 2027 | referenced, writable, result); |
| 2028 | return 1; |
| 2029 | } |
| 2030 | } else { |
| 2031 | result = SCAN_PAGE_RO; |
| 2032 | } |
| 2033 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2034 | out: |
Bob Liu | 344aa35 | 2012-12-11 16:00:34 -0800 | [diff] [blame] | 2035 | release_pte_pages(pte, _pte); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2036 | trace_mm_collapse_huge_page_isolate(page_to_pfn(page), none_or_zero, |
| 2037 | referenced, writable, result); |
Bob Liu | 344aa35 | 2012-12-11 16:00:34 -0800 | [diff] [blame] | 2038 | return 0; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2039 | } |
| 2040 | |
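| | /*
| |  * Copy the contents of the isolated small pages into the new huge
| |  * page, clearing the old ptes and dropping the rmap references.  For
| |  * none/zero-pfn entries the destination is simply zeroed.
| |  */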
| 2041 | static void __collapse_huge_page_copy(pte_t *pte, struct page *page, |
| 2042 | struct vm_area_struct *vma, |
| 2043 | unsigned long address, |
| 2044 | spinlock_t *ptl) |
| 2045 | { |
| 2046 | pte_t *_pte; |
| 2047 | for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) { |
| 2048 | pte_t pteval = *_pte; |
| 2049 | struct page *src_page; |
| 2050 | |
Ebru Akagunduz | ca0984c | 2015-04-14 15:45:24 -0700 | [diff] [blame] | 2051 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2052 | clear_user_highpage(page, address); |
| 2053 | add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); |
Ebru Akagunduz | ca0984c | 2015-04-14 15:45:24 -0700 | [diff] [blame] | 2054 | if (is_zero_pfn(pte_pfn(pteval))) { |
| 2055 | /* |
| 2056 | * ptl mostly unnecessary. |
| 2057 | */ |
| 2058 | spin_lock(ptl); |
| 2059 | /* |
| 2060 | * paravirt calls inside pte_clear here are |
| 2061 | * superfluous. |
| 2062 | */ |
| 2063 | pte_clear(vma->vm_mm, address, _pte); |
| 2064 | spin_unlock(ptl); |
| 2065 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2066 | } else { |
| 2067 | src_page = pte_page(pteval); |
| 2068 | copy_user_highpage(page, src_page, address, vma); |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 2069 | VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2070 | release_pte_page(src_page); |
| 2071 | /* |
| 2072 | * ptl mostly unnecessary, but preempt has to |
| 2073 | * be disabled to update the per-cpu stats |
| 2074 | * inside page_remove_rmap(). |
| 2075 | */ |
| 2076 | spin_lock(ptl); |
| 2077 | /* |
| 2078 | * paravirt calls inside pte_clear here are |
| 2079 | * superfluous. |
| 2080 | */ |
| 2081 | pte_clear(vma->vm_mm, address, _pte); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2082 | page_remove_rmap(src_page, false); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2083 | spin_unlock(ptl); |
| 2084 | free_page_and_swap_cache(src_page); |
| 2085 | } |
| 2086 | |
| 2087 | address += PAGE_SIZE; |
| 2088 | page++; |
| 2089 | } |
| 2090 | } |
| 2091 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2092 | static void khugepaged_alloc_sleep(void) |
| 2093 | { |
Petr Mladek | bde43c6 | 2015-09-08 15:04:05 -0700 | [diff] [blame] | 2094 | DEFINE_WAIT(wait); |
| 2095 | |
| 2096 | add_wait_queue(&khugepaged_wait, &wait); |
| 2097 | freezable_schedule_timeout_interruptible( |
| 2098 | msecs_to_jiffies(khugepaged_alloc_sleep_millisecs)); |
| 2099 | remove_wait_queue(&khugepaged_wait, &wait); |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2100 | } |
| 2101 | |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2102 | static int khugepaged_node_load[MAX_NUMNODES]; |
| 2103 | |
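| | /*
| |  * With zone_reclaim_mode enabled, refuse to collapse a range whose
| |  * pages sit on nodes further apart than RECLAIM_DISTANCE, since the
| |  * resulting hugepage would force remote accesses.
| |  */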
David Rientjes | 14a4e21 | 2014-08-06 16:07:29 -0700 | [diff] [blame] | 2104 | static bool khugepaged_scan_abort(int nid) |
| 2105 | { |
| 2106 | int i; |
| 2107 | |
| 2108 | /* |
| 2109 | * If zone_reclaim_mode is disabled, then no extra effort is made to |
| 2110 | * allocate memory locally. |
| 2111 | */ |
| 2112 | if (!zone_reclaim_mode) |
| 2113 | return false; |
| 2114 | |
| 2115 | /* If there is a count for this node already, it must be acceptable */ |
| 2116 | if (khugepaged_node_load[nid]) |
| 2117 | return false; |
| 2118 | |
| 2119 | for (i = 0; i < MAX_NUMNODES; i++) { |
| 2120 | if (!khugepaged_node_load[i]) |
| 2121 | continue; |
| 2122 | if (node_distance(nid, i) > RECLAIM_DISTANCE) |
| 2123 | return true; |
| 2124 | } |
| 2125 | return false; |
| 2126 | } |
| 2127 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2128 | #ifdef CONFIG_NUMA |
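| | /*
| |  * Pick the node that owns most of the pages seen by the last scan;
| |  * rotate between nodes with the same hit count to spread allocations.
| |  */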
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2129 | static int khugepaged_find_target_node(void) |
| 2130 | { |
| 2131 | static int last_khugepaged_target_node = NUMA_NO_NODE; |
| 2132 | int nid, target_node = 0, max_value = 0; |
| 2133 | |
| 2134 | /* find first node with max normal pages hit */ |
| 2135 | for (nid = 0; nid < MAX_NUMNODES; nid++) |
| 2136 | if (khugepaged_node_load[nid] > max_value) { |
| 2137 | max_value = khugepaged_node_load[nid]; |
| 2138 | target_node = nid; |
| 2139 | } |
| 2140 | |
| 2141 | /* do some balancing if several nodes have the same hit record */
| 2142 | if (target_node <= last_khugepaged_target_node) |
| 2143 | for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES; |
| 2144 | nid++) |
| 2145 | if (max_value == khugepaged_node_load[nid]) { |
| 2146 | target_node = nid; |
| 2147 | break; |
| 2148 | } |
| 2149 | |
| 2150 | last_khugepaged_target_node = target_node; |
| 2151 | return target_node; |
| 2152 | } |
| 2153 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2154 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) |
| 2155 | { |
| 2156 | if (IS_ERR(*hpage)) { |
| 2157 | if (!*wait) |
| 2158 | return false; |
| 2159 | |
| 2160 | *wait = false; |
Xiao Guangrong | e3b4126 | 2012-10-08 16:32:57 -0700 | [diff] [blame] | 2161 | *hpage = NULL; |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2162 | khugepaged_alloc_sleep(); |
| 2163 | } else if (*hpage) { |
| 2164 | put_page(*hpage); |
| 2165 | *hpage = NULL; |
| 2166 | } |
| 2167 | |
| 2168 | return true; |
| 2169 | } |
| 2170 | |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 2171 | static struct page * |
| 2172 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, |
Aaron Tomlin | d6669d6 | 2015-11-06 16:28:52 -0800 | [diff] [blame] | 2173 | unsigned long address, int node) |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2174 | { |
Sasha Levin | 309381fea | 2014-01-23 15:52:54 -0800 | [diff] [blame] | 2175 | VM_BUG_ON_PAGE(*hpage, *hpage); |
Vlastimil Babka | 8b16456 | 2014-10-09 15:27:00 -0700 | [diff] [blame] | 2176 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2177 | /* |
Vlastimil Babka | 8b16456 | 2014-10-09 15:27:00 -0700 | [diff] [blame] | 2178 | * Before allocating the hugepage, release the mmap_sem read lock. |
| 2179 | * The allocation can take potentially a long time if it involves |
| 2180 | * sync compaction, and we do not need to hold the mmap_sem during |
| 2181 | * that. We will recheck the vma after taking it again in write mode. |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2182 | */ |
| 2183 | up_read(&mm->mmap_sem); |
Vlastimil Babka | 8b16456 | 2014-10-09 15:27:00 -0700 | [diff] [blame] | 2184 | |
Vlastimil Babka | 96db800 | 2015-09-08 15:03:50 -0700 | [diff] [blame] | 2185 | *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER); |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2186 | if (unlikely(!*hpage)) { |
| 2187 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); |
| 2188 | *hpage = ERR_PTR(-ENOMEM); |
| 2189 | return NULL; |
| 2190 | } |
| 2191 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2192 | prep_transhuge_page(*hpage); |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2193 | count_vm_event(THP_COLLAPSE_ALLOC); |
| 2194 | return *hpage; |
| 2195 | } |
| 2196 | #else |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2197 | static int khugepaged_find_target_node(void) |
| 2198 | { |
| 2199 | return 0; |
| 2200 | } |
| 2201 | |
Bob Liu | 10dc415 | 2013-11-12 15:07:35 -0800 | [diff] [blame] | 2202 | static inline struct page *alloc_hugepage(int defrag) |
| 2203 | { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 2204 | struct page *page; |
| 2205 | |
| 2206 | page = alloc_pages(alloc_hugepage_gfpmask(defrag, 0), HPAGE_PMD_ORDER); |
| 2207 | if (page) |
| 2208 | prep_transhuge_page(page); |
| 2209 | return page; |
Bob Liu | 10dc415 | 2013-11-12 15:07:35 -0800 | [diff] [blame] | 2210 | } |
| 2211 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2212 | static struct page *khugepaged_alloc_hugepage(bool *wait) |
| 2213 | { |
| 2214 | struct page *hpage; |
| 2215 | |
| 2216 | do { |
| 2217 | hpage = alloc_hugepage(khugepaged_defrag()); |
| 2218 | if (!hpage) { |
| 2219 | count_vm_event(THP_COLLAPSE_ALLOC_FAILED); |
| 2220 | if (!*wait) |
| 2221 | return NULL; |
| 2222 | |
| 2223 | *wait = false; |
| 2224 | khugepaged_alloc_sleep(); |
| 2225 | } else |
| 2226 | count_vm_event(THP_COLLAPSE_ALLOC); |
| 2227 | } while (unlikely(!hpage) && likely(khugepaged_enabled())); |
| 2228 | |
| 2229 | return hpage; |
| 2230 | } |
| 2231 | |
| 2232 | static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) |
| 2233 | { |
| 2234 | if (!*hpage) |
| 2235 | *hpage = khugepaged_alloc_hugepage(wait); |
| 2236 | |
| 2237 | if (unlikely(!*hpage)) |
| 2238 | return false; |
| 2239 | |
| 2240 | return true; |
| 2241 | } |
| 2242 | |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 2243 | static struct page * |
| 2244 | khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm, |
Aaron Tomlin | d6669d6 | 2015-11-06 16:28:52 -0800 | [diff] [blame] | 2245 | unsigned long address, int node) |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2246 | { |
| 2247 | up_read(&mm->mmap_sem); |
| 2248 | VM_BUG_ON(!*hpage); |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 2249 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2250 | return *hpage; |
| 2251 | } |
| 2252 | #endif |
| 2253 | |
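| | /*
| |  * Return true if this vma is eligible for collapse: hugepages allowed
| |  * by its flags, anonymous memory without vm_ops, and not a temporary
| |  * stack set up during execve.
| |  */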
Bob Liu | fa475e5 | 2012-12-11 16:00:39 -0800 | [diff] [blame] | 2254 | static bool hugepage_vma_check(struct vm_area_struct *vma) |
| 2255 | { |
| 2256 | if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || |
| 2257 | (vma->vm_flags & VM_NOHUGEPAGE)) |
| 2258 | return false; |
Bob Liu | fa475e5 | 2012-12-11 16:00:39 -0800 | [diff] [blame] | 2259 | if (!vma->anon_vma || vma->vm_ops) |
| 2260 | return false; |
| 2261 | if (is_vma_temporary_stack(vma)) |
| 2262 | return false; |
Sasha Levin | 81d1b09 | 2014-10-09 15:28:10 -0700 | [diff] [blame] | 2263 | VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); |
Bob Liu | fa475e5 | 2012-12-11 16:00:39 -0800 | [diff] [blame] | 2264 | return true; |
| 2265 | } |
| 2266 | |
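| | /*
| |  * Collapse one aligned PMD range into a freshly allocated huge page.
| |  * The mmap_sem read lock is dropped for the allocation, then mmap_sem
| |  * is taken for writing to re-validate the vma, isolate the ptes, copy
| |  * the data and install the huge pmd.  Returns with mmap_sem released.
| |  */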
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2267 | static void collapse_huge_page(struct mm_struct *mm, |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2268 | unsigned long address, |
| 2269 | struct page **hpage, |
| 2270 | struct vm_area_struct *vma, |
| 2271 | int node) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2272 | { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2273 | pmd_t *pmd, _pmd; |
| 2274 | pte_t *pte; |
| 2275 | pgtable_t pgtable; |
| 2276 | struct page *new_page; |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2277 | spinlock_t *pmd_ptl, *pte_ptl; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2278 | int isolated = 0, result = 0;
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2279 | unsigned long hstart, hend; |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2280 | struct mem_cgroup *memcg; |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 2281 | unsigned long mmun_start; /* For mmu_notifiers */ |
| 2282 | unsigned long mmun_end; /* For mmu_notifiers */ |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 2283 | gfp_t gfp; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2284 | |
| 2285 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
Andrea Arcangeli | 692e0b3 | 2011-05-24 17:12:14 -0700 | [diff] [blame] | 2286 | |
Michal Hocko | 3b363692 | 2015-04-15 16:13:29 -0700 | [diff] [blame] | 2287 | /* Only allocate from the target node */ |
| 2288 | gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) | |
| 2289 | __GFP_THISNODE; |
| 2290 | |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2291 | /* release the mmap_sem read lock. */ |
Aaron Tomlin | d6669d6 | 2015-11-06 16:28:52 -0800 | [diff] [blame] | 2292 | new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2293 | if (!new_page) { |
| 2294 | result = SCAN_ALLOC_HUGE_PAGE_FAIL; |
| 2295 | goto out_nolock; |
| 2296 | } |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2297 | |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2298 | if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) { |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2299 | result = SCAN_CGROUP_CHARGE_FAIL; |
| 2300 | goto out_nolock; |
| 2301 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2302 | |
| 2303 | /* |
| 2304 | * Prevent all access to pagetables with the exception of |
| 2305 | * gup_fast later handled by the ptep_clear_flush and the VM
| 2306 | * handled by the anon_vma lock + PG_lock. |
| 2307 | */ |
| 2308 | down_write(&mm->mmap_sem); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2309 | if (unlikely(khugepaged_test_exit(mm))) { |
| 2310 | result = SCAN_ANY_PROCESS; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2311 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2312 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2313 | |
| 2314 | vma = find_vma(mm, address); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2315 | if (!vma) { |
| 2316 | result = SCAN_VMA_NULL; |
Libin | a8f531eb | 2013-09-11 14:20:38 -0700 | [diff] [blame] | 2317 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2318 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2319 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
| 2320 | hend = vma->vm_end & HPAGE_PMD_MASK; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2321 | if (address < hstart || address + HPAGE_PMD_SIZE > hend) { |
| 2322 | result = SCAN_ADDRESS_RANGE; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2323 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2324 | } |
| 2325 | if (!hugepage_vma_check(vma)) { |
| 2326 | result = SCAN_VMA_CHECK; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2327 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2328 | } |
Bob Liu | 6219049 | 2012-12-11 16:00:37 -0800 | [diff] [blame] | 2329 | pmd = mm_find_pmd(mm, address); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2330 | if (!pmd) { |
| 2331 | result = SCAN_PMD_NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2332 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2333 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2334 | |
Ingo Molnar | 4fc3f1d | 2012-12-02 19:56:50 +0000 | [diff] [blame] | 2335 | anon_vma_lock_write(vma->anon_vma); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2336 | |
| 2337 | pte = pte_offset_map(pmd, address); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2338 | pte_ptl = pte_lockptr(mm, pmd); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2339 | |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 2340 | mmun_start = address; |
| 2341 | mmun_end = address + HPAGE_PMD_SIZE; |
| 2342 | mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2343 | pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2344 | /* |
| 2345 | * After this gup_fast can't run anymore. This also removes |
| 2346 | * any huge TLB entry from the CPU so we won't allow |
| 2347 | * huge and small TLB entries for the same virtual address |
| 2348 | * to avoid the risk of CPU bugs in that area. |
| 2349 | */ |
Aneesh Kumar K.V | 15a25b2 | 2015-06-24 16:57:39 -0700 | [diff] [blame] | 2350 | _pmd = pmdp_collapse_flush(vma, address, pmd); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2351 | spin_unlock(pmd_ptl); |
Sagi Grimberg | 2ec74c3 | 2012-10-08 16:33:33 -0700 | [diff] [blame] | 2352 | mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2353 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2354 | spin_lock(pte_ptl); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2355 | isolated = __collapse_huge_page_isolate(vma, address, pte); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2356 | spin_unlock(pte_ptl); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2357 | |
| 2358 | if (unlikely(!isolated)) { |
Johannes Weiner | 453c719 | 2011-01-20 14:44:18 -0800 | [diff] [blame] | 2359 | pte_unmap(pte); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2360 | spin_lock(pmd_ptl); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2361 | BUG_ON(!pmd_none(*pmd)); |
Aneesh Kumar K.V | 7c34251 | 2013-05-24 15:55:21 -0700 | [diff] [blame] | 2362 | /* |
| 2363 | * We can only use set_pmd_at when establishing |
| 2364 | * hugepmds and never for establishing regular pmds that |
| 2365 | * point to regular pagetables. Use pmd_populate for that
| 2366 | */ |
| 2367 | pmd_populate(mm, pmd, pmd_pgtable(_pmd)); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2368 | spin_unlock(pmd_ptl); |
Konstantin Khlebnikov | 08b5270 | 2013-02-22 16:34:40 -0800 | [diff] [blame] | 2369 | anon_vma_unlock_write(vma->anon_vma); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2370 | result = SCAN_FAIL; |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2371 | goto out; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2372 | } |
| 2373 | |
| 2374 | /* |
| 2375 | * All pages are isolated and locked so anon_vma rmap |
| 2376 | * can't run anymore. |
| 2377 | */ |
Konstantin Khlebnikov | 08b5270 | 2013-02-22 16:34:40 -0800 | [diff] [blame] | 2378 | anon_vma_unlock_write(vma->anon_vma); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2379 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2380 | __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); |
Johannes Weiner | 453c719 | 2011-01-20 14:44:18 -0800 | [diff] [blame] | 2381 | pte_unmap(pte); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2382 | __SetPageUptodate(new_page); |
| 2383 | pgtable = pmd_pgtable(_pmd); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2384 | |
Kirill A. Shutemov | 3122359 | 2013-09-12 15:14:01 -0700 | [diff] [blame] | 2385 | _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); |
| 2386 | _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2387 | |
| 2388 | /* |
| 2389 | * spin_lock() below is not the equivalent of smp_wmb(), so |
| 2390 | * this is needed to avoid the copy_huge_page writes becoming
| 2391 | * visible after the set_pmd_at() write. |
| 2392 | */ |
| 2393 | smp_wmb(); |
| 2394 | |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2395 | spin_lock(pmd_ptl); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2396 | BUG_ON(!pmd_none(*pmd)); |
Kirill A. Shutemov | d281ee6 | 2016-01-15 16:52:16 -0800 | [diff] [blame] | 2397 | page_add_new_anon_rmap(new_page, vma, address, true); |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2398 | mem_cgroup_commit_charge(new_page, memcg, false, true); |
Johannes Weiner | 00501b5 | 2014-08-08 14:19:20 -0700 | [diff] [blame] | 2399 | lru_cache_add_active_or_unevictable(new_page, vma); |
Aneesh Kumar K.V | fce144b | 2013-06-05 17:14:06 -0700 | [diff] [blame] | 2400 | pgtable_trans_huge_deposit(mm, pmd, pgtable); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2401 | set_pmd_at(mm, address, pmd, _pmd); |
David Miller | b113da6 | 2012-10-08 16:34:25 -0700 | [diff] [blame] | 2402 | update_mmu_cache_pmd(vma, address, pmd); |
Kirill A. Shutemov | c4088eb | 2013-11-14 14:31:04 -0800 | [diff] [blame] | 2403 | spin_unlock(pmd_ptl); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2404 | |
| 2405 | *hpage = NULL; |
Xiao Guangrong | 420256ef | 2012-10-08 16:29:49 -0700 | [diff] [blame] | 2406 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2407 | khugepaged_pages_collapsed++; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2408 | result = SCAN_SUCCEED; |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2409 | out_up_write: |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2410 | up_write(&mm->mmap_sem); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2411 | trace_mm_collapse_huge_page(mm, isolated, result); |
Andrea Arcangeli | 0bbbc0b | 2011-01-13 15:47:05 -0800 | [diff] [blame] | 2412 | return; |
| 2413 | |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2414 | out_nolock: |
| 2415 | trace_mm_collapse_huge_page(mm, isolated, result); |
| 2416 | return; |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2417 | out: |
Kirill A. Shutemov | f627c2f | 2016-01-15 16:52:20 -0800 | [diff] [blame] | 2418 | mem_cgroup_cancel_charge(new_page, memcg, true); |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2419 | goto out_up_write; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2420 | } |
| 2421 | |
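| | /*
| |  * Scan one PMD range and decide whether it is worth collapsing.  On a
| |  * positive decision collapse_huge_page() is called, which releases the
| |  * mmap_sem; the nonzero return value tells the caller that happened.
| |  */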
| 2422 | static int khugepaged_scan_pmd(struct mm_struct *mm, |
| 2423 | struct vm_area_struct *vma, |
| 2424 | unsigned long address, |
| 2425 | struct page **hpage) |
| 2426 | { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2427 | pmd_t *pmd; |
| 2428 | pte_t *pte, *_pte; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2429 | int ret = 0, none_or_zero = 0, result = 0; |
| 2430 | struct page *page = NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2431 | unsigned long _address; |
| 2432 | spinlock_t *ptl; |
David Rientjes | 00ef2d2 | 2013-02-22 16:35:36 -0800 | [diff] [blame] | 2433 | int node = NUMA_NO_NODE; |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 2434 | bool writable = false, referenced = false; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2435 | |
| 2436 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
| 2437 | |
Bob Liu | 6219049 | 2012-12-11 16:00:37 -0800 | [diff] [blame] | 2438 | pmd = mm_find_pmd(mm, address); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2439 | if (!pmd) { |
| 2440 | result = SCAN_PMD_NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2441 | goto out; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2442 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2443 | |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2444 | memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2445 | pte = pte_offset_map_lock(mm, pmd, address, &ptl); |
| 2446 | for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR; |
| 2447 | _pte++, _address += PAGE_SIZE) { |
| 2448 | pte_t pteval = *_pte; |
Ebru Akagunduz | ca0984c | 2015-04-14 15:45:24 -0700 | [diff] [blame] | 2449 | if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { |
Andrea Arcangeli | c1294d0 | 2015-09-04 15:46:27 -0700 | [diff] [blame] | 2450 | if (!userfaultfd_armed(vma) && |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2451 | ++none_or_zero <= khugepaged_max_ptes_none) { |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2452 | continue; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2453 | } else { |
| 2454 | result = SCAN_EXCEED_NONE_PTE; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2455 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2456 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2457 | } |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2458 | if (!pte_present(pteval)) { |
| 2459 | result = SCAN_PTE_NON_PRESENT; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2460 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2461 | } |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 2462 | if (pte_write(pteval)) |
| 2463 | writable = true; |
| 2464 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2465 | page = vm_normal_page(vma, _address, pteval); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2466 | if (unlikely(!page)) { |
| 2467 | result = SCAN_PAGE_NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2468 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2469 | } |
Kirill A. Shutemov | b1caa95 | 2016-01-15 16:52:39 -0800 | [diff] [blame] | 2470 | |
| 2471 | /* TODO: teach khugepaged to collapse THP mapped with pte */ |
| 2472 | if (PageCompound(page)) { |
| 2473 | result = SCAN_PAGE_COMPOUND; |
| 2474 | goto out_unmap; |
| 2475 | } |
| 2476 | |
Andi Kleen | 5c4b4be | 2011-03-04 17:36:32 -0800 | [diff] [blame] | 2477 | /* |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2478 | * Record which node the original page is from and save this |
| 2479 | * information to khugepaged_node_load[]. |
| 2480 | * Khugepaged will allocate the hugepage from the node that has the max
| 2481 | * hit record. |
Andi Kleen | 5c4b4be | 2011-03-04 17:36:32 -0800 | [diff] [blame] | 2482 | */ |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2483 | node = page_to_nid(page); |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2484 | if (khugepaged_scan_abort(node)) { |
| 2485 | result = SCAN_SCAN_ABORT; |
David Rientjes | 14a4e21 | 2014-08-06 16:07:29 -0700 | [diff] [blame] | 2486 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2487 | } |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2488 | khugepaged_node_load[node]++; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2489 | if (!PageLRU(page)) { |
| 2490 | result = SCAN_PAGE_LRU;
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2491 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2492 | } |
| 2493 | if (PageLocked(page)) { |
| 2494 | result = SCAN_PAGE_LOCK; |
| 2495 | goto out_unmap; |
| 2496 | } |
| 2497 | if (!PageAnon(page)) { |
| 2498 | result = SCAN_PAGE_ANON; |
| 2499 | goto out_unmap; |
| 2500 | } |
| 2501 | |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 2502 | /* |
| 2503 | * cannot use mapcount: can't collapse if there's a gup pin. |
| 2504 | * The page must only be referenced by the scanned process |
| 2505 | * and page swap cache. |
| 2506 | */ |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2507 | if (page_count(page) != 1 + !!PageSwapCache(page)) { |
| 2508 | result = SCAN_PAGE_COUNT; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2509 | goto out_unmap; |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2510 | } |
Vladimir Davydov | 33c3fc7 | 2015-09-09 15:35:45 -0700 | [diff] [blame] | 2511 | if (pte_young(pteval) || |
| 2512 | page_is_young(page) || PageReferenced(page) || |
Andrea Arcangeli | 8ee5382 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2513 | mmu_notifier_test_young(vma->vm_mm, address)) |
Ebru Akagunduz | 1035921 | 2015-02-11 15:28:28 -0800 | [diff] [blame] | 2514 | referenced = true; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2515 | } |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2516 | if (writable) { |
| 2517 | if (referenced) { |
| 2518 | result = SCAN_SUCCEED; |
| 2519 | ret = 1; |
| 2520 | } else { |
| 2521 | result = SCAN_NO_REFERENCED_PAGE; |
| 2522 | } |
| 2523 | } else { |
| 2524 | result = SCAN_PAGE_RO; |
| 2525 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2526 | out_unmap: |
| 2527 | pte_unmap_unlock(pte, ptl); |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2528 | if (ret) { |
| 2529 | node = khugepaged_find_target_node(); |
Andrea Arcangeli | ce83d21 | 2011-01-13 15:47:06 -0800 | [diff] [blame] | 2530 | /* collapse_huge_page will return with the mmap_sem released */ |
Andi Kleen | 5c4b4be | 2011-03-04 17:36:32 -0800 | [diff] [blame] | 2531 | collapse_huge_page(mm, address, hpage, vma, node); |
Bob Liu | 9f1b868 | 2013-11-12 15:07:37 -0800 | [diff] [blame] | 2532 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2533 | out: |
Ebru Akagunduz | 7d2eba05 | 2016-01-14 15:22:19 -0800 | [diff] [blame] | 2534 | trace_mm_khugepaged_scan_pmd(mm, page_to_pfn(page), writable, referenced, |
| 2535 | none_or_zero, result); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2536 | return ret; |
| 2537 | } |
| 2538 | |
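| | /*
| |  * Free the mm_slot and drop the mm reference if the mm has already
| |  * exited.  Must be called with khugepaged_mm_lock held.
| |  */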
| 2539 | static void collect_mm_slot(struct mm_slot *mm_slot) |
| 2540 | { |
| 2541 | struct mm_struct *mm = mm_slot->mm; |
| 2542 | |
Hugh Dickins | b9980cd | 2012-02-08 17:13:40 -0800 | [diff] [blame] | 2543 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2544 | |
| 2545 | if (khugepaged_test_exit(mm)) { |
| 2546 | /* free mm_slot */ |
Sasha Levin | 43b5fbb | 2013-02-22 16:32:27 -0800 | [diff] [blame] | 2547 | hash_del(&mm_slot->hash); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2548 | list_del(&mm_slot->mm_node); |
| 2549 | |
| 2550 | /* |
| 2551 | * Not strictly needed because the mm exited already. |
| 2552 | * |
| 2553 | * clear_bit(MMF_VM_HUGEPAGE, &mm->flags); |
| 2554 | */ |
| 2555 | |
| 2556 | /* khugepaged_mm_lock actually not necessary for the below */ |
| 2557 | free_mm_slot(mm_slot); |
| 2558 | mmdrop(mm); |
| 2559 | } |
| 2560 | } |
| 2561 | |
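| | /*
| |  * Walk the vmas of the mm at the scan cursor, scanning up to @pages
| |  * small pages and collapsing where possible.  Drops and re-takes
| |  * khugepaged_mm_lock, and advances the cursor to the next mm once the
| |  * current one is fully scanned or is exiting.
| |  */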
| 2562 | static unsigned int khugepaged_scan_mm_slot(unsigned int pages, |
| 2563 | struct page **hpage) |
H Hartley Sweeten | 2f1da64 | 2011-10-31 17:09:25 -0700 | [diff] [blame] | 2564 | __releases(&khugepaged_mm_lock) |
| 2565 | __acquires(&khugepaged_mm_lock) |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2566 | { |
| 2567 | struct mm_slot *mm_slot; |
| 2568 | struct mm_struct *mm; |
| 2569 | struct vm_area_struct *vma; |
| 2570 | int progress = 0; |
| 2571 | |
| 2572 | VM_BUG_ON(!pages); |
Hugh Dickins | b9980cd | 2012-02-08 17:13:40 -0800 | [diff] [blame] | 2573 | VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock)); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2574 | |
| 2575 | if (khugepaged_scan.mm_slot) |
| 2576 | mm_slot = khugepaged_scan.mm_slot; |
| 2577 | else { |
| 2578 | mm_slot = list_entry(khugepaged_scan.mm_head.next, |
| 2579 | struct mm_slot, mm_node); |
| 2580 | khugepaged_scan.address = 0; |
| 2581 | khugepaged_scan.mm_slot = mm_slot; |
| 2582 | } |
| 2583 | spin_unlock(&khugepaged_mm_lock); |
| 2584 | |
| 2585 | mm = mm_slot->mm; |
| 2586 | down_read(&mm->mmap_sem); |
| 2587 | if (unlikely(khugepaged_test_exit(mm))) |
| 2588 | vma = NULL; |
| 2589 | else |
| 2590 | vma = find_vma(mm, khugepaged_scan.address); |
| 2591 | |
| 2592 | progress++; |
| 2593 | for (; vma; vma = vma->vm_next) { |
| 2594 | unsigned long hstart, hend; |
| 2595 | |
| 2596 | cond_resched(); |
| 2597 | if (unlikely(khugepaged_test_exit(mm))) { |
| 2598 | progress++; |
| 2599 | break; |
| 2600 | } |
Bob Liu | fa475e5 | 2012-12-11 16:00:39 -0800 | [diff] [blame] | 2601 | if (!hugepage_vma_check(vma)) { |
| 2602 | skip: |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2603 | progress++; |
| 2604 | continue; |
| 2605 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2606 | hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; |
| 2607 | hend = vma->vm_end & HPAGE_PMD_MASK; |
Andrea Arcangeli | a7d6e4e | 2011-02-15 19:02:45 +0100 | [diff] [blame] | 2608 | if (hstart >= hend) |
| 2609 | goto skip; |
| 2610 | if (khugepaged_scan.address > hend) |
| 2611 | goto skip; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2612 | if (khugepaged_scan.address < hstart) |
| 2613 | khugepaged_scan.address = hstart; |
Andrea Arcangeli | a7d6e4e | 2011-02-15 19:02:45 +0100 | [diff] [blame] | 2614 | VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2615 | |
| 2616 | while (khugepaged_scan.address < hend) { |
| 2617 | int ret; |
| 2618 | cond_resched(); |
| 2619 | if (unlikely(khugepaged_test_exit(mm))) |
| 2620 | goto breakouterloop; |
| 2621 | |
| 2622 | VM_BUG_ON(khugepaged_scan.address < hstart || |
| 2623 | khugepaged_scan.address + HPAGE_PMD_SIZE > |
| 2624 | hend); |
| 2625 | ret = khugepaged_scan_pmd(mm, vma, |
| 2626 | khugepaged_scan.address, |
| 2627 | hpage); |
| 2628 | /* move to next address */ |
| 2629 | khugepaged_scan.address += HPAGE_PMD_SIZE; |
| 2630 | progress += HPAGE_PMD_NR; |
| 2631 | if (ret) |
| 2632 | /* we released mmap_sem so break loop */ |
| 2633 | goto breakouterloop_mmap_sem; |
| 2634 | if (progress >= pages) |
| 2635 | goto breakouterloop; |
| 2636 | } |
| 2637 | } |
| 2638 | breakouterloop: |
| 2639 | up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */ |
| 2640 | breakouterloop_mmap_sem: |
| 2641 | |
| 2642 | spin_lock(&khugepaged_mm_lock); |
Andrea Arcangeli | a7d6e4e | 2011-02-15 19:02:45 +0100 | [diff] [blame] | 2643 | VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2644 | /* |
| 2645 | * Release the current mm_slot if this mm is about to die, or |
| 2646 | * if we scanned all vmas of this mm. |
| 2647 | */ |
| 2648 | if (khugepaged_test_exit(mm) || !vma) { |
| 2649 | /* |
| 2650 | * Make sure that if mm_users is reaching zero while |
| 2651 | * khugepaged runs here, khugepaged_exit will find |
| 2652 | * mm_slot not pointing to the exiting mm. |
| 2653 | */ |
| 2654 | if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) { |
| 2655 | khugepaged_scan.mm_slot = list_entry( |
| 2656 | mm_slot->mm_node.next, |
| 2657 | struct mm_slot, mm_node); |
| 2658 | khugepaged_scan.address = 0; |
| 2659 | } else { |
| 2660 | khugepaged_scan.mm_slot = NULL; |
| 2661 | khugepaged_full_scans++; |
| 2662 | } |
| 2663 | |
| 2664 | collect_mm_slot(mm_slot); |
| 2665 | } |
| 2666 | |
| 2667 | return progress; |
| 2668 | } |
| 2669 | |
| 2670 | static int khugepaged_has_work(void) |
| 2671 | { |
| 2672 | return !list_empty(&khugepaged_scan.mm_head) && |
| 2673 | khugepaged_enabled(); |
| 2674 | } |
| 2675 | |
| 2676 | static int khugepaged_wait_event(void) |
| 2677 | { |
| 2678 | return !list_empty(&khugepaged_scan.mm_head) || |
Xiao Guangrong | 2017c0b | 2012-10-08 16:29:44 -0700 | [diff] [blame] | 2679 | kthread_should_stop(); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2680 | } |
| 2681 | |
Xiao Guangrong | d516904 | 2012-10-08 16:29:48 -0700 | [diff] [blame] | 2682 | static void khugepaged_do_scan(void) |
| 2683 | { |
| 2684 | struct page *hpage = NULL; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2685 | unsigned int progress = 0, pass_through_head = 0; |
| 2686 | unsigned int pages = khugepaged_pages_to_scan; |
Xiao Guangrong | d516904 | 2012-10-08 16:29:48 -0700 | [diff] [blame] | 2687 | bool wait = true; |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2688 | |
| 2689 | barrier(); /* write khugepaged_pages_to_scan to local stack */ |
| 2690 | |
| 2691 | while (progress < pages) { |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2692 | if (!khugepaged_prealloc_page(&hpage, &wait)) |
Andrea Arcangeli | 0bbbc0b | 2011-01-13 15:47:05 -0800 | [diff] [blame] | 2693 | break; |
Xiao Guangrong | 26234f3 | 2012-10-08 16:29:51 -0700 | [diff] [blame] | 2694 | |
Xiao Guangrong | 420256ef | 2012-10-08 16:29:49 -0700 | [diff] [blame] | 2695 | cond_resched(); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2696 | |
Jiri Kosina | cd09241 | 2015-06-24 16:56:07 -0700 | [diff] [blame] | 2697 | if (unlikely(kthread_should_stop() || try_to_freeze())) |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2698 | break; |
| 2699 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2700 | spin_lock(&khugepaged_mm_lock); |
| 2701 | if (!khugepaged_scan.mm_slot) |
| 2702 | pass_through_head++; |
| 2703 | if (khugepaged_has_work() && |
| 2704 | pass_through_head < 2) |
| 2705 | progress += khugepaged_scan_mm_slot(pages - progress, |
Xiao Guangrong | d516904 | 2012-10-08 16:29:48 -0700 | [diff] [blame] | 2706 | &hpage); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2707 | else |
| 2708 | progress = pages; |
| 2709 | spin_unlock(&khugepaged_mm_lock); |
| 2710 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2711 | |
Xiao Guangrong | d516904 | 2012-10-08 16:29:48 -0700 | [diff] [blame] | 2712 | if (!IS_ERR_OR_NULL(hpage)) |
| 2713 | put_page(hpage); |
Andrea Arcangeli | 0bbbc0b | 2011-01-13 15:47:05 -0800 | [diff] [blame] | 2714 | } |
| 2715 | |
Xiao Guangrong | 2017c0b | 2012-10-08 16:29:44 -0700 | [diff] [blame] | 2716 | static void khugepaged_wait_work(void) |
| 2717 | { |
Xiao Guangrong | 2017c0b | 2012-10-08 16:29:44 -0700 | [diff] [blame] | 2718 | if (khugepaged_has_work()) { |
| 2719 | if (!khugepaged_scan_sleep_millisecs) |
| 2720 | return; |
| 2721 | |
| 2722 | wait_event_freezable_timeout(khugepaged_wait, |
| 2723 | kthread_should_stop(), |
| 2724 | msecs_to_jiffies(khugepaged_scan_sleep_millisecs)); |
| 2725 | return; |
| 2726 | } |
| 2727 | |
| 2728 | if (khugepaged_enabled()) |
| 2729 | wait_event_freezable(khugepaged_wait, khugepaged_wait_event()); |
| 2730 | } |
| 2731 | |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2732 | static int khugepaged(void *none) |
| 2733 | { |
| 2734 | struct mm_slot *mm_slot; |
| 2735 | |
Andrea Arcangeli | 878aee7 | 2011-01-13 15:47:10 -0800 | [diff] [blame] | 2736 | set_freezable(); |
Dongsheng Yang | 8698a74 | 2014-03-11 18:09:12 +0800 | [diff] [blame] | 2737 | set_user_nice(current, MAX_NICE); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2738 | |
Xiao Guangrong | b723178 | 2012-10-08 16:29:54 -0700 | [diff] [blame] | 2739 | while (!kthread_should_stop()) { |
| 2740 | khugepaged_do_scan(); |
| 2741 | khugepaged_wait_work(); |
| 2742 | } |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2743 | |
| 2744 | spin_lock(&khugepaged_mm_lock); |
| 2745 | mm_slot = khugepaged_scan.mm_slot; |
| 2746 | khugepaged_scan.mm_slot = NULL; |
| 2747 | if (mm_slot) |
| 2748 | collect_mm_slot(mm_slot); |
| 2749 | spin_unlock(&khugepaged_mm_lock); |
Andrea Arcangeli | ba76149 | 2011-01-13 15:46:58 -0800 | [diff] [blame] | 2750 | return 0; |
| 2751 | } |
| 2752 | |
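| | /*
| |  * Replace a huge zero page pmd with a pagetable of ptes that all map
| |  * the zero page, then drop the huge zero page reference.
| |  */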
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2753 | static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, |
| 2754 | unsigned long haddr, pmd_t *pmd) |
| 2755 | { |
| 2756 | struct mm_struct *mm = vma->vm_mm; |
| 2757 | pgtable_t pgtable; |
| 2758 | pmd_t _pmd; |
| 2759 | int i; |
| 2760 | |
| 2761 | /* leave pmd empty until pte is filled */ |
| 2762 | pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
| 2763 | |
| 2764 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2765 | pmd_populate(mm, &_pmd, pgtable); |
| 2766 | |
| 2767 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
| 2768 | pte_t *pte, entry; |
| 2769 | entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); |
| 2770 | entry = pte_mkspecial(entry); |
| 2771 | pte = pte_offset_map(&_pmd, haddr); |
| 2772 | VM_BUG_ON(!pte_none(*pte)); |
| 2773 | set_pte_at(mm, haddr, pte, entry); |
| 2774 | pte_unmap(pte); |
| 2775 | } |
| 2776 | smp_wmb(); /* make pte visible before pmd */ |
| 2777 | pmd_populate(mm, pmd, pgtable); |
| 2778 | put_huge_zero_page(); |
| 2779 | } |
| 2780 | |
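| | /*
| |  * Turn a huge pmd into a regular pagetable while holding the pmd lock.
| |  * The ptes inherit the write/young state of the pmd and the dirty bit
| |  * moves to the struct pages; with @freeze the ptes are written as
| |  * migration entries instead, to prepare the page for splitting.
| |  */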
| 2781 | static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2782 | unsigned long haddr, bool freeze) |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2783 | { |
| 2784 | struct mm_struct *mm = vma->vm_mm; |
| 2785 | struct page *page; |
| 2786 | pgtable_t pgtable; |
| 2787 | pmd_t _pmd; |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2788 | bool young, write, dirty; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2789 | int i; |
| 2790 | |
| 2791 | VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); |
| 2792 | VM_BUG_ON_VMA(vma->vm_start > haddr, vma); |
| 2793 | VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 2794 | VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2795 | |
| 2796 | count_vm_event(THP_SPLIT_PMD); |
| 2797 | |
| 2798 | if (vma_is_dax(vma)) { |
| 2799 | pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); |
| 2800 | if (is_huge_zero_pmd(_pmd)) |
| 2801 | put_huge_zero_page(); |
| 2802 | return; |
| 2803 | } else if (is_huge_zero_pmd(*pmd)) { |
| 2804 | return __split_huge_zero_page_pmd(vma, haddr, pmd); |
| 2805 | } |
| 2806 | |
| 2807 | page = pmd_page(*pmd); |
| 2808 | VM_BUG_ON_PAGE(!page_count(page), page); |
| 2809 | atomic_add(HPAGE_PMD_NR - 1, &page->_count); |
| 2810 | write = pmd_write(*pmd); |
| 2811 | young = pmd_young(*pmd); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2812 | dirty = pmd_dirty(*pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2813 | |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2814 | pgtable = pgtable_trans_huge_withdraw(mm, pmd); |
| 2815 | pmd_populate(mm, &_pmd, pgtable); |
| 2816 | |
| 2817 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
| 2818 | pte_t entry, *pte; |
| 2819 | /* |
| 2820 | * Note that NUMA hinting access restrictions are not |
| 2821 | * transferred to avoid any possibility of altering |
| 2822 | * permissions across VMAs. |
| 2823 | */ |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2824 | if (freeze) { |
| 2825 | swp_entry_t swp_entry; |
| 2826 | swp_entry = make_migration_entry(page + i, write); |
| 2827 | entry = swp_entry_to_pte(swp_entry); |
| 2828 | } else { |
| 2829 | entry = mk_pte(page + i, vma->vm_page_prot); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2830 | entry = maybe_mkwrite(entry, vma); |
Kirill A. Shutemov | ba98828 | 2016-01-15 16:53:56 -0800 | [diff] [blame] | 2831 | if (!write) |
| 2832 | entry = pte_wrprotect(entry); |
| 2833 | if (!young) |
| 2834 | entry = pte_mkold(entry); |
| 2835 | } |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 2836 | if (dirty) |
| 2837 | SetPageDirty(page + i); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2838 | pte = pte_offset_map(&_pmd, haddr); |
| 2839 | BUG_ON(!pte_none(*pte)); |
| 2840 | set_pte_at(mm, haddr, pte, entry); |
| 2841 | atomic_inc(&page[i]._mapcount); |
| 2842 | pte_unmap(pte); |
| 2843 | } |
| 2844 | |
| 2845 | /* |
| 2846 | * Set PG_double_map before dropping compound_mapcount to avoid |
| 2847 | * false-negative page_mapped(). |
| 2848 | */ |
| 2849 | if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { |
| 2850 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2851 | atomic_inc(&page[i]._mapcount); |
| 2852 | } |
| 2853 | |
| 2854 | if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { |
| 2855 | /* Last compound_mapcount is gone. */ |
| 2856 | __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); |
| 2857 | if (TestClearPageDoubleMap(page)) { |
| 2858 | /* No need in mapcount reference anymore */ |
| 2859 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 2860 | atomic_dec(&page[i]._mapcount); |
| 2861 | } |
| 2862 | } |
| 2863 | |
| 2864 | smp_wmb(); /* make pte visible before pmd */ |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2865 | /* |
| 2866 | * Up to this point the pmd is present and huge and userland has the |
| 2867 | * whole access to the hugepage during the split (which happens in |
| 2868 | * place). If we overwrite the pmd with the not-huge version pointing |
| 2869 | * to the pte here (which of course we could if all CPUs were bug |
| 2870 | * free), userland could trigger a small page size TLB miss on the |
| 2871 | * small sized TLB while the hugepage TLB entry is still established in |
| 2872 | * the huge TLB. Some CPUs don't like that.
| 2873 | * See http://support.amd.com/us/Processor_TechDocs/41322.pdf, Erratum
| 2874 | * 383 on page 93. Intel should be safe, but it also warns that it's
| 2875 | * only safe if the permission and cache attributes of the two entries
| 2876 | * loaded in the two TLBs are identical (which should be the case here).
| 2877 | * But it is generally safer to never allow small and huge TLB entries |
| 2878 | * for the same virtual address to be loaded simultaneously. So instead |
| 2879 | * of doing "pmd_populate(); flush_pmd_tlb_range();" we first mark the |
| 2880 | * current pmd notpresent (atomically because here the pmd_trans_huge |
| 2881 | * and pmd_trans_splitting must remain set at all times on the pmd |
| 2882 | * until the split is complete for this pmd), then we flush the SMP TLB |
| 2883 | * and finally we write the non-huge version of the pmd entry with |
| 2884 | * pmd_populate. |
| 2885 | */ |
| 2886 | pmdp_invalidate(vma, haddr, pmd); |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2887 | pmd_populate(mm, pmd, pgtable); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2888 | |
| 2889 | if (freeze) { |
| 2890 | for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { |
| 2891 | page_remove_rmap(page + i, false); |
| 2892 | put_page(page + i); |
| 2893 | } |
| 2894 | } |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2895 | } |
| 2896 | |
| 2897 | void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
| 2898 | unsigned long address) |
| 2899 | { |
| 2900 | spinlock_t *ptl; |
| 2901 | struct mm_struct *mm = vma->vm_mm; |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2902 | struct page *page = NULL; |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2903 | unsigned long haddr = address & HPAGE_PMD_MASK; |
| 2904 | |
| 2905 | mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE); |
| 2906 | ptl = pmd_lock(mm, pmd); |
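| | /*
| |  * If the huge page is mlocked, take a reference now so that the mlock
| |  * accounting (munlock_vma_page) can be fixed up once the pmd has been
| |  * split and the lock dropped.
| |  */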
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 2907 | if (pmd_trans_huge(*pmd)) { |
| 2908 | page = pmd_page(*pmd); |
| 2909 | if (PageMlocked(page)) |
| 2910 | get_page(page); |
| 2911 | else |
| 2912 | page = NULL; |
| 2913 | } else if (!pmd_devmap(*pmd)) |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2914 | goto out; |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2915 | __split_huge_pmd_locked(vma, pmd, haddr, false); |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2916 | out: |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2917 | spin_unlock(ptl); |
| 2918 | mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE); |
Kirill A. Shutemov | e90309c | 2016-01-15 16:54:33 -0800 | [diff] [blame] | 2919 | if (page) { |
| 2920 | lock_page(page); |
| 2921 | munlock_vma_page(page); |
| 2922 | unlock_page(page); |
| 2923 | put_page(page); |
| 2924 | } |
Kirill A. Shutemov | eef1b3b | 2016-01-15 16:53:53 -0800 | [diff] [blame] | 2925 | } |
| 2926 | |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2927 | static void split_huge_pmd_address(struct vm_area_struct *vma, |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2928 | unsigned long address) |
| 2929 | { |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2930 | pgd_t *pgd; |
| 2931 | pud_t *pud; |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2932 | pmd_t *pmd; |
| 2933 | |
| 2934 | VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); |
| 2935 | |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2936 | pgd = pgd_offset(vma->vm_mm, address); |
Hugh Dickins | f72e7dc | 2014-06-23 13:22:05 -0700 | [diff] [blame] | 2937 | if (!pgd_present(*pgd)) |
| 2938 | return; |
| 2939 | |
| 2940 | pud = pud_offset(pgd, address); |
| 2941 | if (!pud_present(*pud)) |
| 2942 | return; |
| 2943 | |
| 2944 | pmd = pmd_offset(pud, address); |
Dan Williams | 5c7fb56 | 2016-01-15 16:56:52 -0800 | [diff] [blame^] | 2945 | if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd))) |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2946 | return; |
| 2947 | /* |
| 2948 | * Caller holds the mmap_sem in write mode, so a huge pmd cannot
| 2949 | * materialize from under us. |
| 2950 | */ |
Kirill A. Shutemov | ad0bed2 | 2016-01-15 16:52:53 -0800 | [diff] [blame] | 2951 | split_huge_pmd(vma, pmd, address); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2952 | } |
| 2953 | |
Kirill A. Shutemov | e1b9996 | 2015-09-08 14:58:37 -0700 | [diff] [blame] | 2954 | void vma_adjust_trans_huge(struct vm_area_struct *vma, |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2955 | unsigned long start, |
| 2956 | unsigned long end, |
| 2957 | long adjust_next) |
| 2958 | { |
| 2959 | /* |
| 2960 | * If the new start address isn't hpage aligned and it could |
| 2961 | * previously contain a hugepage: check if we need to split
| 2962 | * a huge pmd.
| 2963 | */ |
| 2964 | if (start & ~HPAGE_PMD_MASK && |
| 2965 | (start & HPAGE_PMD_MASK) >= vma->vm_start && |
| 2966 | (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2967 | split_huge_pmd_address(vma, start); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2968 | |
| 2969 | /* |
| 2970 | * If the new end address isn't hpage aligned and it could |
| 2971 | * previously contain a hugepage: check if we need to split
| 2972 | * a huge pmd.
| 2973 | */ |
| 2974 | if (end & ~HPAGE_PMD_MASK && |
| 2975 | (end & HPAGE_PMD_MASK) >= vma->vm_start && |
| 2976 | (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2977 | split_huge_pmd_address(vma, end); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2978 | |
| 2979 | /* |
| 2980 | * If we're also updating the vma->vm_next->vm_start, if the new |
| 2981 | * vm_next->vm_start isn't page aligned and it could previously |
| 2982 | * contain a hugepage: check if we need to split a huge pmd.
| 2983 | */ |
| 2984 | if (adjust_next > 0) { |
| 2985 | struct vm_area_struct *next = vma->vm_next; |
| 2986 | unsigned long nstart = next->vm_start; |
| 2987 | nstart += adjust_next << PAGE_SHIFT; |
| 2988 | if (nstart & ~HPAGE_PMD_MASK && |
| 2989 | (nstart & HPAGE_PMD_MASK) >= next->vm_start && |
| 2990 | (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end) |
Kirill A. Shutemov | 78ddc53 | 2016-01-15 16:52:42 -0800 | [diff] [blame] | 2991 | split_huge_pmd_address(next, nstart); |
Andrea Arcangeli | 94fcc58 | 2011-01-13 15:47:08 -0800 | [diff] [blame] | 2992 | } |
| 2993 | } |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2994 | |
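| | /*
| |  * Unmap the huge page from one VMA: present ptes (or the huge pmd itself)
| |  * covering the page are replaced with migration entries so the page
| |  * cannot be faulted back in or gain new mappings while it is being split.
| |  */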
| 2995 | static void freeze_page_vma(struct vm_area_struct *vma, struct page *page, |
| 2996 | unsigned long address) |
| 2997 | { |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 2998 | unsigned long haddr = address & HPAGE_PMD_MASK; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 2999 | spinlock_t *ptl; |
| 3000 | pgd_t *pgd; |
| 3001 | pud_t *pud; |
| 3002 | pmd_t *pmd; |
| 3003 | pte_t *pte; |
| 3004 | int i, nr = HPAGE_PMD_NR; |
| 3005 | |
| 3006 | /* Skip pages which don't belong to the VMA */
| 3007 | if (address < vma->vm_start) { |
| 3008 | int off = (vma->vm_start - address) >> PAGE_SHIFT; |
| 3009 | page += off; |
| 3010 | nr -= off; |
| 3011 | address = vma->vm_start; |
| 3012 | } |
| 3013 | |
| 3014 | pgd = pgd_offset(vma->vm_mm, address); |
| 3015 | if (!pgd_present(*pgd)) |
| 3016 | return; |
| 3017 | pud = pud_offset(pgd, address); |
| 3018 | if (!pud_present(*pud)) |
| 3019 | return; |
| 3020 | pmd = pmd_offset(pud, address); |
| 3021 | ptl = pmd_lock(vma->vm_mm, pmd); |
| 3022 | if (!pmd_present(*pmd)) { |
| 3023 | spin_unlock(ptl); |
| 3024 | return; |
| 3025 | } |
| 3026 | if (pmd_trans_huge(*pmd)) { |
| 3027 | if (page == pmd_page(*pmd)) |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3028 | __split_huge_pmd_locked(vma, pmd, haddr, true); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3029 | spin_unlock(ptl); |
| 3030 | return; |
| 3031 | } |
| 3032 | spin_unlock(ptl); |
| 3033 | |
| 3034 | pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3035 | for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) { |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3036 | pte_t entry, swp_pte; |
| 3037 | swp_entry_t swp_entry; |
| 3038 | |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3039 | /* |
| 3040 | * We've just crossed a page table boundary: we need to map the next one.
| 3041 | * It can happen if the THP was mremapped to a non-PMD-aligned address.
| 3042 | */ |
| 3043 | if (unlikely(address == haddr + HPAGE_PMD_SIZE)) { |
| 3044 | pte_unmap_unlock(pte - 1, ptl); |
| 3045 | pmd = mm_find_pmd(vma->vm_mm, address); |
| 3046 | if (!pmd) |
| 3047 | return; |
| 3048 | pte = pte_offset_map_lock(vma->vm_mm, pmd, |
| 3049 | address, &ptl); |
| 3050 | } |
| 3051 | |
| 3052 | if (!pte_present(*pte)) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3053 | continue; |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3054 | if (page_to_pfn(page) != pte_pfn(*pte)) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3055 | continue; |
| 3056 | flush_cache_page(vma, address, page_to_pfn(page)); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3057 | entry = ptep_clear_flush(vma, address, pte); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 3058 | if (pte_dirty(entry)) |
| 3059 | SetPageDirty(page); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3060 | swp_entry = make_migration_entry(page, pte_write(entry)); |
| 3061 | swp_pte = swp_entry_to_pte(swp_entry); |
| 3062 | if (pte_soft_dirty(entry)) |
| 3063 | swp_pte = pte_swp_mksoft_dirty(swp_pte); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3064 | set_pte_at(vma->vm_mm, address, pte, swp_pte); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3065 | page_remove_rmap(page, false); |
| 3066 | put_page(page); |
| 3067 | } |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3068 | pte_unmap_unlock(pte - 1, ptl); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3069 | } |
| 3070 | |
| 3071 | static void freeze_page(struct anon_vma *anon_vma, struct page *page) |
| 3072 | { |
| 3073 | struct anon_vma_chain *avc; |
| 3074 | pgoff_t pgoff = page_to_pgoff(page); |
| 3075 | |
| 3076 | VM_BUG_ON_PAGE(!PageHead(page), page); |
| 3077 | |
| 3078 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, |
| 3079 | pgoff + HPAGE_PMD_NR - 1) { |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3080 | unsigned long address = __vma_address(page, avc->vma); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3081 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3082 | mmu_notifier_invalidate_range_start(avc->vma->vm_mm, |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3083 | address, address + HPAGE_PMD_SIZE); |
| 3084 | freeze_page_vma(avc->vma, page, address); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3085 | mmu_notifier_invalidate_range_end(avc->vma->vm_mm, |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3086 | address, address + HPAGE_PMD_SIZE); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3087 | } |
| 3088 | } |
| 3089 | |
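| | /*
| |  * Undo freeze_page_vma(): replace the migration entries that point to
| |  * this page with real ptes again and restore the rmap for each subpage.
| |  */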
| 3090 | static void unfreeze_page_vma(struct vm_area_struct *vma, struct page *page, |
| 3091 | unsigned long address) |
| 3092 | { |
| 3093 | spinlock_t *ptl; |
| 3094 | pmd_t *pmd; |
| 3095 | pte_t *pte, entry; |
| 3096 | swp_entry_t swp_entry; |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3097 | unsigned long haddr = address & HPAGE_PMD_MASK; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3098 | int i, nr = HPAGE_PMD_NR; |
| 3099 | |
| 3100 | /* Skip pages which don't belong to the VMA */
| 3101 | if (address < vma->vm_start) { |
| 3102 | int off = (vma->vm_start - address) >> PAGE_SHIFT; |
| 3103 | page += off; |
| 3104 | nr -= off; |
| 3105 | address = vma->vm_start; |
| 3106 | } |
| 3107 | |
| 3108 | pmd = mm_find_pmd(vma->vm_mm, address); |
| 3109 | if (!pmd) |
| 3110 | return; |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3111 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3112 | pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3113 | for (i = 0; i < nr; i++, address += PAGE_SIZE, page++, pte++) { |
| 3114 | /* |
| 3115 | * We've just crossed a page table boundary: we need to map the next one.
| 3116 | * It can happen if the THP was mremapped to a non-PMD-aligned address.
| 3117 | */ |
| 3118 | if (unlikely(address == haddr + HPAGE_PMD_SIZE)) { |
| 3119 | pte_unmap_unlock(pte - 1, ptl); |
| 3120 | pmd = mm_find_pmd(vma->vm_mm, address); |
| 3121 | if (!pmd) |
| 3122 | return; |
| 3123 | pte = pte_offset_map_lock(vma->vm_mm, pmd, |
| 3124 | address, &ptl); |
| 3125 | } |
| 3126 | |
| 3127 | if (!is_swap_pte(*pte)) |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3128 | continue; |
| 3129 | |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3130 | swp_entry = pte_to_swp_entry(*pte); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3131 | if (!is_migration_entry(swp_entry)) |
| 3132 | continue; |
| 3133 | if (migration_entry_to_page(swp_entry) != page) |
| 3134 | continue; |
| 3135 | |
| 3136 | get_page(page); |
| 3137 | page_add_anon_rmap(page, vma, address, false); |
| 3138 | |
| 3139 | entry = pte_mkold(mk_pte(page, vma->vm_page_prot)); |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 3140 | if (PageDirty(page)) |
| 3141 | entry = pte_mkdirty(entry); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3142 | if (is_write_migration_entry(swp_entry)) |
| 3143 | entry = maybe_mkwrite(entry, vma); |
| 3144 | |
| 3145 | flush_dcache_page(page); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3146 | set_pte_at(vma->vm_mm, address, pte, entry); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3147 | |
| 3148 | /* No need to invalidate - it was non-present before */ |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3149 | update_mmu_cache(vma, address, pte); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3150 | } |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3151 | pte_unmap_unlock(pte - 1, ptl); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3152 | } |
| 3153 | |
| 3154 | static void unfreeze_page(struct anon_vma *anon_vma, struct page *page) |
| 3155 | { |
| 3156 | struct anon_vma_chain *avc; |
| 3157 | pgoff_t pgoff = page_to_pgoff(page); |
| 3158 | |
| 3159 | anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, |
| 3160 | pgoff, pgoff + HPAGE_PMD_NR - 1) { |
| 3161 | unsigned long address = __vma_address(page, avc->vma); |
| 3162 | |
| 3163 | mmu_notifier_invalidate_range_start(avc->vma->vm_mm, |
| 3164 | address, address + HPAGE_PMD_SIZE); |
| 3165 | unfreeze_page_vma(avc->vma, page, address); |
| 3166 | mmu_notifier_invalidate_range_end(avc->vma->vm_mm, |
| 3167 | address, address + HPAGE_PMD_SIZE); |
| 3168 | } |
| 3169 | } |
| 3170 | |
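| | /*
| |  * Turn one tail page of the compound page into an independent small page:
| |  * transfer the relevant flags, ->mapping and ->index from the head and
| |  * give the tail the references its pte mappings require. Returns the
| |  * tail's mapcount so the caller can drop the matching references from
| |  * the head page.
| |  */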
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3171 | static int __split_huge_page_tail(struct page *head, int tail, |
| 3172 | struct lruvec *lruvec, struct list_head *list) |
| 3173 | { |
| 3174 | int mapcount; |
| 3175 | struct page *page_tail = head + tail; |
| 3176 | |
| 3177 | mapcount = atomic_read(&page_tail->_mapcount) + 1; |
| 3178 | VM_BUG_ON_PAGE(atomic_read(&page_tail->_count) != 0, page_tail); |
| 3179 | |
| 3180 | /* |
| 3181 | * tail_page->_count is zero and not changing from under us. But |
| 3182 | * get_page_unless_zero() may be running from under us on the |
| 3183 | * tail_page. If we used atomic_set() below instead of atomic_add(), we |
| 3184 | * would then run atomic_set() concurrently with |
| 3185 | * get_page_unless_zero(), and atomic_set() is implemented in C not |
| 3186 | * using locked ops. spin_unlock on x86 sometimes uses locked ops
| 3187 | * because of PPro errata 66, 92, so unless somebody can guarantee |
| 3188 | * atomic_set() here would be safe on all archs (and not only on x86), |
| 3189 | * it's safer to use atomic_add(). |
| 3190 | */ |
| 3191 | atomic_add(mapcount + 1, &page_tail->_count); |
| 3192 | |
| 3193 | |
| 3194 | page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; |
| 3195 | page_tail->flags |= (head->flags & |
| 3196 | ((1L << PG_referenced) | |
| 3197 | (1L << PG_swapbacked) | |
| 3198 | (1L << PG_mlocked) | |
| 3199 | (1L << PG_uptodate) | |
| 3200 | (1L << PG_active) | |
| 3201 | (1L << PG_locked) | |
Minchan Kim | b8d3c4c | 2016-01-15 16:55:42 -0800 | [diff] [blame] | 3202 | (1L << PG_unevictable) | |
| 3203 | (1L << PG_dirty))); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3204 | |
| 3205 | /* |
| 3206 | * After clearing PageTail the gup refcount can be released. |
| 3207 | * Page flags also must be visible before we make the page non-compound. |
| 3208 | */ |
| 3209 | smp_wmb(); |
| 3210 | |
| 3211 | clear_compound_head(page_tail); |
| 3212 | |
| 3213 | if (page_is_young(head)) |
| 3214 | set_page_young(page_tail); |
| 3215 | if (page_is_idle(head)) |
| 3216 | set_page_idle(page_tail); |
| 3217 | |
| 3218 | /* ->mapping in first tail page is compound_mapcount */ |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3219 | VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3220 | page_tail); |
| 3221 | page_tail->mapping = head->mapping; |
| 3222 | |
| 3223 | page_tail->index = head->index + tail; |
| 3224 | page_cpupid_xchg_last(page_tail, page_cpupid_last(head)); |
| 3225 | lru_add_page_tail(head, page_tail, lruvec, list); |
| 3226 | |
| 3227 | return mapcount; |
| 3228 | } |
| 3229 | |
| 3230 | static void __split_huge_page(struct page *page, struct list_head *list) |
| 3231 | { |
| 3232 | struct page *head = compound_head(page); |
| 3233 | struct zone *zone = page_zone(head); |
| 3234 | struct lruvec *lruvec; |
| 3235 | int i, tail_mapcount; |
| 3236 | |
| 3237 | /* prevent PageLRU from going away under us, and freeze lru stats */
| 3238 | spin_lock_irq(&zone->lru_lock); |
| 3239 | lruvec = mem_cgroup_page_lruvec(head, zone); |
| 3240 | |
| 3241 | /* complete memcg work before adding pages to the LRU */
| 3242 | mem_cgroup_split_huge_fixup(head); |
| 3243 | |
| 3244 | tail_mapcount = 0; |
| 3245 | for (i = HPAGE_PMD_NR - 1; i >= 1; i--) |
| 3246 | tail_mapcount += __split_huge_page_tail(head, i, lruvec, list); |
| 3247 | atomic_sub(tail_mapcount, &head->_count); |
| 3248 | |
| 3249 | ClearPageCompound(head); |
| 3250 | spin_unlock_irq(&zone->lru_lock); |
| 3251 | |
| 3252 | unfreeze_page(page_anon_vma(head), head); |
| 3253 | |
| 3254 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 3255 | struct page *subpage = head + i; |
| 3256 | if (subpage == page) |
| 3257 | continue; |
| 3258 | unlock_page(subpage); |
| 3259 | |
| 3260 | /* |
| 3261 | * Subpages may be freed if there wasn't any mapping left,
| 3262 | * e.g. if add_to_swap() is running on an LRU page that
| 3263 | * had its mapping zapped. And freeing these pages |
| 3264 | * requires taking the lru_lock so we do the put_page |
| 3265 | * of the tail pages after the split is complete. |
| 3266 | */ |
| 3267 | put_page(subpage); |
| 3268 | } |
| 3269 | } |
| 3270 | |
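| | /*
| |  * Total number of mappings of a page: the compound mapcount (pmd
| |  * mappings) plus the pte mapcount of every subpage. When the page is
| |  * PageDoubleMap, every subpage mapcount carries one extra count for the
| |  * pmd mapping, which must not be counted twice.
| |  */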
Kirill A. Shutemov | b20ce5e | 2016-01-15 16:54:37 -0800 | [diff] [blame] | 3271 | int total_mapcount(struct page *page) |
| 3272 | { |
| 3273 | int i, ret; |
| 3274 | |
| 3275 | VM_BUG_ON_PAGE(PageTail(page), page); |
| 3276 | |
| 3277 | if (likely(!PageCompound(page))) |
| 3278 | return atomic_read(&page->_mapcount) + 1; |
| 3279 | |
| 3280 | ret = compound_mapcount(page); |
| 3281 | if (PageHuge(page)) |
| 3282 | return ret; |
| 3283 | for (i = 0; i < HPAGE_PMD_NR; i++) |
| 3284 | ret += atomic_read(&page[i]._mapcount) + 1; |
| 3285 | if (PageDoubleMap(page)) |
| 3286 | ret -= HPAGE_PMD_NR; |
| 3287 | return ret; |
| 3288 | } |
| 3289 | |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3290 | /* |
| 3291 | * This function splits a huge page into normal pages. @page can point to any
| 3292 | * subpage of the huge page to split. The split doesn't change the position of @page.
| 3293 | * |
| 3294 | * The caller must hold the only pin on @page, otherwise the split fails with -EBUSY.
| 3295 | * The huge page must be locked. |
| 3296 | * |
| 3297 | * If @list is null, tail pages will be added to LRU list, otherwise, to @list. |
| 3298 | * |
| 3299 | * Both head page and tail pages will inherit mapping, flags, and so on from |
| 3300 | * the hugepage. |
| 3301 | * |
| 3302 | * The GUP pin and PG_locked are transferred to @page. The rest of the subpages
| 3303 | * can be freed if they are not mapped.
| 3304 | * |
| 3305 | * Returns 0 if the hugepage is split successfully. |
| 3306 | * Returns -EBUSY if the page is pinned or if anon_vma disappeared from under |
| 3307 | * us. |
| 3308 | */ |
| 3309 | int split_huge_page_to_list(struct page *page, struct list_head *list) |
| 3310 | { |
| 3311 | struct page *head = compound_head(page); |
| 3312 | struct anon_vma *anon_vma; |
| 3313 | int count, mapcount, ret; |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 3314 | bool mlocked; |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3315 | |
| 3316 | VM_BUG_ON_PAGE(is_huge_zero_page(page), page); |
| 3317 | VM_BUG_ON_PAGE(!PageAnon(page), page); |
| 3318 | VM_BUG_ON_PAGE(!PageLocked(page), page); |
| 3319 | VM_BUG_ON_PAGE(!PageSwapBacked(page), page); |
| 3320 | VM_BUG_ON_PAGE(!PageCompound(page), page); |
| 3321 | |
| 3322 | /* |
| 3323 | * The caller does not necessarily hold an mmap_sem that would prevent |
| 3324 | * the anon_vma disappearing, so we first take a reference to it
| 3325 | * and then lock the anon_vma for write. This is similar to |
| 3326 | * page_lock_anon_vma_read except the write lock is taken to serialise |
| 3327 | * against parallel split or collapse operations. |
| 3328 | */ |
| 3329 | anon_vma = page_get_anon_vma(head); |
| 3330 | if (!anon_vma) { |
| 3331 | ret = -EBUSY; |
| 3332 | goto out; |
| 3333 | } |
| 3334 | anon_vma_lock_write(anon_vma); |
| 3335 | |
| 3336 | /* |
| 3337 | * Racy check if we can split the page, before freeze_page() will |
| 3338 | * Racy check whether we can split the page, before freeze_page()
| 3339 | * splits the PMDs
| 3340 | if (total_mapcount(head) != page_count(head) - 1) { |
| 3341 | ret = -EBUSY; |
| 3342 | goto out_unlock; |
| 3343 | } |
| 3344 | |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 3345 | mlocked = PageMlocked(page); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3346 | freeze_page(anon_vma, head); |
| 3347 | VM_BUG_ON_PAGE(compound_mapcount(head), head); |
| 3348 | |
Kirill A. Shutemov | d965432 | 2016-01-15 16:54:43 -0800 | [diff] [blame] | 3349 | /* Make sure the page is not on a per-CPU pagevec, as that takes a pin */
| 3350 | if (mlocked) |
| 3351 | lru_add_drain(); |
| 3352 | |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3353 | /* Prevent deferred_split_scan() touching ->_count */ |
| 3354 | spin_lock(&split_queue_lock); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3355 | count = page_count(head); |
| 3356 | mapcount = total_mapcount(head); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3357 | if (!mapcount && count == 1) { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3358 | if (!list_empty(page_deferred_list(head))) { |
| 3359 | split_queue_len--; |
| 3360 | list_del(page_deferred_list(head)); |
| 3361 | } |
| 3362 | spin_unlock(&split_queue_lock); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3363 | __split_huge_page(page, list); |
| 3364 | ret = 0; |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3365 | } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3366 | spin_unlock(&split_queue_lock); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3367 | pr_alert("total_mapcount: %u, page_count(): %u\n", |
| 3368 | mapcount, count); |
| 3369 | if (PageTail(page)) |
| 3370 | dump_page(head, NULL); |
Kirill A. Shutemov | bd56086 | 2016-01-15 16:55:46 -0800 | [diff] [blame] | 3371 | dump_page(page, "total_mapcount(head) > 0"); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3372 | BUG(); |
| 3373 | } else { |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3374 | spin_unlock(&split_queue_lock); |
Kirill A. Shutemov | e9b61f1 | 2016-01-15 16:54:10 -0800 | [diff] [blame] | 3375 | unfreeze_page(anon_vma, head); |
| 3376 | ret = -EBUSY; |
| 3377 | } |
| 3378 | |
| 3379 | out_unlock: |
| 3380 | anon_vma_unlock_write(anon_vma); |
| 3381 | put_anon_vma(anon_vma); |
| 3382 | out: |
| 3383 | count_vm_event(!ret ? THP_SPLIT_PAGE : THP_SPLIT_PAGE_FAILED); |
| 3384 | return ret; |
| 3385 | } |
Kirill A. Shutemov | 9a98225 | 2016-01-15 16:54:17 -0800 | [diff] [blame] | 3386 | |
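| | /*
| |  * Compound page destructor for THPs: take the page off the deferred-split
| |  * queue, if it is queued there, before the compound page is freed.
| |  */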
| 3387 | void free_transhuge_page(struct page *page) |
| 3388 | { |
| 3389 | unsigned long flags; |
| 3390 | |
| 3391 | spin_lock_irqsave(&split_queue_lock, flags); |
| 3392 | if (!list_empty(page_deferred_list(page))) { |
| 3393 | split_queue_len--; |
| 3394 | list_del(page_deferred_list(page)); |
| 3395 | } |
| 3396 | spin_unlock_irqrestore(&split_queue_lock, flags); |
| 3397 | free_compound_page(page); |
| 3398 | } |
| 3399 | |
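| | /*
| |  * Queue a partially unmapped THP: the shrinker below splits queued pages
| |  * under memory pressure so that the no-longer-mapped subpages can be
| |  * freed.
| |  */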
| 3400 | void deferred_split_huge_page(struct page *page) |
| 3401 | { |
| 3402 | unsigned long flags; |
| 3403 | |
| 3404 | VM_BUG_ON_PAGE(!PageTransHuge(page), page); |
| 3405 | |
| 3406 | spin_lock_irqsave(&split_queue_lock, flags); |
| 3407 | if (list_empty(page_deferred_list(page))) { |
| 3408 | list_add_tail(page_deferred_list(page), &split_queue); |
| 3409 | split_queue_len++; |
| 3410 | } |
| 3411 | spin_unlock_irqrestore(&split_queue_lock, flags); |
| 3412 | } |
| 3413 | |
| 3414 | static unsigned long deferred_split_count(struct shrinker *shrink, |
| 3415 | struct shrink_control *sc) |
| 3416 | { |
| 3417 | /* |
| 3418 | * Splitting a page from the split_queue will free up at least one page,
| 3419 | * at most HPAGE_PMD_NR - 1. We don't track the exact number.
| 3420 | * Let's use HPAGE_PMD_NR / 2 as ballpark. |
| 3421 | */ |
| 3422 | return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2; |
| 3423 | } |
| 3424 | |
| 3425 | static unsigned long deferred_split_scan(struct shrinker *shrink, |
| 3426 | struct shrink_control *sc) |
| 3427 | { |
| 3428 | unsigned long flags; |
| 3429 | LIST_HEAD(list), *pos, *next; |
| 3430 | struct page *page; |
| 3431 | int split = 0; |
| 3432 | |
| 3433 | spin_lock_irqsave(&split_queue_lock, flags); |
| 3434 | list_splice_init(&split_queue, &list); |
| 3435 | |
| 3436 | /* Take pin on all head pages to avoid freeing them under us */ |
| 3437 | list_for_each_safe(pos, next, &list) { |
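| | /*
| |  * The deferred-split list node is overlaid on a tail page's ->mapping
| |  * field (hence the cast), so first recover that struct page from the
| |  * node and then get the head of the compound page.
| |  */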
| 3438 | page = list_entry((void *)pos, struct page, mapping); |
| 3439 | page = compound_head(page); |
| 3440 | /* race with put_compound_page() */ |
| 3441 | if (!get_page_unless_zero(page)) { |
| 3442 | list_del_init(page_deferred_list(page)); |
| 3443 | split_queue_len--; |
| 3444 | } |
| 3445 | } |
| 3446 | spin_unlock_irqrestore(&split_queue_lock, flags); |
| 3447 | |
| 3448 | list_for_each_safe(pos, next, &list) { |
| 3449 | page = list_entry((void *)pos, struct page, mapping); |
| 3450 | lock_page(page); |
| 3451 | /* split_huge_page() removes page from list on success */ |
| 3452 | if (!split_huge_page(page)) |
| 3453 | split++; |
| 3454 | unlock_page(page); |
| 3455 | put_page(page); |
| 3456 | } |
| 3457 | |
| 3458 | spin_lock_irqsave(&split_queue_lock, flags); |
| 3459 | list_splice_tail(&list, &split_queue); |
| 3460 | spin_unlock_irqrestore(&split_queue_lock, flags); |
| 3461 | |
| 3462 | return split * HPAGE_PMD_NR / 2; |
| 3463 | } |
| 3464 | |
| 3465 | static struct shrinker deferred_split_shrinker = { |
| 3466 | .count_objects = deferred_split_count, |
| 3467 | .scan_objects = deferred_split_scan, |
| 3468 | .seeks = DEFAULT_SEEKS, |
| 3469 | }; |
Kirill A. Shutemov | 49071d4 | 2016-01-15 16:54:40 -0800 | [diff] [blame] | 3470 | |
| 3471 | #ifdef CONFIG_DEBUG_FS |
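| | /*
| |  * Debugfs knob for testing: writing 1 to <debugfs>/split_huge_pages walks
| |  * every populated zone and attempts to split every anonymous THP it finds.
| |  */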
| 3472 | static int split_huge_pages_set(void *data, u64 val) |
| 3473 | { |
| 3474 | struct zone *zone; |
| 3475 | struct page *page; |
| 3476 | unsigned long pfn, max_zone_pfn; |
| 3477 | unsigned long total = 0, split = 0; |
| 3478 | |
| 3479 | if (val != 1) |
| 3480 | return -EINVAL; |
| 3481 | |
| 3482 | for_each_populated_zone(zone) { |
| 3483 | max_zone_pfn = zone_end_pfn(zone); |
| 3484 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) { |
| 3485 | if (!pfn_valid(pfn)) |
| 3486 | continue; |
| 3487 | |
| 3488 | page = pfn_to_page(pfn); |
| 3489 | if (!get_page_unless_zero(page)) |
| 3490 | continue; |
| 3491 | |
| 3492 | if (zone != page_zone(page)) |
| 3493 | goto next; |
| 3494 | |
| 3495 | if (!PageHead(page) || !PageAnon(page) || |
| 3496 | PageHuge(page)) |
| 3497 | goto next; |
| 3498 | |
| 3499 | total++; |
| 3500 | lock_page(page); |
| 3501 | if (!split_huge_page(page)) |
| 3502 | split++; |
| 3503 | unlock_page(page); |
| 3504 | next: |
| 3505 | put_page(page); |
| 3506 | } |
| 3507 | } |
| 3508 | |
| 3509 | pr_info("%lu of %lu THP split\n", split, total);
| 3510 | |
| 3511 | return 0; |
| 3512 | } |
| 3513 | DEFINE_SIMPLE_ATTRIBUTE(split_huge_pages_fops, NULL, split_huge_pages_set, |
| 3514 | "%llu\n"); |
| 3515 | |
| 3516 | static int __init split_huge_pages_debugfs(void) |
| 3517 | { |
| 3518 | void *ret; |
| 3519 | |
| 3520 | ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL, |
| 3521 | &split_huge_pages_fops); |
| 3522 | if (!ret) |
| 3523 | pr_warn("Failed to create split_huge_pages in debugfs\n");
| 3524 | return 0; |
| 3525 | } |
| 3526 | late_initcall(split_huge_pages_debugfs); |
| 3527 | #endif |