/*
 *  mm/userfaultfd.c
 *
 *  Copyright (C) 2015  Red Hat, Inc.
 *
 *  This work is licensed under the terms of the GNU GPL, version 2. See
 *  the COPYING file in the top-level directory.
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/userfaultfd_k.h>
#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
#include "internal.h"

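/*
 * mcopy_atomic_pte() installs a freshly allocated anonymous page at
 * @dst_addr, filled with PAGE_SIZE bytes copied from the userland
 * address @src_addr.
 *
 * On the first call *@pagep is NULL: the page is allocated and the
 * copy is attempted with kmap_atomic() while holding mmap_sem. If that
 * copy faults, the page is handed back through *@pagep and -EFAULT is
 * returned, so the caller can fill it in a sleepable context and call
 * again; the retry then consumes the prefilled page. Returns 0 on
 * success, -ENOMEM on allocation or charge failure, and -EEXIST if a
 * pte is already present at @dst_addr.
 */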
static int mcopy_atomic_pte(struct mm_struct *dst_mm,
			    pmd_t *dst_pmd,
			    struct vm_area_struct *dst_vma,
			    unsigned long dst_addr,
			    unsigned long src_addr,
			    struct page **pagep)
{
	struct mem_cgroup *memcg;
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	void *page_kaddr;
	int ret;
	struct page *page;

	if (!*pagep) {
		ret = -ENOMEM;
		page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
		if (!page)
			goto out;

		page_kaddr = kmap_atomic(page);
		ret = copy_from_user(page_kaddr,
				     (const void __user *) src_addr,
				     PAGE_SIZE);
		kunmap_atomic(page_kaddr);

		/* fall back to copy_from_user outside mmap_sem */
		if (unlikely(ret)) {
			ret = -EFAULT;
			*pagep = page;
			/* don't free the page */
			goto out;
		}
	} else {
		page = *pagep;
		*pagep = NULL;
	}

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	ret = -ENOMEM;
	if (mem_cgroup_try_charge(page, dst_mm, GFP_KERNEL, &memcg, false))
		goto out_release;

	_dst_pte = mk_pte(page, dst_vma->vm_page_prot);
	if (dst_vma->vm_flags & VM_WRITE)
		_dst_pte = pte_mkwrite(pte_mkdirty(_dst_pte));

	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_release_uncharge_unlock;

	inc_mm_counter(dst_mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	lru_cache_add_active_or_unevictable(page, dst_vma);

	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);

	pte_unmap_unlock(dst_pte, ptl);
	ret = 0;
out:
	return ret;
out_release_uncharge_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	mem_cgroup_cancel_charge(page, memcg, false);
out_release:
	put_page(page);
	goto out;
}

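/*
 * mfill_zeropage_pte() maps the architecture's shared zero pfn at
 * @dst_addr with a read-only special pte, so no page needs to be
 * allocated or charged. Returns 0 on success, or -EEXIST if a pte is
 * already present at @dst_addr.
 */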
static int mfill_zeropage_pte(struct mm_struct *dst_mm,
			      pmd_t *dst_pmd,
			      struct vm_area_struct *dst_vma,
			      unsigned long dst_addr)
{
	pte_t _dst_pte, *dst_pte;
	spinlock_t *ptl;
	int ret;

	_dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
					 dst_vma->vm_page_prot));
	ret = -EEXIST;
	dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);
	if (!pte_none(*dst_pte))
		goto out_unlock;
	set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
	/* No need to invalidate - it was non-present before */
	update_mmu_cache(dst_vma, dst_addr, dst_pte);
	ret = 0;
out_unlock:
	pte_unmap_unlock(dst_pte, ptl);
	return ret;
}

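/*
 * mm_alloc_pmd() walks to the pmd mapping @address in @mm, allocating
 * the intermediate page tables as needed, and returns it, or NULL if a
 * pud or pmd could not be allocated.
 */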
static pmd_t *mm_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (pud)
		/*
		 * Note that pmd_alloc() isn't run only when the pmd is
		 * missing: *pmd may already be established, and it may
		 * even be a trans_huge_pmd.
		 */
		pmd = pmd_alloc(mm, pud, address);
	return pmd;
}

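/*
 * __mcopy_atomic() is the common engine behind mcopy_atomic() and
 * mfill_zeropage(): it fills the @len bytes at @dst_start one page at
 * a time, either copying from @src_start (@zeropage == false) or
 * mapping the zero page (@zeropage == true).
 *
 * It takes dst_mm->mmap_sem for reading and drops it whenever the copy
 * must fault in source memory, retrying the vma lookup from scratch
 * afterwards. Returns the number of bytes filled, or, if no page was
 * filled at all, a negative error; on a failure past the first page
 * the short byte count is returned instead of the error.
 */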
static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
					      unsigned long dst_start,
					      unsigned long src_start,
					      unsigned long len,
					      bool zeropage)
{
	struct vm_area_struct *dst_vma;
	ssize_t err;
	pmd_t *dst_pmd;
	unsigned long src_addr, dst_addr;
	long copied;
	struct page *page;

	/*
	 * Sanitize the command parameters:
	 */
	BUG_ON(dst_start & ~PAGE_MASK);
	BUG_ON(len & ~PAGE_MASK);

	/* Does the address range wrap, or is the span zero-sized? */
	BUG_ON(src_start + len <= src_start);
	BUG_ON(dst_start + len <= dst_start);

	src_addr = src_start;
	dst_addr = dst_start;
	copied = 0;
	page = NULL;
retry:
	down_read(&dst_mm->mmap_sem);

	/*
	 * Make sure the vma is not shared, and that the dst range is
	 * both valid and fully within a single existing vma.
	 */
	err = -EINVAL;
	dst_vma = find_vma(dst_mm, dst_start);
	if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
		goto out_unlock;
	if (dst_start < dst_vma->vm_start ||
	    dst_start + len > dst_vma->vm_end)
		goto out_unlock;

	/*
	 * Check that the vma is registered in uffd; this is required
	 * to enforce the VM_MAYWRITE check done at uffd registration
	 * time.
	 */
	if (!dst_vma->vm_userfaultfd_ctx.ctx)
		goto out_unlock;

	/*
	 * FIXME: only anonymous vmas are supported for now; tmpfs
	 * should be added.
	 */
	if (dst_vma->vm_ops)
		goto out_unlock;

	/*
	 * Ensure the dst_vma has an anon_vma, or this page would get
	 * a NULL anon_vma when moved into the dst_vma.
	 */
	err = -ENOMEM;
	if (unlikely(anon_vma_prepare(dst_vma)))
		goto out_unlock;

	while (src_addr < src_start + len) {
		pmd_t dst_pmdval;

		BUG_ON(dst_addr >= dst_start + len);

		dst_pmd = mm_alloc_pmd(dst_mm, dst_addr);
		if (unlikely(!dst_pmd)) {
			err = -ENOMEM;
			break;
		}

		dst_pmdval = pmd_read_atomic(dst_pmd);
		/*
		 * If the dst_pmd is mapped as THP don't
		 * override it and just be strict.
		 */
		if (unlikely(pmd_trans_huge(dst_pmdval))) {
			err = -EEXIST;
			break;
		}
		if (unlikely(pmd_none(dst_pmdval)) &&
		    unlikely(__pte_alloc(dst_mm, dst_pmd, dst_addr))) {
			err = -ENOMEM;
			break;
		}
		/* If a huge pmd materialized from under us, fail */
		if (unlikely(pmd_trans_huge(*dst_pmd))) {
			err = -EFAULT;
			break;
		}

		BUG_ON(pmd_none(*dst_pmd));
		BUG_ON(pmd_trans_huge(*dst_pmd));

		if (!zeropage)
			err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
					       dst_addr, src_addr, &page);
		else
			err = mfill_zeropage_pte(dst_mm, dst_pmd, dst_vma,
						 dst_addr);

		cond_resched();

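		/*
		 * -EFAULT from mcopy_atomic_pte() means the atomic copy
		 * faulted on the source: drop mmap_sem, fill the page
		 * we still own with a sleepable copy_from_user(), then
		 * retry from the top since the vma may have changed in
		 * the meantime.
		 */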
		if (unlikely(err == -EFAULT)) {
			void *page_kaddr;

			up_read(&dst_mm->mmap_sem);
			BUG_ON(!page);

			page_kaddr = kmap(page);
			err = copy_from_user(page_kaddr,
					     (const void __user *) src_addr,
					     PAGE_SIZE);
			kunmap(page);
			if (unlikely(err)) {
				err = -EFAULT;
				goto out;
			}
			goto retry;
		} else
			BUG_ON(page);

		if (!err) {
			dst_addr += PAGE_SIZE;
			src_addr += PAGE_SIZE;
			copied += PAGE_SIZE;

			if (fatal_signal_pending(current))
				err = -EINTR;
		}
		if (err)
			break;
	}

out_unlock:
	up_read(&dst_mm->mmap_sem);
out:
	if (page)
		put_page(page);
	BUG_ON(copied < 0);
	BUG_ON(err > 0);
	BUG_ON(!copied && !err);
	return copied ? copied : err;
}

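/*
 * mcopy_atomic() backs the UFFDIO_COPY ioctl: it atomically copies
 * @len bytes from @src_start into the faulting process's memory at
 * @dst_start. For illustration only, a minimal userland caller might
 * look like the sketch below (error handling elided; "uffd" is assumed
 * to be a userfaultfd file descriptor with the range registered):
 *
 *	struct uffdio_copy copy = {
 *		.dst = dst_addr,	// page-aligned fault address
 *		.src = src_addr,	// buffer holding the page contents
 *		.len = page_size,
 *		.mode = 0,
 *	};
 *	ioctl(uffd, UFFDIO_COPY, &copy);
 */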
ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
		     unsigned long src_start, unsigned long len)
{
	return __mcopy_atomic(dst_mm, dst_start, src_start, len, false);
}

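/*
 * mfill_zeropage() backs the UFFDIO_ZEROPAGE ioctl: it maps the zero
 * page over @len bytes starting at @start, resolving the faults in
 * that range without allocating memory.
 */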
ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
		       unsigned long len)
{
	return __mcopy_atomic(dst_mm, start, 0, len, true);
}