#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"

static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
                addr < (unsigned long)__end_rodata;
}

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
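
/*
 * Example usage (illustrative sketch, not part of the original file; the
 * dev/dev->label fields are hypothetical):
 *
 *	dev->label = kstrdup(label, GFP_KERNEL);
 *	if (!dev->label)
 *		return -ENOMEM;
 */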

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed with kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
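
/*
 * Example usage (illustrative sketch): pairing kstrdup_const() with
 * kfree_const() so string literals in .rodata are never copied or freed.
 * The attr/attr->name fields are hypothetical.
 *
 *	attr->name = kstrdup_const(name, GFP_KERNEL);
 *	...
 *	kfree_const(attr->name);
 */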

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len+1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
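
/*
 * Example (illustrative sketch): kstrndup() reads at most @max characters,
 * so an overlong source is silently truncated:
 *
 *	char *s = kstrndup("hello, world", 5, GFP_KERNEL);
 *
 * s would then be "hello", in a 6-byte allocation including the NUL.
 */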

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
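
/*
 * Example usage (illustrative sketch; the req/resp structures are
 * hypothetical):
 *
 *	resp = kmemdup(req, sizeof(*req), GFP_KERNEL);
 *	if (!resp)
 *		return -ENOMEM;
 */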

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
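
/*
 * Example (illustrative sketch): turning an unterminated, length-delimited
 * buffer into a C string, e.g. a name parsed out of a binary blob. The
 * blob/name_len variables are hypothetical.
 *
 *	char *name = kmemdup_nul(blob, name_len, GFP_KERNEL);
 *	if (!name)
 *		return -ENOMEM;
 */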

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
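
/*
 * Example usage (illustrative sketch): note the ERR_PTR()-based error
 * convention, unlike the NULL returned by the k*dup() helpers above.
 * The ubuf/len values are hypothetical ioctl arguments.
 *
 *	void *buf = memdup_user(ubuf, len);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */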

/*
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
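
/*
 * Example (illustrative sketch): the distinct error codes let a caller
 * tell a fault from an overlong string. The uname pointer is hypothetical.
 *
 *	char *name = strndup_user(uname, PAGE_SIZE);
 *	if (IS_ERR(name))
 *		return PTR_ERR(name);
 *
 * PTR_ERR() here is -EFAULT on a bad user pointer and -EINVAL when the
 * string (including its NUL) does not fit within the given limit.
 */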

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause a page fault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);

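/*
 * Link @vma into @mm's VMA list after @prev, or at the head of the list
 * when @prev is NULL, deriving the successor from @rb_parent if needed.
 */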
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/*
 * Like get_user_pages_fast() except it is IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, it simply returns
 * with no pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                                int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
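
/*
 * Example usage (illustrative sketch): pinning a single page for reading;
 * the uaddr value is a hypothetical user-space address.
 *
 *	struct page *page;
 *	int ret = get_user_pages_fast(uaddr, 1, 0, &page);
 *	if (ret != 1)
 *		return ret < 0 ? ret : -EFAULT;
 *	...
 *	put_page(page);
 */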

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate);
                up_write(&mm->mmap_sem);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
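
/*
 * Example usage (illustrative sketch): mapping the first @size bytes of a
 * file read-only into the current task, as a driver might. The file/size
 * variables are hypothetical.
 *
 *	unsigned long uaddr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
 *	if (IS_ERR_VALUE(uaddr))
 *		return (long)uaddr;
 */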

void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
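
/*
 * Example (illustrative sketch): kvfree() pairs with allocations that may
 * come from either kmalloc() or vmalloc(), e.g. a size-dependent fallback:
 *
 *	buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);
 */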

static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For a compound page, return true if any subpage is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on a slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of
         * mappings of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}

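/*
 * Worked example (illustrative, assuming 4 KiB pages): on a machine with
 * 4 GiB of RAM (no hugetlb pages), the default overcommit_ratio of 50 and
 * 2 GiB of swap, the limit is 1048576 * 50 / 100 + 524288 = 1048576 pages,
 * i.e. 4 GiB of commit charge.
 */
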
/*
 * Make sure vm_committed_as is in its own cacheline and not shared with
 * other variables, since it can be updated frequently by several CPUs.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                        -(s64)vm_committed_as_batch * num_online_cpus(),
                        "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure. The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Leave the reserved pages alone; they are not available
                 * for anonymous pages.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}

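/*
 * Worked example for the user reserve (illustrative, 4 KiB pages): with the
 * default 128 MB sysctl_user_reserve_kbytes, a process with a 1 GiB address
 * space (total_vm = 262144 pages) gets min(262144 / 32, 32768) = 8192 pages
 * (32 MiB) subtracted from its commit allowance.
 */
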
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a terminating NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;
        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;	/* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the nul at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
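
/*
 * Example usage (illustrative sketch; @task is a hypothetical task pointer):
 *
 *	char buf[256];
 *	int len = get_cmdline(task, buf, sizeof(buf));
 *
 * buf then holds up to 256 bytes of the command line, not necessarily
 * NUL-terminated, so callers must rely on the returned length.
 */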
678 |