xref: /linux-6.15/include/linux/mm.h (revision 776cfebb)
#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/sched.h>
#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
#include <linux/fs.h>

struct mempolicy;
struct anon_vma;

#ifndef CONFIG_DISCONTIGMEM          /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif

extern unsigned long num_physpages;
extern void * high_memory;
extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

/*
 * This struct defines a virtual memory area. There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
	struct mm_struct * vm_mm;	/* The address space we belong to. */
	unsigned long vm_start;		/* Our start address within vm_mm. */
	unsigned long vm_end;		/* The first byte after our end address
					   within vm_mm. */

	/* linked list of VM areas per task, sorted by address */
	struct vm_area_struct *vm_next;

	pgprot_t vm_page_prot;		/* Access permissions of this VMA. */
	unsigned long vm_flags;		/* Flags, listed below. */

	struct rb_node vm_rb;

	/*
	 * For areas with an address space and backing store,
	 * linkage into the address_space->i_mmap prio tree, or
	 * linkage to the list of like vmas hanging off its node, or
	 * linkage of vma in the address_space->i_mmap_nonlinear list.
	 */
	union {
		struct {
			struct list_head list;
			void *parent;	/* aligns with prio_tree_node parent */
			struct vm_area_struct *head;
		} vm_set;

		struct raw_prio_tree_node prio_tree_node;
	} shared;

	/*
	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
	 * list, after a COW of one of the file pages.  A MAP_SHARED vma
	 * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
	 * or brk vma (with NULL file) can only be in an anon_vma list.
	 */
	struct list_head anon_vma_node;	/* Serialized by anon_vma->lock */
	struct anon_vma *anon_vma;	/* Serialized by page_table_lock */

	/* Function pointers to deal with this struct. */
	struct vm_operations_struct * vm_ops;

	/* Information about our backing store: */
	unsigned long vm_pgoff;		/* Offset (within vm_file) in PAGE_SIZE
					   units, *not* PAGE_CACHE_SIZE */
	struct file * vm_file;		/* File we map to (can be NULL). */
	void * vm_private_data;		/* was vm_pte (shared mem) */
	unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
	atomic_t vm_usage;		/* refcount (VMAs shared if !MMU) */
#endif
#ifdef CONFIG_NUMA
	struct mempolicy *vm_policy;	/* NUMA policy for the VMA */
#endif
};
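
/*
 * Illustrative sketch (not part of the original header): the vm_next
 * links above form a singly linked, address-ordered list rooted at
 * mm->mmap, so a pass over every VMA of an address space looks like
 * the hypothetical helper below.  Callers hold mm->mmap_sem.
 */
static inline int example_count_vmas(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int n = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		n++;
	return n;
}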

/*
 * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
 * disabled, then there's a single shared list of VMAs maintained by the
 * system, and mms subscribe to these individually.
 */
struct vm_list_struct {
	struct vm_list_struct	*next;
	struct vm_area_struct	*vma;
};

#ifndef CONFIG_MMU
extern struct rb_root nommu_vma_tree;
extern struct rw_semaphore nommu_vma_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags..
 */
#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_GROWSUP	0x00000200
#define VM_SHM		0x00000400	/* shared memory area, don't swap out */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_EXECUTABLE	0x00001000
#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_RESERVED	0x00080000	/* Don't unmap it from swap_out */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

#define VM_READHINTMASK			(VM_SEQ_READ | VM_RAND_READ)
#define VM_ClearReadHint(v)		(v)->vm_flags &= ~VM_READHINTMASK
#define VM_NormalReadHint(v)		(!((v)->vm_flags & VM_READHINTMASK))
#define VM_SequentialReadHint(v)	((v)->vm_flags & VM_SEQ_READ)
#define VM_RandomReadHint(v)		((v)->vm_flags & VM_RAND_READ)

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
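
/*
 * Illustrative sketch (not part of the original header): the index into
 * protection_map[] is built from the four "currently active" bits
 * (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED), with VM_SHARED selecting the
 * upper half of the table.  The helper name is hypothetical.
 */
static inline pgprot_t example_page_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags &
			      (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}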


/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs.
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
	int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
#ifdef CONFIG_NUMA
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
};

struct mmu_gather;
struct inode;

#ifdef ARCH_HAS_ATOMIC_UNSIGNED
typedef unsigned page_flags_t;
#else
typedef unsigned long page_flags_t;
#endif

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page.
 */
struct page {
	page_flags_t flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count, see below. */
	atomic_t _mapcount;		/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
	unsigned long private;		/* Mapping-private opaque data:
					 * usually used for buffer_heads
					 * if PagePrivate set; used for
					 * swp_entry_t if PageSwapCache.
					 * When page is free, this indicates
					 * order in the buddy system.
					 */
	struct address_space *mapping;	/* If low bit clear, points to
					 * inode address_space, or NULL.
					 * If page mapped as anonymous
					 * memory, low bit is set, and
					 * it points to anon_vma object:
					 * see PAGE_MAPPING_ANON below.
					 */
	pgoff_t index;			/* Our offset within mapping. */
	struct list_head lru;		/* Pageout list, eg. active_list
					 * protected by zone->lru_lock !
					 */
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
};

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 *
 * Since 2.6.6 (approx), a free page has ->_count = -1.  This is so that we
 * can use atomic_add_negative(-1, page->_count) to detect when the page
 * becomes free and so that we can also use atomic_inc_and_test to atomically
 * detect when we just tried to grab a ref on a page which some other CPU has
 * already deemed to be freeable.
 *
 * NO code should make assumptions about this internal detail!  Use the
 * provided macros which retain the old rules: page_count(page) == 0 is
 * a free page.
 */

/*
 * Drop a ref, return true if the logical refcount fell to zero (the page has
 * no users)
 */
#define put_page_testzero(p)				\
	({						\
		BUG_ON(page_count(p) == 0);		\
		atomic_add_negative(-1, &(p)->_count);	\
	})

/*
 * Grab a ref, return true if the page previously had a logical refcount of
 * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
 */
#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)

#define set_page_count(p,v) 	atomic_set(&(p)->_count, v - 1)
#define __put_page(p)		atomic_dec(&(p)->_count)

extern void FASTCALL(__page_cache_release(struct page *));

#ifdef CONFIG_HUGETLB_PAGE

static inline int page_count(struct page *p)
{
	if (PageCompound(p))
		p = (struct page *)p->private;
	return atomic_read(&(p)->_count) + 1;
}

static inline void get_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		page = (struct page *)page->private;
	atomic_inc(&page->_count);
}

void put_page(struct page *page);

#else		/* CONFIG_HUGETLB_PAGE */

#define page_count(p)		(atomic_read(&(p)->_count) + 1)

static inline void get_page(struct page *page)
{
	atomic_inc(&page->_count);
}

static inline void put_page(struct page *page)
{
	if (!PageReserved(page) && put_page_testzero(page))
		__page_cache_release(page);
}

#endif		/* CONFIG_HUGETLB_PAGE */
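
/*
 * Illustrative sketch (not part of the original header): the
 * pin-before-use pattern described above.  A reference taken with
 * get_page() keeps the page from being freed until the matching
 * put_page().  The helper and its callback are hypothetical.
 */
static inline void example_with_page_pinned(struct page *page,
					    void (*fn)(struct page *))
{
	get_page(page);		/* page cannot be freed from under us */
	fn(page);		/* ... critical work on the page ... */
	put_page(page);		/* drop our reference again */
}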

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free.
 *   page_count() == 1 means the page is used for exactly one purpose
 *   (e.g. a private data page of one process).
 *
 * A page may be used for kmalloc() or anyone else who does a
 * __get_free_page(). In this case the page_count() is at least 1, and
 * all other fields are unused but should be 0 or NULL. The
 * management of this page is the responsibility of the one who uses
 * it.
 *
 * The other pages (we may call them "process pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A page may belong to an inode's memory mapping. In this case,
 * page->mapping is the pointer to the inode, and page->index is the
 * file offset of the page, in units of PAGE_CACHE_SIZE.
 *
 * A page contains an opaque `private' member, which belongs to the
 * page's address_space.  Usually, this is the address of a circular
 * list of the page's disk buffers.
 *
 * For pages belonging to inodes, the page_count() is the number of
 * attaches, plus 1 if `private' contains something, plus one for
 * the page cache itself.
 *
 * All pages belonging to an inode are in these doubly linked lists:
 * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
 * using the page->list list_head. These fields are also used for
 * freelist management (when page_count()==0).
 *
 * There is also a per-mapping radix tree mapping index to the page
 * in memory if present. The tree is rooted at mapping->root.
 *
 * All process pages can do I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written to disk,
 * - private pages which have been modified may need to be swapped out
 *   to swap space and (later) to be read back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
 * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
#define NODEZONE(node, zone)	((node << ZONES_SHIFT) | zone)

static inline unsigned long page_zonenum(struct page *page)
{
	return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
}
static inline unsigned long page_to_nid(struct page *page)
{
	return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
}

struct zone;
extern struct zone *zone_table[];

static inline struct zone *page_zone(struct page *page)
{
	return zone_table[page->flags >> NODEZONE_SHIFT];
}

static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
{
	page->flags &= ~(~0UL << NODEZONE_SHIFT);
	page->flags |= nodezone_num << NODEZONE_SHIFT;
}
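
/*
 * Illustrative sketch (not part of the original header): the node and
 * zone numbers live packed in the top bits of page->flags, so encoding
 * with NODEZONE() and decoding with the accessors above round-trip.
 * The helper name is hypothetical.
 */
static inline void example_tag_page(struct page *page, unsigned long nid,
				    unsigned long zone_num)
{
	set_page_zone(page, NODEZONE(nid, zone_num));
	/* From here on, page_to_nid(page) == nid and
	 * page_zonenum(page) == zone_num. */
}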

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

static inline void *lowmem_page_address(struct page *page)
{
	return __va(page_to_pfn(page) << PAGE_SHIFT);
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
#define page_address(page) ((page)->virtual)
#define set_page_address(page, address)			\
	do {						\
		(page)->virtual = (address);		\
	} while(0)
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1

extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;
	return mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page->private;
	return page->index;
}
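
/*
 * Illustrative sketch (not part of the original header): classifying a
 * page with the helpers above.  Swap-cache anonymous pages keep
 * PAGE_MAPPING_ANON set, so the anonymous test comes first.  The helper
 * and its return strings are hypothetical.
 */
static inline const char *example_page_kind(struct page *page)
{
	if (PageAnon(page))
		return "anonymous";		/* mapping is an anon_vma */
	if (page_mapping(page))
		return "pagecache";		/* inode or swapper_space */
	return "unowned";			/* mapping is NULL */
}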

/*
 * The atomic page->_mapcount, like _count, starts from -1:
 * so that transitions both from it and to it can be tracked,
 * using atomic_inc_and_test and atomic_add_negative(-1).
 */
static inline void reset_page_mapcount(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Error return values for the *_nopage functions
 */
#define NOPAGE_SIGBUS	(NULL)
#define NOPAGE_OOM	((struct page *) (-1))

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */
#define VM_FAULT_OOM	(-1)
#define VM_FAULT_SIGBUS	0
#define VM_FAULT_MINOR	1
#define VM_FAULT_MAJOR	2
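
/*
 * Illustrative sketch (not part of the original header): the shape of a
 * minimal ->nopage handler (see vm_operations_struct above), here one
 * that backs every in-range fault with the global zero page.  A real
 * handler would look up or allocate a page instead; the name is
 * hypothetical.
 */
static inline struct page *example_nopage(struct vm_area_struct *area,
					  unsigned long address, int *type)
{
	struct page *page;

	if (address >= area->vm_end)
		return NOPAGE_SIGBUS;	/* fault outside the VMA */

	page = ZERO_PAGE(address);	/* from asm/pgtable.h */
	get_page(page);			/* caller expects a reference */
	if (type)
		*type = VM_FAULT_MINOR;	/* satisfied without I/O */
	return page;
}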

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

extern void show_free_areas(void);

#ifdef CONFIG_SHMEM
struct page *shmem_nopage(struct vm_area_struct *vma,
			unsigned long address, int *type);
int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					unsigned long addr);
int shmem_lock(struct file *file, int lock, struct user_struct *user);
#else
#define shmem_nopage filemap_nopage
#define shmem_lock(a, b, c) 	({0;})	/* always in memory, no need to lock */
#define shmem_set_policy(a, b)	(0)
#define shmem_get_policy(a, b)	(NULL)
#endif
struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);

int shmem_zero_setup(struct vm_area_struct *);

static inline int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
		return 1;
	return 0;
}
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
	spinlock_t *i_mmap_lock;		/* For unmap_mapping_range: */
	unsigned long truncate_count;		/* Compare vm_truncate_count */
};

unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
unsigned long unmap_vmas(struct mmu_gather **tlb, struct mm_struct *mm,
		struct vm_area_struct *start_vma, unsigned long start_addr,
		unsigned long end_addr, unsigned long *nr_accounted,
		struct zap_details *);
void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
		unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
			unsigned long size, pgprot_t prot);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}
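
/*
 * Illustrative sketch (not part of the original header): how a
 * truncation path might use unmap_mapping_range() to tear down all user
 * mappings beyond a new end-of-file before the pages themselves are
 * freed.  A holelen of 0 means "to the end of the file"; even_cows=1
 * also zaps private COW copies.  The helper name is hypothetical.
 */
static inline void example_unmap_beyond_eof(struct address_space *mapping,
					    loff_t new_size)
{
	unmap_mapping_range(mapping, new_size, 0, 1);
}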

extern int vmtruncate(struct inode * inode, loff_t offset);
extern pud_t *FASTCALL(__pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address));
extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);

int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
		int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);

int __set_page_dirty_buffers(struct page *page);
int __set_page_dirty_nobuffers(struct page *page);
int redirty_page_for_writepage(struct writeback_control *wbc,
				struct page *page);
int FASTCALL(set_page_dirty(struct page *page));
int set_page_dirty_lock(struct page *page);
int clear_page_dirty_for_io(struct page *page);

extern unsigned long do_mremap(unsigned long addr,
			       unsigned long old_len, unsigned long new_len,
			       unsigned long flags, unsigned long new_addr);

/*
 * Prototype to add a shrinker callback for ageable caches.
 *
 * These functions are passed a count `nr_to_scan' and a gfpmask.  They should
 * scan `nr_to_scan' objects, attempting to free them.
 *
 * The callback must return the number of objects which remain in the cache.
 *
 * The callback will be passed nr_to_scan == 0 when the VM is querying the
 * cache size, so a fastpath for that case is appropriate.
 */
typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);

/*
 * Add an aging callback.  The int is the number of 'seeks' it takes
 * to recreate one of the objects that these functions age.
 */

#define DEFAULT_SEEKS 2
struct shrinker;
extern struct shrinker *set_shrinker(int, shrinker_t);
extern void remove_shrinker(struct shrinker *shrinker);
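
/*
 * Illustrative sketch (not part of the original header): the expected
 * shape of a shrinker callback.  Registration would then be
 * set_shrinker(DEFAULT_SEEKS, example_shrink), undone later with
 * remove_shrinker().  All names and the empty cache are hypothetical.
 */
static inline int example_shrink(int nr_to_scan, unsigned int gfp_mask)
{
	int remaining = 0;	/* hypothetical: objects left in our cache */

	if (nr_to_scan == 0)
		return remaining;	/* pure size query: stay cheap */

	/* ... free up to nr_to_scan objects, recompute 'remaining' ... */
	return remaining;
}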

/*
 * On a two-level or three-level page table, this ends up being trivial. Thus
 * the inlining and the symmetry break with pte_alloc_map() that does all
 * of this out-of-line.
 */
/*
 * The following ifdef needed to get the 4level-fixup.h header to work.
 * Remove it when 4level-fixup.h has been removed.
 */
#ifdef CONFIG_MMU
#ifndef __ARCH_HAS_4LEVEL_HACK
static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	if (pgd_none(*pgd))
		return __pud_alloc(mm, pgd, address);
	return pud_offset(pgd, address);
}

static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	if (pud_none(*pud))
		return __pmd_alloc(mm, pud, address);
	return pmd_offset(pud, address);
}
#endif
#endif /* CONFIG_MMU */
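
/*
 * Illustrative sketch (not part of the original header, assumes
 * CONFIG_MMU without the 4level hack): how a fault path chains the
 * allocators above to materialize a pte, walking pgd -> pud -> pmd.
 * Locking (mm->page_table_lock) is elided; the name is hypothetical.
 */
static inline pte_t *example_pte_walk_alloc(struct mm_struct *mm,
					    unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pud_t *pud = pud_alloc(mm, pgd, address);
	pmd_t *pmd;

	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return NULL;
	return pte_alloc_map(mm, pmd, address);
}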

extern void free_area_init(unsigned long * zones_size);
extern void free_area_init_node(int nid, pg_data_t *pgdat,
	unsigned long * zones_size, unsigned long zone_start_pfn,
	unsigned long *zholes_size);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
extern void mem_init(void);
extern void show_mem(void);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);

/* prio_tree.c */
void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
	struct prio_tree_iter *iter);

#define vma_prio_tree_foreach(vma, iter, root, begin, end)	\
	for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;	\
		(vma = vma_prio_tree_next(vma, iter)); )
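
/*
 * Illustrative sketch (not part of the original header): visiting every
 * VMA that maps a given pgoff range of a file, as the rmap and truncate
 * paths do.  Callers hold mapping->i_mmap_lock; the helper name is
 * hypothetical.
 */
static inline unsigned long example_count_mappers(struct address_space *mapping,
						  pgoff_t begin, pgoff_t end)
{
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	unsigned long n = 0;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, begin, end)
		n++;
	return n;
}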

static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
					struct list_head *list)
{
	vma->shared.vm_set.parent = NULL;
	list_add_tail(&vma->shared.vm_set.list, list);
}

/* mmap.c */
extern int __vm_enough_memory(long pages, int cap_sys_admin);
extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
extern struct vm_area_struct *vma_merge(struct mm_struct *,
	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
	struct mempolicy *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int split_vma(struct mm_struct *,
	struct vm_area_struct *, unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
	struct rb_node **, struct rb_node *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
	unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);

extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);

extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff);

static inline unsigned long do_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	unsigned long ret = -EINVAL;
	if ((offset + PAGE_ALIGN(len)) < offset)
		goto out;
	if (!(offset & ~PAGE_MASK))
		ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
out:
	return ret;
}
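
/*
 * Illustrative sketch (not part of the original header): do_mmap()
 * rejects byte offsets that overflow or are not page-aligned, and
 * converts valid ones into a page number for do_mmap_pgoff().  A
 * hypothetical in-kernel caller, holding mmap_sem for writing:
 *
 *	down_write(&current->mm->mmap_sem);
 *	addr = do_mmap(file, 0, PAGE_SIZE, PROT_READ, MAP_SHARED,
 *		       PAGE_SIZE);
 *	up_write(&current->mm->mmap_sem);
 */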

extern int do_munmap(struct mm_struct *, unsigned long, size_t);

extern unsigned long do_brk(unsigned long, unsigned long);

/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);

/* generic vm_area_ops exported for stackable file systems */
extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
extern int filemap_populate(struct vm_area_struct *, unsigned long,
		unsigned long, pgprot_t, unsigned long, int);

/* mm/page-writeback.c */
int write_one_page(struct page *page, int wait);

/* readahead.c */
#define VM_MAX_READAHEAD	128	/* kbytes */
#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
#define VM_MAX_CACHE_HIT    	256	/* max pages in a row in cache before
					 * turning readahead off */

int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read);
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
			unsigned long offset, unsigned long nr_to_read);
unsigned long  page_cache_readahead(struct address_space *mapping,
			  struct file_ra_state *ra,
			  struct file *filp,
			  unsigned long offset,
			  unsigned long size);
void handle_ra_miss(struct address_space *mapping,
		    struct file_ra_state *ra, pgoff_t offset);
unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct * vma, unsigned long address);

/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
					     struct vm_area_struct **pprev);

/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
   NULL if none.  Assume start_addr < end_addr. */
static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
{
	struct vm_area_struct * vma = find_vma(mm, start_addr);

	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
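
/*
 * Illustrative sketch (not part of the original header): find_vma()
 * returns the first VMA with addr < vm_end, which may lie entirely
 * above addr, so testing whether an address is actually mapped needs
 * the lower bound too.  Hypothetical helper; callers hold mm->mmap_sem.
 */
static inline int example_addr_is_mapped(struct mm_struct *mm,
					 unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && vma->vm_start <= addr;
}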

extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);

extern struct page * vmalloc_to_page(void *addr);
extern unsigned long vmalloc_to_pfn(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
		int write);
extern int check_user_page_readable(struct mm_struct *mm, unsigned long address);
int remap_pfn_range(struct vm_area_struct *, unsigned long,
		unsigned long, unsigned long, pgprot_t);
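
/*
 * Illustrative sketch (not part of the original header): the canonical
 * driver ->mmap of this era, wiring a physical region into the caller's
 * VMA with remap_pfn_range().  'phys' is a hypothetical device address;
 * VM_IO | VM_RESERVED keeps the region out of core dumps and swap.
 */
static inline int example_driver_mmap(struct vm_area_struct *vma,
				      unsigned long phys)
{
	vma->vm_flags |= VM_IO | VM_RESERVED;
	return remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}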

#ifdef CONFIG_PROC_FS
void __vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
#else
static inline void __vm_stat_account(struct mm_struct *mm,
			unsigned long flags, struct file *file, long pages)
{
}
#endif /* CONFIG_PROC_FS */

static inline void vm_stat_account(struct vm_area_struct *vma)
{
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							vma_pages(vma));
}

static inline void vm_stat_unaccount(struct vm_area_struct *vma)
{
	__vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
							-vma_pages(vma));
}

/* update per process rss and vm hiwater data */
extern void update_mem_hiwater(struct task_struct *tsk);

#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
}
#endif

extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
#ifdef	__HAVE_ARCH_GATE_AREA
int in_gate_area_no_task(unsigned long addr);
int in_gate_area(struct task_struct *task, unsigned long addr);
#else
int in_gate_area_no_task(unsigned long addr);
#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
#endif	/* __HAVE_ARCH_GATE_AREA */

/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
#define OOM_DISABLE -17

#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */