#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct address_space;

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
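/*
 * The struct page for a physical frame is found with pfn_to_page()
 * (classically an index into the mem_map[] array) and mapped back
 * with page_to_pfn(); see asm-generic/memory_model.h.
 */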
struct page {
	unsigned long flags;		/* Atomic flags, some possibly
					 * updated asynchronously */
	atomic_t _count;		/* Usage count; see page_count()
					 * in linux/mm.h. */
	union {
		atomic_t _mapcount;	/* Count of ptes mapped in mms,
					 * to show when page is mapped
					 * & limit reverse map searches.
					 */
		struct {	/* SLUB uses */
			short unsigned int inuse;
			short unsigned int offset;
		};
	};
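	/*
	 * Convention sketch: _count is the reference count (the page
	 * can be freed only once it drops to zero), while _mapcount
	 * starts at -1, so the page is mapped by at least one pte
	 * exactly when atomic_read(&page->_mapcount) >= 0; this is
	 * what page_mapped() in linux/mm.h tests.
	 */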
	union {
	    struct {
		unsigned long private;		/* Mapping-private opaque data:
						 * usually used for buffer_heads
						 * if PagePrivate set; used for
						 * swp_entry_t if PageSwapCache;
						 * indicates order in the buddy
						 * system if PG_buddy is set.
						 */
		struct address_space *mapping;	/* If low bit clear, points to
						 * inode address_space, or NULL.
						 * If page mapped as anonymous
						 * memory, low bit is set, and
						 * it points to anon_vma object:
						 * see PAGE_MAPPING_ANON in
						 * linux/mm.h; a decoding
						 * sketch follows this struct.
						 */
	    };
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;
#endif
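	    /*
	     * With split ptlocks configured, pte_lockptr() in
	     * linux/mm.h returns &pmd_page(*pmd)->ptl, so each
	     * page-table page carries its own lock; otherwise it
	     * falls back to the per-mm mm->page_table_lock.
	     */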
	    struct {			/* SLUB uses */
		struct page *first_page;	/* Compound pages */
		struct kmem_cache *slab;	/* Pointer to slab */
	    };
	};
	union {
		pgoff_t index;		/* Our offset within mapping. */
		void *freelist;		/* SLUB: pointer to free object */
	};
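	/*
	 * SLUB keeps its per-slab metadata in struct page itself:
	 * freelist points at the first free object, and each free
	 * object links to the next through a pointer stored at a
	 * fixed offset inside the object (tracked together with the
	 * inuse/offset pair above).
	 */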
	struct list_head lru;		/* Pageout list, e.g. active_list
					 * protected by zone->lru_lock !
					 */
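	/*
	 * For example, page reclaim moves a page between
	 * zone->active_list and zone->inactive_list by splicing this
	 * list_head, always under zone->lru_lock; see mm/swap.c and
	 * mm/vmscan.c.
	 */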
	/*
	 * On machines where all RAM is mapped into kernel address space,
	 * we can simply calculate the virtual address. On machines with
	 * highmem some memory is mapped into kernel virtual memory
	 * dynamically, so we need a place to store that address.
	 * Note that this field could be 16 bits on x86 ... ;)
	 *
	 * Architectures with slow multiplication can define
	 * WANT_PAGE_VIRTUAL in asm/page.h
	 */
#if defined(WANT_PAGE_VIRTUAL)
	void *virtual;			/* Kernel virtual address (NULL if
					   not kmapped, i.e. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
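	/*
	 * With WANT_PAGE_VIRTUAL, page_address() in linux/mm.h simply
	 * reads this field and kmap() stores into it via
	 * set_page_address(). Otherwise lowmem addresses are computed
	 * from the pfn, and HASHED_PAGE_VIRTUAL configurations look
	 * kmapped highmem pages up in a hash table (mm/highmem.c).
	 */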
};
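
/*
 * Illustrative sketch only: a minimal decode of the overloaded
 * ->mapping field described above. These helper names are invented
 * for the example, and PAGE_MAPPING_ANON really lives in linux/mm.h
 * (its value is repeated here so the sketch stands alone). The real
 * page_mapping() there additionally special-cases swap-cache pages.
 */
#define EXAMPLE_PAGE_MAPPING_ANON	1

static inline int example_page_is_anon(struct page *page)
{
	/* Anonymous pages tag the low bit of ->mapping. */
	return ((unsigned long)page->mapping & EXAMPLE_PAGE_MAPPING_ANON) != 0;
}

static inline struct address_space *example_page_mapping(struct page *page)
{
	/*
	 * For pagecache pages ->mapping is a real address_space; for
	 * anonymous pages it is actually an anon_vma, so report NULL.
	 */
	return example_page_is_anon(page) ? NULL : page->mapping;
}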

#endif /* _LINUX_MM_TYPES_H */