19740ca4eSMichel Lespinasse #ifndef _LINUX_MMAP_LOCK_H
29740ca4eSMichel Lespinasse #define _LINUX_MMAP_LOCK_H
39740ca4eSMichel Lespinasse
42b5067a8SAxel Rasmussen #include <linux/lockdep.h>
52b5067a8SAxel Rasmussen #include <linux/mm_types.h>
642fc5414SMichel Lespinasse #include <linux/mmdebug.h>
72b5067a8SAxel Rasmussen #include <linux/rwsem.h>
82b5067a8SAxel Rasmussen #include <linux/tracepoint-defs.h>
92b5067a8SAxel Rasmussen #include <linux/types.h>
1042fc5414SMichel Lespinasse
/* Static initializer for the mmap_lock member of an mm_struct. */
#define MMAP_LOCK_INITIALIZER(name) \
	.mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
1314c3656bSMichel Lespinasse
142b5067a8SAxel Rasmussen DECLARE_TRACEPOINT(mmap_lock_start_locking);
152b5067a8SAxel Rasmussen DECLARE_TRACEPOINT(mmap_lock_acquire_returned);
162b5067a8SAxel Rasmussen DECLARE_TRACEPOINT(mmap_lock_released);
172b5067a8SAxel Rasmussen
182b5067a8SAxel Rasmussen #ifdef CONFIG_TRACING
192b5067a8SAxel Rasmussen
202b5067a8SAxel Rasmussen void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
212b5067a8SAxel Rasmussen void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
222b5067a8SAxel Rasmussen bool success);
232b5067a8SAxel Rasmussen void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
242b5067a8SAxel Rasmussen
/*
 * Emit the mmap_lock_start_locking tracepoint (if enabled) just before
 * an mmap_lock acquisition attempt begins.
 */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
	if (!tracepoint_enabled(mmap_lock_start_locking))
		return;

	__mmap_lock_do_trace_start_locking(mm, write);
}
312b5067a8SAxel Rasmussen
/*
 * Emit the mmap_lock_acquire_returned tracepoint (if enabled) after an
 * mmap_lock acquisition attempt finishes; @success records the outcome.
 */
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
	if (!tracepoint_enabled(mmap_lock_acquire_returned))
		return;

	__mmap_lock_do_trace_acquire_returned(mm, write, success);
}
382b5067a8SAxel Rasmussen
/*
 * Emit the mmap_lock_released tracepoint (if enabled) when mmap_lock is
 * dropped (or downgraded from write).
 */
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
	if (!tracepoint_enabled(mmap_lock_released))
		return;

	__mmap_lock_do_trace_released(mm, write);
}
442b5067a8SAxel Rasmussen
452b5067a8SAxel Rasmussen #else /* !CONFIG_TRACING */
462b5067a8SAxel Rasmussen
/* CONFIG_TRACING disabled: tracepoint hooks compile away to nothing. */
static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
						   bool write)
{
}
512b5067a8SAxel Rasmussen
/* CONFIG_TRACING disabled: tracepoint hooks compile away to nothing. */
static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
						      bool write, bool success)
{
}
562b5067a8SAxel Rasmussen
/* CONFIG_TRACING disabled: tracepoint hooks compile away to nothing. */
static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
{
}
602b5067a8SAxel Rasmussen
612b5067a8SAxel Rasmussen #endif /* CONFIG_TRACING */
622b5067a8SAxel Rasmussen
/* Assert that mmap_lock is held (for read or write) by the current context. */
static inline void mmap_assert_locked(const struct mm_struct *mm)
{
	rwsem_assert_held(&mm->mmap_lock);
}
67438b6e12SSuren Baghdasaryan
/* Assert that mmap_lock is held for write by the current context. */
static inline void mmap_assert_write_locked(const struct mm_struct *mm)
{
	rwsem_assert_held_write(&mm->mmap_lock);
}
72438b6e12SSuren Baghdasaryan
735e31275cSSuren Baghdasaryan #ifdef CONFIG_PER_VMA_LOCK
74*03a001b1SSuren Baghdasaryan
/* Initialize the mm_lock_seq seqcount used for per-VMA lock speculation. */
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
	seqcount_init(&mm->mm_lock_seq);
}
79eb449bd9SSuren Baghdasaryan
/*
 * Open an mm_lock_seq write section. Called with mmap_lock held for write,
 * which serializes writers; hence the "raw" variant without lockdep's
 * seqcount writer tracking.
 */
static inline void mm_lock_seqcount_begin(struct mm_struct *mm)
{
	do_raw_write_seqcount_begin(&mm->mm_lock_seq);
}
84eb449bd9SSuren Baghdasaryan
/* Close the mm_lock_seq write section opened by mm_lock_seqcount_begin(). */
static inline void mm_lock_seqcount_end(struct mm_struct *mm)
{
	/* KCSAN check: no other writer may race with this write section. */
	ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq);
	do_raw_write_seqcount_end(&mm->mm_lock_seq);
}
90eb449bd9SSuren Baghdasaryan
/*
 * Try to open a speculative (lockless) read section against mmap_lock.
 * On success, *seq holds the seqcount snapshot to later pass to
 * mmap_lock_speculate_retry(). Returns false when an mm_lock_seq write
 * section (i.e. an mmap_lock writer) is active.
 */
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
	/*
	 * Since mmap_lock is a sleeping lock, and waiting for it to become
	 * unlocked is more or less equivalent with taking it ourselves, don't
	 * bother with the speculative path if mmap_lock is already write-locked
	 * and take the slow path, which takes the lock.
	 */
	return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq);
}
101*03a001b1SSuren Baghdasaryan
/*
 * Close a speculative read section. Returns true if mm_lock_seq changed
 * since mmap_lock_speculate_try_begin(), i.e. the speculation failed and
 * the caller must retry or fall back to taking mmap_lock.
 */
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
	return read_seqcount_retry(&mm->mm_lock_seq, seq);
}
106*03a001b1SSuren Baghdasaryan
107*03a001b1SSuren Baghdasaryan #else /* CONFIG_PER_VMA_LOCK */
108*03a001b1SSuren Baghdasaryan
/* No mm_lock_seq without CONFIG_PER_VMA_LOCK: these compile away. */
static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {}
static inline void mm_lock_seqcount_end(struct mm_struct *mm) {}
112*03a001b1SSuren Baghdasaryan
/*
 * Without CONFIG_PER_VMA_LOCK there is no mm_lock_seq, so lockless
 * speculation is never possible; callers must take mmap_lock instead.
 */
static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq)
{
	return false;
}
117*03a001b1SSuren Baghdasaryan
/*
 * Always report failure: with speculation unavailable, any caller that
 * somehow reaches here must fall back to the locked path.
 */
static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq)
{
	return true;
}
122*03a001b1SSuren Baghdasaryan
123*03a001b1SSuren Baghdasaryan #endif /* CONFIG_PER_VMA_LOCK */
124eb449bd9SSuren Baghdasaryan
/*
 * Take mmap_lock for write. Once the rwsem is held, open the mm_lock_seq
 * write section so speculative readers (CONFIG_PER_VMA_LOCK) can detect
 * the writer. Tracepoints bracket the acquisition.
 */
static inline void mmap_write_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write(&mm->mmap_lock);
	mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
132eb449bd9SSuren Baghdasaryan
/*
 * As mmap_write_lock(), but uses a lockdep subclass for the rare callers
 * that legitimately nest two mmap_locks.
 */
static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
{
	__mmap_lock_trace_start_locking(mm, true);
	down_write_nested(&mm->mmap_lock, subclass);
	mm_lock_seqcount_begin(mm);
	__mmap_lock_trace_acquire_returned(mm, true, true);
}
140eb449bd9SSuren Baghdasaryan
mmap_write_lock_killable(struct mm_struct * mm)141eb449bd9SSuren Baghdasaryan static inline int mmap_write_lock_killable(struct mm_struct *mm)
142eb449bd9SSuren Baghdasaryan {
143eb449bd9SSuren Baghdasaryan int ret;
144eb449bd9SSuren Baghdasaryan
145eb449bd9SSuren Baghdasaryan __mmap_lock_trace_start_locking(mm, true);
146eb449bd9SSuren Baghdasaryan ret = down_write_killable(&mm->mmap_lock);
147eb449bd9SSuren Baghdasaryan if (!ret)
148eb449bd9SSuren Baghdasaryan mm_lock_seqcount_begin(mm);
149eb449bd9SSuren Baghdasaryan __mmap_lock_trace_acquire_returned(mm, true, ret == 0);
150eb449bd9SSuren Baghdasaryan return ret;
151eb449bd9SSuren Baghdasaryan }
152eb449bd9SSuren Baghdasaryan
15390717566SJann Horn /*
15490717566SJann Horn * Drop all currently-held per-VMA locks.
15590717566SJann Horn * This is called from the mmap_lock implementation directly before releasing
15690717566SJann Horn * a write-locked mmap_lock (or downgrading it to read-locked).
15790717566SJann Horn * This should normally NOT be called manually from other places.
15890717566SJann Horn * If you want to call this manually anyway, keep in mind that this will release
15990717566SJann Horn * *all* VMA write locks, including ones from further up the stack.
16090717566SJann Horn */
static inline void vma_end_write_all(struct mm_struct *mm)
{
	mmap_assert_write_locked(mm);
	/* Close the mm_lock_seq write section opened when the lock was taken. */
	mm_lock_seqcount_end(mm);
}
1669740ca4eSMichel Lespinasse
/*
 * Release a write-locked mmap_lock. Per-VMA write locks are dropped
 * (vma_end_write_all) before the rwsem itself is released.
 */
static inline void mmap_write_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, true);
	vma_end_write_all(mm);
	up_write(&mm->mmap_lock);
}
1739740ca4eSMichel Lespinasse
/*
 * Downgrade a write-locked mmap_lock to read-locked without dropping it.
 * Traced as acquiring the read side; per-VMA write locks must be released
 * (vma_end_write_all) before the rwsem downgrade.
 */
static inline void mmap_write_downgrade(struct mm_struct *mm)
{
	__mmap_lock_trace_acquire_returned(mm, false, true);
	vma_end_write_all(mm);
	downgrade_write(&mm->mmap_lock);
}
1809740ca4eSMichel Lespinasse
/* Take mmap_lock for read, with tracepoints bracketing the acquisition. */
static inline void mmap_read_lock(struct mm_struct *mm)
{
	__mmap_lock_trace_start_locking(mm, false);
	down_read(&mm->mmap_lock);
	__mmap_lock_trace_acquire_returned(mm, false, true);
}
1879740ca4eSMichel Lespinasse
mmap_read_lock_killable(struct mm_struct * mm)1889740ca4eSMichel Lespinasse static inline int mmap_read_lock_killable(struct mm_struct *mm)
1899740ca4eSMichel Lespinasse {
1902b5067a8SAxel Rasmussen int ret;
1912b5067a8SAxel Rasmussen
1922b5067a8SAxel Rasmussen __mmap_lock_trace_start_locking(mm, false);
1932b5067a8SAxel Rasmussen ret = down_read_killable(&mm->mmap_lock);
1942b5067a8SAxel Rasmussen __mmap_lock_trace_acquire_returned(mm, false, ret == 0);
1952b5067a8SAxel Rasmussen return ret;
1969740ca4eSMichel Lespinasse }
1979740ca4eSMichel Lespinasse
mmap_read_trylock(struct mm_struct * mm)1989740ca4eSMichel Lespinasse static inline bool mmap_read_trylock(struct mm_struct *mm)
1999740ca4eSMichel Lespinasse {
2002b5067a8SAxel Rasmussen bool ret;
2012b5067a8SAxel Rasmussen
2022b5067a8SAxel Rasmussen __mmap_lock_trace_start_locking(mm, false);
2032b5067a8SAxel Rasmussen ret = down_read_trylock(&mm->mmap_lock) != 0;
2042b5067a8SAxel Rasmussen __mmap_lock_trace_acquire_returned(mm, false, ret);
2052b5067a8SAxel Rasmussen return ret;
2069740ca4eSMichel Lespinasse }
2079740ca4eSMichel Lespinasse
/* Release a read-locked mmap_lock. */
static inline void mmap_read_unlock(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read(&mm->mmap_lock);
}
2139740ca4eSMichel Lespinasse
/*
 * Release a read-locked mmap_lock from a context other than the one that
 * acquired it (up_read_non_owner skips rwsem owner/lockdep checks).
 */
static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
{
	__mmap_lock_trace_released(mm, false);
	up_read_non_owner(&mm->mmap_lock);
}
2190cc55a02SMichel Lespinasse
/* Non-zero if other tasks are currently waiting on mmap_lock. */
static inline int mmap_lock_is_contended(struct mm_struct *mm)
{
	return rwsem_is_contended(&mm->mmap_lock);
}
22407e5bfe6SChinwen Chang
2259740ca4eSMichel Lespinasse #endif /* _LINUX_MMAP_LOCK_H */
226