/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <[email protected]>
 *
 * See Documentation/mm/hmm.rst for reasons and overview of what HMM is.
 */
#ifndef LINUX_HMM_H
#define LINUX_HMM_H

#include <linux/mm.h>

struct mmu_interval_notifier;

/*
 * On output:
 * 0             - The page is faultable and a future call with
 *                 HMM_PFN_REQ_FAULT could succeed.
 * HMM_PFN_VALID - the pfn field points to a valid PFN. This PFN is at
 *                 least readable. If dev_private_owner is !NULL then this
 *                 could point at a DEVICE_PRIVATE page.
 * HMM_PFN_WRITE - the page memory can be written to (requires HMM_PFN_VALID)
 * HMM_PFN_ERROR - accessing the pfn is impossible and the device should
 *                 fail, e.g. poisoned memory, special pages, no vma, etc.
 *
 * On input:
 * 0                 - Return the current state of the page, do not fault it.
 * HMM_PFN_REQ_FAULT - The output must have HMM_PFN_VALID or hmm_range_fault()
 *                     will fail.
 * HMM_PFN_REQ_WRITE - The output must have HMM_PFN_WRITE or hmm_range_fault()
 *                     will fail. Must be combined with HMM_PFN_REQ_FAULT.
 */
enum hmm_pfn_flags {
	/* Output fields and flags */
	HMM_PFN_VALID = 1UL << (BITS_PER_LONG - 1),
	HMM_PFN_WRITE = 1UL << (BITS_PER_LONG - 2),
	HMM_PFN_ERROR = 1UL << (BITS_PER_LONG - 3),
	HMM_PFN_ORDER_SHIFT = (BITS_PER_LONG - 8),

	/* Input flags */
	HMM_PFN_REQ_FAULT = HMM_PFN_VALID,
	HMM_PFN_REQ_WRITE = HMM_PFN_WRITE,

	HMM_PFN_FLAGS = 0xFFUL << HMM_PFN_ORDER_SHIFT,
};
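
/*
 * Example (a sketch following the pattern in Documentation/mm/hmm.rst,
 * not a normative recipe): to fault every page in a range with at least
 * read permission, set
 *
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = 0;
 *
 * To additionally require write permission for a single page, let that
 * page's per-pfn input flag through the mask (index_of_write is a
 * hypothetical caller-computed index):
 *
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
 *	range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;
 */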

/*
 * hmm_pfn_to_page() - return struct page pointed to by a device entry
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline struct page *hmm_pfn_to_page(unsigned long hmm_pfn)
{
	return pfn_to_page(hmm_pfn & ~HMM_PFN_FLAGS);
}
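
/*
 * A minimal sketch of safe use (range and i are assumed caller state):
 *
 *	if (!(range->hmm_pfns[i] & HMM_PFN_VALID))
 *		return -EFAULT;
 *	page = hmm_pfn_to_page(range->hmm_pfns[i]);
 */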

/*
 * hmm_pfn_to_map_order() - return the CPU mapping size order
 *
 * This is optionally useful to optimize processing of the pfn result
 * array. It indicates that the page starts at the order-aligned VA and is
 * 1<<order bytes long. Every pfn within a high order page will have the
 * same pfn flags, both access protections and the map_order. The caller must
 * be careful with edge cases as the start and end VA of the given page may
 * extend past the range used with hmm_range_fault().
 *
 * This must be called under the caller's 'user_lock' after a successful
 * mmu_interval_read_begin(). The caller must have tested for HMM_PFN_VALID
 * already.
 */
static inline unsigned int hmm_pfn_to_map_order(unsigned long hmm_pfn)
{
	return (hmm_pfn >> HMM_PFN_ORDER_SHIFT) & 0x1F;
}
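
/*
 * Example (a sketch; update_device_mapping() is a hypothetical driver
 * helper): step through the result array one CPU mapping at a time,
 * clamping against the range boundaries since a high order page may
 * extend past them:
 *
 *	unsigned long pfn_base = range->start >> PAGE_SHIFT;
 *	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
 *	unsigned long i = 0;
 *
 *	while (i < npages) {
 *		unsigned long n =
 *			1UL << hmm_pfn_to_map_order(range->hmm_pfns[i]);
 *
 *		n -= (pfn_base + i) & (n - 1);	// entries of this page below i
 *		n = min(n, npages - i);		// entries past range->end
 *		update_device_mapping(range, i, n);
 *		i += n;
 *	}
 */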

/*
 * struct hmm_range - track invalidation lock on virtual address range
 *
 * @notifier: a mmu_interval_notifier that includes the start/end
 * @notifier_seq: result of mmu_interval_read_begin()
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @hmm_pfns: array of pfns (big enough for the range)
 * @default_flags: default flags for the range (write, read, ... see hmm doc)
 * @pfn_flags_mask: allows masking pfn flags so that only default_flags matter
 * @dev_private_owner: owner of device private pages
 */
struct hmm_range {
	struct mmu_interval_notifier *notifier;
	unsigned long notifier_seq;
	unsigned long start;
	unsigned long end;
	unsigned long *hmm_pfns;
	unsigned long default_flags;
	unsigned long pfn_flags_mask;
	void *dev_private_owner;
};
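
/*
 * Example initialization (a sketch; interval_sub, start and my_driver
 * are hypothetical caller state, and notifier_seq is filled in later by
 * mmu_interval_read_begin()):
 *
 *	unsigned long pfns[64];
 *	struct hmm_range range = {
 *		.notifier = &interval_sub,
 *		.start = start,
 *		.end = start + (64UL << PAGE_SHIFT),
 *		.hmm_pfns = pfns,
 *		.default_flags = HMM_PFN_REQ_FAULT,
 *		.pfn_flags_mask = 0,
 *		.dev_private_owner = my_driver,
 *	};
 */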

/*
 * Please see Documentation/mm/hmm.rst for how to use the range API; a
 * sketch of the usual fault-and-retry loop follows the timeout definition
 * below.
 */
int hmm_range_fault(struct hmm_range *range);

/*
 * HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
 *
 * When waiting for mmu notifiers we need some kind of timeout, otherwise we
 * could potentially wait forever; 1000ms, i.e. 1s, is already a long time
 * to wait.
 */
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
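
/*
 * Example fault-and-retry loop (a sketch of the pattern described in
 * Documentation/mm/hmm.rst; take_lock()/release_lock() stand in for a
 * driver-specific lock that serializes device page table updates, and
 * range/mm are assumed caller state):
 *
 *	unsigned long timeout =
 *		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 *	int ret;
 *
 * again:
 *	if (time_after(jiffies, timeout))
 *		return -EBUSY;
 *	range.notifier_seq = mmu_interval_read_begin(range.notifier);
 *	mmap_read_lock(mm);
 *	ret = hmm_range_fault(&range);
 *	mmap_read_unlock(mm);
 *	if (ret) {
 *		if (ret == -EBUSY)
 *			goto again;
 *		return ret;
 *	}
 *
 *	take_lock(driver_lock);
 *	if (mmu_interval_read_retry(range.notifier, range.notifier_seq)) {
 *		release_lock(driver_lock);
 *		goto again;
 *	}
 *	// update the device page table from range.hmm_pfns here
 *	release_lock(driver_lock);
 */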

#endif /* LINUX_HMM_H */