xref: /linux-6.15/scripts/gdb/linux/mm.py (revision 04a40bae)
1# SPDX-License-Identifier: GPL-2.0
2#
3# Copyright (c) 2023 MediaTek Inc.
4#
5# Authors:
6#  Kuan-Ying Lee <[email protected]>
7#
8
9import gdb
10import math
11from linux import utils, constants
12
def DIV_ROUND_UP(n, d):
    """Integer ceiling division, mirroring the kernel's DIV_ROUND_UP() macro."""
    numerator = n + d - 1
    return numerator // d
15
16def test_bit(nr, addr):
17    if addr.dereference() & (0x1 << nr):
18        return True
19    else:
20        return False
21
class page_ops():
    """Dispatcher that picks the per-architecture mm helper implementation.

    Only CONFIG_SPARSEMEM_VMEMMAP kernels on aarch64 targets are handled;
    anything else raises gdb.GdbError.
    """
    ops = None

    def __init__(self):
        # Guard clauses: bail out early on unsupported configurations.
        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
        if not (constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64')):
            raise gdb.GdbError('Only support aarch64 now')
        self.ops = aarch64_page_ops()
31
class aarch64_page_ops():
    """arm64 flavour of the mm helpers: pfn/page/phys/virt translations.

    All constants are computed once at construction time from kernel
    CONFIG_* values (via linux.constants) and from symbols/registers read
    out of the debuggee, mirroring the arm64 memory-layout headers.
    """
    def __init__(self):
        # 2 MiB sparsemem subsections.
        self.SUBSECTION_SHIFT = 21
        # NOTE(review): 'SEBSECTION_SIZE' looks like a typo of
        # SUBSECTION_SIZE; it is never read here and is re-assigned below.
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.MODULES_VSIZE = 2 * 1024 * 1024 * 1024  # module area size, 2 GiB

        # Sparsemem section size depends on the base page size.
        if constants.LX_CONFIG_ARM64_64K_PAGES:
            self.SECTION_SIZE_BITS = 29
        else:
            self.SECTION_SIZE_BITS = 27
        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        # Masks are truncated to 64 bits because Python ints are unbounded.
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
        if self.VA_BITS > 48:
            self.VA_BITS_MIN = 48
            # With >48-bit VA configs the effective VA width is chosen at
            # runtime; derive it from TCR_EL1.T1SZ (bits [21:16]).  The
            # second whitespace-separated token of the "info registers"
            # output is the hex register value.
            tcr_el1 = gdb.execute("info registers $TCR_EL1", to_string=True)
            tcr_el1 = int(tcr_el1.split()[1], 16)
            self.vabits_actual = 64 - ((tcr_el1 >> 16) & 63)
        else:
            self.VA_BITS_MIN = self.VA_BITS
            self.vabits_actual = self.VA_BITS
        # Offset between the kernel image's virtual and physical addresses.
        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS

        # CONFIG_ARCH_FORCE_MAX_ORDER may be absent; fall back to the
        # default buddy-allocator maximum order of 10.
        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
        else:
            self.MAX_ORDER = 10

        self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER)
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        # With SPARSEMEM_EXTREME each root is a page full of mem_section
        # structs; otherwise each root holds a single section.
        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
        # NOTE(review): duplicate of the two assignments at the top of
        # __init__ (same values, including the SEBSECTION_SIZE typo).
        self.SUBSECTION_SHIFT = 21
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT

        # section_mem_map flag bits, read from the debuggee's enum values.
        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))

        self.struct_page_size = utils.get_page_type().sizeof
        self.STRUCT_PAGE_MAX_SHIFT = (int)(math.log(self.struct_page_size, 2))

        # Virtual memory layout: linear map, module area, vmemmap, vmalloc.
        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE

        self.VMEMMAP_RANGE = self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET
        self.VMEMMAP_SIZE = (self.VMEMMAP_RANGE >> self.PAGE_SHIFT) * self.struct_page_size
        self.VMEMMAP_END = (-(1 * 1024 * 1024 * 1024)) & 0xffffffffffffffff
        self.VMEMMAP_START = self.VMEMMAP_END - self.VMEMMAP_SIZE

        self.VMALLOC_START = self.MODULES_END
        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024

        # Start of physical memory, read from the kernel's memstart_addr.
        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
        self.PHYS_OFFSET = self.memstart_addr
        # vmemmap is biased so that vmemmap[pfn] is pfn's struct page.
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)

        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        # KASAN shrinks the linear map: PAGE_END is then derived from the
        # shadow region instead of VA_BITS_MIN.
        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
            if constants.LX_CONFIG_KASAN_GENERIC:
                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
                self.KASAN_SHADOW_SCALE_SHIFT = 4
            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

    def SECTION_NR_TO_ROOT(self, sec):
        """Index of the mem_section root holding section number 'sec'."""
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        """Return the struct mem_section value for section number 'nr'."""
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        """Sparsemem section number containing 'pfn'."""
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        """First pfn of sparsemem section number 'sec'."""
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        """struct mem_section covering 'pfn'."""
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        """Public wrapper around __pfn_to_section()."""
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        """Bit index into a section's subsection_map for 'pfn'."""
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        """Check section ms's subsection bitmap for 'pfn'.

        Without SPARSEMEM_VMEMMAP there is no subsection map, so every
        pfn inside a valid section is treated as present.
        """
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        """True if the section exists and has a mem_map attached."""
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        """True if the section's SECTION_IS_EARLY flag is set."""
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

    def pfn_valid(self, pfn):
        """Replica of the kernel's SPARSEMEM pfn_valid()."""
        ms = None
        # NOTE(review): mirrors the C overflow check; with Python's
        # unbounded ints this round-trip always holds, kept for parity.
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def _PAGE_OFFSET(self, va):
        """Start of the linear map for a 'va'-bit address space."""
        return (-(1 << (va))) & 0xffffffffffffffff

    def _PAGE_END(self, va):
        """End (exclusive) of the linear map for a 'va'-bit address space."""
        return (-(1 << (va - 1))) & 0xffffffffffffffff

    def kasan_reset_tag(self, addr):
        """Strip a KASAN pointer tag by forcing the top byte to 0xff."""
        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
            return int(addr) | (0xff << 56)
        else:
            return addr

    def __is_lm_address(self, addr):
        """True if 'addr' (already untagged) lies inside the linear map."""
        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
            return True
        else:
            return False
    def __lm_to_phys(self, addr):
        """Linear-map virtual address -> physical address."""
        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET

    def __kimg_to_phys(self, addr):
        """Kernel-image virtual address -> physical address."""
        return addr - self.kimage_voffset

    def __virt_to_phys_nodebug(self, va):
        """virt -> phys without the DEBUG_VIRTUAL linear-map check."""
        untagged_va = self.kasan_reset_tag(va)
        if self.__is_lm_address(untagged_va):
            return self.__lm_to_phys(untagged_va)
        else:
            return self.__kimg_to_phys(untagged_va)

    def __virt_to_phys(self, va):
        """virt -> phys; with DEBUG_VIRTUAL, reject non-linear addresses."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if not self.__is_lm_address(self.kasan_reset_tag(va)):
                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
        return self.__virt_to_phys_nodebug(va)

    def virt_to_phys(self, va):
        """Public wrapper around __virt_to_phys()."""
        return self.__virt_to_phys(va)

    def PFN_PHYS(self, pfn):
        """Page frame number -> physical address."""
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        """Physical address -> page frame number."""
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        """Physical address -> linear-map virtual address."""
        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET

    def __phys_to_pfn(self, pa):
        # Alias matching the kernel helper name.
        return self.PHYS_PFN(pa)

    def __pfn_to_phys(self, pfn):
        # Alias matching the kernel helper name.
        return self.PFN_PHYS(pfn)

    def __pa_symbol_nodebug(self, x):
        # Symbol addresses live in the kernel image mapping.
        return self.__kimg_to_phys(x)

    def __phys_addr_symbol(self, x):
        """Symbol address -> phys; range-checked under DEBUG_VIRTUAL."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if x < self.KERNEL_START or x > self.KERNEL_END:
                raise gdb.GdbError("0x%x exceed kernel range" % x)
        return self.__pa_symbol_nodebug(x)

    def __pa_symbol(self, x):
        return self.__phys_addr_symbol(x)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        """pfn -> linear-map kernel virtual address."""
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        """Kernel virtual address -> pfn."""
        return self.__phys_to_pfn(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        """Kernel-image symbol address -> pfn."""
        return self.__phys_to_pfn(self.__pa_symbol(x))

    def page_to_pfn(self, page):
        """struct page pointer -> pfn (index into the vmemmap array)."""
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))

    def page_to_phys(self, page):
        """struct page pointer -> physical address."""
        return self.__pfn_to_phys(self.page_to_pfn(page))

    def pfn_to_page(self, pfn):
        """pfn -> struct page pointer inside the vmemmap."""
        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())

    def page_to_virt(self, page):
        """struct page pointer -> linear-map virtual address."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.__va(self.page_to_phys(page))
        else:
            # Fast path: the byte offset into the vmemmap gives the pfn.
            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)

    def virt_to_page(self, va):
        """Kernel virtual address -> struct page pointer."""
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.pfn_to_page(self.virt_to_pfn(va))
        else:
            # Fast path: linear-map offset indexes the vmemmap directly.
            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
            return gdb.Value(addr).cast(utils.get_page_type().pointer())

    def page_address(self, page):
        """Kernel-style alias for page_to_virt()."""
        return self.page_to_virt(page)

    def folio_address(self, folio):
        """Virtual address of a folio's first page."""
        return self.page_address(folio['page'].address)
286
class LxPFN2Page(gdb.Command):
    """PFN to struct page"""

    def __init__(self):
        super().__init__("lx-pfn_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a decimal page frame number.
        pfn = int(gdb.string_to_argv(arg)[0])
        page = page_ops().ops.pfn_to_page(pfn)
        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

LxPFN2Page()
300
class LxPage2PFN(gdb.Command):
    """struct page to PFN"""

    def __init__(self):
        super().__init__("lx-page_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a struct page address in hex.
        raw = int(gdb.string_to_argv(arg)[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        pfn = page_ops().ops.page_to_pfn(page)
        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))

LxPage2PFN()
315
class LxPageAddress(gdb.Command):
    """struct page to linear mapping address"""

    def __init__(self):
        super().__init__("lx-page_address", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a struct page address in hex.
        raw = int(gdb.string_to_argv(arg)[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        addr = page_ops().ops.page_address(page)
        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))

LxPageAddress()
330
class LxPage2Phys(gdb.Command):
    """struct page to physical address"""

    def __init__(self):
        super().__init__("lx-page_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a struct page address in hex.
        raw = int(gdb.string_to_argv(arg)[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        phys_addr = page_ops().ops.page_to_phys(page)
        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))

LxPage2Phys()
345
class LxVirt2Phys(gdb.Command):
    """virtual address to physical address"""

    def __init__(self):
        super().__init__("lx-virt_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a kernel virtual address in hex.
        va = int(gdb.string_to_argv(arg)[0], 16)
        pa = page_ops().ops.virt_to_phys(va)
        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (va, pa))

LxVirt2Phys()
359
class LxVirt2Page(gdb.Command):
    """virtual address to struct page"""

    def __init__(self):
        super().__init__("lx-virt_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a kernel virtual address in hex.
        va = int(gdb.string_to_argv(arg)[0], 16)
        page = page_ops().ops.virt_to_page(va)
        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (va, page))

LxVirt2Page()
373
class LxSym2PFN(gdb.Command):
    """symbol address to PFN"""

    def __init__(self):
        super().__init__("lx-sym_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a kernel-image symbol address in hex.
        sym_addr = int(gdb.string_to_argv(arg)[0], 16)
        pfn = page_ops().ops.sym_to_pfn(sym_addr)
        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym_addr, pfn))

LxSym2PFN()
387
class LxPFN2Kaddr(gdb.Command):
    """PFN to kernel address"""

    def __init__(self):
        super().__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # Single argument: a decimal page frame number.
        pfn = int(gdb.string_to_argv(arg)[0])
        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))

LxPFN2Kaddr()
401