Merge tag 'asm-generic-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd...
[linux-2.6-block.git] / scripts / gdb / linux / mm.py
CommitLineData
eb985b5d 1# SPDX-License-Identifier: GPL-2.0
e36903b0 2#
eb985b5d 3# Copyright (c) 2023 MediaTek Inc.
e36903b0
DB
4#
5# Authors:
eb985b5d 6# Kuan-Ying Lee <Kuan-Ying.Lee@mediatek.com>
e36903b0
DB
7#
8
9import gdb
eb985b5d
KYL
10import math
11from linux import utils, constants
12
def DIV_ROUND_UP(n, d):
    """Integer ceiling division: smallest integer >= n/d (callers pass d > 0)."""
    return (n + d - 1) // d
e36903b0 15
eb985b5d
KYL
16def test_bit(nr, addr):
17 if addr.dereference() & (0x1 << nr):
18 return True
19 else:
20 return False
e36903b0 21
eb985b5d
KYL
class page_ops():
    """Dispatcher that selects the per-architecture page-ops backend.

    Only aarch64 kernels built with CONFIG_SPARSEMEM_VMEMMAP are
    supported; any other configuration raises gdb.GdbError.
    """
    ops = None

    def __init__(self):
        # Guard clauses: bail out early on unsupported configurations.
        if not constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            raise gdb.GdbError('Only support CONFIG_SPARSEMEM_VMEMMAP now')
        if not (constants.LX_CONFIG_ARM64 and utils.is_target_arch('aarch64')):
            raise gdb.GdbError('Only support aarch64 now')
        self.ops = aarch64_page_ops()
e36903b0 31
eb985b5d
KYL
class aarch64_page_ops():
    """arm64 (aarch64) memory-layout helpers for the kernel gdb scripts.

    Re-derives, in Python, the constants and conversion macros that the
    kernel defines in C (memory layout, sparsemem section geometry,
    pfn/page/virt/phys translation), using the target's Kconfig values
    from linux.constants plus a few live kernel symbols read via gdb
    (vabits_actual, kimage_voffset, memstart_addr, _text, _end).
    Instantiated only via page_ops(), which enforces
    CONFIG_SPARSEMEM_VMEMMAP and the aarch64 target.
    """
    def __init__(self):
        # Subsection geometry (1 << 21 = 2MB per subsection).
        # NOTE(review): 'SEBSECTION_SIZE' is a pre-existing typo for
        # SUBSECTION_SIZE, kept as-is; both assignments are repeated
        # further down in this constructor.
        self.SUBSECTION_SHIFT = 21
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        # Size of the module area in the virtual layout (128MB).
        self.MODULES_VSIZE = 128 * 1024 * 1024

        # Sparsemem section size depends on the base page size:
        # 1 << 29 (512MB) for 64K pages, 1 << 27 (128MB) otherwise.
        if constants.LX_CONFIG_ARM64_64K_PAGES:
            self.SECTION_SIZE_BITS = 29
        else:
            self.SECTION_SIZE_BITS = 27
        self.MAX_PHYSMEM_BITS = constants.LX_CONFIG_ARM64_VA_BITS

        self.PAGE_SHIFT = constants.LX_CONFIG_PAGE_SHIFT
        self.PAGE_SIZE = 1 << self.PAGE_SHIFT
        # Masks are truncated to 64 bits because Python ints are unbounded.
        self.PAGE_MASK = (~(self.PAGE_SIZE - 1)) & ((1 << 64) - 1)

        self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
        if self.VA_BITS > 48:
            # 52-bit VA configs: the effective VA width is decided at
            # boot, so read the live 'vabits_actual' symbol.
            self.VA_BITS_MIN = 48
            self.vabits_actual = gdb.parse_and_eval('vabits_actual')
        else:
            self.VA_BITS_MIN = self.VA_BITS
            self.vabits_actual = self.VA_BITS
        # Offset between kernel-image virtual and physical addresses.
        self.kimage_voffset = gdb.parse_and_eval('kimage_voffset') & ((1 << 64) - 1)

        self.SECTIONS_SHIFT = self.MAX_PHYSMEM_BITS - self.SECTION_SIZE_BITS

        # CONFIG_ARCH_FORCE_MAX_ORDER may be undefined for this build;
        # fall back to the default of 11 in that case.
        if str(constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER).isdigit():
            self.MAX_ORDER = constants.LX_CONFIG_ARCH_FORCE_MAX_ORDER
        else:
            self.MAX_ORDER = 11

        self.MAX_ORDER_NR_PAGES = 1 << (self.MAX_ORDER - 1)
        self.PFN_SECTION_SHIFT = self.SECTION_SIZE_BITS - self.PAGE_SHIFT
        self.NR_MEM_SECTIONS = 1 << self.SECTIONS_SHIFT
        self.PAGES_PER_SECTION = 1 << self.PFN_SECTION_SHIFT
        self.PAGE_SECTION_MASK = (~(self.PAGES_PER_SECTION - 1)) & ((1 << 64) - 1)

        # With SPARSEMEM_EXTREME each root is a page-sized array of
        # struct mem_section; otherwise the table is flat (one per root).
        if constants.LX_CONFIG_SPARSEMEM_EXTREME:
            self.SECTIONS_PER_ROOT = self.PAGE_SIZE // gdb.lookup_type("struct mem_section").sizeof
        else:
            self.SECTIONS_PER_ROOT = 1

        self.NR_SECTION_ROOTS = DIV_ROUND_UP(self.NR_MEM_SECTIONS, self.SECTIONS_PER_ROOT)
        self.SECTION_ROOT_MASK = self.SECTIONS_PER_ROOT - 1
        # Duplicates of the assignments at the top of __init__ (kept as-is).
        self.SUBSECTION_SHIFT = 21
        self.SEBSECTION_SIZE = 1 << self.SUBSECTION_SHIFT
        self.PFN_SUBSECTION_SHIFT = self.SUBSECTION_SHIFT - self.PAGE_SHIFT
        self.PAGES_PER_SUBSECTION = 1 << self.PFN_SUBSECTION_SHIFT

        # section_mem_map flag bits, read from the running kernel's enum
        # values so the script stays in sync with the kernel binary.
        self.SECTION_HAS_MEM_MAP = 1 << int(gdb.parse_and_eval('SECTION_HAS_MEM_MAP_BIT'))
        self.SECTION_IS_EARLY = 1 << int(gdb.parse_and_eval('SECTION_IS_EARLY_BIT'))

        # log2(sizeof(struct page)); sizeof(struct page) is assumed to be
        # a power of two here — TODO confirm for this kernel config.
        self.struct_page_size = utils.get_page_type().sizeof
        self.STRUCT_PAGE_MAX_SHIFT = (int)(math.log(self.struct_page_size, 2))

        # Kernel virtual memory layout: linear map, modules, vmemmap,
        # vmalloc ranges.
        self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
        self.MODULES_VADDR = self._PAGE_END(self.VA_BITS_MIN)
        self.MODULES_END = self.MODULES_VADDR + self.MODULES_VSIZE

        self.VMEMMAP_SHIFT = (self.PAGE_SHIFT - self.STRUCT_PAGE_MAX_SHIFT)
        self.VMEMMAP_SIZE = ((self._PAGE_END(self.VA_BITS_MIN) - self.PAGE_OFFSET) >> self.VMEMMAP_SHIFT)
        self.VMEMMAP_START = (-(1 << (self.VA_BITS - self.VMEMMAP_SHIFT))) & 0xffffffffffffffff
        self.VMEMMAP_END = self.VMEMMAP_START + self.VMEMMAP_SIZE

        self.VMALLOC_START = self.MODULES_END
        # vmalloc ends 256MB below the vmemmap region.
        self.VMALLOC_END = self.VMEMMAP_START - 256 * 1024 * 1024

        # Physical memory base and the virtual struct-page array:
        # vmemmap is biased so that vmemmap[pfn] addresses the right page.
        self.memstart_addr = gdb.parse_and_eval("memstart_addr")
        self.PHYS_OFFSET = self.memstart_addr
        self.vmemmap = gdb.Value(self.VMEMMAP_START).cast(utils.get_page_type().pointer()) - (self.memstart_addr >> self.PAGE_SHIFT)

        # Kernel image bounds, used by __phys_addr_symbol() range checks.
        self.KERNEL_START = gdb.parse_and_eval("_text")
        self.KERNEL_END = gdb.parse_and_eval("_end")

        # With generic/SW-tags KASAN the linear map is shrunk to make
        # room for the shadow region; PAGE_END is derived from it.
        if constants.LX_CONFIG_KASAN_GENERIC or constants.LX_CONFIG_KASAN_SW_TAGS:
            if constants.LX_CONFIG_KASAN_GENERIC:
                self.KASAN_SHADOW_SCALE_SHIFT = 3
            else:
                self.KASAN_SHADOW_SCALE_SHIFT = 4
            self.KASAN_SHADOW_OFFSET = constants.LX_CONFIG_KASAN_SHADOW_OFFSET
            self.KASAN_SHADOW_END = (1 << (64 - self.KASAN_SHADOW_SCALE_SHIFT)) + self.KASAN_SHADOW_OFFSET
            self.PAGE_END = self.KASAN_SHADOW_END - (1 << (self.vabits_actual - self.KASAN_SHADOW_SCALE_SHIFT))
        else:
            self.PAGE_END = self._PAGE_END(self.VA_BITS_MIN)

        if constants.LX_CONFIG_NUMA and constants.LX_CONFIG_NODES_SHIFT:
            self.NODE_SHIFT = constants.LX_CONFIG_NODES_SHIFT
        else:
            self.NODE_SHIFT = 0

        self.MAX_NUMNODES = 1 << self.NODE_SHIFT

    def SECTION_NR_TO_ROOT(self, sec):
        # Root-table index that holds section number `sec`.
        return sec // self.SECTIONS_PER_ROOT

    def __nr_to_section(self, nr):
        # Look up struct mem_section `nr` in the two-level
        # mem_section[root][offset] table of the running kernel.
        root = self.SECTION_NR_TO_ROOT(nr)
        mem_section = gdb.parse_and_eval("mem_section")
        return mem_section[root][nr & self.SECTION_ROOT_MASK]

    def pfn_to_section_nr(self, pfn):
        # Section number containing `pfn`.
        return pfn >> self.PFN_SECTION_SHIFT

    def section_nr_to_pfn(self, sec):
        # First pfn of section `sec`.
        return sec << self.PFN_SECTION_SHIFT

    def __pfn_to_section(self, pfn):
        return self.__nr_to_section(self.pfn_to_section_nr(pfn))

    def pfn_to_section(self, pfn):
        # Public wrapper around the name-mangled helper.
        return self.__pfn_to_section(pfn)

    def subsection_map_index(self, pfn):
        # Index of `pfn`'s subsection within its section.
        return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION

    def pfn_section_valid(self, ms, pfn):
        # With SPARSEMEM_VMEMMAP, presence is tracked per subsection in
        # ms->usage->subsection_map; otherwise any pfn in a valid
        # section counts as present.
        if constants.LX_CONFIG_SPARSEMEM_VMEMMAP:
            idx = self.subsection_map_index(pfn)
            return test_bit(idx, ms['usage']['subsection_map'])
        else:
            return True

    def valid_section(self, mem_section):
        # A section is valid when it has a mem_map attached.
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_HAS_MEM_MAP):
            return True
        return False

    def early_section(self, mem_section):
        # True for sections added during early boot (SECTION_IS_EARLY).
        if mem_section != None and (mem_section['section_mem_map'] & self.SECTION_IS_EARLY):
            return True
        return False

    def pfn_valid(self, pfn):
        """Python port of the C pfn_valid() for sparsemem kernels."""
        ms = None
        # Reject pfns whose physical address would not round-trip.
        if self.PHYS_PFN(self.PFN_PHYS(pfn)) != pfn:
            return False
        if self.pfn_to_section_nr(pfn) >= self.NR_MEM_SECTIONS:
            return False
        ms = self.__pfn_to_section(pfn)

        if not self.valid_section(ms):
            return False
        # Early sections always have a fully populated memmap; later
        # hot-added memory is checked per subsection.
        return self.early_section(ms) or self.pfn_section_valid(ms, pfn)

    def _PAGE_OFFSET(self, va):
        # Start of the linear map for a `va`-bit kernel address space.
        return (-(1 << (va))) & 0xffffffffffffffff

    def _PAGE_END(self, va):
        # First address past the linear map (upper half of the space).
        return (-(1 << (va - 1))) & 0xffffffffffffffff

    def kasan_reset_tag(self, addr):
        # Force the tag byte (bits 63:56) to 0xff, i.e. untag `addr`,
        # when a tag-based KASAN mode is configured.
        if constants.LX_CONFIG_KASAN_SW_TAGS or constants.LX_CONFIG_KASAN_HW_TAGS:
            return int(addr) | (0xff << 56)
        else:
            return addr

    def __is_lm_address(self, addr):
        # True if `addr` falls inside the linear map
        # [PAGE_OFFSET, PAGE_END); the subtraction form also rejects
        # addresses below PAGE_OFFSET via unsigned-style wrap.
        if (addr - self.PAGE_OFFSET) < (self.PAGE_END - self.PAGE_OFFSET):
            return True
        else:
            return False
    def __lm_to_phys(self, addr):
        # Linear-map virtual address -> physical address.
        return addr - self.PAGE_OFFSET + self.PHYS_OFFSET

    def __kimg_to_phys(self, addr):
        # Kernel-image virtual address -> physical address.
        return addr - self.kimage_voffset

    def __virt_to_phys_nodebug(self, va):
        # Untag, then translate via the linear map if possible,
        # otherwise treat as a kernel-image address.
        untagged_va = self.kasan_reset_tag(va)
        if self.__is_lm_address(untagged_va):
            return self.__lm_to_phys(untagged_va)
        else:
            return self.__kimg_to_phys(untagged_va)

    def __virt_to_phys(self, va):
        # Mirrors CONFIG_DEBUG_VIRTUAL: reject non-linear addresses
        # loudly instead of silently mistranslating them.
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if not self.__is_lm_address(self.kasan_reset_tag(va)):
                raise gdb.GdbError("Warning: virt_to_phys used for non-linear address: 0x%lx\n" % va)
        return self.__virt_to_phys_nodebug(va)

    def virt_to_phys(self, va):
        # Public wrapper around the name-mangled helper.
        return self.__virt_to_phys(va)

    def PFN_PHYS(self, pfn):
        # Page frame number -> physical address.
        return pfn << self.PAGE_SHIFT

    def PHYS_PFN(self, phys):
        # Physical address -> page frame number.
        return phys >> self.PAGE_SHIFT

    def __phys_to_virt(self, pa):
        # Physical address -> linear-map virtual address.
        return (pa - self.PHYS_OFFSET) | self.PAGE_OFFSET

    def __phys_to_pfn(self, pa):
        return self.PHYS_PFN(pa)

    def __pfn_to_phys(self, pfn):
        return self.PFN_PHYS(pfn)

    def __pa_symbol_nodebug(self, x):
        return self.__kimg_to_phys(x)

    def __phys_addr_symbol(self, x):
        # With DEBUG_VIRTUAL, only addresses inside the kernel image
        # [_text, _end] are accepted.
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            if x < self.KERNEL_START or x > self.KERNEL_END:
                raise gdb.GdbError("0x%x exceed kernel range" % x)
        return self.__pa_symbol_nodebug(x)

    def __pa_symbol(self, x):
        return self.__phys_addr_symbol(x)

    def __va(self, pa):
        return self.__phys_to_virt(pa)

    def pfn_to_kaddr(self, pfn):
        # pfn -> linear-map kernel virtual address.
        return self.__va(pfn << self.PAGE_SHIFT)

    def virt_to_pfn(self, va):
        return self.__phys_to_pfn(self.__virt_to_phys(va))

    def sym_to_pfn(self, x):
        # Kernel-image symbol address -> pfn.
        return self.__phys_to_pfn(self.__pa_symbol(x))

    def page_to_pfn(self, page):
        # Pointer arithmetic against the biased vmemmap base yields the pfn.
        return int(page.cast(utils.get_page_type().pointer()) - self.vmemmap.cast(utils.get_page_type().pointer()))

    def page_to_phys(self, page):
        return self.__pfn_to_phys(self.page_to_pfn(page))

    def pfn_to_page(self, pfn):
        # &vmemmap[pfn], as a struct page pointer.
        return (self.vmemmap + pfn).cast(utils.get_page_type().pointer())

    def page_to_virt(self, page):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.__va(self.page_to_phys(page))
        else:
            # Fast path: index of the struct page within the vmemmap
            # array maps directly to a linear-map page.
            __idx = int((page.cast(gdb.lookup_type("unsigned long")) - self.VMEMMAP_START).cast(utils.get_ulong_type())) // self.struct_page_size
            return self.PAGE_OFFSET + (__idx * self.PAGE_SIZE)

    def virt_to_page(self, va):
        if constants.LX_CONFIG_DEBUG_VIRTUAL:
            return self.pfn_to_page(self.virt_to_pfn(va))
        else:
            # Fast path: linear-map page index -> address of its
            # struct page inside the vmemmap array.
            __idx = int(self.kasan_reset_tag(va) - self.PAGE_OFFSET) // self.PAGE_SIZE
            addr = self.VMEMMAP_START + (__idx * self.struct_page_size)
            return gdb.Value(addr).cast(utils.get_page_type().pointer())

    def page_address(self, page):
        return self.page_to_virt(page)

    def folio_address(self, folio):
        # A folio's address is that of its head page.
        return self.page_address(folio['page'].address)
284
class LxPFN2Page(gdb.Command):
    """PFN to struct page"""

    def __init__(self):
        super(LxPFN2Page, self).__init__("lx-pfn_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a decimal page frame number.
        words = gdb.string_to_argv(arg)
        pfn = int(words[0])
        ops = page_ops().ops
        page = ops.pfn_to_page(pfn)
        gdb.write("pfn_to_page(0x%x) = 0x%x\n" % (pfn, page))

LxPFN2Page()
298
class LxPage2PFN(gdb.Command):
    """struct page to PFN"""

    def __init__(self):
        super(LxPage2PFN, self).__init__("lx-page_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a struct page address in hex.
        words = gdb.string_to_argv(arg)
        raw = int(words[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        pfn = page_ops().ops.page_to_pfn(page)
        gdb.write("page_to_pfn(0x%x) = 0x%x\n" % (page, pfn))

LxPage2PFN()
313
class LxPageAddress(gdb.Command):
    """struct page to linear mapping address"""

    def __init__(self):
        super(LxPageAddress, self).__init__("lx-page_address", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a struct page address in hex.
        words = gdb.string_to_argv(arg)
        raw = int(words[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        addr = page_ops().ops.page_address(page)
        gdb.write("page_address(0x%x) = 0x%x\n" % (page, addr))

LxPageAddress()
328
class LxPage2Phys(gdb.Command):
    """struct page to physical address"""

    def __init__(self):
        super(LxPage2Phys, self).__init__("lx-page_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a struct page address in hex.
        words = gdb.string_to_argv(arg)
        raw = int(words[0], 16)
        page = gdb.Value(raw).cast(utils.get_page_type().pointer())
        phys_addr = page_ops().ops.page_to_phys(page)
        gdb.write("page_to_phys(0x%x) = 0x%x\n" % (page, phys_addr))

LxPage2Phys()
343
class LxVirt2Phys(gdb.Command):
    """virtual address to physical address"""

    def __init__(self):
        super(LxVirt2Phys, self).__init__("lx-virt_to_phys", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a kernel virtual address in hex.
        words = gdb.string_to_argv(arg)
        va = int(words[0], 16)
        pa = page_ops().ops.virt_to_phys(va)
        gdb.write("virt_to_phys(0x%x) = 0x%x\n" % (va, pa))

LxVirt2Phys()
357
class LxVirt2Page(gdb.Command):
    """virtual address to struct page"""

    def __init__(self):
        super(LxVirt2Page, self).__init__("lx-virt_to_page", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a kernel virtual address in hex.
        words = gdb.string_to_argv(arg)
        va = int(words[0], 16)
        page = page_ops().ops.virt_to_page(va)
        gdb.write("virt_to_page(0x%x) = 0x%x\n" % (va, page))

LxVirt2Page()
371
class LxSym2PFN(gdb.Command):
    """symbol address to PFN"""

    def __init__(self):
        super(LxSym2PFN, self).__init__("lx-sym_to_pfn", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a kernel symbol address in hex.
        words = gdb.string_to_argv(arg)
        sym = int(words[0], 16)
        pfn = page_ops().ops.sym_to_pfn(sym)
        gdb.write("sym_to_pfn(0x%x) = %d\n" % (sym, pfn))

LxSym2PFN()
385
class LxPFN2Kaddr(gdb.Command):
    """PFN to kernel address"""

    def __init__(self):
        super(LxPFN2Kaddr, self).__init__("lx-pfn_to_kaddr", gdb.COMMAND_USER)

    def invoke(self, arg, from_tty):
        # First argument: a decimal page frame number.
        words = gdb.string_to_argv(arg)
        pfn = int(words[0])
        kaddr = page_ops().ops.pfn_to_kaddr(pfn)
        gdb.write("pfn_to_kaddr(%d) = 0x%x\n" % (pfn, kaddr))

LxPFN2Kaddr()