1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
260e64d46SVivek Goyal #ifndef LINUX_CRASH_DUMP_H
360e64d46SVivek Goyal #define LINUX_CRASH_DUMP_H
460e64d46SVivek Goyal
560e64d46SVivek Goyal #include <linux/kexec.h>
660e64d46SVivek Goyal #include <linux/proc_fs.h>
71f536b9eSFabio Estevam #include <linux/elf.h>
865fddcfcSMike Rapoport #include <linux/pgtable.h>
92724273eSRahul Lakkireddy #include <uapi/linux/vmcore.h>
1060e64d46SVivek Goyal
1133709413SGeert Uytterhoeven /* For IS_ENABLED(CONFIG_CRASH_DUMP) */
12666bfddbSVivek Goyal #define ELFCORE_ADDR_MAX (-1ULL)
1385a0ee34SSimon Horman #define ELFCORE_ADDR_ERR (-2ULL)
1436ac2617SIngo Molnar
152030eae5SVivek Goyal extern unsigned long long elfcorehdr_addr;
16d3bf3795SMichael Holzheu extern unsigned long long elfcorehdr_size;
1736ac2617SIngo Molnar
1833709413SGeert Uytterhoeven #ifdef CONFIG_CRASH_DUMP
195ab03ac5SBjorn Helgaas extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
205ab03ac5SBjorn Helgaas extern void elfcorehdr_free(unsigned long long addr);
215ab03ac5SBjorn Helgaas extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
225ab03ac5SBjorn Helgaas extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
23*7ad4d1f6SDavid Hildenbrand void elfcorehdr_fill_device_ram_ptload_elf64(Elf64_Phdr *phdr,
24*7ad4d1f6SDavid Hildenbrand unsigned long long paddr, unsigned long long size);
255ab03ac5SBjorn Helgaas extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
269cb21813SMichael Holzheu unsigned long from, unsigned long pfn,
279cb21813SMichael Holzheu unsigned long size, pgprot_t prot);
28be8a8d06SMichael Holzheu
295d8de293SMatthew Wilcox (Oracle) ssize_t copy_oldmem_page(struct iov_iter *i, unsigned long pfn, size_t csize,
305d8de293SMatthew Wilcox (Oracle) unsigned long offset);
315d8de293SMatthew Wilcox (Oracle) ssize_t copy_oldmem_page_encrypted(struct iov_iter *iter, unsigned long pfn,
325d8de293SMatthew Wilcox (Oracle) size_t csize, unsigned long offset);
33992b649aSLianbo Jiang
3482e0703bSRashika Kheria void vmcore_cleanup(void);
35666bfddbSVivek Goyal
3679e03011SIan Campbell /* Architecture code defines this if there are other possible ELF
3779e03011SIan Campbell * machine types, e.g. on bi-arch capable hardware. */
3879e03011SIan Campbell #ifndef vmcore_elf_check_arch_cross
3979e03011SIan Campbell #define vmcore_elf_check_arch_cross(x) 0
4079e03011SIan Campbell #endif
4179e03011SIan Campbell
429833c394SMika Westerberg /*
439833c394SMika Westerberg * Architecture code can redefine this if there are any special checks
44e55d5312SDaniel Wagner * needed for 32-bit ELF or 64-bit ELF vmcores. In case of 32-bit
45e55d5312SDaniel Wagner * only architecture, vmcore_elf64_check_arch can be set to zero.
469833c394SMika Westerberg */
47e55d5312SDaniel Wagner #ifndef vmcore_elf32_check_arch
48e55d5312SDaniel Wagner #define vmcore_elf32_check_arch(x) elf_check_arch(x)
49e55d5312SDaniel Wagner #endif
50e55d5312SDaniel Wagner
519833c394SMika Westerberg #ifndef vmcore_elf64_check_arch
529833c394SMika Westerberg #define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
539833c394SMika Westerberg #endif
5479e03011SIan Campbell
5586328b33SHari Bathini #ifndef is_kdump_kernel
5657cac4d1SVivek Goyal /*
5757cac4d1SVivek Goyal * is_kdump_kernel() checks whether this kernel is booting after a panic of
5857cac4d1SVivek Goyal * previous kernel or not. This is determined by checking if previous kernel
5957cac4d1SVivek Goyal * has passed the elf core header address on command line.
6057cac4d1SVivek Goyal *
6157cac4d1SVivek Goyal * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
622650cb0cSYaowei Bai * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
632650cb0cSYaowei Bai * of previous kernel.
6457cac4d1SVivek Goyal */
6557cac4d1SVivek Goyal
is_kdump_kernel(void)662650cb0cSYaowei Bai static inline bool is_kdump_kernel(void)
6795b68decSChandru {
682650cb0cSYaowei Bai return elfcorehdr_addr != ELFCORE_ADDR_MAX;
6995b68decSChandru }
7086328b33SHari Bathini #endif
7185a0ee34SSimon Horman
7285a0ee34SSimon Horman /* is_vmcore_usable() checks if the kernel is booting after a panic and
7385a0ee34SSimon Horman * the vmcore region is usable.
7485a0ee34SSimon Horman *
7585a0ee34SSimon Horman * This makes use of the fact that due to alignment -2ULL is not
7685a0ee34SSimon Horman * a valid pointer, much in the vain of IS_ERR(), except
7785a0ee34SSimon Horman * dealing directly with an unsigned long long rather than a pointer.
7885a0ee34SSimon Horman */
7985a0ee34SSimon Horman
is_vmcore_usable(void)8085a0ee34SSimon Horman static inline int is_vmcore_usable(void)
8185a0ee34SSimon Horman {
8286328b33SHari Bathini return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
8386328b33SHari Bathini elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
8485a0ee34SSimon Horman }
8585a0ee34SSimon Horman
8685a0ee34SSimon Horman /* vmcore_unusable() marks the vmcore as unusable,
8785a0ee34SSimon Horman * without disturbing the logic of is_kdump_kernel()
8885a0ee34SSimon Horman */
8985a0ee34SSimon Horman
vmcore_unusable(void)9085a0ee34SSimon Horman static inline void vmcore_unusable(void)
9185a0ee34SSimon Horman {
9285a0ee34SSimon Horman elfcorehdr_addr = ELFCORE_ADDR_ERR;
9385a0ee34SSimon Horman }
94997c136fSOlaf Hering
/**
 * struct vmcore_cb - driver callbacks for /proc/vmcore handling
 * @pfn_is_ram: check whether a PFN really is RAM and should be accessed when
 *              reading the vmcore. Will return "true" if it is RAM or if the
 *              callback cannot tell. If any callback returns "false", it's not
 *              RAM and the page must not be accessed; zeroes should be
 *              indicated in the vmcore instead. For example, a ballooned page
 *              contains no data and reading from such a page will cause high
 *              load in the hypervisor.
 * @get_device_ram: query RAM ranges that can only be detected by device
 *              drivers, such as the virtio-mem driver, so they can be included
 *              in the crash dump on architectures that allocate the elfcore
 *              hdr in the dump ("2nd") kernel. Indicated RAM ranges may
 *              contain holes to reduce the total number of ranges; such holes
 *              can be detected using the pfn_is_ram callback just like for
 *              other RAM. Optional; may be left NULL. Returns 0 on success,
 *              negative errno otherwise.
 * @next: list head to manage registered callbacks internally; initialized by
 *              register_vmcore_cb().
 *
 * vmcore callbacks allow drivers managing physical memory ranges to
 * coordinate with vmcore handling code, for example, to prevent accessing
 * physical memory ranges that should not be accessed when reading the vmcore,
 * although included in the vmcore header as memory ranges to dump.
 */
struct vmcore_cb {
	bool (*pfn_is_ram)(struct vmcore_cb *cb, unsigned long pfn);
	int (*get_device_ram)(struct vmcore_cb *cb, struct list_head *list);
	struct list_head next;
};
123cc5f2704SDavid Hildenbrand extern void register_vmcore_cb(struct vmcore_cb *cb);
124cc5f2704SDavid Hildenbrand extern void unregister_vmcore_cb(struct vmcore_cb *cb);
125997c136fSOlaf Hering
/* A single physical memory range exposed through /proc/vmcore. */
struct vmcore_range {
	struct list_head list;		/* membership in a list of ranges */
	unsigned long long paddr;	/* start physical address */
	unsigned long long size;	/* length of the range in bytes */
	/* NOTE(review): presumably the byte offset of this range within the
	 * /proc/vmcore file, filled in by vmcore code — confirm at callers. */
	loff_t offset;
};
132819403c8SDavid Hildenbrand
133e017b1f4SDavid Hildenbrand /* Allocate a vmcore range and add it to the list. */
vmcore_alloc_add_range(struct list_head * list,unsigned long long paddr,unsigned long long size)134e017b1f4SDavid Hildenbrand static inline int vmcore_alloc_add_range(struct list_head *list,
135e017b1f4SDavid Hildenbrand unsigned long long paddr, unsigned long long size)
136e017b1f4SDavid Hildenbrand {
137e017b1f4SDavid Hildenbrand struct vmcore_range *m = kzalloc(sizeof(*m), GFP_KERNEL);
138e017b1f4SDavid Hildenbrand
139e017b1f4SDavid Hildenbrand if (!m)
140e017b1f4SDavid Hildenbrand return -ENOMEM;
141e017b1f4SDavid Hildenbrand m->paddr = paddr;
142e017b1f4SDavid Hildenbrand m->size = size;
143e017b1f4SDavid Hildenbrand list_add_tail(&m->list, list);
144e017b1f4SDavid Hildenbrand return 0;
145e017b1f4SDavid Hildenbrand }
146e017b1f4SDavid Hildenbrand
147e29e9acaSDavid Hildenbrand /* Free a list of vmcore ranges. */
vmcore_free_ranges(struct list_head * list)148e29e9acaSDavid Hildenbrand static inline void vmcore_free_ranges(struct list_head *list)
149e29e9acaSDavid Hildenbrand {
150e29e9acaSDavid Hildenbrand struct vmcore_range *m, *tmp;
151e29e9acaSDavid Hildenbrand
152e29e9acaSDavid Hildenbrand list_for_each_entry_safe(m, tmp, list, list) {
153e29e9acaSDavid Hildenbrand list_del(&m->list);
154e29e9acaSDavid Hildenbrand kfree(m);
155e29e9acaSDavid Hildenbrand }
156e29e9acaSDavid Hildenbrand }
157e29e9acaSDavid Hildenbrand
15895b68decSChandru #else /* !CONFIG_CRASH_DUMP */
is_kdump_kernel(void)1595605f419SChangcheng Deng static inline bool is_kdump_kernel(void) { return false; }
16060e64d46SVivek Goyal #endif /* CONFIG_CRASH_DUMP */
16195b68decSChandru
/* Device Dump information to be filled by drivers */
struct vmcoredd_data {
	char dump_name[VMCOREDD_MAX_NAME_BYTES]; /* Unique name of the dump */
	unsigned int size;                       /* Size of the dump */
	/* Driver's registered callback to be invoked to collect dump.
	 * NOTE(review): presumably @buf is at least @size bytes and the
	 * callback returns 0 on success — confirm against the vmcore
	 * device-dump implementation. */
	int (*vmcoredd_callback)(struct vmcoredd_data *data, void *buf);
};
1692724273eSRahul Lakkireddy
1702724273eSRahul Lakkireddy #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
1712724273eSRahul Lakkireddy int vmcore_add_device_dump(struct vmcoredd_data *data);
1722724273eSRahul Lakkireddy #else
/* Device dumps cannot be collected without CONFIG_PROC_VMCORE_DEVICE_DUMP. */
static inline int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	return -EOPNOTSUPP;
}
1772724273eSRahul Lakkireddy #endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
178ae7eb82aSThiago Jung Bauermann
179ae7eb82aSThiago Jung Bauermann #ifdef CONFIG_PROC_VMCORE
180e0690479SMatthew Wilcox (Oracle) ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
181e0690479SMatthew Wilcox (Oracle) u64 *ppos, bool encrypted);
182ae7eb82aSThiago Jung Bauermann #else
read_from_oldmem(struct iov_iter * iter,size_t count,u64 * ppos,bool encrypted)183e0690479SMatthew Wilcox (Oracle) static inline ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
184e0690479SMatthew Wilcox (Oracle) u64 *ppos, bool encrypted)
185ae7eb82aSThiago Jung Bauermann {
186ae7eb82aSThiago Jung Bauermann return -EOPNOTSUPP;
187ae7eb82aSThiago Jung Bauermann }
188ae7eb82aSThiago Jung Bauermann #endif /* CONFIG_PROC_VMCORE */
189ae7eb82aSThiago Jung Bauermann
#endif /* LINUX_CRASH_DUMP_H */
191