// SPDX-License-Identifier: GPL-2.0
/*
 * Kexec image loader
 *
 * Adapted from arch/arm64/kernel/kexec_image.c
 * Copyright (C) 2018 Linaro Limited
 * Author: AKASHI Takahiro <[email protected]>
 */
#define pr_fmt(fmt) "kexec_file(Image): " fmt

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/pe.h>
#include <linux/string.h>
#include <linux/verification.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/image.h>

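/*
 * Build the ELF core (vmcore) headers for a crash kernel: gather all of
 * System RAM from memblock, carve out the crashkernel reservation(s), and
 * have crash_prepare_elf64_headers() emit the headers that the capture
 * kernel later exposes through /proc/vmcore.
 */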
static int prepare_elf_headers(void **addr, unsigned long *sz)
{
	struct crash_mem *cmem;
	unsigned int nr_ranges;
	int ret;
	u64 i;
	phys_addr_t start, end;

	nr_ranges = 2; /* extra slots for the crashkernel region exclusions below */
	for_each_mem_range(i, &start, &end)
		nr_ranges++;

	cmem = kmalloc(struct_size(cmem, ranges, nr_ranges), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = nr_ranges;
	cmem->nr_ranges = 0;
	for_each_mem_range(i, &start, &end) {
		cmem->ranges[cmem->nr_ranges].start = start;
		cmem->ranges[cmem->nr_ranges].end = end - 1;
		cmem->nr_ranges++;
	}

	/* Exclude crashkernel region */
	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		goto out;

	if (crashk_low_res.end) {
		ret = crash_exclude_mem_range(cmem, crashk_low_res.start, crashk_low_res.end);
		if (ret)
			goto out;
	}

	ret = crash_prepare_elf64_headers(cmem, true, addr, sz);

out:
	kfree(cmem);
	return ret;
}

/*
 * Tries to add the initrd and DTB to the image. If it is not possible to find
 * valid locations, this function will undo changes to the image and return
 * non-zero.
 */
static int load_other_segments(struct kimage *image,
			       unsigned long kernel_load_addr,
			       unsigned long kernel_size,
			       char *initrd, unsigned long initrd_len,
			       char *cmdline)
{
	struct kexec_buf kbuf;
	void *headers, *fdt = NULL;
	unsigned long headers_sz, initrd_load_addr = 0,
		      orig_segments = image->nr_segments;
	int ret = 0;

	kbuf.image = image;
	/* Don't allocate anything below the kernel */
	kbuf.buf_min = kernel_load_addr + kernel_size;

	/* load elf core header */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = prepare_elf_headers(&headers, &headers_sz);
		if (ret) {
			pr_err("Preparing elf core header failed\n");
			goto out_err;
		}

		kbuf.buffer = headers;
		kbuf.bufsz = headers_sz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = headers_sz;
		kbuf.buf_align = PAGE_SIZE;
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = true;

		ret = kexec_add_buffer(&kbuf);
		if (ret) {
			vfree(headers);
			goto out_err;
		}
		image->elf_headers = headers;
		image->elf_load_addr = kbuf.mem;
		image->elf_headers_sz = headers_sz;

		pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load initrd */
	if (initrd) {
		kbuf.buffer = initrd;
		kbuf.bufsz = initrd_len;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
		kbuf.memsz = initrd_len;
		kbuf.buf_align = PAGE_SIZE;
		/* avoid overlapping the kernel */
		kbuf.buf_min = round_up(kernel_load_addr, SZ_1G);
		kbuf.buf_max = ULONG_MAX;
		kbuf.top_down = false;

		ret = kexec_add_buffer(&kbuf);
		if (ret)
			goto out_err;
		initrd_load_addr = kbuf.mem;

		pr_debug("Loaded initrd at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
			 initrd_load_addr, kbuf.bufsz, kbuf.memsz);
	}

	/* load dtb */
	fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
					   initrd_len, cmdline, 0);
	if (!fdt) {
		pr_err("Preparing for new dtb failed\n");
		ret = -EINVAL;
		goto out_err;
	}

	/* trim it */
	fdt_pack(fdt);
	kbuf.buffer = fdt;
	kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
	kbuf.buf_align = PAGE_SIZE;
	kbuf.buf_max = ULONG_MAX;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.top_down = false;

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		goto out_err;
	/* Cache the fdt buffer address for memory cleanup */
	image->arch.fdt = fdt;
	image->arch.fdt_addr = kbuf.mem;

	pr_debug("Loaded dtb at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kbuf.mem, kbuf.bufsz, kbuf.memsz);

	return 0;

out_err:
	image->nr_segments = orig_segments;
	kvfree(fdt);
	return ret;
}

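/*
 * Probe for a flat RISC-V Image: the buffer must be at least as large as
 * struct riscv_image_header and carry the RISCV_IMAGE_MAGIC2 ("RSC\x05")
 * value in its magic2 field.
 */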
static int image_probe(const char *kernel_buf, unsigned long kernel_len)
{
	const struct riscv_image_header *h =
		(const struct riscv_image_header *)(kernel_buf);

	if (!h || (kernel_len < sizeof(*h)))
		return -EINVAL;

	if (memcmp(&h->magic2, RISCV_IMAGE_MAGIC2, sizeof(h->magic2)))
		return -EINVAL;

	return 0;
}

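/*
 * Place the kernel Image in memory: reserve a PMD-aligned region large
 * enough for the Image plus its text_offset, then try to place the ELF
 * core headers, initrd and device tree above it. If the auxiliary
 * segments do not fit, drop the kernel segment and retry from the next
 * hole. On success, image->start is the entry point of the loaded kernel.
 */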
static void *image_load(struct kimage *image,
			char *kernel, unsigned long kernel_len,
			char *initrd, unsigned long initrd_len,
			char *cmdline, unsigned long cmdline_len)
{
	struct riscv_image_header *h;
	u64 flags;
	bool be_image, be_kernel;
	struct kexec_buf kbuf;
	unsigned long text_offset, kernel_segment_number;
	unsigned long kernel_start;
	struct kexec_segment *kernel_segment;
	int ret;

	h = (struct riscv_image_header *)kernel;
	if (!h->image_size)
		return ERR_PTR(-EINVAL);

	/* Refuse an Image whose endianness differs from the running kernel */
	flags = le64_to_cpu(h->flags);
	be_image = (flags >> RISCV_IMAGE_FLAG_BE_SHIFT) & RISCV_IMAGE_FLAG_BE_MASK;
	be_kernel = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
	if (be_image != be_kernel)
		return ERR_PTR(-EINVAL);

	/* Load the kernel */
	kbuf.image = image;
	kbuf.buf_min = 0;
	kbuf.buf_max = ULONG_MAX;
	kbuf.top_down = false;

	kbuf.buffer = kernel;
	kbuf.bufsz = kernel_len;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	kbuf.memsz = le64_to_cpu(h->image_size);
	text_offset = le64_to_cpu(h->text_offset);
	kbuf.buf_align = PMD_SIZE;

	/* Adjust kernel segment with TEXT_OFFSET */
	kbuf.memsz += text_offset;

	kernel_segment_number = image->nr_segments;

	/*
	 * The location of the kernel segment may make it impossible to satisfy
	 * the other segment requirements, so we try repeatedly to find a
	 * location that will work.
	 */
	while ((ret = kexec_add_buffer(&kbuf)) == 0) {
		/* Try to load additional data */
		kernel_segment = &image->segment[kernel_segment_number];
		ret = load_other_segments(image, kernel_segment->mem,
					  kernel_segment->memsz, initrd,
					  initrd_len, cmdline);
		if (!ret)
			break;

		/*
		 * We couldn't find space for the other segments; erase the
		 * kernel segment and try the next available hole.
		 */
		image->nr_segments -= 1;
		kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
		kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	}

	if (ret) {
		pr_err("Could not find any suitable kernel location!\n");
		return ERR_PTR(ret);
	}

	kernel_segment = &image->segment[kernel_segment_number];
	kernel_segment->mem += text_offset;
	kernel_segment->memsz -= text_offset;
	kernel_start = kernel_segment->mem;
	image->start = kernel_start;

	pr_debug("Loaded kernel at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 kernel_segment->mem, kbuf.bufsz,
		 kernel_segment->memsz);

#ifdef CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY
	/* Add purgatory to the image */
	kbuf.top_down = true;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
	ret = kexec_load_purgatory(image, &kbuf);
	if (ret) {
		pr_err("Error loading purgatory ret=%d\n", ret);
		return ERR_PTR(ret);
	}
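	/* Pass the new kernel's entry point to purgatory via riscv_kernel_entry */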
	ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry",
					     &kernel_start,
					     sizeof(kernel_start), 0);
	if (ret)
		pr_err("Error updating purgatory ret=%d\n", ret);
#endif /* CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY */

	return ret ? ERR_PTR(ret) : NULL;
}

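/*
 * With CONFIG_KEXEC_IMAGE_VERIFY_SIG, the Image is expected to carry a
 * PE/COFF (EFI stub) header whose Authenticode signature is checked
 * against the kernel's trusted keyrings.
 */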
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
static int image_verify_sig(const char *kernel, unsigned long kernel_len)
{
	return verify_pefile_signature(kernel, kernel_len, NULL,
				       VERIFYING_KEXEC_PE_SIGNATURE);
}
#endif

const struct kexec_file_ops image_kexec_ops = {
	.probe = image_probe,
	.load = image_load,
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG
	.verify_sig = image_verify_sig,
#endif
};