/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright 1996-1998 John D. Polstra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/stat.h>

#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "debug.h"
#include "rtld.h"

static Elf_Ehdr *get_elf_header(int, const char *, const struct stat *);
static int convert_flags(int); /* Elf flags -> mmap flags */

int __getosreldate(void);

/*
 * Map a shared object into memory. The "fd" argument is a file descriptor,
 * which must be open on the object and positioned at its beginning.
 * The "path" argument is a pathname that is used only for error messages.
 *
 * The return value is a pointer to a newly-allocated Obj_Entry structure
 * for the shared object. Returns NULL on failure.
 */
Obj_Entry *
map_object(int fd, const char *path, const struct stat *sb)
{
    Obj_Entry *obj;
    Elf_Ehdr *hdr;
    int i;
    Elf_Phdr *phdr;
    Elf_Phdr *phlimit;
    Elf_Phdr **segs;
    int nsegs;
    Elf_Phdr *phdyn;
    Elf_Phdr *phinterp;
    Elf_Phdr *phtls;
    caddr_t mapbase;
    size_t mapsize;
    Elf_Addr base_vaddr;
    Elf_Addr base_vlimit;
    caddr_t base_addr;
    int base_flags;
    Elf_Off data_offset;
    Elf_Addr data_vaddr;
    Elf_Addr data_vlimit;
    caddr_t data_addr;
    int data_prot;
    int data_flags;
    Elf_Addr clear_vaddr;
    caddr_t clear_addr;
    caddr_t clear_page;
    Elf_Addr phdr_vaddr;
    size_t nclear, phsize;
    Elf_Addr bss_vaddr;
    Elf_Addr bss_vlimit;
    caddr_t bss_addr;
    Elf_Word stack_flags;
    Elf_Addr relro_page;
    size_t relro_size;
    Elf_Addr note_start;
    Elf_Addr note_end;
    char *note_map;
    size_t note_map_len;

    hdr = get_elf_header(fd, path, sb);
    if (hdr == NULL)
        return (NULL);

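    /*
     * "hdr" is a private, read-only mapping of the first page of the file,
     * created by get_elf_header(). That routine has already verified that
     * the complete program header table lies within this page, so the
     * program headers below are read directly from it.
     */
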
    /*
     * Scan the program header entries, and save key information.
     *
     * We expect that the loadable segments are ordered by load address.
     */
    phdr = (Elf_Phdr *)((char *)hdr + hdr->e_phoff);
    phsize = hdr->e_phnum * sizeof (phdr[0]);
    phlimit = phdr + hdr->e_phnum;
    nsegs = -1;
    phdyn = phinterp = phtls = NULL;
    phdr_vaddr = 0;
    relro_page = 0;
    relro_size = 0;
    note_start = 0;
    note_end = 0;
    note_map = NULL;
    note_map_len = 0;
    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
    stack_flags = RTLD_DEFAULT_STACK_PF_EXEC | PF_R | PF_W;
    while (phdr < phlimit) {
        switch (phdr->p_type) {

        case PT_INTERP:
            phinterp = phdr;
            break;

        case PT_LOAD:
            segs[++nsegs] = phdr;
            if ((segs[nsegs]->p_align & (PAGE_SIZE - 1)) != 0) {
                _rtld_error("%s: PT_LOAD segment %d not page-aligned",
                    path, nsegs);
                goto error;
            }
            break;

        case PT_PHDR:
            phdr_vaddr = phdr->p_vaddr;
            phsize = phdr->p_memsz;
            break;

        case PT_DYNAMIC:
            phdyn = phdr;
            break;

        case PT_TLS:
            phtls = phdr;
            break;

        case PT_GNU_STACK:
            stack_flags = phdr->p_flags;
            break;

        case PT_GNU_RELRO:
            relro_page = phdr->p_vaddr;
            relro_size = phdr->p_memsz;
            break;

        case PT_NOTE:
            if (phdr->p_offset > PAGE_SIZE ||
                phdr->p_offset + phdr->p_filesz > PAGE_SIZE) {
                note_map_len = round_page(phdr->p_offset +
                    phdr->p_filesz) - trunc_page(phdr->p_offset);
                note_map = mmap(NULL, note_map_len, PROT_READ,
                    MAP_PRIVATE, fd, trunc_page(phdr->p_offset));
                if (note_map == MAP_FAILED) {
                    _rtld_error("%s: error mapping PT_NOTE (%d)", path, errno);
                    goto error;
                }
                note_start = (Elf_Addr)(note_map + phdr->p_offset -
                    trunc_page(phdr->p_offset));
            } else {
                note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
            }
            note_end = note_start + phdr->p_filesz;
            break;
        }

        ++phdr;
    }
    if (phdyn == NULL) {
        _rtld_error("%s: object is not dynamically-linked", path);
        goto error;
    }

    if (nsegs < 0) {
        _rtld_error("%s: too few PT_LOAD segments", path);
        goto error;
    }

    /*
     * Map the entire address space of the object, to stake out our
     * contiguous region, and to establish the base address for relocation.
     */
    base_vaddr = trunc_page(segs[0]->p_vaddr);
    base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
    mapsize = base_vlimit - base_vaddr;
    base_addr = (caddr_t) base_vaddr;
    base_flags = __getosreldate() >= P_OSREL_MAP_GUARD ? MAP_GUARD :
        MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
    if (npagesizes > 1 && round_page(segs[0]->p_filesz) >= pagesizes[1])
        base_flags |= MAP_ALIGNED_SUPER;
    if (base_vaddr != 0)
        base_flags |= MAP_FIXED | MAP_EXCL;

    mapbase = mmap(base_addr, mapsize, PROT_NONE, base_flags, -1, 0);
    if (mapbase == (caddr_t) -1) {
        _rtld_error("%s: mmap of entire address space failed: %s",
            path, rtld_strerror(errno));
        goto error;
    }
    if (base_addr != NULL && mapbase != base_addr) {
        _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
            path, base_addr, mapbase);
        goto error1;
    }

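    /*
     * The region reserved above is mapped PROT_NONE; each PT_LOAD segment
     * is now overlaid onto it with MAP_FIXED, first the file-backed part
     * and then, for segments with p_memsz > p_filesz, an anonymous
     * zero-filled BSS portion.
     */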
    for (i = 0; i <= nsegs; i++) {
        /* Overlay the segment onto the proper region. */
        data_offset = trunc_page(segs[i]->p_offset);
        data_vaddr = trunc_page(segs[i]->p_vaddr);
        data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
        data_addr = mapbase + (data_vaddr - base_vaddr);
        data_prot = convert_prot(segs[i]->p_flags);
        data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
        if (mmap(data_addr, data_vlimit - data_vaddr, data_prot,
            data_flags | MAP_PREFAULT_READ, fd, data_offset) == (caddr_t) -1) {
            _rtld_error("%s: mmap of data failed: %s", path,
                rtld_strerror(errno));
            goto error1;
        }

        /* Do BSS setup */
        if (segs[i]->p_filesz != segs[i]->p_memsz) {

            /* Clear any BSS in the last page of the segment. */
            clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
            clear_addr = mapbase + (clear_vaddr - base_vaddr);
            clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr);

            if ((nclear = data_vlimit - clear_vaddr) > 0) {
                /* Make sure the end of the segment is writable */
                if ((data_prot & PROT_WRITE) == 0 && -1 ==
                    mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) {
                    _rtld_error("%s: mprotect failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }

                memset(clear_addr, 0, nclear);

                /* Reset the data protection back */
                if ((data_prot & PROT_WRITE) == 0)
                    mprotect(clear_page, PAGE_SIZE, data_prot);
            }

            /* Overlay the BSS segment onto the proper region. */
            bss_vaddr = data_vlimit;
            bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
            bss_addr = mapbase + (bss_vaddr - base_vaddr);
            if (bss_vlimit > bss_vaddr) { /* There is something to do */
                if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
                    data_flags | MAP_ANON, -1, 0) == (caddr_t)-1) {
                    _rtld_error("%s: mmap of bss failed: %s", path,
                        rtld_strerror(errno));
                    goto error1;
                }
            }
        }

        if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
            (data_vlimit - data_vaddr + data_offset) >=
            (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
            phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
        }
    }

    obj = obj_new();
    if (sb != NULL) {
        obj->dev = sb->st_dev;
        obj->ino = sb->st_ino;
    }
    obj->mapbase = mapbase;
    obj->mapsize = mapsize;
    obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) -
        base_vaddr;
    obj->vaddrbase = base_vaddr;
    obj->relocbase = mapbase - base_vaddr;
    obj->dynamic = (const Elf_Dyn *)(obj->relocbase + phdyn->p_vaddr);
    if (hdr->e_entry != 0)
        obj->entry = (caddr_t)(obj->relocbase + hdr->e_entry);
    if (phdr_vaddr != 0) {
        obj->phdr = (const Elf_Phdr *)(obj->relocbase + phdr_vaddr);
    } else {
        obj->phdr = malloc(phsize);
        if (obj->phdr == NULL) {
            obj_free(obj);
            _rtld_error("%s: cannot allocate program header", path);
            goto error1;
        }
        memcpy(__DECONST(char *, obj->phdr), (char *)hdr + hdr->e_phoff, phsize);
        obj->phdr_alloc = true;
    }
    obj->phsize = phsize;
    if (phinterp != NULL)
        obj->interp = (const char *)(obj->relocbase + phinterp->p_vaddr);
    if (phtls != NULL) {
        tls_dtv_generation++;
        obj->tlsindex = ++tls_max_index;
        obj->tlssize = phtls->p_memsz;
        obj->tlsalign = phtls->p_align;
        obj->tlsinitsize = phtls->p_filesz;
        obj->tlsinit = mapbase + phtls->p_vaddr;
    }
    obj->stack_flags = stack_flags;
    obj->relro_page = obj->relocbase + trunc_page(relro_page);
    obj->relro_size = round_page(relro_size);
    if (note_start < note_end)
        digest_notes(obj, note_start, note_end);
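
    /*
     * The temporary mappings used while digesting the object (the PT_NOTE
     * mapping, if any, and the first page of the file containing the ELF
     * header) are no longer needed once the Obj_Entry is filled in.
     */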
    if (note_map != NULL)
        munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (obj);

error1:
    munmap(mapbase, mapsize);
error:
    if (note_map != NULL && note_map != MAP_FAILED)
        munmap(note_map, note_map_len);
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}

static Elf_Ehdr *
get_elf_header(int fd, const char *path, const struct stat *sbp)
{
    Elf_Ehdr *hdr;

    /* Make sure file has enough data for the ELF header */
    if (sbp != NULL && sbp->st_size < (off_t)sizeof(Elf_Ehdr)) {
        _rtld_error("%s: invalid file format", path);
        return (NULL);
    }

    hdr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ,
        fd, 0);
    if (hdr == (Elf_Ehdr *)MAP_FAILED) {
        _rtld_error("%s: read error: %s", path, rtld_strerror(errno));
        return (NULL);
    }

    /* Make sure the file is valid */
    if (!IS_ELF(*hdr)) {
        _rtld_error("%s: invalid file format", path);
        goto error;
    }
    if (hdr->e_ident[EI_CLASS] != ELF_TARG_CLASS ||
        hdr->e_ident[EI_DATA] != ELF_TARG_DATA) {
        _rtld_error("%s: unsupported file layout", path);
        goto error;
    }
    if (hdr->e_ident[EI_VERSION] != EV_CURRENT ||
        hdr->e_version != EV_CURRENT) {
        _rtld_error("%s: unsupported file version", path);
        goto error;
    }
    if (hdr->e_type != ET_EXEC && hdr->e_type != ET_DYN) {
        _rtld_error("%s: unsupported file type", path);
        goto error;
    }
    if (hdr->e_machine != ELF_TARG_MACH) {
        _rtld_error("%s: unsupported machine", path);
        goto error;
    }

    /*
     * We rely on the program header being in the first page. This is
     * not strictly required by the ABI specification, but it seems to
     * always be true in practice. And it simplifies things considerably.
     */
    if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
        _rtld_error(
            "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
        goto error;
    }
    if (hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr) >
        (size_t)PAGE_SIZE) {
        _rtld_error("%s: program header too large", path);
        goto error;
    }
    return (hdr);

error:
    munmap(hdr, PAGE_SIZE);
    return (NULL);
}
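
/*
 * Release an Obj_Entry and everything rtld allocated on its behalf: the TLS
 * offset, the lists of needed objects, names, and DAG links, the version
 * table, and any heap-allocated path strings or program header copy. The
 * memory mappings themselves are not unmapped here.
 */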

void
obj_free(Obj_Entry *obj)
{
    Objlist_Entry *elm;

    if (obj->tls_done)
        free_tls_offset(obj);
    while (obj->needed != NULL) {
        Needed_Entry *needed = obj->needed;
        obj->needed = needed->next;
        free(needed);
    }
    while (!STAILQ_EMPTY(&obj->names)) {
        Name_Entry *entry = STAILQ_FIRST(&obj->names);
        STAILQ_REMOVE_HEAD(&obj->names, link);
        free(entry);
    }
    while (!STAILQ_EMPTY(&obj->dldags)) {
        elm = STAILQ_FIRST(&obj->dldags);
        STAILQ_REMOVE_HEAD(&obj->dldags, link);
        free(elm);
    }
    while (!STAILQ_EMPTY(&obj->dagmembers)) {
        elm = STAILQ_FIRST(&obj->dagmembers);
        STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
        free(elm);
    }
    if (obj->vertab)
        free(obj->vertab);
    if (obj->origin_path)
        free(obj->origin_path);
    if (obj->z_origin)
        free(__DECONST(void *, obj->rpath));
    if (obj->priv)
        free(obj->priv);
    if (obj->path)
        free(obj->path);
    if (obj->phdr_alloc)
        free(__DECONST(void *, obj->phdr));
    free(obj);
}

Obj_Entry *
obj_new(void)
{
    Obj_Entry *obj;

    obj = CNEW(Obj_Entry);
    STAILQ_INIT(&obj->dldags);
    STAILQ_INIT(&obj->dagmembers);
    STAILQ_INIT(&obj->names);
    return obj;
}

/*
 * Given a set of ELF protection flags, return the corresponding protection
 * flags for mmap().
 */
int
convert_prot(int elfflags)
{
    int prot = 0;
    if (elfflags & PF_R)
        prot |= PROT_READ;
    if (elfflags & PF_W)
        prot |= PROT_WRITE;
    if (elfflags & PF_X)
        prot |= PROT_EXEC;
    return prot;
}

static int
convert_flags(int elfflags)
{
    int flags = MAP_PRIVATE; /* All mappings are private */

    /*
     * Read-only mappings are marked "MAP_NOCORE", because they can be
     * reconstructed by a debugger.
     */
    if (!(elfflags & PF_W))
        flags |= MAP_NOCORE;
    return flags;
}