/* Machine-dependent ELF dynamic relocation inline functions.  Sparc64 version.
   Copyright (C) 1997-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#ifndef dl_machine_h
#define dl_machine_h

#define ELF_MACHINE_NAME "sparc64"

#include <string.h>
#include <sys/param.h>
#include <ldsodefs.h>
#include <sysdep.h>
#include <dl-plt.h>
#include <dl-static-tls.h>
#include <dl-machine-rel.h>

#define ELF64_R_TYPE_ID(info)   ((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)
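
/* On sparc64 only the low byte of r_info holds the relocation type; the
   upper bits carry a secondary addend, extracted with ELF64_R_TYPE_DATA
   and used by the R_SPARC_OLO10 handler below.  */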

/* Return nonzero iff ELF header is compatible with the running host.  */
static inline int
elf_machine_matches_host (const Elf64_Ehdr *ehdr)
{
  return ehdr->e_machine == EM_SPARCV9;
}

/* We have to do this because elf_machine_{dynamic,load_address} can be
   invoked from functions that have no GOT references, and thus the compiler
   has no obligation to load the PIC register.  */
#define LOAD_PIC_REG(PIC_REG) \
do {  Elf64_Addr tmp; \
      __asm("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t" \
            "rd %%pc, %0\n\t" \
            "add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t" \
            "add %0, %1, %0" \
            : "=r" (PIC_REG), "=r" (tmp)); \
} while (0)
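
/* Note on the sequence above: the sethi/add pair materializes the
   PC-relative distance to the GOT (the -4/+4 offsets compensate for the
   instructions lying on either side of the `rd %%pc'), so adding the
   value read from %pc yields the run-time address of the GOT without
   requiring the PIC register to be live already.  */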

/* Return the link-time address of _DYNAMIC.  Conveniently, this is the
   first element of the GOT.  This must be inlined in a function which
   uses global data.  */
static inline Elf64_Addr
elf_machine_dynamic (void)
{
  register Elf64_Addr *elf_pic_register __asm__("%l7");

  LOAD_PIC_REG (elf_pic_register);

  return *elf_pic_register;
}

/* Return the run-time load address of the shared object.  */
static inline Elf64_Addr
elf_machine_load_address (void)
{
  register Elf32_Addr *pc __asm ("%o7");
  register Elf64_Addr *got __asm ("%l7");

  __asm ("sethi %%hi(_GLOBAL_OFFSET_TABLE_-4), %1\n\t"
         "call 1f\n\t"
         " add %1, %%lo(_GLOBAL_OFFSET_TABLE_+4), %1\n\t"
         "call _DYNAMIC\n\t"
         "call _GLOBAL_OFFSET_TABLE_\n"
         "1:\tadd %1, %0, %1\n\t" : "=r" (pc), "=r" (got));
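
  /* The `call _DYNAMIC' and `call _GLOBAL_OFFSET_TABLE_' instructions are
     never executed (the `call 1f' branches over them); they exist only so
     that their 30-bit displacement fields record the link-time distances
     to those symbols, letting the arithmetic below cancel the link-time
     addresses and recover the load offset.  */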

  /* got is now l_addr + _GLOBAL_OFFSET_TABLE_
     *got is _DYNAMIC
     pc[2]*4 is l_addr + _DYNAMIC - (long)pc - 8
     pc[3]*4 is l_addr + _GLOBAL_OFFSET_TABLE_ - (long)pc - 12  */
  return (Elf64_Addr) got - *got + (Elf32_Sword) ((pc[2] - pc[3]) * 4) - 4;
}

static inline Elf64_Addr __attribute__ ((always_inline))
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
		       const ElfW(Sym) *refsym, const ElfW(Sym) *sym,
		       const Elf64_Rela *reloc,
		       Elf64_Addr *reloc_addr, Elf64_Addr value)
{
  sparc64_fixup_plt (map, reloc, reloc_addr, value + reloc->r_addend,
		     reloc->r_addend, 1);
  return value;
}

/* Return the final value of a plt relocation.  */
static inline Elf64_Addr
elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
		       Elf64_Addr value)
{
  /* Don't add addend here, but in elf_machine_fixup_plt instead.
     value + reloc->r_addend is the value which should actually be
     stored into .plt data slot.  */
  return value;
}

/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry, so
   PLT entries should not be allowed to define the value.
   ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one
   of the main executable's symbols, as for a COPY reloc.  */
#define elf_machine_type_class(type) \
  ((((type) == R_SPARC_JMP_SLOT \
     || ((type) >= R_SPARC_TLS_GD_HI22 && (type) <= R_SPARC_TLS_TPOFF64)) \
    * ELF_RTYPE_CLASS_PLT) \
   | (((type) == R_SPARC_COPY) * ELF_RTYPE_CLASS_COPY))

/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries.  */
#define ELF_MACHINE_JMP_SLOT R_SPARC_JMP_SLOT

/* Set up the loaded object described by L so its unrelocated PLT
   entries will jump to the on-demand fixup code in dl-runtime.c.  */

static inline int
elf_machine_runtime_setup (struct link_map *l, struct r_scope_elem *scope[],
			   int lazy, int profile)
{
  if (l->l_info[DT_JMPREL] && lazy)
    {
      extern void _dl_runtime_resolve_0 (void);
      extern void _dl_runtime_resolve_1 (void);
      extern void _dl_runtime_profile_0 (void);
      extern void _dl_runtime_profile_1 (void);
      Elf64_Addr res0_addr, res1_addr;
      unsigned int *plt = (void *) D_PTR (l, l_info[DT_PLTGOT]);
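
      /* Apart from the resolver address they load, the .PLT0 and .PLT1
	 header entries built below receive identical code: res0_addr goes
	 into .PLT0 and res1_addr into .PLT1.  (The _dl_runtime_* entry
	 points themselves are provided by the sparc64 dl-trampoline.S.)  */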

      if (__builtin_expect(profile, 0))
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_profile_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_profile_1;

	  if (GLRO(dl_profile) != NULL
	      && _dl_name_match_p (GLRO(dl_profile), l))
	    GL(dl_profile_map) = l;
	}
      else
	{
	  res0_addr = (Elf64_Addr) &_dl_runtime_resolve_0;
	  res1_addr = (Elf64_Addr) &_dl_runtime_resolve_1;
	}

      /* PLT0 looks like:

	 sethi %uhi(_dl_runtime_{resolve,profile}_0), %g4
	 sethi %hi(_dl_runtime_{resolve,profile}_0), %g5
	 or %g4, %ulo(_dl_runtime_{resolve,profile}_0), %g4
	 or %g5, %lo(_dl_runtime_{resolve,profile}_0), %g5
	 sllx %g4, 32, %g4
	 add %g4, %g5, %g5
	 jmpl %g5, %g4
	 nop
       */
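      /* Each word stored below encodes one instruction of that sequence:
	 res0_addr is split across the sethi/or immediates as %uhi (bits
	 63-42), %hi (bits 31-10), %ulo (bits 41-32) and %lo (bits 9-0).  */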

      plt[0] = 0x09000000 | (res0_addr >> (64 - 22));
      plt[1] = 0x0b000000 | ((res0_addr >> 10) & 0x003fffff);
      plt[2] = 0x88112000 | ((res0_addr >> 32) & 0x3ff);
      plt[3] = 0x8a116000 | (res0_addr & 0x3ff);
      plt[4] = 0x89293020;
      plt[5] = 0x8a010005;
      plt[6] = 0x89c14000;
      plt[7] = 0x01000000;

      /* PLT1 looks like:

	 sethi %uhi(_dl_runtime_{resolve,profile}_1), %g4
	 sethi %hi(_dl_runtime_{resolve,profile}_1), %g5
	 or %g4, %ulo(_dl_runtime_{resolve,profile}_1), %g4
	 or %g5, %lo(_dl_runtime_{resolve,profile}_1), %g5
	 sllx %g4, 32, %g4
	 add %g4, %g5, %g5
	 jmpl %g5, %g4
	 nop
       */

      plt[8] = 0x09000000 | (res1_addr >> (64 - 22));
      plt[9] = 0x0b000000 | ((res1_addr >> 10) & 0x003fffff);
      plt[10] = 0x88112000 | ((res1_addr >> 32) & 0x3ff);
      plt[11] = 0x8a116000 | (res1_addr & 0x3ff);
      plt[12] = 0x89293020;
      plt[13] = 0x8a010005;
      plt[14] = 0x89c14000;
      plt[15] = 0x01000000;

      /* Now put the magic cookie at the beginning of .PLT2
	 Entry .PLT3 is unused by this implementation.  */
      *((struct link_map **)(&plt[16])) = l;
    }

  return lazy;
}

/* The PLT uses Elf64_Rela relocs.  */
#define elf_machine_relplt elf_machine_rela

/* Undo the sub %sp, 6*8, %sp; add %sp, STACK_BIAS + 22*8, %o0 below
   (but w/o STACK_BIAS) to get at the value we want in __libc_stack_end.  */
#define DL_STACK_END(cookie) \
  ((void *) (((long) (cookie)) - (22 - 6) * 8))

/* Initial entry point code for the dynamic linker.
   The C function `_dl_start' is the real entry point;
   its return value is the user program's entry point.  */

#define RTLD_GOT_ADDRESS(pic_reg, reg, symbol) \
	"sethi %gdop_hix22(" #symbol "), " #reg "\n\t" \
	"xor " #reg ", %gdop_lox10(" #symbol "), " #reg "\n\t" \
	"ldx [" #pic_reg " + " #reg "], " #reg ", %gdop(" #symbol ")\n"
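/* The %gdop_hix22/%gdop_lox10/%gdop operators emit GOT-data relocations
   (R_SPARC_GOTDATA_OP_HIX22 and friends): the sequence loads SYMBOL's
   address from its GOT slot via PIC_REG, and the link editor may relax
   it when the symbol resolves locally.  */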

#define __S1(x) #x
#define __S(x) __S1(x)
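
/* Stack layout assumed below: after _start's `sub %sp, 6*8, %sp', argc
   sits at [%sp + STACK_BIAS + 22*8] and argv begins at the next slot
   (+ 23*8); _dl_start_user recomputes envp as argv + (argc + 1) * 8,
   skipping argv's NULL terminator.  */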

#define RTLD_START __asm__ ( "\n" \
"	.text\n" \
"	.global _start\n" \
"	.type _start, @function\n" \
"	.align 32\n" \
"_start:\n" \
"	/* Make room for functions to drop their arguments on the stack.  */\n" \
"	sub %sp, 6*8, %sp\n" \
"	/* Pass pointer to argument block to _dl_start.  */\n" \
"	call _dl_start\n" \
"	 add %sp," __S(STACK_BIAS) "+22*8,%o0\n" \
"	/* FALLTHRU */\n" \
"	.size _start, .-_start\n" \
"\n" \
"	.global _dl_start_user\n" \
"	.type _dl_start_user, @function\n" \
"_dl_start_user:\n" \
"	/* Load the GOT register.  */\n" \
"1:	call 11f\n" \
"	 sethi %hi(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"11:	or %l7, %lo(_GLOBAL_OFFSET_TABLE_-(1b-.)), %l7\n" \
"	add %l7, %o7, %l7\n" \
"	/* Save the user entry point address in %l0.  */\n" \
"	mov %o0, %l0\n" \
"	ldx [%sp + " __S(STACK_BIAS) " + 22*8], %i5\n" \
"	/* %o0 = _dl_loaded, %o1 = argc, %o2 = argv, %o3 = envp.  */\n" \
"" RTLD_GOT_ADDRESS(%l7, %o0, _rtld_local) \
"	sllx %i5, 3, %o3\n" \
"	add %sp, " __S(STACK_BIAS) " + 23*8, %o2\n" \
"	add %o3, 8, %o3\n" \
"	mov %i5, %o1\n" \
"	add %o2, %o3, %o3\n" \
"	call _dl_init\n" \
"	 ldx [%o0], %o0\n" \
"	/* Pass our finalizer function to the user in %g1.  */\n" \
RTLD_GOT_ADDRESS(%l7, %g1, _dl_fini) \
"	/* Jump to the user's entry point and deallocate the extra stack we got.  */\n" \
"	jmp %l0\n" \
"	 add %sp, 6*8, %sp\n" \
"	.size _dl_start_user, . - _dl_start_user\n" \
"	.previous\n");

#endif /* dl_machine_h */

#define ARCH_LA_PLTENTER sparc64_gnu_pltenter
#define ARCH_LA_PLTEXIT  sparc64_gnu_pltexit

#ifdef RESOLVE_MAP

/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
   MAP is the object containing the reloc.  */

static inline void
__attribute__ ((always_inline))
elf_machine_rela (struct link_map *map, struct r_scope_elem *scope[],
		  const Elf64_Rela *reloc, const Elf64_Sym *sym,
		  const struct r_found_version *version,
		  void *const reloc_addr_arg, int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
#if !defined RTLD_BOOTSTRAP
  const Elf64_Sym *const refsym = sym;
#endif
  Elf64_Addr value;
  const unsigned long int r_type = ELF64_R_TYPE_ID (reloc->r_info);
  struct link_map *sym_map = NULL;

  if (__glibc_unlikely (r_type == R_SPARC_NONE))
    return;

  if (__glibc_unlikely (r_type == R_SPARC_SIZE64))
    {
      *reloc_addr = sym->st_size + reloc->r_addend;
      return;
    }

#if !defined RTLD_BOOTSTRAP
  if (__glibc_unlikely (r_type == R_SPARC_RELATIVE))
    {
      *reloc_addr += map->l_addr + reloc->r_addend;
      return;
    }
#endif

  if (__builtin_expect (ELF64_ST_BIND (sym->st_info) == STB_LOCAL, 0)
      && sym->st_shndx != SHN_UNDEF)
    {
      sym_map = map;
      value = map->l_addr;
    }
  else
    {
      sym_map = RESOLVE_MAP (map, scope, &sym, version, r_type);
      value = SYMBOL_ADDRESS (sym_map, sym, true);
    }

  value += reloc->r_addend;	/* Assume copy relocs have zero addend.  */
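
  /* For an STT_GNU_IFUNC symbol, VALUE at this point is the address of
     the resolver function; call it (passing the hwcap bits) to obtain
     the final implementation address, unless IFUNC resolution was
     explicitly skipped.  */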

  if (sym != NULL
      && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0)
      && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1)
      && __builtin_expect (!skip_ifunc, 1))
    value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));

  switch (r_type)
    {
#if !defined RTLD_BOOTSTRAP
    case R_SPARC_COPY:
      if (sym == NULL)
	/* This can happen in trace mode if an object could not be
	   found.  */
	break;
      if (sym->st_size > refsym->st_size
	  || (GLRO(dl_verbose) && sym->st_size < refsym->st_size))
	{
	  const char *strtab;

	  strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]);
	  _dl_error_printf ("\
%s: Symbol `%s' has different size in shared object, consider re-linking\n",
			    RTLD_PROGNAME, strtab + refsym->st_name);
	}
      memcpy (reloc_addr_arg, (void *) value,
	      MIN (sym->st_size, refsym->st_size));
      break;
#endif
    case R_SPARC_64:
    case R_SPARC_GLOB_DAT:
      *reloc_addr = value;
      break;
    case R_SPARC_IRELATIVE:
      if (__glibc_likely (!skip_ifunc))
	value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      *reloc_addr = value;
      break;
    case R_SPARC_JMP_IREL:
      if (__glibc_likely (!skip_ifunc))
	value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      /* 'high' is always zero, for large PLT entries the linker
	 emits an R_SPARC_IRELATIVE.  */
      sparc64_fixup_plt (map, reloc, reloc_addr, value, 0, 0);
      break;
    case R_SPARC_JMP_SLOT:
      sparc64_fixup_plt (map, reloc, reloc_addr, value, reloc->r_addend, 0);
      break;
    case R_SPARC_TLS_DTPMOD64:
      /* Get the information from the link map returned by the
	 resolve function.  */
      if (sym_map != NULL)
	*reloc_addr = sym_map->l_tls_modid;
      break;
    case R_SPARC_TLS_DTPOFF64:
      /* During relocation all TLS symbols are defined and used.
	 Therefore the offset is already correct.  */
      *reloc_addr = (sym == NULL ? 0 : sym->st_value) + reloc->r_addend;
      break;
    case R_SPARC_TLS_TPOFF64:
      /* The offset is negative, forward from the thread pointer.  */
      /* We know the offset of the object the symbol is contained in.
	 It is a negative value which will be added to the
	 thread pointer.  */
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  *reloc_addr = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	}
      break;
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_TLS_LE_HIX22:
    case R_SPARC_TLS_LE_LOX10:
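      /* The local-exec code sequence is a sethi/xor pair: the HIX22 field
	 holds the ones-complement of the offset's upper bits and the LOX10
	 field holds the low 10 bits with 0x1c00 ORed in, so the xor
	 reconstructs the sign-extended (negative) TP-relative offset.  */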
      if (sym != NULL)
	{
	  CHECK_STATIC_TLS (map, sym_map);
	  value = sym->st_value - sym_map->l_tls_offset
	    + reloc->r_addend;
	  if (r_type == R_SPARC_TLS_LE_HIX22)
	    *(unsigned int *)reloc_addr =
	      ((*(unsigned int *)reloc_addr & 0xffc00000)
	       | (((~value) >> 10) & 0x3fffff));
	  else
	    *(unsigned int *)reloc_addr =
	      ((*(unsigned int *)reloc_addr & 0xffffe000) | (value & 0x3ff)
	       | 0x1c00);
	}
      break;
#endif
#ifndef RTLD_BOOTSTRAP
    case R_SPARC_8:
      *(char *) reloc_addr = value;
      break;
    case R_SPARC_16:
      *(short *) reloc_addr = value;
      break;
    case R_SPARC_32:
      *(unsigned int *) reloc_addr = value;
      break;
    case R_SPARC_DISP8:
      *(char *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP16:
      *(short *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP32:
      *(unsigned int *) reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_DISP64:
      *reloc_addr = (value - (Elf64_Addr) reloc_addr);
      break;
    case R_SPARC_REGISTER:
      *reloc_addr = value;
      break;
    case R_SPARC_WDISP30:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xc0000000)
	 | (((value - (Elf64_Addr) reloc_addr) >> 2) & 0x3fffffff));
      break;
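
      /* The relocations below patch the immediate fields of sethi/or
	 (and similar) instructions for the various SPARC code models;
	 e.g. H44/M44/L44 split a 44-bit address into 22+10+12 bit
	 pieces, while HH22/HM10/LM22/LO10 cover a full 64-bit one.  */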

      /* MEDLOW code model relocs */
    case R_SPARC_LO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff)
	 | (value & 0x3ff));
      break;
    case R_SPARC_HI22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000)
	 | ((value >> 10) & 0x3fffff));
      break;
    case R_SPARC_OLO10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x1fff)
	 | (((value & 0x3ff) + ELF64_R_TYPE_DATA (reloc->r_info)) & 0x1fff));
      break;

      /* ABS34 code model reloc */
    case R_SPARC_H34:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000)
	 | ((value >> 12) & 0x3fffff));
      break;

      /* MEDMID code model relocs */
    case R_SPARC_H44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000)
	 | ((value >> 22) & 0x3fffff));
      break;
    case R_SPARC_M44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff)
	 | ((value >> 12) & 0x3ff));
      break;
    case R_SPARC_L44:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0xfff)
	 | (value & 0xfff));
      break;

      /* MEDANY code model relocs */
    case R_SPARC_HH22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000)
	 | (value >> 42));
      break;
    case R_SPARC_HM10:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & ~0x3ff)
	 | ((value >> 32) & 0x3ff));
      break;
    case R_SPARC_LM22:
      *(unsigned int *) reloc_addr =
	((*(unsigned int *)reloc_addr & 0xffc00000)
	 | ((value >> 10) & 0x003fffff));
      break;
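
      /* The R_SPARC_UA* relocations may target unaligned addresses, so
	 the value is stored byte by byte in big-endian order; UA64 takes
	 a word-sized shortcut when the target is at least 4-byte aligned
	 (common in .eh_frame).  */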
    case R_SPARC_UA16:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [1] = value;
      break;
    case R_SPARC_UA32:
      ((unsigned char *) reloc_addr_arg) [0] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [3] = value;
      break;
    case R_SPARC_UA64:
      if (! ((long) reloc_addr_arg & 3))
	{
	  /* Common in .eh_frame */
	  ((unsigned int *) reloc_addr_arg) [0] = value >> 32;
	  ((unsigned int *) reloc_addr_arg) [1] = value;
	  break;
	}
      ((unsigned char *) reloc_addr_arg) [0] = value >> 56;
      ((unsigned char *) reloc_addr_arg) [1] = value >> 48;
      ((unsigned char *) reloc_addr_arg) [2] = value >> 40;
      ((unsigned char *) reloc_addr_arg) [3] = value >> 32;
      ((unsigned char *) reloc_addr_arg) [4] = value >> 24;
      ((unsigned char *) reloc_addr_arg) [5] = value >> 16;
      ((unsigned char *) reloc_addr_arg) [6] = value >> 8;
      ((unsigned char *) reloc_addr_arg) [7] = value;
      break;
#endif
#if !defined RTLD_BOOTSTRAP || defined _NDEBUG
    default:
      _dl_reloc_bad_type (map, r_type, 0);
      break;
#endif
    }
}

static inline void
__attribute__ ((always_inline))
elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
			   void *const reloc_addr_arg)
{
  Elf64_Addr *const reloc_addr = reloc_addr_arg;
  *reloc_addr = l_addr + reloc->r_addend;
}

static inline void
__attribute__ ((always_inline))
elf_machine_lazy_rel (struct link_map *map, struct r_scope_elem *scope[],
		      Elf64_Addr l_addr, const Elf64_Rela *reloc,
		      int skip_ifunc)
{
  Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
  const unsigned int r_type = ELF64_R_TYPE (reloc->r_info);
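  /* Ordinary R_SPARC_JMP_SLOT entries need no work for lazy binding:
     the unrelocated PLT entries already funnel into the resolver stubs
     installed by elf_machine_runtime_setup.  IFUNC relocations, however,
     must be resolved eagerly even here.  */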

  if (__glibc_likely (r_type == R_SPARC_JMP_SLOT))
    ;
  else if (r_type == R_SPARC_JMP_IREL
	   || r_type == R_SPARC_IRELATIVE)
    {
      Elf64_Addr value = map->l_addr + reloc->r_addend;
      if (__glibc_likely (!skip_ifunc))
	value = ((Elf64_Addr (*) (int)) value) (GLRO(dl_hwcap));
      if (r_type == R_SPARC_JMP_IREL)
	{
	  /* 'high' is always zero, for large PLT entries the linker
	     emits an R_SPARC_IRELATIVE.  */
	  sparc64_fixup_plt (map, reloc, reloc_addr, value, 0, 1);
	}
      else
	*reloc_addr = value;
    }
  else if (r_type == R_SPARC_NONE)
    ;
  else
    _dl_reloc_bad_type (map, r_type, 1);
}

#endif /* RESOLVE_MAP */