1 /* Load a shared object at runtime, relocate it, and run its initializer.
2    Copyright (C) 1996-2022 Free Software Foundation, Inc.
3    This file is part of the GNU C Library.
4 
5    The GNU C Library is free software; you can redistribute it and/or
6    modify it under the terms of the GNU Lesser General Public
7    License as published by the Free Software Foundation; either
8    version 2.1 of the License, or (at your option) any later version.
9 
10    The GNU C Library is distributed in the hope that it will be useful,
11    but WITHOUT ANY WARRANTY; without even the implied warranty of
12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13    Lesser General Public License for more details.
14 
15    You should have received a copy of the GNU Lesser General Public
16    License along with the GNU C Library; if not, see
17    <https://www.gnu.org/licenses/>.  */
18 
19 #include <assert.h>
20 #include <dlfcn.h>
21 #include <errno.h>
22 #include <libintl.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <sys/mman.h>		/* Check whether MAP_COPY is defined.  */
28 #include <sys/param.h>
29 #include <libc-lock.h>
30 #include <ldsodefs.h>
31 #include <sysdep-cancel.h>
32 #include <tls.h>
33 #include <stap-probe.h>
34 #include <atomic.h>
35 #include <libc-internal.h>
36 #include <array_length.h>
37 #include <libc-early-init.h>
38 #include <gnu/lib-names.h>
39 #include <dl-find_object.h>
40 
41 #include <dl-dst.h>
42 #include <dl-prop.h>
43 
44 
45 /* We must be careful not to leave us in an inconsistent state.  Thus we
46    catch any error and re-raise it after cleaning up.  */
47 
/* Argument block passed from _dl_open to dl_open_worker (and from
   there to dl_open_worker_begin) through _dl_catch_exception.  */
struct dl_open_args
{
  /* Name of the object to load, as passed to dlopen.  */
  const char *file;
  /* RTLD_* flags, plus internal __RTLD_* mode bits.  */
  int mode;
  /* This is the caller of the dlopen() function.  */
  const void *caller_dlopen;
  /* Output: link map of the loaded object (NULL on RTLD_NOLOAD miss).  */
  struct link_map *map;
  /* Namespace ID.  */
  Lmid_t nsid;

  /* Original value of _ns_global_scope_pending_adds.  Set by
     dl_open_worker.  Only valid if nsid is a real namespace
     (non-negative).  */
  unsigned int original_global_scope_pending_adds;

  /* Set to true by dl_open_worker if libc.so was already loaded into
     the namespace at the time dl_open_worker was called.  This is
     used to determine whether libc.so early initialization has
     already been done before, and whether to roll back the cached
     libc_map value in the namespace in case of a dlopen failure.  */
  bool libc_already_loaded;

  /* Set to true if the end of dl_open_worker_begin was reached.  */
  bool worker_continue;

  /* Original parameters to the program and the current environment.  */
  int argc;
  char **argv;
  char **env;
};
78 
/* Called in case the global scope cannot be extended.  Raises an
   ENOMEM error for the object NEW and does not return.  */
static void __attribute__ ((noreturn))
add_to_global_resize_failure (struct link_map *new)
{
  _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		    N_ ("cannot extend global scope"));
}
86 
/* Grow the global scope array for the namespace, so that all the new
   global objects can be added later in add_to_global_update, without
   risk of memory allocation failure.  add_to_global_resize raises
   exceptions for memory allocation errors.  */
static void
add_to_global_resize (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Count the objects we have to put in the global scope.  */
  unsigned int to_add = 0;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    if (new->l_searchlist.r_list[cnt]->l_global == 0)
      ++to_add;

  /* The symbols of the new objects and its dependencies are to be
     introduced into the global scope that will be used to resolve
     references from other dynamically-loaded objects.

     The global scope is the searchlist in the main link map.  We
     extend this list if necessary.  There is one problem though:
     since this structure was allocated very early (before the libc
     is loaded) the memory it uses is allocated by the malloc()-stub
     in the ld.so.  When we come here these functions are not used
     anymore.  Instead the malloc() implementation of the libc is
     used.  But this means the block from the main map cannot be used
     in an realloc() call.  Therefore we allocate a completely new
     array the first time we have to add something to the local scope.  */

  /* Reserve TO_ADD pending slots; the overflow check protects the
     counter itself.  */
  if (__builtin_add_overflow (ns->_ns_global_scope_pending_adds, to_add,
			      &ns->_ns_global_scope_pending_adds))
    add_to_global_resize_failure (new);

  unsigned int new_size = 0; /* 0 means no new allocation.  */
  void *old_global = NULL; /* Old allocation if free-able.  */

  /* Minimum required element count for resizing.  Adjusted below for
     an exponential resizing policy.  */
  size_t required_new_size;
  if (__builtin_add_overflow (ns->_ns_main_searchlist->r_nlist,
			      ns->_ns_global_scope_pending_adds,
			      &required_new_size))
    add_to_global_resize_failure (new);

  if (ns->_ns_global_scope_alloc == 0)
    {
      /* First dynamic allocation: leave room for eight extra entries.  */
      if (__builtin_add_overflow (required_new_size, 8, &new_size))
	add_to_global_resize_failure (new);
    }
  else if (required_new_size > ns->_ns_global_scope_alloc)
    {
      /* Exponential growth: double the required size.  */
      if (__builtin_mul_overflow (required_new_size, 2, &new_size))
	add_to_global_resize_failure (new);

      /* The old array was allocated with our malloc, not the minimal
	 malloc.  */
      old_global = ns->_ns_main_searchlist->r_list;
    }

  if (new_size > 0)
    {
      size_t allocation_size;
      if (__builtin_mul_overflow (new_size, sizeof (struct link_map *),
				  &allocation_size))
	add_to_global_resize_failure (new);
      struct link_map **new_global = malloc (allocation_size);
      if (new_global == NULL)
	add_to_global_resize_failure (new);

      /* Copy over the old entries.  */
      memcpy (new_global, ns->_ns_main_searchlist->r_list,
	      ns->_ns_main_searchlist->r_nlist * sizeof (struct link_map *));

      ns->_ns_global_scope_alloc = new_size;
      ns->_ns_main_searchlist->r_list = new_global;

      /* Concurrent lookups may still be walking the old array inside
	 a GSCOPE critical section; wait for them to drain before
	 freeing it.  */
      if (!RTLD_SINGLE_THREAD_P)
	THREAD_GSCOPE_WAIT ();

      free (old_global);
    }
}
169 
/* Actually add the new global objects to the global scope.  Must be
   called after add_to_global_resize.  This function cannot fail.  */
static void
add_to_global_update (struct link_map *new)
{
  struct link_namespaces *ns = &GL (dl_ns)[new->l_ns];

  /* Now add the new entries.  Work on a local copy of r_nlist so the
     published length is only bumped once, below, after the barrier.  */
  unsigned int new_nlist = ns->_ns_main_searchlist->r_nlist;
  for (unsigned int cnt = 0; cnt < new->l_searchlist.r_nlist; ++cnt)
    {
      struct link_map *map = new->l_searchlist.r_list[cnt];

      if (map->l_global == 0)
	{
	  map->l_global = 1;

	  /* The array has been resized by add_to_global_resize.  */
	  assert (new_nlist < ns->_ns_global_scope_alloc);

	  ns->_ns_main_searchlist->r_list[new_nlist++] = map;

	  /* We modify the global scope.  Report this.  */
	  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	    _dl_debug_printf ("\nadd %s [%lu] to global scope\n",
			      map->l_name, map->l_ns);
	}
    }

  /* Some of the pending adds have been performed by the loop above.
     Adjust the counter accordingly.  */
  unsigned int added = new_nlist - ns->_ns_main_searchlist->r_nlist;
  assert (added <= ns->_ns_global_scope_pending_adds);
  ns->_ns_global_scope_pending_adds -= added;

  /* Publish the new entries before the new length becomes visible to
     concurrent readers of the search list.  */
  atomic_write_barrier ();
  ns->_ns_main_searchlist->r_nlist = new_nlist;
}
208 
209 /* Search link maps in all namespaces for the DSO that contains the object at
210    address ADDR.  Returns the pointer to the link map of the matching DSO, or
211    NULL if a match is not found.  */
212 struct link_map *
_dl_find_dso_for_object(const ElfW (Addr)addr)213 _dl_find_dso_for_object (const ElfW(Addr) addr)
214 {
215   struct link_map *l;
216 
217   /* Find the highest-addressed object that ADDR is not below.  */
218   for (Lmid_t ns = 0; ns < GL(dl_nns); ++ns)
219     for (l = GL(dl_ns)[ns]._ns_loaded; l != NULL; l = l->l_next)
220       if (addr >= l->l_map_start && addr < l->l_map_end
221 	  && (l->l_contiguous
222 	      || _dl_addr_inside_object (l, (ElfW(Addr)) addr)))
223 	{
224 	  assert (ns == l->l_ns);
225 	  return l;
226 	}
227   return NULL;
228 }
229 rtld_hidden_def (_dl_find_dso_for_object);
230 
231 /* Return true if NEW is found in the scope for MAP.  */
232 static size_t
scope_has_map(struct link_map * map,struct link_map * new)233 scope_has_map (struct link_map *map, struct link_map *new)
234 {
235   size_t cnt;
236   for (cnt = 0; map->l_scope[cnt] != NULL; ++cnt)
237     if (map->l_scope[cnt] == &new->l_searchlist)
238       return true;
239   return false;
240 }
241 
242 /* Return the length of the scope for MAP.  */
243 static size_t
scope_size(struct link_map * map)244 scope_size (struct link_map *map)
245 {
246   size_t cnt;
247   for (cnt = 0; map->l_scope[cnt] != NULL; )
248     ++cnt;
249   return cnt;
250 }
251 
/* Resize the scopes of depended-upon objects, so that the new object
   can be added later without further allocation of memory.  This
   function can raise an exceptions due to malloc failure.  */
static void
resize_scopes (struct link_map *new)
{
  /* If the file is not loaded now as a dependency, add the search
     list of the newly loaded object to the scope.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* If the initializer has been called already, the object has
	 not been loaded here and now.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  /* cnt + 1 entries are needed: the new search list plus the
	     terminating NULL.  */
	  size_t cnt = scope_size (imap);
	  if (__glibc_unlikely (cnt + 1 >= imap->l_scope_max))
	    {
	      /* The l_scope array is too small.  Allocate a new one
		 dynamically.  */
	      size_t new_size;
	      struct r_scope_elem **newp;

	      if (imap->l_scope != imap->l_scope_mem
		  && imap->l_scope_max < array_length (imap->l_scope_mem))
		{
		  /* If the current l_scope memory is not pointing to
		     the static memory in the structure, but the
		     static memory in the structure is large enough to
		     use for cnt + 1 scope entries, then switch to
		     using the static memory.  */
		  new_size = array_length (imap->l_scope_mem);
		  newp = imap->l_scope_mem;
		}
	      else
		{
		  /* Otherwise double the capacity on the heap.  */
		  new_size = imap->l_scope_max * 2;
		  newp = (struct r_scope_elem **)
		    malloc (new_size * sizeof (struct r_scope_elem *));
		  if (newp == NULL)
		    _dl_signal_error (ENOMEM, "dlopen", NULL,
				      N_("cannot create scope list"));
		}

	      /* Copy the array and the terminating NULL.  */
	      memcpy (newp, imap->l_scope,
		      (cnt + 1) * sizeof (imap->l_scope[0]));
	      struct r_scope_elem **old = imap->l_scope;

	      imap->l_scope = newp;

	      /* _dl_scope_free defers the actual free until no thread
		 can still be reading the old array.  The static
		 scratch array is never freed.  */
	      if (old != imap->l_scope_mem)
		_dl_scope_free (old);

	      imap->l_scope_max = new_size;
	    }
	}
    }
}
316 
/* Second stage of resize_scopes: Add NEW to the scopes.  Also print
   debugging information about scopes if requested.

   This function cannot raise an exception because all required memory
   has been allocated by a previous call to resize_scopes.  */
static void
update_scopes (struct link_map *new)
{
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];
      int from_scope = 0;

      /* Same filter as in resize_scopes: only already-initialized,
	 directly-loaded objects get the new search list appended.  */
      if (imap->l_init_called && imap->l_type == lt_loaded)
	{
	  if (scope_has_map (imap, new))
	    /* Avoid duplicates.  */
	    continue;

	  size_t cnt = scope_size (imap);
	  /* Assert that resize_scopes has sufficiently enlarged the
	     array.  */
	  assert (cnt + 1 < imap->l_scope_max);

	  /* First terminate the extended list.  Otherwise a thread
	     might use the new last element and then use the garbage
	     at offset IDX+1.  */
	  imap->l_scope[cnt + 1] = NULL;
	  atomic_write_barrier ();
	  imap->l_scope[cnt] = &new->l_searchlist;

	  from_scope = cnt;
	}

      /* Print scope information.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
	_dl_show_scope (imap, from_scope);
    }
}
356 
357 /* Call _dl_add_to_slotinfo with DO_ADD set to false, to allocate
358    space in GL (dl_tls_dtv_slotinfo_list).  This can raise an
359    exception.  The return value is true if any of the new objects use
360    TLS.  */
361 static bool
resize_tls_slotinfo(struct link_map * new)362 resize_tls_slotinfo (struct link_map *new)
363 {
364   bool any_tls = false;
365   for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
366     {
367       struct link_map *imap = new->l_searchlist.r_list[i];
368 
369       /* Only add TLS memory if this object is loaded now and
370 	 therefore is not yet initialized.  */
371       if (! imap->l_init_called && imap->l_tls_blocksize > 0)
372 	{
373 	  _dl_add_to_slotinfo (imap, false);
374 	  any_tls = true;
375 	}
376     }
377   return any_tls;
378 }
379 
/* Second stage of TLS update, after resize_tls_slotinfo.  This
   function does not raise any exception.  It should only be called if
   resize_tls_slotinfo returned true.  */
static void
update_tls_slotinfo (struct link_map *new)
{
  /* Index of the first newly loaded object needing static TLS;
     initialized to r_nlist as an "unset" sentinel.  */
  unsigned int first_static_tls = new->l_searchlist.r_nlist;
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      /* Only add TLS memory if this object is loaded now and
	 therefore is not yet initialized.  */
      if (! imap->l_init_called && imap->l_tls_blocksize > 0)
	{
	  _dl_add_to_slotinfo (imap, true);

	  if (imap->l_need_tls_init
	      && first_static_tls == new->l_searchlist.r_nlist)
	    first_static_tls = i;
	}
    }

  /* Bump the TLS generation so DTVs are refreshed lazily; wrapping
     to zero would break generation comparisons, hence the fatal
     error.  */
  size_t newgen = GL(dl_tls_generation) + 1;
  if (__glibc_unlikely (newgen == 0))
    _dl_fatal_printf (N_("\
TLS generation counter wrapped!  Please report this."));
  /* Can be read concurrently.  */
  atomic_store_relaxed (&GL(dl_tls_generation), newgen);

  /* We need a second pass for static tls data, because
     _dl_update_slotinfo must not be run while calls to
     _dl_add_to_slotinfo are still pending.  */
  for (unsigned int i = first_static_tls; i < new->l_searchlist.r_nlist; ++i)
    {
      struct link_map *imap = new->l_searchlist.r_list[i];

      if (imap->l_need_tls_init
	  && ! imap->l_init_called
	  && imap->l_tls_blocksize > 0)
	{
	  /* For static TLS we have to allocate the memory here and
	     now, but we can delay updating the DTV.  */
	  imap->l_need_tls_init = 0;
#ifdef SHARED
	  /* Update the slot information data for at least the
	     generation of the DSO we are allocating data for.  */

	  /* FIXME: This can terminate the process on memory
	     allocation failure.  It is not possible to raise
	     exceptions from this context; to fix this bug,
	     _dl_update_slotinfo would have to be split into two
	     operations, similar to resize_scopes and update_scopes
	     above.  This is related to bug 16134.  */
	  _dl_update_slotinfo (imap->l_tls_modid);
#endif

	  dl_init_static_tls (imap);
	  assert (imap->l_need_tls_init == 0);
	}
    }
}
442 
443 /* Mark the objects as NODELETE if required.  This is delayed until
444    after dlopen failure is not possible, so that _dl_close can clean
445    up objects if necessary.  */
446 static void
activate_nodelete(struct link_map * new)447 activate_nodelete (struct link_map *new)
448 {
449   /* It is necessary to traverse the entire namespace.  References to
450      objects in the global scope and unique symbol bindings can force
451      NODELETE status for objects outside the local scope.  */
452   for (struct link_map *l = GL (dl_ns)[new->l_ns]._ns_loaded; l != NULL;
453        l = l->l_next)
454     if (l->l_nodelete_pending)
455       {
456 	if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES))
457 	  _dl_debug_printf ("activating NODELETE for %s [%lu]\n",
458 			    l->l_name, l->l_ns);
459 
460 	/* The flag can already be true at this point, e.g. a signal
461 	   handler may have triggered lazy binding and set NODELETE
462 	   status immediately.  */
463 	l->l_nodelete_active = true;
464 
465 	/* This is just a debugging aid, to indicate that
466 	   activate_nodelete has run for this map.  */
467 	l->l_nodelete_pending = false;
468       }
469 }
470 
/* struct dl_init_args and call_dl_init are used to call _dl_init with
   exception handling disabled.  */
struct dl_init_args
{
  /* Newly opened object whose constructors are to run.  */
  struct link_map *new;
  /* Program arguments and environment, forwarded to initializers.  */
  int argc;
  char **argv;
  char **env;
};
480 
/* Trampoline for _dl_catch_exception: unpack CLOSURE (a struct
   dl_init_args) and run the ELF initializers via _dl_init.  */
static void
call_dl_init (void *closure)
{
  struct dl_init_args *args = closure;
  _dl_init (args->new, args->argc, args->argv, args->env);
}
487 
/* First phase of dlopen: map the object and its dependencies,
   relocate them, and update scope and TLS data structures.  Runs
   under GL(dl_load_tls_lock) inside _dl_catch_exception (see
   dl_open_worker).  Sets args->worker_continue when initializers
   still need to run.  */
static void
dl_open_worker_begin (void *a)
{
  struct dl_open_args *args = a;
  const char *file = args->file;
  int mode = args->mode;
  struct link_map *call_map = NULL;

  /* Determine the caller's map if necessary.  This is needed in case
     we have a DST, when we don't know the namespace ID we have to put
     the new object in, or when the file name has no path in which
     case we need to look along the RUNPATH/RPATH of the caller.  */
  const char *dst = strchr (file, '$');
  if (dst != NULL || args->nsid == __LM_ID_CALLER
      || strchr (file, '/') == NULL)
    {
      const void *caller_dlopen = args->caller_dlopen;

      /* We have to find out from which object the caller is calling.
	 By default we assume this is the main application.  */
      call_map = GL(dl_ns)[LM_ID_BASE]._ns_loaded;

      struct link_map *l = _dl_find_dso_for_object ((ElfW(Addr)) caller_dlopen);

      if (l)
	call_map = l;

      if (args->nsid == __LM_ID_CALLER)
	args->nsid = call_map->l_ns;
    }

  /* The namespace ID is now known.  Keep track of whether libc.so was
     already loaded, to determine whether it is necessary to call the
     early initialization routine (or clear libc_map on error).  */
  args->libc_already_loaded = GL(dl_ns)[args->nsid].libc_map != NULL;

  /* Retain the old value, so that it can be restored.  */
  args->original_global_scope_pending_adds
    = GL (dl_ns)[args->nsid]._ns_global_scope_pending_adds;

  /* One might be tempted to assert that we are RT_CONSISTENT at this
     point, but that may not be true if this is a recursive call to
     dlopen.  */
  _dl_debug_initialize (0, args->nsid);

  /* Load the named object.  */
  struct link_map *new;
  args->map = new = _dl_map_object (call_map, file, lt_loaded, 0,
				    mode | __RTLD_CALLMAP, args->nsid);

  /* If the pointer returned is NULL this means the RTLD_NOLOAD flag is
     set and the object is not already loaded.  */
  if (new == NULL)
    {
      assert (mode & RTLD_NOLOAD);
      return;
    }

  if (__glibc_unlikely (mode & __RTLD_SPROF))
    /* This happens only if we load a DSO for 'sprof'.  */
    return;

  /* This object is directly loaded.  */
  ++new->l_direct_opencount;

  /* It was already open.  Fast path: only global-scope and NODELETE
     bookkeeping is needed; no relocation or initialization.  */
  if (__glibc_unlikely (new->l_searchlist.r_list != NULL))
    {
      /* Let the user know about the opencount.  */
      if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
	_dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
			  new->l_name, new->l_ns, new->l_direct_opencount);

      /* If the user requested the object to be in the global
	 namespace but it is not so far, prepare to add it now.  This
	 can raise an exception to do a malloc failure.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_resize (new);

      /* Mark the object as not deletable if the RTLD_NODELETE flags
	 was passed.  */
      if (__glibc_unlikely (mode & RTLD_NODELETE))
	{
	  if (__glibc_unlikely (GLRO (dl_debug_mask) & DL_DEBUG_FILES)
	      && !new->l_nodelete_active)
	    _dl_debug_printf ("marking %s [%lu] as NODELETE\n",
			      new->l_name, new->l_ns);
	  new->l_nodelete_active = true;
	}

      /* Finalize the addition to the global scope.  */
      if ((mode & RTLD_GLOBAL) && new->l_global == 0)
	add_to_global_update (new);

      assert (_dl_debug_update (args->nsid)->r_state == RT_CONSISTENT);

      return;
    }

  /* Schedule NODELETE marking for the directly loaded object if
     requested.  Activation is deferred until failure is impossible
     (see activate_nodelete below).  */
  if (__glibc_unlikely (mode & RTLD_NODELETE))
    new->l_nodelete_pending = true;

  /* Load that object's dependencies.  */
  _dl_map_object_deps (new, NULL, 0, 0,
		       mode & (__RTLD_DLOPEN | RTLD_DEEPBIND | __RTLD_AUDIT));

  /* So far, so good.  Now check the versions.  */
  for (unsigned int i = 0; i < new->l_searchlist.r_nlist; ++i)
    if (new->l_searchlist.r_list[i]->l_real->l_versions == NULL)
      {
	struct link_map *map = new->l_searchlist.r_list[i]->l_real;
	_dl_check_map_versions (map, 0, 0);
#ifndef SHARED
	/* During static dlopen, check if ld.so has been loaded.
	   Perform partial initialization in this case.  This must
	   come after the symbol versioning initialization in
	   _dl_check_map_versions.  */
	if (map->l_info[DT_SONAME] != NULL
	    && strcmp (((const char *) D_PTR (map, l_info[DT_STRTAB])
			+ map->l_info[DT_SONAME]->d_un.d_val), LD_SO) == 0)
	  __rtld_static_init (map);
#endif
      }

#ifdef SHARED
  /* Auditing checkpoint: we have added all objects.  */
  _dl_audit_activity_nsid (new->l_ns, LA_ACT_CONSISTENT);
#endif

  /* Notify the debugger all new objects are now ready to go.  */
  struct r_debug *r = _dl_debug_update (args->nsid);
  r->r_state = RT_CONSISTENT;
  _dl_debug_state ();
  LIBC_PROBE (map_complete, 3, args->nsid, r, new);

  _dl_open_check (new);

  /* Print scope information.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_SCOPES))
    _dl_show_scope (new, 0);

  /* Only do lazy relocation if `LD_BIND_NOW' is not set.  */
  int reloc_mode = mode & __RTLD_AUDIT;
  if (GLRO(dl_lazy))
    reloc_mode |= mode & RTLD_LAZY;

  /* Objects must be sorted by dependency for the relocation process.
     This allows IFUNC relocations to work and it also means copy
     relocation of dependencies are if necessary overwritten.
     __dl_map_object_deps has already sorted l_initfini for us.  */
  unsigned int first = UINT_MAX;
  unsigned int last = 0;
  unsigned int j = 0;
  struct link_map *l = new->l_initfini[0];
  /* Find the range [first, last) of l_initfini entries that still
     need relocation.  */
  do
    {
      if (! l->l_real->l_relocated)
	{
	  if (first == UINT_MAX)
	    first = j;
	  last = j + 1;
	}
      l = new->l_initfini[++j];
    }
  while (l != NULL);

  int relocation_in_progress = 0;

  /* Perform relocation.  This can trigger lazy binding in IFUNC
     resolvers.  For NODELETE mappings, these dependencies are not
     recorded because the flag has not been applied to the newly
     loaded objects.  This means that upon dlopen failure, these
     NODELETE objects can be unloaded despite existing references to
     them.  However, such relocation dependencies in IFUNC resolvers
     are undefined anyway, so this is not a problem.  */

  /* Relocate in reverse dependency order (dependencies first).  */
  for (unsigned int i = last; i-- > first; )
    {
      l = new->l_initfini[i];

      if (l->l_real->l_relocated)
	continue;

      if (! relocation_in_progress)
	{
	  /* Notify the debugger that relocations are about to happen.  */
	  LIBC_PROBE (reloc_start, 2, args->nsid, r);
	  relocation_in_progress = 1;
	}

#ifdef SHARED
      if (__glibc_unlikely (GLRO(dl_profile) != NULL))
	{
	  /* If this here is the shared object which we want to profile
	     make sure the profile is started.  We can find out whether
	     this is necessary or not by observing the `_dl_profile_map'
	     variable.  If it was NULL but is not NULL afterwards we must
	     start the profiling.  */
	  struct link_map *old_profile_map = GL(dl_profile_map);

	  _dl_relocate_object (l, l->l_scope, reloc_mode | RTLD_LAZY, 1);

	  if (old_profile_map == NULL && GL(dl_profile_map) != NULL)
	    {
	      /* We must prepare the profiling.  */
	      _dl_start_profile ();

	      /* Prevent unloading the object.  */
	      GL(dl_profile_map)->l_nodelete_active = true;
	    }
	}
      else
#endif
	_dl_relocate_object (l, l->l_scope, reloc_mode, 0);
    }

  /* This only performs the memory allocations.  The actual update of
     the scopes happens below, after failure is impossible.  */
  resize_scopes (new);

  /* Increase the size of the GL (dl_tls_dtv_slotinfo_list) data
     structure.  */
  bool any_tls = resize_tls_slotinfo (new);

  /* Perform the necessary allocations for adding new global objects
     to the global scope below.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_resize (new);

  /* Demarcation point: After this, no recoverable errors are allowed.
     All memory allocations for new objects must have happened
     before.  */

  /* Finalize the NODELETE status first.  This comes before
     update_scopes, so that lazy binding will not see pending NODELETE
     state for newly loaded objects.  There is a compiler barrier in
     update_scopes which ensures that the changes from
     activate_nodelete are visible before new objects show up in the
     local scope.  */
  activate_nodelete (new);

  /* Second stage after resize_scopes: Actually perform the scope
     update.  After this, dlsym and lazy binding can bind to new
     objects.  */
  update_scopes (new);

  if (!_dl_find_object_update (new))
    _dl_signal_error (ENOMEM, new->l_libname->name, NULL,
		      N_ ("cannot allocate address lookup data"));

  /* FIXME: It is unclear whether the order here is correct.
     Shouldn't new objects be made available for binding (and thus
     execution) only after there TLS data has been set up fully?
     Fixing bug 16134 will likely make this distinction less
     important.  */

  /* Second stage after resize_tls_slotinfo: Update the slotinfo data
     structures.  */
  if (any_tls)
    /* FIXME: This calls _dl_update_slotinfo, which aborts the process
       on memory allocation failure.  See bug 16134.  */
    update_tls_slotinfo (new);

  /* Notify the debugger all new objects have been relocated.  */
  if (relocation_in_progress)
    LIBC_PROBE (reloc_complete, 3, args->nsid, r, new);

  /* If libc.so was not there before, attempt to call its early
     initialization routine.  Indicate to the initialization routine
     whether the libc being initialized is the one in the base
     namespace.  */
  if (!args->libc_already_loaded)
    {
      /* dlopen cannot be used to load an initial libc by design.  */
      struct link_map *libc_map = GL(dl_ns)[args->nsid].libc_map;
      _dl_call_libc_early_init (libc_map, false);
    }

  args->worker_continue = true;
}
769 
/* Driver for the dlopen worker: run dl_open_worker_begin under the
   TLS load lock, re-raise any error afterwards, then (on success)
   run initializers and finish global-scope updates outside that
   lock.  */
static void
dl_open_worker (void *a)
{
  struct dl_open_args *args = a;

  args->worker_continue = false;

  {
    /* Protects global and module specific TLS state.  */
    __rtld_lock_lock_recursive (GL(dl_load_tls_lock));

    struct dl_exception ex;
    int err = _dl_catch_exception (&ex, dl_open_worker_begin, args);

    /* Release the lock before re-raising, so the error path does not
       leave it held.  */
    __rtld_lock_unlock_recursive (GL(dl_load_tls_lock));

    if (__glibc_unlikely (ex.errstring != NULL))
      /* Reraise the error.  */
      _dl_signal_exception (err, &ex, NULL);
  }

  /* dl_open_worker_begin bailed out early (RTLD_NOLOAD miss,
     __RTLD_SPROF, or the object was already open).  */
  if (!args->worker_continue)
    return;

  int mode = args->mode;
  struct link_map *new = args->map;

  /* Run the initializer functions of new objects.  Temporarily
     disable the exception handler, so that lazy binding failures are
     fatal.  */
  {
    struct dl_init_args init_args =
      {
        .new = new,
        .argc = args->argc,
        .argv = args->argv,
        .env = args->env
      };
    _dl_catch_exception (NULL, call_dl_init, &init_args);
  }

  /* Now we can make the new map available in the global scope.  */
  if (mode & RTLD_GLOBAL)
    add_to_global_update (new);

  /* Let the user know about the opencount.  */
  if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
    _dl_debug_printf ("opening file=%s [%lu]; direct_opencount=%u\n\n",
		      new->l_name, new->l_ns, new->l_direct_opencount);
}
820 
/* Open FILE in namespace NSID with the dlopen MODE flags and return
   its link map (as a void *, matching the dlopen interface).
   CALLER_DLOPEN is the return address of the dlopen caller, forwarded
   to the worker (used there for DST expansion and caller lookup).
   ARGC/ARGV/ENV are forwarded so ELF constructors of newly loaded
   objects can see them.  On failure, anything partially loaded is
   unloaded again and the error is re-raised as an exception.  */
void *
_dl_open (const char *file, int mode, const void *caller_dlopen, Lmid_t nsid,
	  int argc, char *argv[], char *env[])
{
  if ((mode & RTLD_BINDING_MASK) == 0)
    /* One of the flags must be set.  */
    _dl_signal_error (EINVAL, file, NULL, N_("invalid mode for dlopen()"));

  /* Make sure we are alone.  */
  __rtld_lock_lock_recursive (GL(dl_load_lock));

  if (__glibc_unlikely (nsid == LM_ID_NEWLM))
    {
      /* Find a new namespace.  Slot 0 is the base namespace and is
	 never handed out; an empty _ns_loaded marks a reusable slot.  */
      for (nsid = 1; DL_NNS > 1 && nsid < GL(dl_nns); ++nsid)
	if (GL(dl_ns)[nsid]._ns_loaded == NULL)
	  break;

      if (__glibc_unlikely (nsid == DL_NNS))
	{
	  /* No more namespace available.  Drop the load lock before
	     signaling, since the error unwinds out of this function.  */
	  __rtld_lock_unlock_recursive (GL(dl_load_lock));

	  _dl_signal_error (EINVAL, file, NULL, N_("\
no more namespaces available for dlmopen()"));
	}
      else if (nsid == GL(dl_nns))
	{
	  /* The slot is one past the current high-water mark: it has
	     never been used, so initialize its unique-symbol-table lock
	     and bump the namespace count.  */
	  __rtld_lock_initialize (GL(dl_ns)[nsid]._ns_unique_sym_table.lock);
	  ++GL(dl_nns);
	}

      /* A fresh namespace starts out consistent as far as the debugger
	 interface is concerned.  */
      _dl_debug_update (nsid)->r_state = RT_CONSISTENT;
    }
  /* Never allow loading a DSO in a namespace which is empty.  Such
     direct placements is only causing problems.  Also don't allow
     loading into a namespace used for auditing.  */
  else if (__glibc_unlikely (nsid != LM_ID_BASE && nsid != __LM_ID_CALLER)
	   && (__glibc_unlikely (nsid < 0 || nsid >= GL(dl_nns))
	       /* This prevents the [NSID] index expressions from being
		  evaluated, so the compiler won't think that we are
		  accessing an invalid index here in the !SHARED case where
		  DL_NNS is 1 and so any NSID != 0 is invalid.  */
	       || DL_NNS == 1
	       || GL(dl_ns)[nsid]._ns_nloaded == 0
	       || GL(dl_ns)[nsid]._ns_loaded->l_auditing))
    _dl_signal_error (EINVAL, file, NULL,
		      N_("invalid target namespace in dlmopen()"));

  /* Bundle the arguments for the worker, which runs under exception
     protection below.  */
  struct dl_open_args args;
  args.file = file;
  args.mode = mode;
  args.caller_dlopen = caller_dlopen;
  args.map = NULL;
  args.nsid = nsid;
  /* args.libc_already_loaded is always assigned by dl_open_worker
     (before any explicit/non-local returns).  */
  args.argc = argc;
  args.argv = argv;
  args.env = env;

  /* Run the worker with exceptions caught, so cleanup below happens on
     both the success and the error path before anything propagates.  */
  struct dl_exception exception;
  int errcode = _dl_catch_exception (&exception, dl_open_worker, &args);

#if defined USE_LDCONFIG && !defined MAP_COPY
  /* We must unmap the cache file.  */
  _dl_unload_cache ();
#endif

  /* Do this for both the error and success cases.  The old value has
     only been determined if the namespace ID was assigned (i.e., it
     is not __LM_ID_CALLER).  In the success case, we actually may
     have consumed more pending adds than planned (because the local
     scopes overlap in case of a recursive dlopen, the inner dlopen
     doing some of the globalization work of the outer dlopen), so the
     old pending adds value is larger than absolutely necessary.
     Since it is just a conservative upper bound, this is harmless.
     The top-level dlopen call will restore the field to zero.  */
  if (args.nsid >= 0)
    GL (dl_ns)[args.nsid]._ns_global_scope_pending_adds
      = args.original_global_scope_pending_adds;

  /* See if an error occurred during loading.  */
  if (__glibc_unlikely (exception.errstring != NULL))
    {
      /* Avoid keeping around a dangling reference to the libc.so link
	 map in case it has been cached in libc_map.  */
      if (!args.libc_already_loaded)
	GL(dl_ns)[args.nsid].libc_map = NULL;

      /* Remove the object from memory.  It may be in an inconsistent
	 state if relocation failed, for example.  */
      if (args.map)
	{
	  _dl_close_worker (args.map, true);

	  /* All l_nodelete_pending objects should have been deleted
	     at this point, which is why it is not necessary to reset
	     the flag here.  */
	}

      /* Release the lock.  */
      __rtld_lock_unlock_recursive (GL(dl_load_lock));

      /* Reraise the error.  */
      _dl_signal_exception (errcode, &exception, NULL);
    }

  /* On success the worker must have left the debugger interface of the
     target namespace in a consistent state.  */
  assert (_dl_debug_update (args.nsid)->r_state == RT_CONSISTENT);

  /* Release the lock.  */
  __rtld_lock_unlock_recursive (GL(dl_load_lock));

  return args.map;
}
936 
937 
938 void
_dl_show_scope(struct link_map * l,int from)939 _dl_show_scope (struct link_map *l, int from)
940 {
941   _dl_debug_printf ("object=%s [%lu]\n",
942 		    DSO_FILENAME (l->l_name), l->l_ns);
943   if (l->l_scope != NULL)
944     for (int scope_cnt = from; l->l_scope[scope_cnt] != NULL; ++scope_cnt)
945       {
946 	_dl_debug_printf (" scope %u:", scope_cnt);
947 
948 	for (unsigned int cnt = 0; cnt < l->l_scope[scope_cnt]->r_nlist; ++cnt)
949 	  if (*l->l_scope[scope_cnt]->r_list[cnt]->l_name)
950 	    _dl_debug_printf_c (" %s",
951 				l->l_scope[scope_cnt]->r_list[cnt]->l_name);
952 	  else
953 	    _dl_debug_printf_c (" %s", RTLD_PROGNAME);
954 
955 	_dl_debug_printf_c ("\n");
956       }
957   else
958     _dl_debug_printf (" no scope\n");
959   _dl_debug_printf ("\n");
960 }
961