[notify] glibc: pulled commits from upstream, fix for CVE-2023-4806

Tim Biermann 2024-01-14 11:36:33 +01:00
parent 6a5efb8741
commit 1d82db8751
Signed by: tb
GPG Key ID: 42F8B4E30B673606
3 changed files with 92 additions and 643 deletions

View File

@@ -1,10 +1,10 @@
untrusted comment: verify with /etc/ports/core.pub
RWRJc1FUaeVeqpoW2OjCDQ3pK1hhqCCUSYuX9ciV1KAwTYdVuCv2RJf3OhZnimlfsBVUR/o0d8Asuyu4xcDxPYBt7fy7gMe/BAI=
SHA256 (Pkgfile) = a7ea5f73724ac39fc56dda2879c73fd72c04e378b5694035d5ca8feea914ac92
RWRJc1FUaeVeqnXAHCjzBaV3UybbGrGs0ZsT2e5IygAmAXt1moVQwX7ieVJChxNrbMTmAPexHOWHS6nLiYNWoNmGnb619GvU+wE=
SHA256 (Pkgfile) = fa3878caf7961200bf8479e2b6d2144782ab58a63f92da278bf529ff957336d0
SHA256 (.footprint) = 9eff4b9e8478995dd0aceacfe9301a250827c8b27cb1d4605c696ac55eec873c
SHA256 (glibc-2.36.tar.xz) = 1c959fea240906226062cb4b1e7ebce71a9f0e3c0836c09e7e3423d434fcfe75
SHA256 (linux-5.15.55.tar.xz) = 1ef6bd508b6c3af3bef2d5b337e4477254dba284c79e329aa38f9763ae3bfdcc
SHA256 (glibc-2.36-7.patch) = c167ddd1eb9e63fade7e60a2dc612a18e0900519d8c0ec1e0cf06fa92fca638a
SHA256 (glibc-2.36-8.patch) = df6b79339aa767fa3bea1443e1908ea124084b8eeee9e94ab03cc3afab0e4354
SHA256 (hosts) = 5c02b256c105f1d4a12fb738d71c1bab9eb126533074d7a0c8a14b92670c9431
SHA256 (resolv.conf) = 72ccb58768a72a771ec37142bc361a18478a07ec9de6e925a20760794389bf51
SHA256 (nsswitch.conf) = 859b8984e5e90aff3cce8f9779996ae4033b280d2122840e9411e2f44a1c2e61
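
The file above is the port's signature list (conventionally named .signature in CRUX ports): the "untrusted comment" line names the public key, the RWRJ... line is a signify signature over the list, and the SHA256 entries cover the Pkgfile, footprint, sources and patches. As a rough sketch of how such a file can be checked by hand (the port path and file names here are assumptions, not part of this commit, and the ports tools normally run an equivalent check on their own):

# Hypothetical manual verification (assumes signify is installed and the
# port is checked out under /usr/ports/core/glibc).
cd /usr/ports/core/glibc
signify -C -q -p /etc/ports/core.pub -x .signature Pkgfile .footprint

signify exits non-zero if the signature or any listed checksum fails to match, which is why both the signature line and the SHA256 entries are regenerated whenever the Pkgfile or a patch changes.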

View File

@@ -4,12 +4,12 @@
name=glibc
version=2.36
release=7
release=8
_kernel_version=5.15.55
source=(https://ftp.gnu.org/gnu/glibc/glibc-$version.tar.xz
https://www.kernel.org/pub/linux/kernel/v5.x/linux-$_kernel_version.tar.xz
glibc-$version-7.patch
glibc-$version-8.patch
hosts resolv.conf nsswitch.conf host.conf ld.so.conf
locale-gen locale.gen.in)
@@ -19,7 +19,7 @@ build() {
make -C $SRC/linux-$_kernel_version INSTALL_HDR_PATH=$PKG/usr headers_install
chown root:root $PKG/usr
patch -p1 -d $SRC/$name-${version:0:4} -i $SRC/$name-$version-7.patch
patch -p1 -d $SRC/$name-${version:0:4} -i $SRC/$name-$version-8.patch
mkdir $SRC/build
cd $SRC/build
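
A side note on the build() hunk above: ${version:0:4} is ordinary Bash substring expansion, so with version=2.36 the patch is applied inside the unpacked glibc-2.36 tree. A minimal sketch using the values from this Pkgfile:

# Bash substring expansion as used by the patch line above.
version=2.36
name=glibc
echo "$name-${version:0:4}"   # prints glibc-2.36, the -d directory for patch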

View File

@@ -66,10 +66,10 @@ index d1e139d03c..09c0cf8357 100644
else # -s
verbose :=
diff --git a/NEWS b/NEWS
index f61e521fc8..5358e0cbe3 100644
index f61e521fc8..0f0ebce3f0 100644
--- a/NEWS
+++ b/NEWS
@@ -5,6 +5,90 @@ See the end for copying conditions.
@@ -5,6 +5,94 @@ See the end for copying conditions.
Please send GNU C library bug reports via <https://sourceware.org/bugzilla/>
using `glibc' in the "product" field.
@@ -116,6 +116,7 @@ index f61e521fc8..5358e0cbe3 100644
+ [24816] Fix tst-nss-files-hosts-long on single-stack hosts
+ [27576] gmon: improve mcount overflow handling
+ [28846] CMSG_NXTHDR may trigger -Wstrict-overflow warning
+ [29039] Corrupt DTV after reuse of a TLS module ID following dlclose with unused TLS
+ [29444] gmon: Fix allocated buffer overflow (bug 29444)
+ [29864] libc: __libc_start_main() should obtain program headers
+ address (_dl_phdr) from the auxv, not the ELF header.
@@ -152,10 +153,13 @@ index f61e521fc8..5358e0cbe3 100644
+ [30305] x86_64: Fix asm constraints in feraiseexcept
+ [30477] libc: [RISCV]: time64 does not work on riscv32
+ [30515] _dl_find_object incorrectly returns 1 during early startup
+ [30785] Always call destructors in reverse constructor order
+ [30745] Slight bug in cache info codes for x86
+ [30804] F_GETLK, F_SETLK, and F_SETLKW value change for powerpc64 with
+ -D_FILE_OFFSET_BITS=64
+ [30842] Stack read overflow in getaddrinfo in no-aaaa mode (CVE-2023-4527)
+ [30843] potential use-after-free in getcanonname (CVE-2023-4806)
+ [31184] FAIL: elf/tst-tlsgap
+ [31185] Incorrect thread point access in _dl_tlsdesc_undefweak and _dl_tlsdesc_dynamic
+
Version 2.36
@@ -504,7 +508,7 @@ index 0000000000..9e7ba10fa2
+ DL_CALL_DT_FINI (map, ((void *) map->l_addr + fini->d_un.d_ptr));
+}
diff --git a/elf/dl-close.c b/elf/dl-close.c
index bcd6e206e9..640bbd88c3 100644
index bcd6e206e9..14deca2e2b 100644
--- a/elf/dl-close.c
+++ b/elf/dl-close.c
@@ -36,11 +36,6 @@
@ -551,126 +555,10 @@ index bcd6e206e9..640bbd88c3 100644
void
_dl_close_worker (struct link_map *map, bool force)
{
@@ -168,30 +138,31 @@ _dl_close_worker (struct link_map *map, bool force)
bool any_tls = false;
const unsigned int nloaded = ns->_ns_nloaded;
- struct link_map *maps[nloaded];
- /* Run over the list and assign indexes to the link maps and enter
- them into the MAPS array. */
+ /* Run over the list and assign indexes to the link maps. */
int idx = 0;
for (struct link_map *l = ns->_ns_loaded; l != NULL; l = l->l_next)
{
l->l_map_used = 0;
l->l_map_done = 0;
l->l_idx = idx;
- maps[idx] = l;
++idx;
}
assert (idx == nloaded);
- /* Keep track of the lowest index link map we have covered already. */
- int done_index = -1;
- while (++done_index < nloaded)
+ /* Keep marking link maps until no new link maps are found. */
+ for (struct link_map *l = ns->_ns_loaded; l != NULL; )
{
- struct link_map *l = maps[done_index];
+ /* next is reset to earlier link maps for remarking. */
+ struct link_map *next = l->l_next;
+ int next_idx = l->l_idx + 1; /* next->l_idx, but covers next == NULL. */
if (l->l_map_done)
- /* Already handled. */
- continue;
+ {
+ /* Already handled. */
+ l = next;
+ continue;
+ }
/* Check whether this object is still used. */
if (l->l_type == lt_loaded
@@ -201,7 +172,10 @@ _dl_close_worker (struct link_map *map, bool force)
acquire is sufficient and correct. */
&& atomic_load_acquire (&l->l_tls_dtor_count) == 0
&& !l->l_map_used)
- continue;
+ {
+ l = next;
+ continue;
+ }
/* We need this object and we handle it now. */
l->l_map_used = 1;
@@ -228,8 +202,11 @@ _dl_close_worker (struct link_map *map, bool force)
already processed it, then we need to go back
and process again from that point forward to
ensure we keep all of its dependencies also. */
- if ((*lp)->l_idx - 1 < done_index)
- done_index = (*lp)->l_idx - 1;
+ if ((*lp)->l_idx < next_idx)
+ {
+ next = *lp;
+ next_idx = next->l_idx;
+ }
}
}
@@ -249,54 +226,65 @@ _dl_close_worker (struct link_map *map, bool force)
if (!jmap->l_map_used)
{
jmap->l_map_used = 1;
- if (jmap->l_idx - 1 < done_index)
- done_index = jmap->l_idx - 1;
+ if (jmap->l_idx < next_idx)
+ {
+ next = jmap;
+ next_idx = next->l_idx;
+ }
}
}
}
- }
- /* Sort the entries. We can skip looking for the binary itself which is
- at the front of the search list for the main namespace. */
- _dl_sort_maps (maps, nloaded, (nsid == LM_ID_BASE), true);
+ l = next;
+ }
- /* Call all termination functions at once. */
- bool unload_any = false;
- bool scope_mem_left = false;
- unsigned int unload_global = 0;
- unsigned int first_loaded = ~0;
- for (unsigned int i = 0; i < nloaded; ++i)
+ /* Call the destructors in reverse constructor order, and remove the
+ closed link maps from the list. */
+ for (struct link_map **init_called_head = &_dl_init_called_list;
+ *init_called_head != NULL; )
{
- struct link_map *imap = maps[i];
-
- /* All elements must be in the same namespace. */
- assert (imap->l_ns == nsid);
+ struct link_map *imap = *init_called_head;
- if (!imap->l_map_used)
+ /* _dl_init_called_list is global, to produce a global odering.
+ Ignore the other namespaces (and link maps that are still used). */
+ if (imap->l_ns != nsid || imap->l_map_used)
+ init_called_head = &imap->l_init_called_next;
+ else
{
assert (imap->l_type == lt_loaded && !imap->l_nodelete_active);
- /* Call its termination function. Do not do it for
- half-cooked objects. Temporarily disable exception
- handling, so that errors are fatal. */
- if (imap->l_init_called)
@@ -280,17 +250,7 @@ _dl_close_worker (struct link_map *map, bool force)
half-cooked objects. Temporarily disable exception
handling, so that errors are fatal. */
if (imap->l_init_called)
- {
- /* When debugging print a message first. */
- if (__builtin_expect (GLRO(dl_debug_mask) & DL_DEBUG_IMPCALLS,
@@ -682,88 +570,10 @@ index bcd6e206e9..640bbd88c3 100644
- || imap->l_info[DT_FINI] != NULL)
- _dl_catch_exception (NULL, call_destructors, imap);
- }
+ /* _dl_init_called_list is updated at the same time as
+ l_init_called. */
+ assert (imap->l_init_called);
+
+ if (imap->l_info[DT_FINI_ARRAY] != NULL
+ || imap->l_info[DT_FINI] != NULL)
+ _dl_catch_exception (NULL, _dl_call_fini, imap);
#ifdef SHARED
/* Auditing checkpoint: we remove an object. */
_dl_audit_objclose (imap);
#endif
+ /* Unlink this link map. */
+ *init_called_head = imap->l_init_called_next;
+ }
+ }
+
+ bool unload_any = false;
+ bool scope_mem_left = false;
+ unsigned int unload_global = 0;
+
+ /* For skipping un-unloadable link maps in the second loop. */
+ struct link_map *first_loaded = ns->_ns_loaded;
+
+ /* Iterate over the namespace to find objects to unload. Some
+ unloadable objects may not be on _dl_init_called_list due to
+ dlopen failure. */
+ for (struct link_map *imap = first_loaded; imap != NULL; imap = imap->l_next)
+ {
+ if (!imap->l_map_used)
+ {
/* This object must not be used anymore. */
imap->l_removed = 1;
@@ -307,8 +295,8 @@ _dl_close_worker (struct link_map *map, bool force)
++unload_global;
/* Remember where the first dynamically loaded object is. */
- if (i < first_loaded)
- first_loaded = i;
+ if (first_loaded == NULL)
+ first_loaded = imap;
}
/* Else imap->l_map_used. */
else if (imap->l_type == lt_loaded)
@@ -444,8 +432,8 @@ _dl_close_worker (struct link_map *map, bool force)
imap->l_loader = NULL;
/* Remember where the first dynamically loaded object is. */
- if (i < first_loaded)
- first_loaded = i;
+ if (first_loaded == NULL)
+ first_loaded = imap;
}
}
@@ -516,10 +504,11 @@ _dl_close_worker (struct link_map *map, bool force)
/* Check each element of the search list to see if all references to
it are gone. */
- for (unsigned int i = first_loaded; i < nloaded; ++i)
+ for (struct link_map *imap = first_loaded; imap != NULL; )
{
- struct link_map *imap = maps[i];
- if (!imap->l_map_used)
+ if (imap->l_map_used)
+ imap = imap->l_next;
+ else
{
assert (imap->l_type == lt_loaded);
@@ -730,7 +719,9 @@ _dl_close_worker (struct link_map *map, bool force)
if (imap == GL(dl_initfirst))
GL(dl_initfirst) = NULL;
+ struct link_map *next = imap->l_next;
free (imap);
+ imap = next;
}
}
diff --git a/elf/dl-find_object.c b/elf/dl-find_object.c
index 4d5831b6f4..2e5b456c11 100644
--- a/elf/dl-find_object.c
@@ -778,10 +588,10 @@ index 4d5831b6f4..2e5b456c11 100644
/* Object not found. */
diff --git a/elf/dl-fini.c b/elf/dl-fini.c
index 030b1fcbcd..50087a1bfc 100644
index 030b1fcbcd..50ff94db16 100644
--- a/elf/dl-fini.c
+++ b/elf/dl-fini.c
@@ -21,155 +21,71 @@
@@ -21,11 +21,6 @@
#include <ldsodefs.h>
#include <elf-initfini.h>
@@ -793,122 +603,10 @@ index 030b1fcbcd..50087a1bfc 100644
void
_dl_fini (void)
{
- /* Lots of fun ahead. We have to call the destructors for all still
- loaded objects, in all namespaces. The problem is that the ELF
- specification now demands that dependencies between the modules
- are taken into account. I.e., the destructor for a module is
- called before the ones for any of its dependencies.
-
- To make things more complicated, we cannot simply use the reverse
- order of the constructors. Since the user might have loaded objects
- using `dlopen' there are possibly several other modules with its
- dependencies to be taken into account. Therefore we have to start
- determining the order of the modules once again from the beginning. */
-
- /* We run the destructors of the main namespaces last. As for the
- other namespaces, we pick run the destructors in them in reverse
- order of the namespace ID. */
+ /* Call destructors strictly in the reverse order of constructors.
+ This causes fewer surprises than some arbitrary reordering based
+ on new (relocation) dependencies. None of the objects are
+ unmapped, so applications can deal with this if their DSOs remain
+ in a consistent state after destructors have run. */
+
+ /* Protect against concurrent loads and unloads. */
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
+
+ /* Ignore objects which are opened during shutdown. */
+ struct link_map *local_init_called_list = _dl_init_called_list;
+
+ for (struct link_map *l = local_init_called_list; l != NULL;
+ l = l->l_init_called_next)
+ /* Bump l_direct_opencount of all objects so that they
+ are not dlclose()ed from underneath us. */
+ ++l->l_direct_opencount;
+
+ /* After this point, everything linked from local_init_called_list
+ cannot be unloaded because of the reference counter update. */
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+
+ /* Perform two passes: One for non-audit modules, one for audit
+ modules. This way, audit modules receive unload notifications
+ for non-audit objects, and the destructors for audit modules
+ still run. */
#ifdef SHARED
- int do_audit = 0;
- again:
+ int last_pass = GLRO(dl_naudit) > 0;
+ Lmid_t last_ns = -1;
+ for (int do_audit = 0; do_audit <= last_pass; ++do_audit)
#endif
- for (Lmid_t ns = GL(dl_nns) - 1; ns >= 0; --ns)
- {
- /* Protect against concurrent loads and unloads. */
- __rtld_lock_lock_recursive (GL(dl_load_lock));
-
- unsigned int nloaded = GL(dl_ns)[ns]._ns_nloaded;
- /* No need to do anything for empty namespaces or those used for
- auditing DSOs. */
- if (nloaded == 0
-#ifdef SHARED
- || GL(dl_ns)[ns]._ns_loaded->l_auditing != do_audit
-#endif
- )
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
- else
- {
-#ifdef SHARED
- _dl_audit_activity_nsid (ns, LA_ACT_DELETE);
-#endif
-
- /* Now we can allocate an array to hold all the pointers and
- copy the pointers in. */
- struct link_map *maps[nloaded];
-
- unsigned int i;
- struct link_map *l;
- assert (nloaded != 0 || GL(dl_ns)[ns]._ns_loaded == NULL);
- for (l = GL(dl_ns)[ns]._ns_loaded, i = 0; l != NULL; l = l->l_next)
- /* Do not handle ld.so in secondary namespaces. */
- if (l == l->l_real)
- {
- assert (i < nloaded);
-
- maps[i] = l;
- l->l_idx = i;
- ++i;
-
- /* Bump l_direct_opencount of all objects so that they
- are not dlclose()ed from underneath us. */
- ++l->l_direct_opencount;
- }
- assert (ns != LM_ID_BASE || i == nloaded);
- assert (ns == LM_ID_BASE || i == nloaded || i == nloaded - 1);
- unsigned int nmaps = i;
-
- /* Now we have to do the sorting. We can skip looking for the
- binary itself which is at the front of the search list for
- the main namespace. */
- _dl_sort_maps (maps, nmaps, (ns == LM_ID_BASE), true);
-
- /* We do not rely on the linked list of loaded object anymore
- from this point on. We have our own list here (maps). The
- various members of this list cannot vanish since the open
- count is too high and will be decremented in this loop. So
- we release the lock so that some code which might be called
- from a destructor can directly or indirectly access the
- lock. */
- __rtld_lock_unlock_recursive (GL(dl_load_lock));
-
- /* 'maps' now contains the objects in the right order. Now
- call the destructors. We have to process this array from
- the front. */
- for (i = 0; i < nmaps; ++i)
- {
- struct link_map *l = maps[i];
-
- if (l->l_init_called)
- {
@@ -116,38 +111,7 @@ _dl_fini (void)
if (l->l_init_called)
{
- /* Make sure nothing happens if we are called twice. */
- l->l_init_called = 0;
-
@ -941,54 +639,10 @@ index 030b1fcbcd..50087a1bfc 100644
- (l, l->l_addr + l->l_info[DT_FINI]->d_un.d_ptr);
- }
-
+ for (struct link_map *l = local_init_called_list; l != NULL;
+ l = l->l_init_called_next)
+ {
+ _dl_call_fini (l);
#ifdef SHARED
- /* Auditing checkpoint: another object closed. */
- _dl_audit_objclose (l);
+ if (GL(dl_ns)[l->l_ns]._ns_loaded->l_auditing != do_audit)
+ continue;
+
+ /* Avoid back-to-back calls of _dl_audit_activity_nsid for the
+ same namespace. */
+ if (last_ns != l->l_ns)
+ {
+ if (last_ns >= 0)
+ _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
+ _dl_audit_activity_nsid (l->l_ns, LA_ACT_DELETE);
+ last_ns = l->l_ns;
+ }
#endif
- }
- /* Correct the previous increment. */
- --l->l_direct_opencount;
- }
+ /* There is no need to re-enable exceptions because _dl_fini
+ is not called from a context where exceptions are caught. */
+ _dl_call_fini (l);
#ifdef SHARED
- _dl_audit_activity_nsid (ns, LA_ACT_CONSISTENT);
+ /* Auditing checkpoint: another object closed. */
+ _dl_audit_objclose (l);
#endif
- }
- }
+ }
#ifdef SHARED
- if (! do_audit && GLRO(dl_naudit) > 0)
- {
- do_audit = 1;
- goto again;
- }
+ if (last_ns >= 0)
+ _dl_audit_activity_nsid (last_ns, LA_ACT_CONSISTENT);
if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_STATISTICS))
_dl_debug_printf ("\nruntime linker statistics:\n"
/* Auditing checkpoint: another object closed. */
_dl_audit_objclose (l);
diff --git a/elf/dl-hwcaps.c b/elf/dl-hwcaps.c
index 6f161f6ad5..92eb53790e 100644
--- a/elf/dl-hwcaps.c
@@ -1026,15 +680,10 @@ index 6f161f6ad5..92eb53790e 100644
= malloc (*sz * sizeof (*result) + total);
if (overall_result == NULL)
diff --git a/elf/dl-init.c b/elf/dl-init.c
index deefeb099a..77b2edd838 100644
index deefeb099a..fca8e3a05e 100644
--- a/elf/dl-init.c
+++ b/elf/dl-init.c
@@ -21,14 +21,19 @@
#include <ldsodefs.h>
#include <elf-initfini.h>
+struct link_map *_dl_init_called_list;
@@ -25,10 +25,14 @@
static void
call_init (struct link_map *l, int argc, char **argv, char **env)
{
@@ -1051,70 +700,6 @@ index deefeb099a..77b2edd838 100644
if (l->l_init_called)
/* This object is all done. */
@@ -38,6 +43,21 @@ call_init (struct link_map *l, int argc, char **argv, char **env)
dependency. */
l->l_init_called = 1;
+ /* Help an already-running dlclose: The just-loaded object must not
+ be removed during the current pass. (No effect if no dlclose in
+ progress.) */
+ l->l_map_used = 1;
+
+ /* Record execution before starting any initializers. This way, if
+ the initializers themselves call dlopen, their ELF destructors
+ will eventually be run before this object is destructed, matching
+ that their ELF constructors have run before this object was
+ constructed. _dl_fini uses this list for audit callbacks, so
+ register objects on the list even if they do not have a
+ constructor. */
+ l->l_init_called_next = _dl_init_called_list;
+ _dl_init_called_list = l;
+
/* Check for object which constructors we do not run here. */
if (__builtin_expect (l->l_name[0], 'a') == '\0'
&& l->l_type == lt_executable)
diff --git a/elf/dl-load.c b/elf/dl-load.c
index 1ad0868dad..cb59c21ce7 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1263,7 +1263,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
/* Now process the load commands and map segments into memory.
This is responsible for filling in:
- l_map_start, l_map_end, l_addr, l_contiguous, l_text_end, l_phdr
+ l_map_start, l_map_end, l_addr, l_contiguous, l_phdr
*/
errstring = _dl_map_segments (l, fd, header, type, loadcmds, nloadcmds,
maplength, has_holes, loader);
diff --git a/elf/dl-load.h b/elf/dl-load.h
index f98d264e90..ebf7d74cd0 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -83,14 +83,11 @@ struct loadcmd
/* This is a subroutine of _dl_map_segments. It should be called for each
load command, some time after L->l_addr has been set correctly. It is
- responsible for setting up the l_text_end and l_phdr fields. */
+ responsible for setting the l_phdr fields */
static __always_inline void
_dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
const struct loadcmd *c)
{
- if (c->prot & PROT_EXEC)
- l->l_text_end = l->l_addr + c->mapend;
-
if (l->l_phdr == 0
&& c->mapoff <= header->e_phoff
&& ((size_t) (c->mapend - c->mapstart + c->mapoff)
@@ -103,7 +100,7 @@ _dl_postprocess_loadcmd (struct link_map *l, const ElfW(Ehdr) *header,
/* This is a subroutine of _dl_map_object_from_fd. It is responsible
for filling in several fields in *L: l_map_start, l_map_end, l_addr,
- l_contiguous, l_text_end, l_phdr. On successful return, all the
+ l_contiguous, l_phdr. On successful return, all the
segments are mapped (or copied, or whatever) from the file into their
final places in the address space, with the correct page permissions,
and any bss-like regions already zeroed. It returns a null pointer
diff --git a/elf/dl-lookup.c b/elf/dl-lookup.c
index 4c86dc694e..67fb2e31e2 100644
--- a/elf/dl-lookup.c
@@ -1314,6 +899,18 @@ index 4af0b5b2ce..f45b630ba5 100644
call_function_static_weak (_dl_find_object_init);
diff --git a/elf/dl-tls.c b/elf/dl-tls.c
index 093cdddb7e..bf0ff0d9e8 100644
--- a/elf/dl-tls.c
+++ b/elf/dl-tls.c
@@ -160,6 +160,7 @@ _dl_assign_tls_modid (struct link_map *l)
{
/* Mark the entry as used, so any dependency see it. */
atomic_store_relaxed (&runp->slotinfo[result - disp].map, l);
+ atomic_store_relaxed (&runp->slotinfo[result - disp].gen, 0);
break;
}
diff --git a/elf/dl-tunables.c b/elf/dl-tunables.c
index 8e7ee9df10..76cf8b9da3 100644
--- a/elf/dl-tunables.c
@@ -1373,34 +970,20 @@ index e6a56b3070..9fa3b484cf 100644
+ }
}
diff --git a/elf/dso-sort-tests-1.def b/elf/dso-sort-tests-1.def
index 5f7f18ef27..61dc54f8ae 100644
index 5f7f18ef27..4bf9052db1 100644
--- a/elf/dso-sort-tests-1.def
+++ b/elf/dso-sort-tests-1.def
@@ -53,14 +53,14 @@ tst-dso-ordering10: {}->a->b->c;soname({})=c
output: b>a>{}<a<b
# Complex example from Bugzilla #15311, under-linked and with circular
-# relocation(dynamic) dependencies. While this is technically unspecified, the
-# presumed reasonable practical behavior is for the destructor order to respect
-# the static DT_NEEDED links (here this means the a->b->c->d order).
-# The older dynamic_sort=1 algorithm does not achieve this, while the DFS-based
-# dynamic_sort=2 algorithm does, although it is still arguable whether going
-# beyond spec to do this is the right thing to do.
-# The below expected outputs are what the two algorithms currently produce
-# respectively, for regression testing purposes.
+# relocation(dynamic) dependencies. For both sorting algorithms, the
+# destruction order is the reverse of the construction order, and
+# relocation dependencies are not taken into account.
@@ -64,3 +64,10 @@ output: b>a>{}<a<b
tst-bz15311: {+a;+e;+f;+g;+d;%d;-d;-g;-f;-e;-a};a->b->c->d;d=>[ba];c=>a;b=>e=>a;c=>f=>b;d=>g=>c
-output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
-output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
+output: {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<e<a<b<c<d];}
output(glibc.rtld.dynamic_sort=1): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<a<c<d<g<f<b<e];}
output(glibc.rtld.dynamic_sort=2): {+a[d>c>b>a>];+e[e>];+f[f>];+g[g>];+d[];%d(b(e(a()))a()g(c(a()f(b(e(a()))))));-d[];-g[];-f[];-e[];-a[<g<f<a<b<c<d<e];}
+
+# Test that even in the presence of dependency loops involving dlopen'ed
+# object, that object is initialized last (and not unloaded prematurely).
+# Final destructor order is the opposite of constructor order.
+# Final destructor order is indeterminate due to the cycle.
+tst-bz28937: {+a;+b;-b;+c;%c};a->a1;a->a2;a2->a;b->b1;c->a1;c=>a1
+output: {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<c<a<a1<a2
+output(glibc.rtld.dynamic_sort=1): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a<a2<c<a1
+output(glibc.rtld.dynamic_sort=2): {+a[a2>a1>a>];+b[b1>b>];-b[<b<b1];+c[c>];%c(a1());}<a2<a<c<a1
diff --git a/elf/elf.h b/elf/elf.h
index 02a1b3f52f..014393f3cc 100644
--- a/elf/elf.h
@@ -1433,44 +1016,10 @@ index ca00dd1fe2..3c5e273f2b 100644
else # -s
verbose :=
diff --git a/elf/rtld.c b/elf/rtld.c
index cbbaf4a331..dd45930ff7 100644
index cbbaf4a331..3e771a93d8 100644
--- a/elf/rtld.c
+++ b/elf/rtld.c
@@ -479,7 +479,6 @@ _dl_start_final (void *arg, struct dl_start_final_info *info)
GL(dl_rtld_map).l_real = &GL(dl_rtld_map);
GL(dl_rtld_map).l_map_start = (ElfW(Addr)) &__ehdr_start;
GL(dl_rtld_map).l_map_end = (ElfW(Addr)) _end;
- GL(dl_rtld_map).l_text_end = (ElfW(Addr)) _etext;
/* Copy the TLS related data if necessary. */
#ifndef DONT_USE_BOOTSTRAP_MAP
# if NO_TLS_OFFSET != 0
@@ -1124,7 +1123,6 @@ rtld_setup_main_map (struct link_map *main_map)
bool has_interp = false;
main_map->l_map_end = 0;
- main_map->l_text_end = 0;
/* Perhaps the executable has no PT_LOAD header entries at all. */
main_map->l_map_start = ~0;
/* And it was opened directly. */
@@ -1216,8 +1214,6 @@ rtld_setup_main_map (struct link_map *main_map)
allocend = main_map->l_addr + ph->p_vaddr + ph->p_memsz;
if (main_map->l_map_end < allocend)
main_map->l_map_end = allocend;
- if ((ph->p_flags & PF_X) && allocend > main_map->l_text_end)
- main_map->l_text_end = allocend;
/* The next expected address is the page following this load
segment. */
@@ -1277,8 +1273,6 @@ rtld_setup_main_map (struct link_map *main_map)
= (char *) main_map->l_tls_initimage + main_map->l_addr;
if (! main_map->l_map_end)
main_map->l_map_end = ~0;
- if (! main_map->l_text_end)
- main_map->l_text_end = ~0;
if (! GL(dl_rtld_map).l_libname && GL(dl_rtld_map).l_name)
{
/* We were invoked directly, so the program might not have a
@@ -2122,6 +2116,12 @@ dl_main (const ElfW(Phdr) *phdr,
@@ -2122,6 +2122,12 @@ dl_main (const ElfW(Phdr) *phdr,
if (l->l_faked)
/* The library was not found. */
_dl_printf ("\t%s => not found\n", l->l_libname->name);
@@ -1483,127 +1032,6 @@ index cbbaf4a331..dd45930ff7 100644
else
_dl_printf ("\t%s => %s (0x%0*Zx)\n",
DSO_FILENAME (l->l_libname->name),
diff --git a/elf/setup-vdso.h b/elf/setup-vdso.h
index c0807ea82b..415d5057c3 100644
--- a/elf/setup-vdso.h
+++ b/elf/setup-vdso.h
@@ -51,9 +51,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
l->l_addr = ph->p_vaddr;
if (ph->p_vaddr + ph->p_memsz >= l->l_map_end)
l->l_map_end = ph->p_vaddr + ph->p_memsz;
- if ((ph->p_flags & PF_X)
- && ph->p_vaddr + ph->p_memsz >= l->l_text_end)
- l->l_text_end = ph->p_vaddr + ph->p_memsz;
}
else
/* There must be no TLS segment. */
@@ -62,7 +59,6 @@ setup_vdso (struct link_map *main_map __attribute__ ((unused)),
l->l_map_start = (ElfW(Addr)) GLRO(dl_sysinfo_dso);
l->l_addr = l->l_map_start - l->l_addr;
l->l_map_end += l->l_addr;
- l->l_text_end += l->l_addr;
l->l_ld = (void *) ((ElfW(Addr)) l->l_ld + l->l_addr);
elf_get_dynamic_info (l, false, false);
_dl_setup_hash (l);
diff --git a/elf/tst-audit23.c b/elf/tst-audit23.c
index 4904cf1340..f40760bd70 100644
--- a/elf/tst-audit23.c
+++ b/elf/tst-audit23.c
@@ -98,6 +98,8 @@ do_test (int argc, char *argv[])
char *lname;
uintptr_t laddr;
Lmid_t lmid;
+ uintptr_t cookie;
+ uintptr_t namespace;
bool closed;
} objs[max_objs] = { [0 ... max_objs-1] = { .closed = false } };
size_t nobjs = 0;
@@ -117,6 +119,9 @@ do_test (int argc, char *argv[])
size_t buffer_length = 0;
while (xgetline (&buffer, &buffer_length, out))
{
+ *strchrnul (buffer, '\n') = '\0';
+ printf ("info: subprocess output: %s\n", buffer);
+
if (startswith (buffer, "la_activity: "))
{
uintptr_t cookie;
@@ -125,29 +130,26 @@ do_test (int argc, char *argv[])
&cookie);
TEST_COMPARE (r, 2);
- /* The cookie identifies the object at the head of the link map,
- so we only add a new namespace if it changes from the previous
- one. This works since dlmopen is the last in the test body. */
- if (cookie != last_act_cookie && last_act_cookie != -1)
- TEST_COMPARE (last_act, LA_ACT_CONSISTENT);
-
if (this_act == LA_ACT_ADD && acts[nacts] != cookie)
{
+ /* The cookie identifies the object at the head of the
+ link map, so we only add a new namespace if it
+ changes from the previous one. This works since
+ dlmopen is the last in the test body. */
+ if (cookie != last_act_cookie && last_act_cookie != -1)
+ TEST_COMPARE (last_act, LA_ACT_CONSISTENT);
+
acts[nacts++] = cookie;
last_act_cookie = cookie;
}
- /* The LA_ACT_DELETE is called in the reverse order of LA_ACT_ADD
- at program termination (if the tests adds a dlclose or a library
- with extra dependencies this will need to be adapted). */
+ /* LA_ACT_DELETE is called multiple times for each
+ namespace, depending on destruction order. */
else if (this_act == LA_ACT_DELETE)
- {
- last_act_cookie = acts[--nacts];
- TEST_COMPARE (acts[nacts], cookie);
- acts[nacts] = 0;
- }
+ last_act_cookie = cookie;
else if (this_act == LA_ACT_CONSISTENT)
{
TEST_COMPARE (cookie, last_act_cookie);
+ last_act_cookie = -1;
/* LA_ACT_DELETE must always be followed by an la_objclose. */
if (last_act == LA_ACT_DELETE)
@@ -179,6 +181,8 @@ do_test (int argc, char *argv[])
objs[nobjs].lname = lname;
objs[nobjs].laddr = laddr;
objs[nobjs].lmid = lmid;
+ objs[nobjs].cookie = cookie;
+ objs[nobjs].namespace = last_act_cookie;
objs[nobjs].closed = false;
nobjs++;
@@ -201,6 +205,12 @@ do_test (int argc, char *argv[])
if (strcmp (lname, objs[i].lname) == 0 && lmid == objs[i].lmid)
{
TEST_COMPARE (objs[i].closed, false);
+ TEST_COMPARE (objs[i].cookie, cookie);
+ if (objs[i].namespace == -1)
+ /* No LA_ACT_ADD before the first la_objopen call. */
+ TEST_COMPARE (acts[0], last_act_cookie);
+ else
+ TEST_COMPARE (objs[i].namespace, last_act_cookie);
objs[i].closed = true;
break;
}
@@ -209,11 +219,7 @@ do_test (int argc, char *argv[])
/* la_objclose should be called after la_activity(LA_ACT_DELETE) for
the closed object's namespace. */
TEST_COMPARE (last_act, LA_ACT_DELETE);
- if (!seen_first_objclose)
- {
- TEST_COMPARE (last_act_cookie, cookie);
- seen_first_objclose = true;
- }
+ seen_first_objclose = true;
}
}
diff --git a/elf/tst-auditmod28.c b/elf/tst-auditmod28.c
index db7ba95abe..9e0a122c38 100644
--- a/elf/tst-auditmod28.c
@@ -2602,22 +2030,20 @@ index 0000000000..00b1b93342
@@ -0,0 +1 @@
+#include <wcsmbs/bits/wchar2-decl.h>
diff --git a/include/link.h b/include/link.h
index 0ac82d7c77..4eb8fe0d96 100644
index 0ac82d7c77..87966e8397 100644
--- a/include/link.h
+++ b/include/link.h
@@ -253,8 +253,10 @@ struct link_map
/* Start and finish of memory map for this object. l_map_start
need not be the same as l_addr. */
ElfW(Addr) l_map_start, l_map_end;
- /* End of the executable part of the mapping. */
- ElfW(Addr) l_text_end;
+
@@ -278,6 +278,10 @@ struct link_map
/* List of object in order of the init and fini calls. */
struct link_map **l_initfini;
+ /* Linked list of objects in reverse ELF constructor execution
+ order. Head of list is stored in _dl_init_called_list. */
+ struct link_map *l_init_called_next;
/* Default array for 'l_scope'. */
struct r_scope_elem *l_scope_mem[4];
+
/* List of the dependencies introduced through symbol binding. */
struct link_map_reldeps
{
diff --git a/include/resolv.h b/include/resolv.h
index 3590b6f496..4dbbac3800 100644
--- a/include/resolv.h
@@ -8197,7 +7623,7 @@ index 909b208578..d66f0b9c45 100644
ldp q2, q3, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*1]
ldp q4, q5, [x29, #OFFSET_RV + DL_OFFSET_RV_V0 + 32*2]
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 050a3032de..ab8a7fbf84 100644
index 050a3032de..c2627fced7 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -105,6 +105,9 @@ typedef struct link_map *lookup_t;
@ -8210,15 +7636,7 @@ index 050a3032de..ab8a7fbf84 100644
/* On some architectures a pointer to a function is not just a pointer
to the actual code of the function but rather an architecture
specific descriptor. */
@@ -1044,13 +1047,24 @@ extern int _dl_check_map_versions (struct link_map *map, int verbose,
extern void _dl_init (struct link_map *main_map, int argc, char **argv,
char **env) attribute_hidden;
+/* List of ELF objects in reverse order of their constructor
+ invocation. */
+extern struct link_map *_dl_init_called_list attribute_hidden;
+
/* Call the finalizer functions of all shared objects whose
@@ -1048,9 +1051,16 @@ extern void _dl_init (struct link_map *main_map, int argc, char **argv,
initializer functions have completed. */
extern void _dl_fini (void) attribute_hidden;
@@ -10696,6 +10114,37 @@ index 3c4480aba7..06f6c9663e 100644
#define MOVBE_X86_ISA_LEVEL 3
/* ISA level >= 2 guaranteed includes. */
diff --git a/sysdeps/x86_64/dl-tlsdesc.S b/sysdeps/x86_64/dl-tlsdesc.S
index 0db2cb4152..7619e743e1 100644
--- a/sysdeps/x86_64/dl-tlsdesc.S
+++ b/sysdeps/x86_64/dl-tlsdesc.S
@@ -61,7 +61,7 @@ _dl_tlsdesc_return:
_dl_tlsdesc_undefweak:
_CET_ENDBR
movq 8(%rax), %rax
- subq %fs:0, %rax
+ sub %fs:0, %RAX_LP
ret
cfi_endproc
.size _dl_tlsdesc_undefweak, .-_dl_tlsdesc_undefweak
@@ -102,7 +102,7 @@ _dl_tlsdesc_dynamic:
/* Preserve call-clobbered registers that we modify.
We need two scratch regs anyway. */
movq %rsi, -16(%rsp)
- movq %fs:DTV_OFFSET, %rsi
+ mov %fs:DTV_OFFSET, %RSI_LP
movq %rdi, -8(%rsp)
movq TLSDESC_ARG(%rax), %rdi
movq (%rsi), %rax
@@ -116,7 +116,7 @@ _dl_tlsdesc_dynamic:
addq TLSDESC_MODOFF(%rdi), %rax
.Lret:
movq -16(%rsp), %rsi
- subq %fs:0, %rax
+ sub %fs:0, %RAX_LP
movq -8(%rsp), %rdi
ret
.Lslow:
diff --git a/sysdeps/x86_64/fpu/fraiseexcpt.c b/sysdeps/x86_64/fpu/fraiseexcpt.c
index 864f4777a2..23446ff4ac 100644
--- a/sysdeps/x86_64/fpu/fraiseexcpt.c