Index: rtld.c
===================================================================
RCS file: /cvsroot/src/libexec/ld.elf_so/rtld.c,v
retrieving revision 1.195
diff -u -r1.195 rtld.c
--- rtld.c	30 Dec 2018 01:48:37 -0000	1.195
+++ rtld.c	10 Apr 2019 22:25:09 -0000
@@ -122,6 +122,13 @@
 #endif /* RTLD_DEBUG */
 extern Elf_Dyn  _DYNAMIC;
 
+#define	RTLD_EXCLUSIVE_MASK	0x80000000U
+#define	WMUTEX	0
+#define	WSHARED	1
+#define	WEXCL	2
+static volatile unsigned int _rtld_mutex[3];
+static volatile unsigned int _rtld_phdr_mutex[3];
+
 static void _rtld_call_fini_functions(sigset_t *, int);
 static void _rtld_call_init_functions(sigset_t *);
 static void _rtld_initlist_visit(Objlist *, Obj_Entry *, int);
@@ -134,6 +141,8 @@
 static void _rtld_unload_object(sigset_t *, Obj_Entry *, bool);
 static void _rtld_unref_dag(Obj_Entry *);
 static Obj_Entry *_rtld_obj_from_addr(const void *);
+static void _rtld_mutex_shared_enter(volatile unsigned int *);
+static void _rtld_mutex_shared_exit(volatile unsigned int *);
 
 static inline void
 _rtld_call_initfini_function(const Obj_Entry *obj, Elf_Addr func, sigset_t *mask)
@@ -1444,6 +1453,7 @@
 	dbg(("dl_iterate_phdr"));
 
 	_rtld_shared_enter();
+	_rtld_mutex_shared_enter(_rtld_phdr_mutex);
 
 	for (obj = _rtld_objlist;  obj != NULL;  obj = obj->next) {
 		phdr_info.dlpi_addr = (Elf_Addr)obj->relocbase;
@@ -1462,11 +1472,13 @@
 		phdr_info.dlpi_subs = _rtld_objloads - _rtld_objcount;
 
 		/* XXXlocking: exit point */
+		_rtld_shared_exit();
 		error = callback(&phdr_info, sizeof(phdr_info), param);
+		_rtld_shared_enter();
 		if (error)
 			break;
 	}
-
+	_rtld_mutex_shared_exit(_rtld_phdr_mutex);
 	_rtld_shared_exit();
 	return error;
 }
@@ -1621,13 +1633,8 @@
 	}
 }
 
-#define	RTLD_EXCLUSIVE_MASK	0x80000000U
-static volatile unsigned int _rtld_mutex;
-static volatile unsigned int _rtld_waiter_exclusive;
-static volatile unsigned int _rtld_waiter_shared;
-
-void
-_rtld_shared_enter(void)
+static void
+_rtld_mutex_shared_enter(volatile unsigned int *m)
 {
 	unsigned int cur;
 	lwpid_t waiter, self = 0;
@@ -1635,13 +1642,13 @@
 	membar_enter();
 
 	for (;;) {
-		cur = _rtld_mutex;
+		cur = *m;
 		/*
 		 * First check if we are currently not exclusively locked.
 		 */
 		if ((cur & RTLD_EXCLUSIVE_MASK) == 0) {
 			/* Yes, so increment use counter */
-			if (atomic_cas_uint(&_rtld_mutex, cur, cur + 1) != cur)
+			if (atomic_cas_uint(m, cur, cur + 1) != cur)
 				continue;
 			membar_enter();
 			return;
@@ -1657,24 +1664,29 @@
 			_rtld_error("dead lock detected");
 			_rtld_die();
 		}
-		waiter = atomic_swap_uint(&_rtld_waiter_shared, self);
+		waiter = atomic_swap_uint(&m[WSHARED], self);
 		/*
 		 * Check for race against _rtld_exclusive_exit before sleeping.
 		 */
 		membar_sync();
-		if ((_rtld_mutex & RTLD_EXCLUSIVE_MASK) ||
-		    _rtld_waiter_exclusive)
+		if ((*m & RTLD_EXCLUSIVE_MASK) || m[WEXCL])
 			_lwp_park(CLOCK_REALTIME, 0, NULL, 0,
-			    __UNVOLATILE(&_rtld_mutex), NULL);
+			    __UNVOLATILE(m), NULL);
 		/* Try to remove us from the waiter list. */
-		atomic_cas_uint(&_rtld_waiter_shared, self, 0);
+		atomic_cas_uint(&m[WSHARED], self, 0);
 		if (waiter)
-			_lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
+			_lwp_unpark(waiter, __UNVOLATILE(m));
 	}
 }
 
 void
-_rtld_shared_exit(void)
+_rtld_shared_enter(void)
+{
+	_rtld_mutex_shared_enter(_rtld_mutex);
+}
+
+static void
+_rtld_mutex_shared_exit(volatile unsigned int *m)
 {
 	lwpid_t waiter;
 
@@ -1682,7 +1694,7 @@
 	 * Shared lock taken after an exclusive lock.
 	 * Just assume this is a partial recursion.
 	 */
-	if (_rtld_mutex & RTLD_EXCLUSIVE_MASK)
+	if (*m & RTLD_EXCLUSIVE_MASK)
 		return;
 
 	/*
@@ -1690,15 +1702,21 @@
 	 * LWP on the shared lock.
 	 */
 	membar_exit();
-	if (atomic_dec_uint_nv(&_rtld_mutex))
+	if (atomic_dec_uint_nv(m))
 		return;
 	membar_sync();
-	if ((waiter = _rtld_waiter_exclusive) != 0)
-		_lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
+	if ((waiter = m[WEXCL]) != 0)
+		_lwp_unpark(waiter, __UNVOLATILE(m));
 }
 
 void
-_rtld_exclusive_enter(sigset_t *mask)
+_rtld_shared_exit(void)
+{
+	_rtld_mutex_shared_exit(_rtld_mutex);
+}
+
+static void
+_rtld_mutex_exclusive_enter(volatile unsigned int *m, sigset_t *mask)
 {
 	lwpid_t waiter, self = _lwp_self();
 	unsigned int locked_value = (unsigned int)self | RTLD_EXCLUSIVE_MASK;
@@ -1710,43 +1728,55 @@
 	sigprocmask(SIG_BLOCK, &blockmask, mask);
 
 	for (;;) {
-		if (atomic_cas_uint(&_rtld_mutex, 0, locked_value) == 0) {
+		if (atomic_cas_uint(m, 0, locked_value) == 0) {
 			membar_enter();
 			break;
 		}
-		waiter = atomic_swap_uint(&_rtld_waiter_exclusive, self);
+		waiter = atomic_swap_uint(&m[WEXCL], self);
 		membar_sync();
-		cur = _rtld_mutex;
+		cur = *m;
 		if (cur == locked_value) {
 			_rtld_error("dead lock detected");
 			_rtld_die();
 		}
 		if (cur)
 			_lwp_park(CLOCK_REALTIME, 0, NULL, 0,
-			    __UNVOLATILE(&_rtld_mutex), NULL);
-		atomic_cas_uint(&_rtld_waiter_exclusive, self, 0);
+			    __UNVOLATILE(m), NULL);
+		atomic_cas_uint(&m[WEXCL], self, 0);
 		if (waiter)
-			_lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
+			_lwp_unpark(waiter, __UNVOLATILE(m));
 	}
 }
 
 void
-_rtld_exclusive_exit(sigset_t *mask)
+_rtld_exclusive_enter(sigset_t *mask)
+{
+	_rtld_mutex_exclusive_enter(_rtld_mutex, mask);
+}
+
+static void
+_rtld_mutex_exclusive_exit(volatile unsigned int *m, sigset_t *mask)
 {
 	lwpid_t waiter;
 
 	membar_exit();
-	_rtld_mutex = 0;
+	*m = 0;
 	membar_sync();
-	if ((waiter = _rtld_waiter_exclusive) != 0)
-		_lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
+	if ((waiter = m[WEXCL]) != 0)
+		_lwp_unpark(waiter, __UNVOLATILE(m));
 
-	if ((waiter = _rtld_waiter_shared) != 0)
-		_lwp_unpark(waiter, __UNVOLATILE(&_rtld_mutex));
+	if ((waiter = m[WSHARED]) != 0)
+		_lwp_unpark(waiter, __UNVOLATILE(m));
 
 	sigprocmask(SIG_SETMASK, mask, NULL);
 }
 
+void
+_rtld_exclusive_exit(sigset_t *mask)
+{
+	_rtld_mutex_exclusive_exit(_rtld_mutex, mask);
+}
+
 int
 _rtld_relro(const Obj_Entry *obj, bool wantmain)
 {
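
The diff factors the old single lock word (plus its two standalone waiter
words, _rtld_waiter_shared and _rtld_waiter_exclusive) into primitives that
take a pointer to a three-word lock: m[WMUTEX] holds either the shared-use
count or, with RTLD_EXCLUSIVE_MASK set, the exclusive owner's LWP id, while
m[WSHARED] and m[WEXCL] each record one parked waiter to wake on release.
Below is a minimal portable model of that layout, assuming C11 atomics and
sched_yield() in place of NetBSD's atomic_cas_uint(), the membar_*()
barriers, and the _lwp_park()/_lwp_unpark() sleep/wake pair; the function
names are illustrative, not rtld's.

/*
 * Portable sketch of the three-word lock above; illustration only.
 * Spin-waiting stands in for _lwp_park()/_lwp_unpark(), and C11
 * atomics stand in for atomic_ops(3) and membar_ops(3).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <sched.h>

#define	EXCLUSIVE_MASK	0x80000000U	/* mirrors RTLD_EXCLUSIVE_MASK */
#define	WMUTEX	0	/* use count, or owner id | EXCLUSIVE_MASK */
#define	WSHARED	1	/* one shared waiter id, used as a wakeup hint */
#define	WEXCL	2	/* one exclusive waiter id, used as a wakeup hint */

static _Atomic unsigned int lock[3];

static void
shared_enter(_Atomic unsigned int *m)
{
	unsigned int cur;

	for (;;) {
		cur = atomic_load(&m[WMUTEX]);
		/* Not exclusively held: try to bump the use count. */
		if ((cur & EXCLUSIVE_MASK) == 0) {
			if (atomic_compare_exchange_weak(&m[WMUTEX], &cur,
			    cur + 1))
				return;
			continue;	/* lost the race, reload */
		}
		/* Exclusively held: the real code parks the LWP here. */
		sched_yield();
	}
}

static void
shared_exit(_Atomic unsigned int *m)
{
	/* The last reader out would wake the waiter noted in m[WEXCL]. */
	atomic_fetch_sub(&m[WMUTEX], 1);
}

static int
exclusive_tryenter(_Atomic unsigned int *m, unsigned int self)
{
	unsigned int unowned = 0;

	/* 0 -> self|MASK: fails while readers or another owner hold it. */
	return atomic_compare_exchange_strong(&m[WMUTEX], &unowned,
	    self | EXCLUSIVE_MASK);
}

int
main(void)
{
	shared_enter(lock);
	shared_enter(lock);			/* shared use nests */
	printf("exclusive vs. 2 readers: %d\n", exclusive_tryenter(lock, 1));
	shared_exit(lock);
	shared_exit(lock);
	printf("exclusive when free:     %d\n", exclusive_tryenter(lock, 1));
	printf("exclusive vs. owner:     %d\n", exclusive_tryenter(lock, 2));
	atomic_store(&lock[WMUTEX], 0);		/* exclusive_exit */
	return 0;
}

Built with cc -std=c11, this prints 0, 1, 0: readers hold off a writer, a
free lock grants exclusivity, and a held lock refuses a second owner. What
the model leaves out is exactly what the patch buys in dl_iterate_phdr():
with a second lock of the same shape (_rtld_phdr_mutex), the global shared
lock can be dropped and retaken around the user callback while the phdr
lock stays held across it.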