From 2959b127e028b27cc1f91e3b88b5b22e1d28c76a Mon Sep 17 00:00:00 2001 From: li-miaomiao_zhr Date: Thu, 1 Jun 2023 19:21:20 +0800 Subject: [PATCH] processing of annotated patch files in spec files --- ...iles-for-libphtread-condition-family.patch | 2321 --------------- ...-header-files-for-libphtread_2_17_so.patch | 2609 ----------------- ...ript-and-files-of-libpthread_2_17_so.patch | 135 - ...eader-files-with-some-deleted-macros.patch | 166 -- 0005-add-pthread-functions_h.patch | 140 - ...on-which-moved-to-libc-in-glibc-2.34.patch | 587 ---- 0007-add-lowlevellock_2_17_c.patch | 68 - 0008-add-pause_nocancel_2_17.patch | 56 - 0009-add-unwind-with-longjmp.patch | 161 - 9 files changed, 6243 deletions(-) delete mode 100644 0001-add-base-files-for-libphtread-condition-family.patch delete mode 100644 0002-add-header-files-for-libphtread_2_17_so.patch delete mode 100644 0003-add-build-script-and-files-of-libpthread_2_17_so.patch delete mode 100644 0004-add-two-header-files-with-some-deleted-macros.patch delete mode 100644 0005-add-pthread-functions_h.patch delete mode 100644 0006-add-elsion-function-which-moved-to-libc-in-glibc-2.34.patch delete mode 100644 0007-add-lowlevellock_2_17_c.patch delete mode 100644 0008-add-pause_nocancel_2_17.patch delete mode 100644 0009-add-unwind-with-longjmp.patch
diff --git a/0001-add-base-files-for-libphtread-condition-family.patch b/0001-add-base-files-for-libphtread-condition-family.patch deleted file mode 100644 index 9ca2225..0000000 --- a/0001-add-base-files-for-libphtread-condition-family.patch +++ /dev/null @@ -1,2321 +0,0 @@ -From 76a50749f7af5935ba3739e815aa6a16ae4440d1 Mon Sep 17 00:00:00 2001 -From: Ulrich Drepper -Date: Tue Nov 26 22:50:54 2002 +0000 -Subject: [PATCH 1/9] 0001 - -Since https://sourceware.org/git/?p=glibc.git;a=commit;h=ed19993b5b0d05d62cc883571519a67dae481a14 -deleted the pthread condition functions. However, using these interfaces gives better performance. -Therefore, we add a subpackage that provides these interfaces. -You can use it by adding LD_PRELOAD=./libpthreadcond.so in front of your program (e.g. -LD_PRELOAD=./libpthreadcond.so ./test). Use with-compat_2_17 to compile it. -WARNING: the 2.17 version does not meet the POSIX standard; take care when using it. -Add pthread_cond_clockwait to prevent a process hang when libpthread-2.17 and libpthread-2.28 are used together. -Use pthread_cond_common to implement the common parts of pthread_cond_wait, pthread_cond_clockwait and pthread_cond_timedwait. - -Add some base files for the libpthread_condition family.
-Including but not limited to the following submission: -6efd481484e -a88c9263686 -76a50749f7a -69431c9a21f -5bd8a24966d - ---- - nptl_2_17/cancellation_2_17.c | 60 ++ - nptl_2_17/cleanup_compat_2_17.c | 50 ++ - nptl_2_17/pthread_cond_broadcast_2_17.c | 101 +++ - nptl_2_17/pthread_cond_destroy_2_17.c | 86 +++ - nptl_2_17/pthread_cond_init_2_17.c | 49 ++ - nptl_2_17/pthread_cond_signal_2_17.c | 84 +++ - nptl_2_17/pthread_cond_wait_2_17.c | 329 ++++++++++ - nptl_2_17/pthread_condattr_getclock_2_17.c | 28 + - nptl_2_17/pthread_condattr_getpshared_2_17.c | 28 + - nptl_2_17/pthread_condattr_init_2_17.c | 33 + - nptl_2_17/pthread_condattr_setclock_2_17.c | 45 ++ - nptl_2_17/pthread_mutex_cond_lock_2_17.c | 21 + - nptl_2_17/pthread_mutex_lock_2_17.c | 652 +++++++++++++++++++ - nptl_2_17/pthread_mutex_unlock_2_17.c | 361 ++++++++++ - nptl_2_17/tpp_2_17.c | 195 ++++++ - nptl_2_17/vars_2_17.c | 43 ++ - 16 files changed, 2165 insertions(+) - create mode 100644 nptl_2_17/cancellation_2_17.c - create mode 100644 nptl_2_17/cleanup_compat_2_17.c - create mode 100644 nptl_2_17/pthread_cond_broadcast_2_17.c - create mode 100644 nptl_2_17/pthread_cond_destroy_2_17.c - create mode 100644 nptl_2_17/pthread_cond_init_2_17.c - create mode 100644 nptl_2_17/pthread_cond_signal_2_17.c - create mode 100644 nptl_2_17/pthread_cond_wait_2_17.c - create mode 100644 nptl_2_17/pthread_condattr_getclock_2_17.c - create mode 100644 nptl_2_17/pthread_condattr_getpshared_2_17.c - create mode 100644 nptl_2_17/pthread_condattr_init_2_17.c - create mode 100644 nptl_2_17/pthread_condattr_setclock_2_17.c - create mode 100644 nptl_2_17/pthread_mutex_cond_lock_2_17.c - create mode 100644 nptl_2_17/pthread_mutex_lock_2_17.c - create mode 100644 nptl_2_17/pthread_mutex_unlock_2_17.c - create mode 100644 nptl_2_17/tpp_2_17.c - create mode 100644 nptl_2_17/vars_2_17.c - -diff --git a/nptl_2_17/cancellation_2_17.c b/nptl_2_17/cancellation_2_17.c -new file mode 100644 -index 00000000..5c9ce572 ---- /dev/null -+++ b/nptl_2_17/cancellation_2_17.c -@@ -0,0 +1,60 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . 
*/ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+ -+int -+__pthread_enable_asynccancel (void) -+{ -+ struct pthread *self = THREAD_SELF; -+ -+ int oldval = THREAD_GETMEM (self, canceltype); -+ THREAD_SETMEM (self, canceltype, PTHREAD_CANCEL_ASYNCHRONOUS); -+ -+ int ch = THREAD_GETMEM (self, cancelhandling); -+ -+ if (self->cancelstate == PTHREAD_CANCEL_ENABLE -+ && (ch & CANCELED_BITMASK) -+ && !(ch & EXITING_BITMASK) -+ && !(ch & TERMINATED_BITMASK)) -+ { -+ THREAD_SETMEM (self, result, PTHREAD_CANCELED); -+ __do_cancel (); -+ } -+ -+ return oldval; -+} -+libc_hidden_def (__pthread_enable_asynccancel) -+ -+/* See the comment for __pthread_enable_asynccancel regarding -+ the AS-safety of this function. */ -+void -+__pthread_disable_asynccancel (int oldtype) -+{ -+ /* If asynchronous cancellation was enabled before we do not have -+ anything to do. */ -+ if (oldtype == PTHREAD_CANCEL_ASYNCHRONOUS) -+ return; -+ -+ struct pthread *self = THREAD_SELF; -+ self->canceltype = PTHREAD_CANCEL_DEFERRED; -+} -+libc_hidden_def (__pthread_disable_asynccancel) -diff --git a/nptl_2_17/cleanup_compat_2_17.c b/nptl_2_17/cleanup_compat_2_17.c -new file mode 100644 -index 00000000..53cf903d ---- /dev/null -+++ b/nptl_2_17/cleanup_compat_2_17.c -@@ -0,0 +1,50 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+ -+ -+void -+_pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer, -+ void (*routine) (void *), void *arg) -+{ -+ struct pthread *self = THREAD_SELF; -+ -+ buffer->__routine = routine; -+ buffer->__arg = arg; -+ buffer->__prev = THREAD_GETMEM (self, cleanup); -+ -+ THREAD_SETMEM (self, cleanup, buffer); -+} -+strong_alias (_pthread_cleanup_push, __pthread_cleanup_push) -+ -+ -+void -+_pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, int execute) -+{ -+ struct pthread *self __attribute ((unused)) = THREAD_SELF; -+ -+ THREAD_SETMEM (self, cleanup, buffer->__prev); -+ -+ /* If necessary call the cleanup routine after we removed the -+ current cleanup block from the list. */ -+ if (execute) -+ buffer->__routine (buffer->__arg); -+} -+strong_alias (_pthread_cleanup_pop, __pthread_cleanup_pop) -diff --git a/nptl_2_17/pthread_cond_broadcast_2_17.c b/nptl_2_17/pthread_cond_broadcast_2_17.c -new file mode 100644 -index 00000000..df39c99b ---- /dev/null -+++ b/nptl_2_17/pthread_cond_broadcast_2_17.c -@@ -0,0 +1,101 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Martin Schwidefsky , 2003. 
-+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "kernel-features_2_17.h" -+#include "pthread_2_17.h" -+#include "pthreadP_2_17.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+ -+/* We do the following steps from __pthread_cond_signal in one critical -+ section: (1) signal all waiters in G1, (2) close G1 so that it can become -+ the new G2 and make G2 the new G1, and (3) signal all waiters in the new -+ G1. We don't need to do all these steps if there are no waiters in G1 -+ and/or G2. See __pthread_cond_signal for further details. */ -+int -+__pthread_cond_broadcast (pthread_cond_t *cond) -+{ -+ LIBC_PROBE (cond_broadcast, 1, cond); -+ -+ int pshared = (cond->__data.__mutex == (void *) ~0l) -+ ? LLL_SHARED : LLL_PRIVATE; -+ /* Make sure we are alone. */ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ /* Are there any waiters to be woken? */ -+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) -+ -+ { -+ /* Yes. Mark them all as woken. */ -+ cond->__data.__wakeup_seq = cond->__data.__total_seq; -+ cond->__data.__woken_seq = cond->__data.__total_seq; -+ cond->__data.__futex = (unsigned int) cond->__data.__total_seq * 2; -+ int futex_val = cond->__data.__futex; -+ /* Signal that a broadcast happened. */ -+ ++cond->__data.__broadcast_seq; -+ -+ /* We are done. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ /* Wake everybody. */ -+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; -+ -+ /* Do not use requeue for pshared condvars. */ -+ if (mut == (void *) ~0l -+ || PTHREAD_MUTEX_PSHARED (mut) & PTHREAD_MUTEX_PSHARED_BIT) -+ goto wake_all; -+ -+#if (defined lll_futex_cmp_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+ if (USE_REQUEUE_PI (mut)) -+ { -+ if (lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, INT_MAX, -+ &mut->__data.__lock, futex_val, -+ LLL_PRIVATE) == 0) -+ return 0; -+ } -+ else -+#endif -+ /* lll_futex_requeue returns 0 for success and non-zero -+ for errors. */ -+ if (!__builtin_expect (lll_futex_requeue (&cond->__data.__futex, 1, -+ INT_MAX, &mut->__data.__lock, -+ futex_val, LLL_PRIVATE), 0)) -+ return 0; -+ -+wake_all: -+ lll_futex_wake (&cond->__data.__futex, INT_MAX, pshared); -+ return 0; -+ } -+ /* We are done. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ return 0; -+} -+ -+versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, -+ GLIBC_2_3_2); -diff --git a/nptl_2_17/pthread_cond_destroy_2_17.c b/nptl_2_17/pthread_cond_destroy_2_17.c -new file mode 100644 -index 00000000..6342f471 ---- /dev/null -+++ b/nptl_2_17/pthread_cond_destroy_2_17.c -@@ -0,0 +1,86 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. 
-+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+int -+__pthread_cond_destroy (pthread_cond_t *cond) -+{ -+ int pshared = (cond->__data.__mutex == (void *) ~0l) -+ ? LLL_SHARED : LLL_PRIVATE; -+ -+ LIBC_PROBE (cond_destroy, 1, cond); -+ -+ /* Make sure we are alone. */ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) -+ { -+ /* If there are still some waiters which have not been -+ woken up, this is an application bug. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ return EBUSY; -+ } -+ -+ /* Tell pthread_cond_*wait that this condvar is being destroyed. */ -+ cond->__data.__total_seq = -1ULL; -+ -+ /* If there are waiters which have been already signalled or -+ broadcasted, but still are using the pthread_cond_t structure, -+ pthread_cond_destroy needs to wait for them. */ -+ unsigned int nwaiters = cond->__data.__nwaiters; -+ -+ if (nwaiters >= (1 << COND_NWAITERS_SHIFT)) -+ -+ { -+ /* Wake everybody on the associated mutex in case there are -+ threads that have been requeued to it. -+ Without this, pthread_cond_destroy could block potentially -+ for a long time or forever, as it would depend on other -+ thread's using the mutex. -+ When all threads waiting on the mutex are woken up, pthread_cond_wait -+ only waits for threads to acquire and release the internal -+ condvar lock. */ -+ if (cond->__data.__mutex != NULL -+ && cond->__data.__mutex != (void *) ~0l) -+ { -+ pthread_mutex_t *mut = (pthread_mutex_t *) cond->__data.__mutex; -+ lll_futex_wake (&mut->__data.__lock, INT_MAX, -+ PTHREAD_MUTEX_PSHARED (mut)); -+ } -+ -+ do -+ { -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ lll_futex_wait (&cond->__data.__nwaiters, nwaiters, pshared); -+ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ nwaiters = cond->__data.__nwaiters; -+ } -+ while (nwaiters >= (1 << COND_NWAITERS_SHIFT)); -+ } -+ -+ return 0; -+} -+versioned_symbol (libpthread, __pthread_cond_destroy, -+ pthread_cond_destroy, GLIBC_2_3_2); -diff --git a/nptl_2_17/pthread_cond_init_2_17.c b/nptl_2_17/pthread_cond_init_2_17.c -new file mode 100644 -index 00000000..d590d1d0 ---- /dev/null -+++ b/nptl_2_17/pthread_cond_init_2_17.c -@@ -0,0 +1,49 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+ -+ -+int -+__pthread_cond_init (pthread_cond_t *cond, const pthread_condattr_t *cond_attr) -+{ -+ ASSERT_TYPE_SIZE (pthread_cond_t, __SIZEOF_PTHREAD_COND_T); -+ -+ struct pthread_condattr *icond_attr = (struct pthread_condattr *) cond_attr; -+ -+ cond->__data.__lock = LLL_LOCK_INITIALIZER; -+ cond->__data.__futex = 0; -+ cond->__data.__nwaiters = (icond_attr != NULL -+ ? ((icond_attr->value >> 1) & ((1 << COND_NWAITERS_SHIFT) - 1)) -+ : CLOCK_REALTIME); -+ cond->__data.__total_seq = 0; -+ cond->__data.__wakeup_seq = 0; -+ cond->__data.__woken_seq = 0; -+ cond->__data.__mutex = (icond_attr == NULL || (icond_attr->value & 1) == 0 -+ ? NULL : (void *) ~0l); -+ cond->__data.__broadcast_seq = 0; -+ -+ -+ LIBC_PROBE (cond_init, 2, cond, cond_attr); -+ -+ return 0; -+} -+versioned_symbol (libpthread, __pthread_cond_init, -+ pthread_cond_init, GLIBC_2_3_2); -diff --git a/nptl_2_17/pthread_cond_signal_2_17.c b/nptl_2_17/pthread_cond_signal_2_17.c -new file mode 100644 -index 00000000..e6f08ac8 ---- /dev/null -+++ b/nptl_2_17/pthread_cond_signal_2_17.c -@@ -0,0 +1,84 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Martin Schwidefsky , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "kernel-features_2_17.h" -+#include "pthread_2_17.h" -+#include "pthreadP_2_17.h" -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+int -+__pthread_cond_signal (pthread_cond_t *cond) -+{ -+ int pshared = (cond->__data.__mutex == (void *) ~0l) -+ ? LLL_SHARED : LLL_PRIVATE; -+ -+ LIBC_PROBE (cond_signal, 1, cond); -+ -+ /* Make sure we are alone. */ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ /* Are there any waiters to be woken? */ -+ if (cond->__data.__total_seq > cond->__data.__wakeup_seq) -+ { -+ /* Yes. Mark one of them as woken. */ -+ ++cond->__data.__wakeup_seq; -+ ++cond->__data.__futex; -+ -+#if (defined lll_futex_cmp_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+ pthread_mutex_t *mut = cond->__data.__mutex; -+ -+ if (USE_REQUEUE_PI (mut) -+ /* This can only really fail with a ENOSYS, since nobody can modify -+ futex while we have the cond_lock. */ -+ && lll_futex_cmp_requeue_pi (&cond->__data.__futex, 1, 0, -+ &mut->__data.__lock, -+ cond->__data.__futex, pshared) == 0) -+ { -+ lll_unlock (cond->__data.__lock, pshared); -+ return 0; -+ } -+ else -+#endif -+ /* Wake one. */ -+ if (! __builtin_expect (lll_futex_wake_unlock (&cond->__data.__futex, -+ 1, 1, -+ &cond->__data.__lock, -+ pshared), 0)) -+ return 0; -+ -+ /* Fallback if neither of them work. 
*/ -+ lll_futex_wake (&cond->__data.__futex, 1, pshared); -+ } -+/* We are done. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ return 0; -+} -+ -+versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, -+ GLIBC_2_3_2); -diff --git a/nptl_2_17/pthread_cond_wait_2_17.c b/nptl_2_17/pthread_cond_wait_2_17.c -new file mode 100644 -index 00000000..ff651a00 ---- /dev/null -+++ b/nptl_2_17/pthread_cond_wait_2_17.c -@@ -0,0 +1,329 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Martin Schwidefsky , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "kernel-features_2_17.h" -+#include "pthread_2_17.h" -+#include "pthreadP_2_17.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+ -+struct _condvar_cleanup_buffer -+{ -+ int oldtype; -+ pthread_cond_t *cond; -+ pthread_mutex_t *mutex; -+ unsigned int bc_seq; -+}; -+ -+void -+__attribute__ ((visibility ("hidden"))) -+__condvar_cleanup (void *arg) -+{ -+ struct _condvar_cleanup_buffer *cbuffer = -+ (struct _condvar_cleanup_buffer *) arg; -+ unsigned int destroying; -+ int pshared = (cbuffer->cond->__data.__mutex == (void *) ~0l) -+ ? LLL_SHARED : LLL_PRIVATE; -+ -+ /* We are going to modify shared data. */ -+ lll_lock (cbuffer->cond->__data.__lock, pshared); -+ -+ if (cbuffer->bc_seq == cbuffer->cond->__data.__broadcast_seq) -+ { -+ /* This thread is not waiting anymore. Adjust the sequence counters -+ * appropriately. We do not increment WAKEUP_SEQ if this would -+ * bump it over the value of TOTAL_SEQ. This can happen if a thread -+ * was woken and then canceled. */ -+ if (cbuffer->cond->__data.__wakeup_seq -+ < cbuffer->cond->__data.__total_seq) -+ { -+ ++cbuffer->cond->__data.__wakeup_seq; -+ ++cbuffer->cond->__data.__futex; -+ } -+ ++cbuffer->cond->__data.__woken_seq; -+ } -+ -+ cbuffer->cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; -+ -+ /* If pthread_cond_destroy was called on this variable already, -+ notify the pthread_cond_destroy caller all waiters have left -+ and it can be successfully destroyed. */ -+ destroying = 0; -+ if (cbuffer->cond->__data.__total_seq == -1ULL -+ && cbuffer->cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) -+ { -+ lll_futex_wake (&cbuffer->cond->__data.__nwaiters, 1, pshared); -+ destroying = 1; -+ } -+ -+ /* We are done. */ -+ lll_unlock (cbuffer->cond->__data.__lock, pshared); -+ -+ /* Wake everybody to make sure no condvar signal gets lost. */ -+ if (! destroying) -+ lll_futex_wake (&cbuffer->cond->__data.__futex, INT_MAX, pshared); -+ -+ /* Get the mutex before returning unless asynchronous cancellation -+ is in effect. We don't try to get the mutex if we already own it. 
*/ -+ if (!(USE_REQUEUE_PI (cbuffer->mutex)) -+ || ((cbuffer->mutex->__data.__lock & FUTEX_TID_MASK) -+ != THREAD_GETMEM (THREAD_SELF, tid))) -+ { -+ __pthread_mutex_cond_lock (cbuffer->mutex); -+ } -+ else -+ __pthread_mutex_cond_lock_adjust (cbuffer->mutex); -+} -+ -+static __always_inline int -+__pthread_cond_wait_common (pthread_cond_t *cond, pthread_mutex_t *mutex, -+ clockid_t clockid, -+ const struct timespec *abstime) -+{ -+ struct _pthread_cleanup_buffer buffer; -+ struct _condvar_cleanup_buffer cbuffer; -+ int result = 0; -+ -+ int pshared = (cond->__data.__mutex == (void *) ~0l) -+ ? LLL_SHARED : LLL_PRIVATE; -+ -+ #if (defined lll_futex_wait_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+ int pi_flag = 0; -+#endif -+ LIBC_PROBE (cond_wait, 2, cond, mutex); -+ /* clockid will already have been checked by -+ __pthread_cond_clockwait or pthread_condattr_setclock, or we -+ don't use it if abstime is NULL, so we don't need to check it -+ here. */ -+ /* Make sure we are alone. */ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ /* Now we can release the mutex. */ -+ int err = __pthread_mutex_unlock_usercnt (mutex, 0); -+ if (__glibc_unlikely (err)) -+ { -+ lll_unlock (cond->__data.__lock, pshared); -+ return err; -+ } -+ -+ /* We have one new user of the condvar. */ -+ ++cond->__data.__total_seq; -+ ++cond->__data.__futex; -+ cond->__data.__nwaiters += 1 << COND_NWAITERS_SHIFT; -+ -+ /* Work around the fact that the kernel rejects negative timeout values -+ despite them being valid. */ -+ if (abstime != NULL && __glibc_unlikely (abstime->tv_sec < 0)) -+ goto timeout; -+ -+ /* Remember the mutex we are using here. If there is already a -+ different address store this is a bad user bug. Do not store -+ anything for pshared condvars. */ -+ if (cond->__data.__mutex != (void *) ~0l) -+ cond->__data.__mutex = mutex; -+ -+ /* Prepare structure passed to cancellation handler. */ -+ cbuffer.cond = cond; -+ cbuffer.mutex = mutex; -+ -+ /* Before we block we enable cancellation. Therefore we have to -+ install a cancellation handler. */ -+ __pthread_cleanup_push (&buffer, __condvar_cleanup, &cbuffer); -+ -+ /* The current values of the wakeup counter. The "woken" counter -+ must exceed this value. */ -+ unsigned long long int val; -+ unsigned long long int seq; -+ val = seq = cond->__data.__wakeup_seq; -+ /* Remember the broadcast counter. */ -+ cbuffer.bc_seq = cond->__data.__broadcast_seq; -+ -+ while (1) -+ { -+ unsigned int futex_val = cond->__data.__futex; -+ -+ /* Prepare to wait. Release the condvar futex. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ /* Enable asynchronous cancellation. Required by the standard. */ -+ cbuffer.oldtype = __pthread_enable_asynccancel (); -+ -+#if (defined lll_futex_wait_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+ /* If pi_flag remained 1 then it means that we had the lock and the mutex -+ but a spurious waker raced ahead of us. Give back the mutex before -+ going into wait again. */ -+ if (pi_flag) -+ { -+ __pthread_mutex_cond_lock_adjust (mutex); -+ __pthread_mutex_unlock_usercnt (mutex, 0); -+ } -+ pi_flag = USE_REQUEUE_PI (mutex); -+ -+ if (pi_flag) -+ { -+ if (abstime == NULL) -+ { -+ err = lll_futex_wait_requeue_pi (&cond->__data.__futex, -+ futex_val, &mutex->__data.__lock, -+ pshared); -+ } -+ else -+ { -+ unsigned int clockbit = (clockid == CLOCK_REALTIME) -+ ? 
FUTEX_CLOCK_REALTIME : 0; -+ -+ err = lll_futex_timed_wait_requeue_pi (&cond->__data.__futex, -+ futex_val, abstime, clockbit, -+ &mutex->__data.__lock, -+ pshared); -+ } -+ pi_flag = (err == 0); -+ } -+ else -+#endif -+ /* Wait until woken by signal or broadcast. */ -+ { -+ if (abstime == NULL) -+ { -+ lll_futex_wait (&cond->__data.__futex, futex_val, pshared); -+ } -+ else -+ { -+ err = lll_futex_clock_wait_bitset (&cond->__data.__futex, futex_val, -+ clockid, abstime, pshared); -+ } -+ } -+ /* Disable asynchronous cancellation. */ -+ __pthread_disable_asynccancel (cbuffer.oldtype); -+ -+ /* We are going to look at shared data again, so get the lock. */ -+ lll_lock (cond->__data.__lock, pshared); -+ -+ /* If a broadcast happened, we are done. */ -+ if (cbuffer.bc_seq != cond->__data.__broadcast_seq) -+ goto bc_out; -+ -+ /* Check whether we are eligible for wakeup. */ -+ val = cond->__data.__wakeup_seq; -+ if (val != seq && cond->__data.__woken_seq != val) -+ break; -+ -+ /* Not woken yet. Maybe the time expired? */ -+ if (abstime != NULL && __glibc_unlikely (err == -ETIMEDOUT)) -+ { -+ timeout: -+ /* Yep. Adjust the counters. */ -+ ++cond->__data.__wakeup_seq; -+ ++cond->__data.__futex; -+ -+ /* The error value. */ -+ result = ETIMEDOUT; -+ break; -+ } -+ } -+ -+ /* Another thread woken up. */ -+ ++cond->__data.__woken_seq; -+ -+bc_out: -+ cond->__data.__nwaiters -= 1 << COND_NWAITERS_SHIFT; -+ -+ /* If pthread_cond_destroy was called on this variable already, -+ notify the pthread_cond_destroy caller all waiters have left -+ and it can be successfully destroyed. */ -+ if (cond->__data.__total_seq == -1ULL -+ && cond->__data.__nwaiters < (1 << COND_NWAITERS_SHIFT)) -+ lll_futex_wake (&cond->__data.__nwaiters, 1, pshared); -+ -+ /* We are done with the condvar. */ -+ lll_unlock (cond->__data.__lock, pshared); -+ -+ /* The cancellation handling is back to normal, remove the handler. */ -+ __pthread_cleanup_pop (&buffer, 0); -+ -+ /* Get the mutex before returning. */ -+#if (defined lll_futex_wait_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+ if (pi_flag) -+ { -+ __pthread_mutex_cond_lock_adjust (mutex); -+ err = 0; -+ } -+ else -+#endif -+ err = __pthread_mutex_cond_lock (mutex); -+ return err ?: result; -+} -+/* See __pthread_cond_wait_common. */ -+int -+__pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) -+{ -+ /* clockid is unused when abstime is NULL. */ -+ return __pthread_cond_wait_common (cond, mutex, 0, NULL); -+} -+ -+/* See __pthread_cond_wait_common. */ -+int -+__pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, -+ const struct timespec *abstime) -+{ -+ /* Check parameter validity. This should also tell the compiler that -+ it can assume that abstime is not NULL. */ -+ if (abstime == NULL || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) -+ return EINVAL; -+ -+ clockid_t clockid = cond->__data.__nwaiters & 1; -+ -+ return __pthread_cond_wait_common (cond, mutex, clockid, abstime); -+} -+ -+/* See __pthread_cond_wait_common. */ -+int -+__pthread_cond_clockwait (pthread_cond_t *cond, pthread_mutex_t *mutex, -+ clockid_t clockid, -+ const struct timespec *abstime) -+{ -+ /* Check parameter validity. This should also tell the compiler that -+ it can assume that abstime is not NULL. 
*/ -+ if (abstime == NULL || abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) -+ return EINVAL; -+ -+ if (!futex_abstimed_supported_clockid (clockid)) -+ return EINVAL; -+ -+ return __pthread_cond_wait_common (cond, mutex, clockid, abstime); -+} -+ -+versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, -+ GLIBC_2_3_2); -+versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, -+ GLIBC_2_3_2); -+versioned_symbol (libpthread, __pthread_cond_clockwait, pthread_cond_clockwait, -+ GLIBC_2_34); -diff --git a/nptl_2_17/pthread_condattr_getclock_2_17.c b/nptl_2_17/pthread_condattr_getclock_2_17.c -new file mode 100644 -index 00000000..414a6856 ---- /dev/null -+++ b/nptl_2_17/pthread_condattr_getclock_2_17.c -@@ -0,0 +1,28 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+ -+ -+int -+pthread_condattr_getclock (const pthread_condattr_t *attr, clockid_t *clock_id) -+{ -+ *clock_id = (((((const struct pthread_condattr *) attr)->value) >> 1) -+ & ((1 << COND_NWAITERS_SHIFT) - 1)); -+ return 0; -+} -diff --git a/nptl_2_17/pthread_condattr_getpshared_2_17.c b/nptl_2_17/pthread_condattr_getpshared_2_17.c -new file mode 100644 -index 00000000..2b85506f ---- /dev/null -+++ b/nptl_2_17/pthread_condattr_getpshared_2_17.c -@@ -0,0 +1,28 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+ -+ -+int -+pthread_condattr_getpshared (const pthread_condattr_t *attr, int *pshared) -+{ -+ *pshared = ((const struct pthread_condattr *) attr)->value & 1; -+ -+ return 0; -+} -diff --git a/nptl_2_17/pthread_condattr_init_2_17.c b/nptl_2_17/pthread_condattr_init_2_17.c -new file mode 100644 -index 00000000..c2765e96 ---- /dev/null -+++ b/nptl_2_17/pthread_condattr_init_2_17.c -@@ -0,0 +1,33 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. 
-+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+ -+ -+int -+__pthread_condattr_init (pthread_condattr_t *attr) -+{ -+ ASSERT_TYPE_SIZE (pthread_condattr_t, __SIZEOF_PTHREAD_CONDATTR_T); -+ ASSERT_PTHREAD_INTERNAL_SIZE (pthread_condattr_t, -+ struct pthread_condattr); -+ -+ memset (attr, '\0', sizeof (*attr)); -+ return 0; -+} -+strong_alias (__pthread_condattr_init, pthread_condattr_init) -diff --git a/nptl_2_17/pthread_condattr_setclock_2_17.c b/nptl_2_17/pthread_condattr_setclock_2_17.c -new file mode 100644 -index 00000000..69c64dcb ---- /dev/null -+++ b/nptl_2_17/pthread_condattr_setclock_2_17.c -@@ -0,0 +1,45 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+#include -+ -+ -+int -+pthread_condattr_setclock (pthread_condattr_t *attr, clockid_t clock_id) -+{ -+ /* Only a few clocks are allowed. */ -+ if (clock_id != CLOCK_MONOTONIC && clock_id != CLOCK_REALTIME) -+ /* If more clocks are allowed some day the storing of the clock ID -+ in the pthread_cond_t structure needs to be adjusted. */ -+ return EINVAL; -+ -+ /* Make sure the value fits in the bits we reserved. */ -+ assert (clock_id < (1 << COND_NWAITERS_SHIFT)); -+ -+ int *valuep = &((struct pthread_condattr *) attr)->value; -+ -+ *valuep = ((*valuep & ~(((1 << COND_NWAITERS_SHIFT) - 1) << 1)) -+ | (clock_id << 1)); -+ -+ return 0; -+} -diff --git a/nptl_2_17/pthread_mutex_cond_lock_2_17.c b/nptl_2_17/pthread_mutex_cond_lock_2_17.c -new file mode 100644 -index 00000000..87734543 ---- /dev/null -+++ b/nptl_2_17/pthread_mutex_cond_lock_2_17.c -@@ -0,0 +1,21 @@ -+#include -+ -+#define LLL_MUTEX_LOCK(mutex) \ -+ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)) -+ -+/* Not actually elided so far. Needed? 
*/ -+#define LLL_MUTEX_LOCK_ELISION(mutex) \ -+ ({ lll_cond_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)); 0; }) -+ -+#define LLL_MUTEX_TRYLOCK(mutex) \ -+ lll_cond_trylock ((mutex)->__data.__lock) -+#define LLL_MUTEX_TRYLOCK_ELISION(mutex) LLL_MUTEX_TRYLOCK(mutex) -+ -+/* We need to assume that there are other threads blocked on the futex. -+ See __pthread_mutex_lock_full for further details. */ -+#define LLL_ROBUST_MUTEX_LOCK_MODIFIER FUTEX_WAITERS -+#define __pthread_mutex_lock __pthread_mutex_cond_lock -+#define __pthread_mutex_lock_full __pthread_mutex_cond_lock_full -+#define NO_INCR -+ -+#include -diff --git a/nptl_2_17/pthread_mutex_lock_2_17.c b/nptl_2_17/pthread_mutex_lock_2_17.c -new file mode 100644 -index 00000000..b08a2472 ---- /dev/null -+++ b/nptl_2_17/pthread_mutex_lock_2_17.c -@@ -0,0 +1,652 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#ifndef lll_lock_elision -+#define lll_lock_elision(lock, try_lock, private) ({ \ -+ lll_lock (lock, private); 0; }) -+#endif -+ -+#ifndef lll_trylock_elision -+#define lll_trylock_elision(a,t) lll_trylock(a) -+#endif -+ -+/* Some of the following definitions differ when pthread_mutex_cond_lock.c -+ includes this file. */ -+#ifndef LLL_MUTEX_LOCK -+/* lll_lock with single-thread optimization. */ -+static inline void -+lll_mutex_lock_optimized (pthread_mutex_t *mutex) -+{ -+ /* The single-threaded optimization is only valid for private -+ mutexes. For process-shared mutexes, the mutex could be in a -+ shared mapping, so synchronization with another process is needed -+ even without any threads. If the lock is already marked as -+ acquired, POSIX requires that pthread_mutex_lock deadlocks for -+ normal mutexes, so skip the optimization in that case as -+ well. 
*/ -+ int private = PTHREAD_MUTEX_PSHARED (mutex); -+ if (private == LLL_PRIVATE && SINGLE_THREAD_P && mutex->__data.__lock == 0) -+ mutex->__data.__lock = 1; -+ else -+ lll_lock (mutex->__data.__lock, private); -+} -+# define LLL_MUTEX_LOCK(mutex) \ -+ lll_lock ((mutex)->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)) -+# define LLL_MUTEX_LOCK_OPTIMIZED(mutex) lll_mutex_lock_optimized (mutex) -+# define LLL_MUTEX_TRYLOCK(mutex) \ -+ lll_trylock ((mutex)->__data.__lock) -+# define LLL_ROBUST_MUTEX_LOCK_MODIFIER 0 -+# define LLL_MUTEX_LOCK_ELISION(mutex) \ -+ lll_lock_elision ((mutex)->__data.__lock, (mutex)->__data.__elision, \ -+ PTHREAD_MUTEX_PSHARED (mutex)) -+# define LLL_MUTEX_TRYLOCK_ELISION(mutex) \ -+ lll_trylock_elision((mutex)->__data.__lock, (mutex)->__data.__elision, \ -+ PTHREAD_MUTEX_PSHARED (mutex)) -+# define PTHREAD_MUTEX_LOCK ___pthread_mutex_lock -+# define PTHREAD_MUTEX_VERSIONS 1 -+#endif -+ -+static int __pthread_mutex_lock_full (pthread_mutex_t *mutex) -+ __attribute_noinline__; -+ -+int -+__pthread_mutex_lock (pthread_mutex_t *mutex) -+{ -+ /* See concurrency notes regarding mutex type which is loaded from __kind -+ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ -+ unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex); -+ -+ LIBC_PROBE (mutex_entry, 1, mutex); -+ -+ if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP -+ | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0)) -+ return __pthread_mutex_lock_full (mutex); -+ -+ if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP)) -+ { -+ FORCE_ELISION (mutex, goto elision); -+ simple: -+ /* Normal mutex. */ -+ LLL_MUTEX_LOCK (mutex); -+ assert (mutex->__data.__owner == 0); -+ } -+#ifdef ENABLE_ELISION_SUPPORT -+ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP)) -+ { -+ elision: __attribute__((unused)) -+ /* This case can never happen on a system without elision, -+ as the mutex type initialization functions will not -+ allow to set the elision flags. */ -+ /* Don't record owner or users for elision case. This is a -+ tail call. */ -+ return LLL_MUTEX_LOCK_ELISION (mutex); -+ } -+#endif -+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) -+ == PTHREAD_MUTEX_RECURSIVE_NP, 1)) -+ { -+ /* Recursive mutex. */ -+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); -+ -+ /* Check whether we already hold the mutex. */ -+ if (mutex->__data.__owner == id) -+ { -+ /* Just bump the counter. */ -+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) -+ /* Overflow of the counter. */ -+ return EAGAIN; -+ -+ ++mutex->__data.__count; -+ -+ return 0; -+ } -+ -+ /* We have to get the mutex. */ -+ LLL_MUTEX_LOCK (mutex); -+ -+ assert (mutex->__data.__owner == 0); -+ mutex->__data.__count = 1; -+ } -+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) -+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1)) -+ { -+ if (! __is_smp) -+ goto simple; -+ -+ if (LLL_MUTEX_TRYLOCK (mutex) != 0) -+ { -+ int cnt = 0; -+ int max_cnt = MIN (MAX_ADAPTIVE_COUNT, -+ mutex->__data.__spins * 2 + 10); -+ do -+ { -+ if (cnt++ >= max_cnt) -+ { -+ LLL_MUTEX_LOCK (mutex); -+ break; -+ } -+ atomic_spin_nop (); -+ } -+ while (LLL_MUTEX_TRYLOCK (mutex) != 0); -+ -+ mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8; -+ } -+ assert (mutex->__data.__owner == 0); -+ } -+ else -+ { -+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); -+ assert (PTHREAD_MUTEX_TYPE (mutex) == PTHREAD_MUTEX_ERRORCHECK_NP); -+ /* Check whether we already hold the mutex. 
*/ -+ if (__glibc_unlikely (mutex->__data.__owner == id)) -+ return EDEADLK; -+ goto simple; -+ } -+ -+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); -+ -+ /* Record the ownership. */ -+ mutex->__data.__owner = id; -+#ifndef NO_INCR -+ ++mutex->__data.__nusers; -+#endif -+ -+ LIBC_PROBE (mutex_acquired, 1, mutex); -+ -+ return 0; -+} -+ -+static int -+__pthread_mutex_lock_full (pthread_mutex_t *mutex) -+{ -+ int oldval; -+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); -+ -+ switch (PTHREAD_MUTEX_TYPE (mutex)) -+ { -+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: -+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP: -+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ &mutex->__data.__list.__next); -+ /* We need to set op_pending before starting the operation. Also -+ see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ -+ oldval = mutex->__data.__lock; -+ /* This is set to FUTEX_WAITERS iff we might have shared the -+ FUTEX_WAITERS flag with other threads, and therefore need to keep it -+ set to avoid lost wake-ups. We have the same requirement in the -+ simple mutex algorithm. -+ We start with value zero for a normal mutex, and FUTEX_WAITERS if we -+ are building the special case mutexes for use from within condition -+ variables. */ -+ unsigned int assume_other_futex_waiters = LLL_ROBUST_MUTEX_LOCK_MODIFIER; -+ while (1) -+ { -+ /* Try to acquire the lock through a CAS from 0 (not acquired) to -+ our TID | assume_other_futex_waiters. */ -+ if (__glibc_likely (oldval == 0)) -+ { -+ oldval -+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+ id | assume_other_futex_waiters, 0); -+ if (__glibc_likely (oldval == 0)) -+ break; -+ } -+ -+ if ((oldval & FUTEX_OWNER_DIED) != 0) -+ { -+ /* The previous owner died. Try locking the mutex. */ -+ int newval = id; -+#ifdef NO_INCR -+ /* We are not taking assume_other_futex_waiters into accoount -+ here simply because we'll set FUTEX_WAITERS anyway. */ -+ newval |= FUTEX_WAITERS; -+#else -+ newval |= (oldval & FUTEX_WAITERS) | assume_other_futex_waiters; -+#endif -+ -+ newval -+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+ newval, oldval); -+ -+ if (newval != oldval) -+ { -+ oldval = newval; -+ continue; -+ } -+ -+ /* We got the mutex. */ -+ mutex->__data.__count = 1; -+ /* But it is inconsistent unless marked otherwise. */ -+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; -+ -+ /* We must not enqueue the mutex before we have acquired it. -+ Also see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ ENQUEUE_MUTEX (mutex); -+ /* We need to clear op_pending after we enqueue the mutex. */ -+ __asm ("" ::: "memory"); -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ -+ /* Note that we deliberately exit here. If we fall -+ through to the end of the function __nusers would be -+ incremented which is not correct because the old -+ owner has to be discounted. If we are not supposed -+ to increment __nusers we actually have to decrement -+ it here. */ -+#ifdef NO_INCR -+ --mutex->__data.__nusers; -+#endif -+ -+ return EOWNERDEAD; -+ } -+ -+ /* Check whether we already hold the mutex. */ -+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) -+ { -+ int kind = PTHREAD_MUTEX_TYPE (mutex); -+ if (kind == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP) -+ { -+ /* We do not need to ensure ordering wrt another memory -+ access. Also see comments at ENQUEUE_MUTEX. 
*/ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ NULL); -+ return EDEADLK; -+ } -+ -+ if (kind == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP) -+ { -+ /* We do not need to ensure ordering wrt another memory -+ access. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ NULL); -+ -+ /* Just bump the counter. */ -+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) -+ /* Overflow of the counter. */ -+ return EAGAIN; -+ -+ ++mutex->__data.__count; -+ -+ return 0; -+ } -+ } -+ -+ /* We cannot acquire the mutex nor has its owner died. Thus, try -+ to block using futexes. Set FUTEX_WAITERS if necessary so that -+ other threads are aware that there are potentially threads -+ blocked on the futex. Restart if oldval changed in the -+ meantime. */ -+ if ((oldval & FUTEX_WAITERS) == 0) -+ { -+ if (atomic_compare_and_exchange_bool_acq (&mutex->__data.__lock, -+ oldval | FUTEX_WAITERS, -+ oldval) -+ != 0) -+ { -+ oldval = mutex->__data.__lock; -+ continue; -+ } -+ oldval |= FUTEX_WAITERS; -+ } -+ -+ /* It is now possible that we share the FUTEX_WAITERS flag with -+ another thread; therefore, update assume_other_futex_waiters so -+ that we do not forget about this when handling other cases -+ above and thus do not cause lost wake-ups. */ -+ assume_other_futex_waiters |= FUTEX_WAITERS; -+ -+ /* Block using the futex and reload current lock value. */ -+ lll_futex_wait (&mutex->__data.__lock, oldval, -+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex)); -+ oldval = mutex->__data.__lock; -+ } -+ -+ /* We have acquired the mutex; check if it is still consistent. */ -+ if (__builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) -+ { -+ /* This mutex is now not recoverable. */ -+ mutex->__data.__count = 0; -+ int private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex); -+ lll_unlock (mutex->__data.__lock, private); -+ /* FIXME This violates the mutex destruction requirements. See -+ __pthread_mutex_unlock_full. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ return ENOTRECOVERABLE; -+ } -+ -+ mutex->__data.__count = 1; -+ /* We must not enqueue the mutex before we have acquired it. -+ Also see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ ENQUEUE_MUTEX (mutex); -+ /* We need to clear op_pending after we enqueue the mutex. */ -+ __asm ("" ::: "memory"); -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ break; -+ -+ /* The PI support requires the Linux futex system call. If that's not -+ available, pthread_mutex_init should never have allowed the type to -+ be set. So it will get the default case for an invalid type. */ -+#ifdef __NR_futex -+ case PTHREAD_MUTEX_PI_RECURSIVE_NP: -+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_PI_NORMAL_NP: -+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: -+ { -+ int kind, robust; -+ { -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. */ -+ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); -+ kind = mutex_kind & PTHREAD_MUTEX_KIND_MASK_NP; -+ robust = mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP; -+ } -+ -+ if (robust) -+ { -+ /* Note: robust PI futexes are signaled by setting bit 0. 
*/ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ (void *) (((uintptr_t) &mutex->__data.__list.__next) -+ | 1)); -+ /* We need to set op_pending before starting the operation. Also -+ see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ } -+ -+ oldval = mutex->__data.__lock; -+ -+ /* Check whether we already hold the mutex. */ -+ if (__glibc_unlikely ((oldval & FUTEX_TID_MASK) == id)) -+ { -+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) -+ { -+ /* We do not need to ensure ordering wrt another memory -+ access. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ return EDEADLK; -+ } -+ -+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP) -+ { -+ /* We do not need to ensure ordering wrt another memory -+ access. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ -+ /* Just bump the counter. */ -+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) -+ /* Overflow of the counter. */ -+ return EAGAIN; -+ -+ ++mutex->__data.__count; -+ -+ return 0; -+ } -+ } -+ -+ int newval = id; -+# ifdef NO_INCR -+ newval |= FUTEX_WAITERS; -+# endif -+ oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+ newval, 0); -+ -+ if (oldval != 0) -+ { -+ /* The mutex is locked. The kernel will now take care of -+ everything. */ -+ int private = (robust -+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) -+ : PTHREAD_MUTEX_PSHARED (mutex)); -+ INTERNAL_SYSCALL_DECL (__err); -+ int e = INTERNAL_SYSCALL (futex, 4, &mutex->__data.__lock, -+ __lll_private_flag (FUTEX_LOCK_PI, -+ private), 1, 0); -+ -+ if (INTERNAL_SYSCALL_ERROR_P (e) -+ && (INTERNAL_SYSCALL_ERRNO (e) == ESRCH -+ || INTERNAL_SYSCALL_ERRNO (e) == EDEADLK)) -+ { -+ assert (INTERNAL_SYSCALL_ERRNO (e) != EDEADLK -+ || (kind != PTHREAD_MUTEX_ERRORCHECK_NP -+ && kind != PTHREAD_MUTEX_RECURSIVE_NP)); -+ /* ESRCH can happen only for non-robust PI mutexes where -+ the owner of the lock died. */ -+ assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust); -+ -+ /* Delay the thread indefinitely. */ -+ while (1) -+ __pause_nocancel (); -+ } -+ -+ oldval = mutex->__data.__lock; -+ -+ assert (robust || (oldval & FUTEX_OWNER_DIED) == 0); -+ } -+ -+ if (__glibc_unlikely (oldval & FUTEX_OWNER_DIED)) -+ { -+ atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED); -+ -+ /* We got the mutex. */ -+ mutex->__data.__count = 1; -+ /* But it is inconsistent unless marked otherwise. */ -+ mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT; -+ -+ /* We must not enqueue the mutex before we have acquired it. -+ Also see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ ENQUEUE_MUTEX_PI (mutex); -+ /* We need to clear op_pending after we enqueue the mutex. */ -+ __asm ("" ::: "memory"); -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ -+ /* Note that we deliberately exit here. If we fall -+ through to the end of the function __nusers would be -+ incremented which is not correct because the old owner -+ has to be discounted. If we are not supposed to -+ increment __nusers we actually have to decrement it here. */ -+# ifdef NO_INCR -+ --mutex->__data.__nusers; -+# endif -+ -+ return EOWNERDEAD; -+ } -+ -+ if (robust -+ && __builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_NOTRECOVERABLE, 0)) -+ { -+ /* This mutex is now not recoverable. 
*/ -+ mutex->__data.__count = 0; -+ -+ INTERNAL_SYSCALL_DECL (__err); -+ INTERNAL_SYSCALL (futex, 4, &mutex->__data.__lock, -+ __lll_private_flag (FUTEX_UNLOCK_PI, -+ PTHREAD_ROBUST_MUTEX_PSHARED (mutex)), -+ 0, 0); -+ -+ /* To the kernel, this will be visible after the kernel has -+ acquired the mutex in the syscall. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ return ENOTRECOVERABLE; -+ } -+ -+ mutex->__data.__count = 1; -+ if (robust) -+ { -+ /* We must not enqueue the mutex before we have acquired it. -+ Also see comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ ENQUEUE_MUTEX_PI (mutex); -+ /* We need to clear op_pending after we enqueue the mutex. */ -+ __asm ("" ::: "memory"); -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ } -+ } -+ break; -+#endif /* __NR_futex. */ -+ -+ case PTHREAD_MUTEX_PP_RECURSIVE_NP: -+ case PTHREAD_MUTEX_PP_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_PP_NORMAL_NP: -+ case PTHREAD_MUTEX_PP_ADAPTIVE_NP: -+ { -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. */ -+ int kind = atomic_load_relaxed (&(mutex->__data.__kind)) -+ & PTHREAD_MUTEX_KIND_MASK_NP; -+ -+ oldval = mutex->__data.__lock; -+ -+ /* Check whether we already hold the mutex. */ -+ if (mutex->__data.__owner == id) -+ { -+ if (kind == PTHREAD_MUTEX_ERRORCHECK_NP) -+ return EDEADLK; -+ -+ if (kind == PTHREAD_MUTEX_RECURSIVE_NP) -+ { -+ /* Just bump the counter. */ -+ if (__glibc_unlikely (mutex->__data.__count + 1 == 0)) -+ /* Overflow of the counter. */ -+ return EAGAIN; -+ -+ ++mutex->__data.__count; -+ -+ return 0; -+ } -+ } -+ -+ int oldprio = -1, ceilval; -+ do -+ { -+ int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) -+ >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; -+ -+ if (__pthread_current_priority () > ceiling) -+ { -+ if (oldprio != -1) -+ __pthread_tpp_change_priority (oldprio, -1); -+ return EINVAL; -+ } -+ -+ int retval = __pthread_tpp_change_priority (oldprio, ceiling); -+ if (retval) -+ return retval; -+ -+ ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT; -+ oldprio = ceiling; -+ -+ oldval -+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+#ifdef NO_INCR -+ ceilval | 2, -+#else -+ ceilval | 1, -+#endif -+ ceilval); -+ -+ if (oldval == ceilval) -+ break; -+ -+ do -+ { -+ oldval -+ = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+ ceilval | 2, -+ ceilval | 1); -+ -+ if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval) -+ break; -+ -+ if (oldval != ceilval) -+ lll_futex_wait (&mutex->__data.__lock, ceilval | 2, -+ PTHREAD_MUTEX_PSHARED (mutex)); -+ } -+ while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock, -+ ceilval | 2, ceilval) -+ != ceilval); -+ } -+ while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval); -+ -+ assert (mutex->__data.__owner == 0); -+ mutex->__data.__count = 1; -+ } -+ break; -+ -+ default: -+ /* Correct code cannot set any other type. */ -+ return EINVAL; -+ } -+ -+ /* Record the ownership. */ -+ mutex->__data.__owner = id; -+#ifndef NO_INCR -+ ++mutex->__data.__nusers; -+#endif -+ -+ LIBC_PROBE (mutex_acquired, 1, mutex); -+ -+ return 0; -+} -+#ifndef __pthread_mutex_lock -+weak_alias (__pthread_mutex_lock, pthread_mutex_lock) -+hidden_def (__pthread_mutex_lock) -+#endif -+ -+ -+#ifdef NO_INCR -+void -+__pthread_mutex_cond_lock_adjust (pthread_mutex_t *mutex) -+{ -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. 
*/ -+ int mutex_kind = atomic_load_relaxed (&(mutex->__data.__kind)); -+ assert ((mutex_kind & PTHREAD_MUTEX_PRIO_INHERIT_NP) != 0); -+ assert ((mutex_kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP) == 0); -+ assert ((mutex_kind & PTHREAD_MUTEX_PSHARED_BIT) == 0); -+ -+ /* Record the ownership. */ -+ pid_t id = THREAD_GETMEM (THREAD_SELF, tid); -+ mutex->__data.__owner = id; -+ -+ if (mutex_kind == PTHREAD_MUTEX_PI_RECURSIVE_NP) -+ ++mutex->__data.__count; -+} -+#endif -diff --git a/nptl_2_17/pthread_mutex_unlock_2_17.c b/nptl_2_17/pthread_mutex_unlock_2_17.c -new file mode 100644 -index 00000000..00729d32 ---- /dev/null -+++ b/nptl_2_17/pthread_mutex_unlock_2_17.c -@@ -0,0 +1,361 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#ifndef lll_unlock_elision -+#define lll_unlock_elision(a,b,c) ({ lll_unlock (a,c); 0; }) -+#endif -+ -+static int -+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr) -+ __attribute_noinline__; -+ -+int -+attribute_hidden -+__pthread_mutex_unlock_usercnt (pthread_mutex_t *mutex, int decr) -+{ -+ /* See concurrency notes regarding mutex type which is loaded from __kind -+ in struct __pthread_mutex_s in sysdeps/nptl/bits/thread-shared-types.h. */ -+ int type = PTHREAD_MUTEX_TYPE_ELISION (mutex); -+ if (__builtin_expect (type & -+ ~(PTHREAD_MUTEX_KIND_MASK_NP|PTHREAD_MUTEX_ELISION_FLAGS_NP), 0)) -+ return __pthread_mutex_unlock_full (mutex, decr); -+ -+ if (__builtin_expect (type, PTHREAD_MUTEX_TIMED_NP) -+ == PTHREAD_MUTEX_TIMED_NP) -+ { -+ /* Always reset the owner field. */ -+ normal: -+ mutex->__data.__owner = 0; -+ if (decr) -+ /* One less user. */ -+ --mutex->__data.__nusers; -+ -+ /* Unlock. */ -+ lll_unlock (mutex->__data.__lock, PTHREAD_MUTEX_PSHARED (mutex)); -+ -+ LIBC_PROBE (mutex_release, 1, mutex); -+ -+ return 0; -+ } -+ else if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_ELISION_NP)) -+ { -+ /* Don't reset the owner/users fields for elision. */ -+ return lll_unlock_elision (mutex->__data.__lock, mutex->__data.__elision, -+ PTHREAD_MUTEX_PSHARED (mutex)); -+ } -+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) -+ == PTHREAD_MUTEX_RECURSIVE_NP, 1)) -+ { -+ /* Recursive mutex. */ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) -+ return EPERM; -+ -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return 0; -+ goto normal; -+ } -+ else if (__builtin_expect (PTHREAD_MUTEX_TYPE (mutex) -+ == PTHREAD_MUTEX_ADAPTIVE_NP, 1)) -+ goto normal; -+ else -+ { -+ /* Error checking mutex. */ -+ assert (type == PTHREAD_MUTEX_ERRORCHECK_NP); -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid) -+ || ! 
lll_islocked (mutex->__data.__lock)) -+ return EPERM; -+ goto normal; -+ } -+} -+ -+ -+static int -+__pthread_mutex_unlock_full (pthread_mutex_t *mutex, int decr) -+{ -+ int newowner = 0; -+ int private; -+ -+ switch (PTHREAD_MUTEX_TYPE (mutex)) -+ { -+ case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP: -+ /* Recursive mutex. */ -+ if ((mutex->__data.__lock & FUTEX_TID_MASK) -+ == THREAD_GETMEM (THREAD_SELF, tid) -+ && __builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_INCONSISTENT, 0)) -+ { -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return ENOTRECOVERABLE; -+ -+ goto notrecoverable; -+ } -+ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) -+ return EPERM; -+ -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return 0; -+ -+ goto robust; -+ -+ case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_ROBUST_NORMAL_NP: -+ case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP: -+ if ((mutex->__data.__lock & FUTEX_TID_MASK) -+ != THREAD_GETMEM (THREAD_SELF, tid) -+ || ! lll_islocked (mutex->__data.__lock)) -+ return EPERM; -+ -+ /* If the previous owner died and the caller did not succeed in -+ making the state consistent, mark the mutex as unrecoverable -+ and make all waiters. */ -+ if (__builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_INCONSISTENT, 0)) -+ notrecoverable: -+ newowner = PTHREAD_MUTEX_NOTRECOVERABLE; -+ -+ robust: -+ /* Remove mutex from the list. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ &mutex->__data.__list.__next); -+ /* We must set op_pending before we dequeue the mutex. Also see -+ comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ DEQUEUE_MUTEX (mutex); -+ -+ mutex->__data.__owner = newowner; -+ if (decr) -+ /* One less user. */ -+ --mutex->__data.__nusers; -+ -+ /* Unlock by setting the lock to 0 (not acquired); if the lock had -+ FUTEX_WAITERS set previously, then wake any waiters. -+ The unlock operation must be the last access to the mutex to not -+ violate the mutex destruction requirements (see __lll_unlock). */ -+ private = PTHREAD_ROBUST_MUTEX_PSHARED (mutex); -+ if (__glibc_unlikely ((atomic_exchange_rel (&mutex->__data.__lock, 0) -+ & FUTEX_WAITERS) != 0)) -+ lll_futex_wake (&mutex->__data.__lock, 1, private); -+ -+ /* We must clear op_pending after we release the mutex. -+ FIXME However, this violates the mutex destruction requirements -+ because another thread could acquire the mutex, destroy it, and -+ reuse the memory for something else; then, if this thread crashes, -+ and the memory happens to have a value equal to the TID, the kernel -+ will believe it is still related to the mutex (which has been -+ destroyed already) and will modify some other random object. */ -+ __asm ("" ::: "memory"); -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ break; -+ -+ /* The PI support requires the Linux futex system call. If that's not -+ available, pthread_mutex_init should never have allowed the type to -+ be set. So it will get the default case for an invalid type. */ -+#ifdef __NR_futex -+ case PTHREAD_MUTEX_PI_RECURSIVE_NP: -+ /* Recursive mutex. */ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) -+ return EPERM; -+ -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return 0; -+ goto continue_pi_non_robust; -+ -+ case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP: -+ /* Recursive mutex. 
*/ -+ if ((mutex->__data.__lock & FUTEX_TID_MASK) -+ == THREAD_GETMEM (THREAD_SELF, tid) -+ && __builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_INCONSISTENT, 0)) -+ { -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return ENOTRECOVERABLE; -+ -+ goto pi_notrecoverable; -+ } -+ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) -+ return EPERM; -+ -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return 0; -+ -+ goto continue_pi_robust; -+ -+ case PTHREAD_MUTEX_PI_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_PI_NORMAL_NP: -+ case PTHREAD_MUTEX_PI_ADAPTIVE_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP: -+ case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP: -+ if ((mutex->__data.__lock & FUTEX_TID_MASK) -+ != THREAD_GETMEM (THREAD_SELF, tid) -+ || ! lll_islocked (mutex->__data.__lock)) -+ return EPERM; -+ -+ /* If the previous owner died and the caller did not succeed in -+ making the state consistent, mark the mutex as unrecoverable -+ and make all waiters. */ -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. */ -+ if ((atomic_load_relaxed (&(mutex->__data.__kind)) -+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0 -+ && __builtin_expect (mutex->__data.__owner -+ == PTHREAD_MUTEX_INCONSISTENT, 0)) -+ pi_notrecoverable: -+ newowner = PTHREAD_MUTEX_NOTRECOVERABLE; -+ -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. */ -+ if ((atomic_load_relaxed (&(mutex->__data.__kind)) -+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP) != 0) -+ { -+ continue_pi_robust: -+ /* Remove mutex from the list. -+ Note: robust PI futexes are signaled by setting bit 0. */ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, -+ (void *) (((uintptr_t) &mutex->__data.__list.__next) -+ | 1)); -+ /* We must set op_pending before we dequeue the mutex. Also see -+ comments at ENQUEUE_MUTEX. */ -+ __asm ("" ::: "memory"); -+ DEQUEUE_MUTEX (mutex); -+ } -+ -+ continue_pi_non_robust: -+ mutex->__data.__owner = newowner; -+ if (decr) -+ /* One less user. */ -+ --mutex->__data.__nusers; -+ -+ /* Unlock. Load all necessary mutex data before releasing the mutex -+ to not violate the mutex destruction requirements (see -+ lll_unlock). */ -+ /* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. */ -+ int robust = atomic_load_relaxed (&(mutex->__data.__kind)) -+ & PTHREAD_MUTEX_ROBUST_NORMAL_NP; -+ private = (robust -+ ? PTHREAD_ROBUST_MUTEX_PSHARED (mutex) -+ : PTHREAD_MUTEX_PSHARED (mutex)); -+ /* Unlock the mutex using a CAS unless there are futex waiters or our -+ TID is not the value of __lock anymore, in which case we let the -+ kernel take care of the situation. Use release MO in the CAS to -+ synchronize with acquire MO in lock acquisitions. */ -+ int l = atomic_load_relaxed (&mutex->__data.__lock); -+ do -+ { -+ if (((l & FUTEX_WAITERS) != 0) -+ || (l != THREAD_GETMEM (THREAD_SELF, tid))) -+ { -+ INTERNAL_SYSCALL_DECL (__err); -+ INTERNAL_SYSCALL (futex, 2, &mutex->__data.__lock, -+ __lll_private_flag (FUTEX_UNLOCK_PI, private)); -+ break; -+ } -+ } -+ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock, -+ &l, 0)); -+ -+ /* This happens after the kernel releases the mutex but violates the -+ mutex destruction requirements; see comments in the code handling -+ PTHREAD_MUTEX_ROBUST_NORMAL_NP. 
*/ -+ THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL); -+ break; -+#endif /* __NR_futex. */ -+ -+ case PTHREAD_MUTEX_PP_RECURSIVE_NP: -+ /* Recursive mutex. */ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid)) -+ return EPERM; -+ -+ if (--mutex->__data.__count != 0) -+ /* We still hold the mutex. */ -+ return 0; -+ goto pp; -+ -+ case PTHREAD_MUTEX_PP_ERRORCHECK_NP: -+ /* Error checking mutex. */ -+ if (mutex->__data.__owner != THREAD_GETMEM (THREAD_SELF, tid) -+ || (mutex->__data.__lock & ~ PTHREAD_MUTEX_PRIO_CEILING_MASK) == 0) -+ return EPERM; -+ /* FALLTHROUGH */ -+ -+ case PTHREAD_MUTEX_PP_NORMAL_NP: -+ case PTHREAD_MUTEX_PP_ADAPTIVE_NP: -+ /* Always reset the owner field. */ -+ pp: -+ mutex->__data.__owner = 0; -+ -+ if (decr) -+ /* One less user. */ -+ --mutex->__data.__nusers; -+ -+ /* Unlock. Use release MO in the CAS to synchronize with acquire MO in -+ lock acquisitions. */ -+ int newval; -+ int oldval = atomic_load_relaxed (&mutex->__data.__lock); -+ do -+ { -+ newval = oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK; -+ } -+ while (!atomic_compare_exchange_weak_release (&mutex->__data.__lock, -+ &oldval, newval)); -+ -+ if ((oldval & ~PTHREAD_MUTEX_PRIO_CEILING_MASK) > 1) -+ lll_futex_wake (&mutex->__data.__lock, 1, -+ PTHREAD_MUTEX_PSHARED (mutex)); -+ -+ int oldprio = newval >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT; -+ -+ LIBC_PROBE (mutex_release, 1, mutex); -+ -+ return __pthread_tpp_change_priority (oldprio, -1); -+ -+ default: -+ /* Correct code cannot set any other type. */ -+ return EINVAL; -+ } -+ -+ LIBC_PROBE (mutex_release, 1, mutex); -+ return 0; -+} -+ -+ -+int -+__pthread_mutex_unlock (pthread_mutex_t *mutex) -+{ -+ return __pthread_mutex_unlock_usercnt (mutex, 1); -+} -+weak_alias (__pthread_mutex_unlock, pthread_mutex_unlock) -+hidden_def (__pthread_mutex_unlock) -diff --git a/nptl_2_17/tpp_2_17.c b/nptl_2_17/tpp_2_17.c -new file mode 100644 -index 00000000..45fff81a ---- /dev/null -+++ b/nptl_2_17/tpp_2_17.c -@@ -0,0 +1,195 @@ -+/* Thread Priority Protect helpers. -+ Copyright (C) 2006-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Jakub Jelinek , 2006. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+#include -+#include -+ -+ -+int __sched_fifo_min_prio = -1; -+int __sched_fifo_max_prio = -1; -+ -+/* We only want to initialize __sched_fifo_min_prio and __sched_fifo_max_prio -+ once. The standard solution would be similar to pthread_once, but then -+ readers would need to use an acquire fence. In this specific case, -+ initialization is comprised of just idempotent writes to two variables -+ that have an initial value of -1. Therefore, we can treat each variable as -+ a separate, at-least-once initialized value. 
This enables using just -+ relaxed MO loads and stores, but requires that consumers check for -+ initialization of each value that is to be used; see -+ __pthread_tpp_change_priority for an example. -+ */ -+void -+__init_sched_fifo_prio (void) -+{ -+ atomic_store_relaxed (&__sched_fifo_max_prio, -+ __sched_get_priority_max (SCHED_FIFO)); -+ atomic_store_relaxed (&__sched_fifo_min_prio, -+ __sched_get_priority_min (SCHED_FIFO)); -+} -+ -+int -+__pthread_tpp_change_priority (int previous_prio, int new_prio) -+{ -+ struct pthread *self = THREAD_SELF; -+ struct priority_protection_data *tpp = THREAD_GETMEM (self, tpp); -+ int fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); -+ int fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); -+ -+ if (tpp == NULL) -+ { -+ /* See __init_sched_fifo_prio. We need both the min and max prio, -+ so need to check both, and run initialization if either one is -+ not initialized. The memory model's write-read coherence rule -+ makes this work. */ -+ if (fifo_min_prio == -1 || fifo_max_prio == -1) -+ { -+ __init_sched_fifo_prio (); -+ fifo_min_prio = atomic_load_relaxed (&__sched_fifo_min_prio); -+ fifo_max_prio = atomic_load_relaxed (&__sched_fifo_max_prio); -+ } -+ -+ size_t size = sizeof *tpp; -+ size += (fifo_max_prio - fifo_min_prio + 1) -+ * sizeof (tpp->priomap[0]); -+ tpp = calloc (size, 1); -+ if (tpp == NULL) -+ return ENOMEM; -+ tpp->priomax = fifo_min_prio - 1; -+ THREAD_SETMEM (self, tpp, tpp); -+ } -+ -+ assert (new_prio == -1 -+ || (new_prio >= fifo_min_prio -+ && new_prio <= fifo_max_prio)); -+ assert (previous_prio == -1 -+ || (previous_prio >= fifo_min_prio -+ && previous_prio <= fifo_max_prio)); -+ -+ int priomax = tpp->priomax; -+ int newpriomax = priomax; -+ if (new_prio != -1) -+ { -+ if (tpp->priomap[new_prio - fifo_min_prio] + 1 == 0) -+ return EAGAIN; -+ ++tpp->priomap[new_prio - fifo_min_prio]; -+ if (new_prio > priomax) -+ newpriomax = new_prio; -+ } -+ -+ if (previous_prio != -1) -+ { -+ if (--tpp->priomap[previous_prio - fifo_min_prio] == 0 -+ && priomax == previous_prio -+ && previous_prio > new_prio) -+ { -+ int i; -+ for (i = previous_prio - 1; i >= fifo_min_prio; --i) -+ if (tpp->priomap[i - fifo_min_prio]) -+ break; -+ newpriomax = i; -+ } -+ } -+ -+ if (priomax == newpriomax) -+ return 0; -+ -+ /* See CREATE THREAD NOTES in nptl/pthread_create.c. 
*/ -+ lll_lock (self->lock, LLL_PRIVATE); -+ -+ tpp->priomax = newpriomax; -+ -+ int result = 0; -+ -+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) -+ { -+ if (__sched_getparam (self->tid, &self->schedparam) != 0) -+ result = errno; -+ else -+ self->flags |= ATTR_FLAG_SCHED_SET; -+ } -+ -+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) -+ { -+ self->schedpolicy = __sched_getscheduler (self->tid); -+ if (self->schedpolicy == -1) -+ result = errno; -+ else -+ self->flags |= ATTR_FLAG_POLICY_SET; -+ } -+ -+ if (result == 0) -+ { -+ struct sched_param sp = self->schedparam; -+ if (sp.sched_priority < newpriomax || sp.sched_priority < priomax) -+ { -+ if (sp.sched_priority < newpriomax) -+ sp.sched_priority = newpriomax; -+ -+ if (__sched_setscheduler (self->tid, self->schedpolicy, &sp) < 0) -+ result = errno; -+ } -+ } -+ -+ lll_unlock (self->lock, LLL_PRIVATE); -+ -+ return result; -+} -+ -+int -+__pthread_current_priority (void) -+{ -+ struct pthread *self = THREAD_SELF; -+ if ((self->flags & (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) -+ == (ATTR_FLAG_POLICY_SET | ATTR_FLAG_SCHED_SET)) -+ return self->schedparam.sched_priority; -+ -+ int result = 0; -+ -+ /* See CREATE THREAD NOTES in nptl/pthread_create.c. */ -+ lll_lock (self->lock, LLL_PRIVATE); -+ -+ if ((self->flags & ATTR_FLAG_SCHED_SET) == 0) -+ { -+ if (__sched_getparam (self->tid, &self->schedparam) != 0) -+ result = -1; -+ else -+ self->flags |= ATTR_FLAG_SCHED_SET; -+ } -+ -+ if ((self->flags & ATTR_FLAG_POLICY_SET) == 0) -+ { -+ self->schedpolicy = __sched_getscheduler (self->tid); -+ if (self->schedpolicy == -1) -+ result = -1; -+ else -+ self->flags |= ATTR_FLAG_POLICY_SET; -+ } -+ -+ if (result != -1) -+ result = self->schedparam.sched_priority; -+ -+ lll_unlock (self->lock, LLL_PRIVATE); -+ -+ return result; -+} -diff --git a/nptl_2_17/vars_2_17.c b/nptl_2_17/vars_2_17.c -new file mode 100644 -index 00000000..ae60c0f8 ---- /dev/null -+++ b/nptl_2_17/vars_2_17.c -@@ -0,0 +1,43 @@ -+/* Copyright (C) 2004-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+ -+/* Default thread attributes for the case when the user does not -+ provide any. */ -+struct pthread_attr __default_pthread_attr attribute_hidden; -+ -+/* Mutex protecting __default_pthread_attr. */ -+int __default_pthread_attr_lock = LLL_LOCK_INITIALIZER; -+ -+/* Flag whether the machine is SMP or not. */ -+int __is_smp attribute_hidden; -+ -+#ifndef TLS_MULTIPLE_THREADS_IN_TCB -+/* Variable set to a nonzero value either if more than one thread runs or ran, -+ or if a single-threaded process is trying to cancel itself. See -+ nptl/descr.h for more context on the single-threaded process case. */ -+int __pthread_multiple_threads attribute_hidden; -+#endif -+ -+/* Table of the key information. 
*/ -+struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX] -+ __attribute__ ((nocommon)); -+hidden_data_def (__pthread_keys) --- -2.30.0 - diff --git a/0002-add-header-files-for-libphtread_2_17_so.patch b/0002-add-header-files-for-libphtread_2_17_so.patch deleted file mode 100644 index 98db0ff..0000000 --- a/0002-add-header-files-for-libphtread_2_17_so.patch +++ /dev/null @@ -1,2609 +0,0 @@ -From 76a50749f7af5935ba3739e815aa6a16ae4440d1 Mon Sep 17 00:00:00 2001 -From: Ulrich Drepper -Date: Tue Nov 26 22:50:54 2002 +0000 -Subject: [PATCH 2/9] build extra lipthreadcond so - -To successfully build some header files that reference glibc-2.17 - -Including but not limited to the following submission: -76a50749f7a -d5efd131d4e -eab380d8ec9 - ---- - nptl_2_17/bits/pthreadtypes_2_17.h | 127 +++ - nptl_2_17/bits/thread-shared-types_2_17.h | 186 ++++ - nptl_2_17/internaltypes_2_17.h | 179 ++++ - nptl_2_17/kernel-features_2_17.h | 162 +++ - nptl_2_17/pthreadP_2_17.h | 714 +++++++++++++ - nptl_2_17/pthread_2_17.h | 1175 +++++++++++++++++++++ - 6 files changed, 2543 insertions(+) - create mode 100644 nptl_2_17/bits/pthreadtypes_2_17.h - create mode 100644 nptl_2_17/bits/thread-shared-types_2_17.h - create mode 100644 nptl_2_17/internaltypes_2_17.h - create mode 100644 nptl_2_17/kernel-features_2_17.h - create mode 100644 nptl_2_17/pthreadP_2_17.h - create mode 100644 nptl_2_17/pthread_2_17.h - -diff --git a/nptl_2_17/bits/pthreadtypes_2_17.h b/nptl_2_17/bits/pthreadtypes_2_17.h -new file mode 100644 -index 00000000..da5521c1 ---- /dev/null -+++ b/nptl_2_17/bits/pthreadtypes_2_17.h -@@ -0,0 +1,127 @@ -+/* Declaration of common pthread types for all architectures. -+ Copyright (C) 2017-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _BITS_PTHREADTYPES_COMMON_H -+# define _BITS_PTHREADTYPES_COMMON_H 1 -+ -+/* For internal mutex and condition variable definitions. */ -+#include "thread-shared-types_2_17.h" -+ -+/* Thread identifiers. The structure of the attribute type is not -+ exposed on purpose. */ -+typedef unsigned long int pthread_t; -+ -+ -+/* Data structures for mutex handling. The structure of the attribute -+ type is not exposed on purpose. */ -+typedef union -+{ -+ char __size[__SIZEOF_PTHREAD_MUTEXATTR_T]; -+ int __align; -+} pthread_mutexattr_t; -+ -+ -+/* Data structure for condition variable handling. The structure of -+ the attribute type is not exposed on purpose. 
*/ -+typedef union -+{ -+ char __size[__SIZEOF_PTHREAD_CONDATTR_T]; -+ int __align; -+} pthread_condattr_t; -+ -+ -+/* Keys for thread-specific data */ -+typedef unsigned int pthread_key_t; -+ -+ -+/* Once-only execution */ -+typedef int __ONCE_ALIGNMENT pthread_once_t; -+ -+ -+union pthread_attr_t -+{ -+ char __size[__SIZEOF_PTHREAD_ATTR_T]; -+ long int __align; -+}; -+#ifndef __have_pthread_attr_t -+typedef union pthread_attr_t pthread_attr_t; -+# define __have_pthread_attr_t 1 -+#endif -+ -+ -+typedef union -+{ -+ struct __pthread_mutex_s __data; -+ char __size[__SIZEOF_PTHREAD_MUTEX_T]; -+ long int __align; -+} pthread_mutex_t; -+ -+ -+typedef union -+{ -+struct -+{ -+ int __lock; -+ unsigned int __futex; -+ __extension__ unsigned long long int __total_seq; -+ __extension__ unsigned long long int __wakeup_seq; -+ __extension__ unsigned long long int __woken_seq; -+ void *__mutex; -+ unsigned int __nwaiters; -+ unsigned int __broadcast_seq; -+}__data; -+ char __size[__SIZEOF_PTHREAD_COND_T]; -+ long int __align; -+} pthread_cond_t; -+ -+ -+/* Data structure for reader-writer lock variable handling. The -+ structure of the attribute type is deliberately not exposed. */ -+typedef union -+{ -+ struct __pthread_rwlock_arch_t __data; -+ char __size[__SIZEOF_PTHREAD_RWLOCK_T]; -+ long int __align; -+} pthread_rwlock_t; -+ -+typedef union -+{ -+ char __size[__SIZEOF_PTHREAD_RWLOCKATTR_T]; -+ long int __align; -+} pthread_rwlockattr_t; -+ -+ -+/* POSIX spinlock data type. */ -+typedef volatile int pthread_spinlock_t; -+ -+ -+/* POSIX barriers data type. The structure of the type is -+ deliberately not exposed. */ -+typedef union -+{ -+ char __size[__SIZEOF_PTHREAD_BARRIER_T]; -+ long int __align; -+} pthread_barrier_t; -+ -+typedef union -+{ -+ char __size[__SIZEOF_PTHREAD_BARRIERATTR_T]; -+ int __align; -+} pthread_barrierattr_t; -+ -+#endif -diff --git a/nptl_2_17/bits/thread-shared-types_2_17.h b/nptl_2_17/bits/thread-shared-types_2_17.h -new file mode 100644 -index 00000000..c855d0d8 ---- /dev/null -+++ b/nptl_2_17/bits/thread-shared-types_2_17.h -@@ -0,0 +1,186 @@ -+/* Common threading primitives definitions for both POSIX and C11. -+ Copyright (C) 2017-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _THREAD_SHARED_TYPES_H -+#define _THREAD_SHARED_TYPES_H 1 -+ -+#include -+/* Arch-specific definitions. Each architecture must define the following -+ macros to define the expected sizes of pthread data types: -+ -+ __SIZEOF_PTHREAD_ATTR_T - size of pthread_attr_t. -+ __SIZEOF_PTHREAD_MUTEX_T - size of pthread_mutex_t. -+ __SIZEOF_PTHREAD_MUTEXATTR_T - size of pthread_mutexattr_t. -+ __SIZEOF_PTHREAD_COND_T - size of pthread_cond_t. -+ __SIZEOF_PTHREAD_CONDATTR_T - size of pthread_condattr_t. -+ __SIZEOF_PTHREAD_RWLOCK_T - size of pthread_rwlock_t. 
-+ __SIZEOF_PTHREAD_RWLOCKATTR_T - size of pthread_rwlockattr_t. -+ __SIZEOF_PTHREAD_BARRIER_T - size of pthread_barrier_t. -+ __SIZEOF_PTHREAD_BARRIERATTR_T - size of pthread_barrierattr_t. -+ -+ Also, the following macros must be define for internal pthread_mutex_t -+ struct definitions (struct __pthread_mutex_s): -+ -+ __PTHREAD_COMPAT_PADDING_MID - any additional members after 'kind' -+ and before '__spin' (for 64 bits) or -+ '__nusers' (for 32 bits). -+ __PTHREAD_COMPAT_PADDING_END - any additional members at the end of -+ the internal structure. -+ __PTHREAD_MUTEX_LOCK_ELISION - 1 if the architecture supports lock -+ elision or 0 otherwise. -+ __PTHREAD_MUTEX_NUSERS_AFTER_KIND - control where to put __nusers. The -+ preferred value for new architectures -+ is 0. -+ __PTHREAD_MUTEX_USE_UNION - control whether internal __spins and -+ __list will be place inside a union for -+ linuxthreads compatibility. -+ The preferred value for new architectures -+ is 0. -+ -+ For a new port the preferred values for the required defines are: -+ -+ #define __PTHREAD_COMPAT_PADDING_MID -+ #define __PTHREAD_COMPAT_PADDING_END -+ #define __PTHREAD_MUTEX_LOCK_ELISION 0 -+ #define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0 -+ #define __PTHREAD_MUTEX_USE_UNION 0 -+ -+ __PTHREAD_MUTEX_LOCK_ELISION can be set to 1 if the hardware plans to -+ eventually support lock elision using transactional memory. -+ -+ The additional macro defines any constraint for the lock alignment -+ inside the thread structures: -+ -+ __LOCK_ALIGNMENT - for internal lock/futex usage. -+ -+ Same idea but for the once locking primitive: -+ -+ __ONCE_ALIGNMENT - for pthread_once_t/once_flag definition. -+ -+ And finally the internal pthread_rwlock_t (struct __pthread_rwlock_arch_t) -+ must be defined. -+ */ -+#include -+ -+/* Common definition of pthread_mutex_t. */ -+ -+#if !__PTHREAD_MUTEX_USE_UNION -+typedef struct __pthread_internal_list -+{ -+ struct __pthread_internal_list *__prev; -+ struct __pthread_internal_list *__next; -+} __pthread_list_t; -+#else -+typedef struct __pthread_internal_slist -+{ -+ struct __pthread_internal_slist *__next; -+} __pthread_slist_t; -+#endif -+ -+/* Lock elision support. */ -+#if __PTHREAD_MUTEX_LOCK_ELISION -+# if !__PTHREAD_MUTEX_USE_UNION -+# define __PTHREAD_SPINS_DATA \ -+ short __spins; \ -+ short __elision -+# define __PTHREAD_SPINS 0, 0 -+# else -+# define __PTHREAD_SPINS_DATA \ -+ struct \ -+ { \ -+ short __espins; \ -+ short __eelision; \ -+ } __elision_data -+# define __PTHREAD_SPINS { 0, 0 } -+# define __spins __elision_data.__espins -+# define __elision __elision_data.__eelision -+# endif -+#else -+# define __PTHREAD_SPINS_DATA int __spins -+/* Mutex __spins initializer used by PTHREAD_MUTEX_INITIALIZER. */ -+# define __PTHREAD_SPINS 0 -+#endif -+ -+struct __pthread_mutex_s -+{ -+ int __lock __LOCK_ALIGNMENT; -+ unsigned int __count; -+ int __owner; -+#if !__PTHREAD_MUTEX_NUSERS_AFTER_KIND -+ unsigned int __nusers; -+#endif -+ /* KIND must stay at this position in the structure to maintain -+ binary compatibility with static initializers. -+ -+ Concurrency notes: -+ The __kind of a mutex is initialized either by the static -+ PTHREAD_MUTEX_INITIALIZER or by a call to pthread_mutex_init. -+ -+ After a mutex has been initialized, the __kind of a mutex is usually not -+ changed. BUT it can be set to -1 in pthread_mutex_destroy or elision can -+ be enabled. This is done concurrently in the pthread_mutex_*lock functions -+ by using the macro FORCE_ELISION. 
This macro is only defined for -+ architectures which supports lock elision. -+ -+ For elision, there are the flags PTHREAD_MUTEX_ELISION_NP and -+ PTHREAD_MUTEX_NO_ELISION_NP which can be set in addition to the already set -+ type of a mutex. -+ Before a mutex is initialized, only PTHREAD_MUTEX_NO_ELISION_NP can be set -+ with pthread_mutexattr_settype. -+ After a mutex has been initialized, the functions pthread_mutex_*lock can -+ enable elision - if the mutex-type and the machine supports it - by setting -+ the flag PTHREAD_MUTEX_ELISION_NP. This is done concurrently. Afterwards -+ the lock / unlock functions are using specific elision code-paths. */ -+ int __kind; -+ __PTHREAD_COMPAT_PADDING_MID -+#if __PTHREAD_MUTEX_NUSERS_AFTER_KIND -+ unsigned int __nusers; -+#endif -+#if !__PTHREAD_MUTEX_USE_UNION -+ __PTHREAD_SPINS_DATA; -+ __pthread_list_t __list; -+# define __PTHREAD_MUTEX_HAVE_PREV 1 -+#else -+ __extension__ union -+ { -+ __PTHREAD_SPINS_DATA; -+ __pthread_slist_t __list; -+ }; -+# define __PTHREAD_MUTEX_HAVE_PREV 0 -+#endif -+ __PTHREAD_COMPAT_PADDING_END -+}; -+ -+ -+/* Common definition of pthread_cond_t. */ -+ -+struct __pthread_cond_s -+{ -+ int __lock; -+ unsigned int __futex; -+ __extension__ unsigned long long int __total_seq; -+ __extension__ unsigned long long int __wakeup_seq; -+ __extension__ unsigned long long int __woken_seq; -+ void *__mutex; -+ unsigned int __nwaiters; -+ unsigned int __broadcast_seq; -+ -+long int __align; -+}; -+ -+#endif /* _THREAD_SHARED_TYPES_H */ -diff --git a/nptl_2_17/internaltypes_2_17.h b/nptl_2_17/internaltypes_2_17.h -new file mode 100644 -index 00000000..603dc01c ---- /dev/null -+++ b/nptl_2_17/internaltypes_2_17.h -@@ -0,0 +1,179 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _INTERNALTYPES_H -+#define _INTERNALTYPES_H 1 -+ -+#include -+#include -+#include -+ -+ -+struct pthread_attr -+{ -+ /* Scheduler parameters and priority. */ -+ struct sched_param schedparam; -+ int schedpolicy; -+ /* Various flags like detachstate, scope, etc. */ -+ int flags; -+ /* Size of guard area. */ -+ size_t guardsize; -+ /* Stack handling. */ -+ void *stackaddr; -+ size_t stacksize; -+ /* Affinity map. */ -+ cpu_set_t *cpuset; -+ size_t cpusetsize; -+}; -+ -+#define ATTR_FLAG_DETACHSTATE 0x0001 -+#define ATTR_FLAG_NOTINHERITSCHED 0x0002 -+#define ATTR_FLAG_SCOPEPROCESS 0x0004 -+#define ATTR_FLAG_STACKADDR 0x0008 -+#define ATTR_FLAG_OLDATTR 0x0010 -+#define ATTR_FLAG_SCHED_SET 0x0020 -+#define ATTR_FLAG_POLICY_SET 0x0040 -+ -+ -+/* Mutex attribute data structure. */ -+struct pthread_mutexattr -+{ -+ /* Identifier for the kind of mutex. -+ -+ Bit 31 is set if the mutex is to be shared between processes. 
-+ -+ Bit 0 to 30 contain one of the PTHREAD_MUTEX_ values to identify -+ the type of the mutex. */ -+ int mutexkind; -+}; -+ -+ -+/* Conditional variable attribute data structure. */ -+struct pthread_condattr -+{ -+ /* Combination of values: -+ -+ Bit 0 : flag whether conditional variable will be -+ sharable between processes. -+ Bit 1-COND_CLOCK_BITS: Clock ID. COND_CLOCK_BITS is the number of bits -+ needed to represent the ID of the clock. */ -+ int value; -+}; -+#define COND_CLOCK_BITS 1 -+#define COND_NWAITERS_SHIFT 1 -+ -+/* Read-write lock variable attribute data structure. */ -+struct pthread_rwlockattr -+{ -+ int lockkind; -+ int pshared; -+}; -+ -+ -+/* Barrier data structure. See pthread_barrier_wait for a description -+ of how these fields are used. */ -+struct pthread_barrier -+{ -+ unsigned int in; -+ unsigned int current_round; -+ unsigned int count; -+ int shared; -+ unsigned int out; -+}; -+/* See pthread_barrier_wait for a description. */ -+#define BARRIER_IN_THRESHOLD (UINT_MAX/2) -+ -+ -+/* Barrier variable attribute data structure. */ -+struct pthread_barrierattr -+{ -+ int pshared; -+}; -+ -+ -+/* Thread-local data handling. */ -+struct pthread_key_struct -+{ -+ /* Sequence numbers. Even numbers indicated vacant entries. Note -+ that zero is even. We use uintptr_t to not require padding on -+ 32- and 64-bit machines. On 64-bit machines it helps to avoid -+ wrapping, too. */ -+ uintptr_t seq; -+ -+ /* Destructor for the data. */ -+ void (*destr) (void *); -+}; -+ -+/* Check whether an entry is unused. */ -+#define KEY_UNUSED(p) (((p) & 1) == 0) -+/* Check whether a key is usable. We cannot reuse an allocated key if -+ the sequence counter would overflow after the next destroy call. -+ This would mean that we potentially free memory for a key with the -+ same sequence. This is *very* unlikely to happen, A program would -+ have to create and destroy a key 2^31 times (on 32-bit platforms, -+ on 64-bit platforms that would be 2^63). If it should happen we -+ simply don't use this specific key anymore. */ -+#define KEY_USABLE(p) (((uintptr_t) (p)) < ((uintptr_t) ((p) + 2))) -+ -+ -+/* Handling of read-write lock data. */ -+// XXX For now there is only one flag. Maybe more in future. -+#define RWLOCK_RECURSIVE(rwlock) ((rwlock)->__data.__flags != 0) -+ -+ -+/* Semaphore variable structure. */ -+struct new_sem -+{ -+#if __HAVE_64B_ATOMICS -+ /* The data field holds both value (in the least-significant 32 bits) and -+ nwaiters. */ -+# if __BYTE_ORDER == __LITTLE_ENDIAN -+# define SEM_VALUE_OFFSET 0 -+# elif __BYTE_ORDER == __BIG_ENDIAN -+# define SEM_VALUE_OFFSET 1 -+# else -+# error Unsupported byte order. -+# endif -+# define SEM_NWAITERS_SHIFT 32 -+# define SEM_VALUE_MASK (~(unsigned int)0) -+ uint64_t data; -+ int private; -+ int pad; -+#else -+# define SEM_VALUE_SHIFT 1 -+# define SEM_NWAITERS_MASK ((unsigned int)1) -+ unsigned int value; -+ int private; -+ int pad; -+ unsigned int nwaiters; -+#endif -+}; -+ -+struct old_sem -+{ -+ unsigned int value; -+}; -+ -+ -+/* Compatibility type for old conditional variable interfaces. */ -+typedef struct -+{ -+ pthread_cond_t *cond; -+} pthread_cond_2_0_t; -+ -+#endif /* internaltypes.h */ -diff --git a/nptl_2_17/kernel-features_2_17.h b/nptl_2_17/kernel-features_2_17.h -new file mode 100644 -index 00000000..299ae0a1 ---- /dev/null -+++ b/nptl_2_17/kernel-features_2_17.h -@@ -0,0 +1,162 @@ -+/* Set flags signalling availability of kernel features based on given -+ kernel version number. 
-+ Copyright (C) 1999-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+/* This file must not contain any C code. At least it must be protected -+ to allow using the file also in assembler files. */ -+ -+#ifndef __LINUX_KERNEL_VERSION_2_17 -+/* We assume the worst; all kernels should be supported. */ -+# define __LINUX_KERNEL_VERSION_2_17 0 -+#endif -+ -+/* We assume for __LINUX_KERNEL_VERSION the same encoding used in -+ linux/version.h. I.e., the major, minor, and subminor all get a -+ byte with the major number being in the highest byte. This means -+ we can do numeric comparisons. -+ -+ In the following we will define certain symbols depending on -+ whether the describes kernel feature is available in the kernel -+ version given by __LINUX_KERNEL_VERSION. We are not always exactly -+ recording the correct versions in which the features were -+ introduced. If somebody cares these values can afterwards be -+ corrected. */ -+ -+/* Some architectures use the socketcall multiplexer for some or all -+ socket-related operations instead of separate syscalls. -+ __ASSUME_SOCKETCALL is defined for such architectures. */ -+ -+/* The changed st_ino field appeared in 2.4.0-test6. However, SH is lame, -+ and still does not have a 64-bit inode field. */ -+#define __ASSUME_ST_INO_64_BIT 1 -+ -+/* The statfs64 syscalls are available in 2.5.74 (but not for alpha). */ -+#define __ASSUME_STATFS64 1 -+ -+/* pselect/ppoll were introduced just after 2.6.16-rc1. On x86_64 and -+ SH this appeared first in 2.6.19-rc1, on ia64 in 2.6.22-rc1. */ -+#define __ASSUME_PSELECT 1 -+ -+/* The *at syscalls were introduced just after 2.6.16-rc1. On PPC -+ they were introduced in 2.6.17-rc1, on SH in 2.6.19-rc1. */ -+#define __ASSUME_ATFCTS 1 -+ -+/* Support for inter-process robust mutexes was added in 2.6.17 (but -+ some architectures lack futex_atomic_cmpxchg_inatomic in some -+ configurations). */ -+#define __ASSUME_SET_ROBUST_LIST 1 -+ -+/* Support for various CLOEXEC and NONBLOCK flags was added in -+ 2.6.27. */ -+#define __ASSUME_IN_NONBLOCK 1 -+ -+/* Support for the FUTEX_CLOCK_REALTIME flag was added in 2.6.29. */ -+#define __ASSUME_FUTEX_CLOCK_REALTIME 1 -+ -+/* Support for preadv and pwritev was added in 2.6.30. */ -+#define __ASSUME_PREADV 1 -+#define __ASSUME_PWRITEV 1 -+ -+ -+/* Support for FUTEX_*_REQUEUE_PI was added in 2.6.31 (but some -+ * architectures lack futex_atomic_cmpxchg_inatomic in some -+ * configurations). */ -+#define __ASSUME_REQUEUE_PI 1 -+ -+/* Support for sendmmsg functionality was added in 3.0. */ -+#define __ASSUME_SENDMMSG 1 -+ -+/* On most architectures, most socket syscalls are supported for all -+ supported kernel versions, but on some socketcall architectures -+ separate syscalls were only added later. 
*/ -+#define __ASSUME_SENDMSG_SYSCALL 1 -+#define __ASSUME_RECVMSG_SYSCALL 1 -+#define __ASSUME_ACCEPT_SYSCALL 1 -+#define __ASSUME_CONNECT_SYSCALL 1 -+#define __ASSUME_RECVFROM_SYSCALL 1 -+#define __ASSUME_SENDTO_SYSCALL 1 -+#define __ASSUME_ACCEPT4_SYSCALL 1 -+#define __ASSUME_RECVMMSG_SYSCALL 1 -+#define __ASSUME_SENDMMSG_SYSCALL 1 -+ -+/* Support for SysV IPC through wired syscalls. All supported architectures -+ either support ipc syscall and/or all the ipc correspondent syscalls. */ -+#define __ASSUME_DIRECT_SYSVIPC_SYSCALLS 1 -+ -+/* Support for p{read,write}v2 was added in 4.6. However Linux default -+ implementation does not assume the __ASSUME_* and instead use a fallback -+ implementation based on p{read,write}v and returning an error for -+ non supported flags. */ -+ -+/* Support for the renameat2 system call was added in kernel 3.15. */ -+#if __LINUX_KERNEL_VERSION >= 0x030F00 -+# define __ASSUME_RENAMEAT2 -+#endif -+ -+/* Support for the execveat syscall was added in 3.19. */ -+#if __LINUX_KERNEL_VERSION >= 0x031300 -+# define __ASSUME_EXECVEAT 1 -+#endif -+ -+#if __LINUX_KERNEL_VERSION >= 0x040400 -+# define __ASSUME_MLOCK2 1 -+#endif -+ -+#if __LINUX_KERNEL_VERSION >= 0x040500 -+# define __ASSUME_COPY_FILE_RANGE 1 -+#endif -+ -+/* Support for statx was added in kernel 4.11. */ -+#if __LINUX_KERNEL_VERSION >= 0x040B00 -+# define __ASSUME_STATX 1 -+#endif -+ -+/* Support for clone call used on fork. The signature varies across the -+ architectures with current 4 different variants: -+ -+ 1. long int clone (unsigned long flags, unsigned long newsp, -+ int *parent_tidptr, unsigned long tls, -+ int *child_tidptr) -+ -+ 2. long int clone (unsigned long newsp, unsigned long clone_flags, -+ int *parent_tidptr, int * child_tidptr, -+ unsigned long tls) -+ -+ 3. long int clone (unsigned long flags, unsigned long newsp, -+ int stack_size, int *parent_tidptr, -+ int *child_tidptr, unsigned long tls) -+ -+ 4. long int clone (unsigned long flags, unsigned long newsp, -+ int *parent_tidptr, int *child_tidptr, -+ unsigned long tls) -+ -+ The fourth variant is intended to be used as the default for newer ports, -+ Also IA64 uses the third variant but with __NR_clone2 instead of -+ __NR_clone. -+ -+ The macros names to define the variant used for the architecture is -+ similar to kernel: -+ -+ - __ASSUME_CLONE_BACKWARDS: for variant 1. -+ - __ASSUME_CLONE_BACKWARDS2: for variant 2 (s390). -+ - __ASSUME_CLONE_BACKWARDS3: for variant 3 (microblaze). -+ - __ASSUME_CLONE_DEFAULT: for variant 4. -+ - __ASSUME_CLONE2: for clone2 with variant 3 (ia64). -+ */ -+ -+#define __ASSUME_CLONE_DEFAULT 1 -diff --git a/nptl_2_17/pthreadP_2_17.h b/nptl_2_17/pthreadP_2_17.h -new file mode 100644 -index 00000000..3050fa54 ---- /dev/null -+++ b/nptl_2_17/pthreadP_2_17.h -@@ -0,0 +1,714 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2002. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. 
-+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _PTHREADP_H -+#define _PTHREADP_H 1 -+ -+ -+#include -+#include "kernel-features_2_17.h" -+#include "pthread_2_17.h" -+#include "internaltypes_2_17.h" -+ -+#include -+#include -+#include -+#include "descr.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* Atomic operations on TLS memory. */ -+#ifndef THREAD_ATOMIC_CMPXCHG_VAL -+# define THREAD_ATOMIC_CMPXCHG_VAL(descr, member, new, old) \ -+ atomic_compare_and_exchange_val_acq (&(descr)->member, new, old) -+#endif -+ -+#ifndef THREAD_ATOMIC_BIT_SET -+# define THREAD_ATOMIC_BIT_SET(descr, member, bit) \ -+ atomic_bit_set (&(descr)->member, bit) -+#endif -+ -+ -+/* Adaptive mutex definitions. */ -+#ifndef MAX_ADAPTIVE_COUNT -+# define MAX_ADAPTIVE_COUNT 100 -+#endif -+ -+ -+/* Magic cookie representing robust mutex with dead owner. */ -+#define PTHREAD_MUTEX_INCONSISTENT INT_MAX -+/* Magic cookie representing not recoverable robust mutex. */ -+#define PTHREAD_MUTEX_NOTRECOVERABLE (INT_MAX - 1) -+ -+ -+/* Internal mutex type value. */ -+enum -+{ -+ PTHREAD_MUTEX_KIND_MASK_NP = 3, -+ -+ PTHREAD_MUTEX_ELISION_NP = 256, -+ PTHREAD_MUTEX_NO_ELISION_NP = 512, -+ -+ PTHREAD_MUTEX_ROBUST_NORMAL_NP = 16, -+ PTHREAD_MUTEX_ROBUST_RECURSIVE_NP -+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_RECURSIVE_NP, -+ PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP -+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ERRORCHECK_NP, -+ PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP -+ = PTHREAD_MUTEX_ROBUST_NORMAL_NP | PTHREAD_MUTEX_ADAPTIVE_NP, -+ PTHREAD_MUTEX_PRIO_INHERIT_NP = 32, -+ PTHREAD_MUTEX_PI_NORMAL_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_NORMAL, -+ PTHREAD_MUTEX_PI_RECURSIVE_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_RECURSIVE_NP, -+ PTHREAD_MUTEX_PI_ERRORCHECK_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ERRORCHECK_NP, -+ PTHREAD_MUTEX_PI_ADAPTIVE_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ADAPTIVE_NP, -+ PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP, -+ PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_RECURSIVE_NP, -+ PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP, -+ PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP -+ = PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP, -+ PTHREAD_MUTEX_PRIO_PROTECT_NP = 64, -+ PTHREAD_MUTEX_PP_NORMAL_NP -+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_NORMAL, -+ PTHREAD_MUTEX_PP_RECURSIVE_NP -+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_RECURSIVE_NP, -+ PTHREAD_MUTEX_PP_ERRORCHECK_NP -+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ERRORCHECK_NP, -+ PTHREAD_MUTEX_PP_ADAPTIVE_NP -+ = PTHREAD_MUTEX_PRIO_PROTECT_NP | PTHREAD_MUTEX_ADAPTIVE_NP, -+ PTHREAD_MUTEX_ELISION_FLAGS_NP -+ = PTHREAD_MUTEX_ELISION_NP | PTHREAD_MUTEX_NO_ELISION_NP, -+ -+ PTHREAD_MUTEX_TIMED_ELISION_NP = -+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_ELISION_NP, -+ PTHREAD_MUTEX_TIMED_NO_ELISION_NP = -+ PTHREAD_MUTEX_TIMED_NP | PTHREAD_MUTEX_NO_ELISION_NP, -+}; -+#define PTHREAD_MUTEX_PSHARED_BIT 128 -+ -+/* See concurrency notes regarding __kind in struct __pthread_mutex_s -+ in sysdeps/nptl/bits/thread-shared-types.h. 
*/ -+#define PTHREAD_MUTEX_TYPE(m) \ -+ (atomic_load_relaxed (&((m)->__data.__kind)) & 127) -+/* Don't include NO_ELISION, as that type is always the same -+ as the underlying lock type. */ -+#define PTHREAD_MUTEX_TYPE_ELISION(m) \ -+ (atomic_load_relaxed (&((m)->__data.__kind)) \ -+ & (127 | PTHREAD_MUTEX_ELISION_NP)) -+ -+#if LLL_PRIVATE == 0 && LLL_SHARED == 128 -+# define PTHREAD_MUTEX_PSHARED(m) \ -+ (atomic_load_relaxed (&((m)->__data.__kind)) & 128) -+#else -+# define PTHREAD_MUTEX_PSHARED(m) \ -+ ((atomic_load_relaxed (&((m)->__data.__kind)) & 128) \ -+ ? LLL_SHARED : LLL_PRIVATE) -+#endif -+ -+/* The kernel when waking robust mutexes on exit never uses -+ FUTEX_PRIVATE_FLAG FUTEX_WAKE. */ -+#define PTHREAD_ROBUST_MUTEX_PSHARED(m) LLL_SHARED -+ -+/* Ceiling in __data.__lock. __data.__lock is signed, so don't -+ use the MSB bit in there, but in the mask also include that bit, -+ so that the compiler can optimize & PTHREAD_MUTEX_PRIO_CEILING_MASK -+ masking if the value is then shifted down by -+ PTHREAD_MUTEX_PRIO_CEILING_SHIFT. */ -+#define PTHREAD_MUTEX_PRIO_CEILING_SHIFT 19 -+#define PTHREAD_MUTEX_PRIO_CEILING_MASK 0xfff80000 -+ -+ -+/* Flags in mutex attr. */ -+#define PTHREAD_MUTEXATTR_PROTOCOL_SHIFT 28 -+#define PTHREAD_MUTEXATTR_PROTOCOL_MASK 0x30000000 -+#define PTHREAD_MUTEXATTR_PRIO_CEILING_SHIFT 12 -+#define PTHREAD_MUTEXATTR_PRIO_CEILING_MASK 0x00fff000 -+#define PTHREAD_MUTEXATTR_FLAG_ROBUST 0x40000000 -+#define PTHREAD_MUTEXATTR_FLAG_PSHARED 0x80000000 -+#define PTHREAD_MUTEXATTR_FLAG_BITS \ -+ (PTHREAD_MUTEXATTR_FLAG_ROBUST | PTHREAD_MUTEXATTR_FLAG_PSHARED \ -+ | PTHREAD_MUTEXATTR_PROTOCOL_MASK | PTHREAD_MUTEXATTR_PRIO_CEILING_MASK) -+ -+ -+/* For the following, see pthread_rwlock_common.c. */ -+#define PTHREAD_RWLOCK_WRPHASE 1 -+#define PTHREAD_RWLOCK_WRLOCKED 2 -+#define PTHREAD_RWLOCK_RWAITING 4 -+#define PTHREAD_RWLOCK_READER_SHIFT 3 -+#define PTHREAD_RWLOCK_READER_OVERFLOW ((unsigned int) 1 \ -+ << (sizeof (unsigned int) * 8 - 1)) -+#define PTHREAD_RWLOCK_WRHANDOVER ((unsigned int) 1 \ -+ << (sizeof (unsigned int) * 8 - 1)) -+#define PTHREAD_RWLOCK_FUTEX_USED 2 -+ -+ -+/* Bits used in robust mutex implementation. */ -+#define FUTEX_WAITERS 0x80000000 -+#define FUTEX_OWNER_DIED 0x40000000 -+#define FUTEX_TID_MASK 0x3fffffff -+ -+ -+/* pthread_once definitions. See __pthread_once for how these are used. */ -+#define __PTHREAD_ONCE_INPROGRESS 1 -+#define __PTHREAD_ONCE_DONE 2 -+#define __PTHREAD_ONCE_FORK_GEN_INCR 4 -+ -+/* Attribute to indicate thread creation was issued from C11 thrd_create. */ -+#define ATTR_C11_THREAD ((void*)(uintptr_t)-1) -+ -+#if 0 -+/* Condition variable definitions. See __pthread_cond_wait_common. -+ Need to be defined here so there is one place from which -+ nptl_lock_constants can grab them. */ -+#define __PTHREAD_COND_CLOCK_MONOTONIC_MASK 2 -+#define __PTHREAD_COND_SHARED_MASK 1 -+#endif -+ -+/* Internal variables. */ -+ -+ -+/* Default pthread attributes. */ -+extern struct pthread_attr __default_pthread_attr attribute_hidden; -+extern int __default_pthread_attr_lock attribute_hidden; -+ -+/* Size and alignment of static TLS block. */ -+extern size_t __static_tls_size attribute_hidden; -+extern size_t __static_tls_align_m1 attribute_hidden; -+ -+/* Flag whether the machine is SMP or not. */ -+extern int __is_smp attribute_hidden; -+ -+/* Thread descriptor handling. */ -+extern list_t __stack_user; -+hidden_proto (__stack_user) -+ -+/* Attribute handling. 
*/ -+extern struct pthread_attr *__attr_list attribute_hidden; -+extern int __attr_list_lock attribute_hidden; -+ -+/* Concurrency handling. */ -+extern int __concurrency_level attribute_hidden; -+ -+/* Thread-local data key handling. */ -+extern struct pthread_key_struct __pthread_keys[PTHREAD_KEYS_MAX]; -+hidden_proto (__pthread_keys) -+ -+/* Number of threads running. */ -+extern unsigned int __nptl_nthreads attribute_hidden; -+ -+#ifndef __ASSUME_SET_ROBUST_LIST -+/* Negative if we do not have the system call and we can use it. */ -+extern int __set_robust_list_avail attribute_hidden; -+#endif -+ -+/* Thread Priority Protection. */ -+extern int __sched_fifo_min_prio attribute_hidden; -+extern int __sched_fifo_max_prio attribute_hidden; -+extern void __init_sched_fifo_prio (void) attribute_hidden; -+extern int __pthread_tpp_change_priority (int prev_prio, int new_prio) -+ attribute_hidden; -+extern int __pthread_current_priority (void) attribute_hidden; -+ -+/* The library can run in debugging mode where it performs a lot more -+ tests. */ -+extern int __pthread_debug attribute_hidden; -+/** For now disable debugging support. */ -+#if 0 -+# define DEBUGGING_P __builtin_expect (__pthread_debug, 0) -+# define INVALID_TD_P(pd) (DEBUGGING_P && __find_in_stack_list (pd) == NULL) -+# define INVALID_NOT_TERMINATED_TD_P(pd) INVALID_TD_P (pd) -+#else -+# define DEBUGGING_P 0 -+/* Simplified test. This will not catch all invalid descriptors but -+ is better than nothing. And if the test triggers the thread -+ descriptor is guaranteed to be invalid. */ -+# define INVALID_TD_P(pd) __builtin_expect ((pd)->tid <= 0, 0) -+# define INVALID_NOT_TERMINATED_TD_P(pd) __builtin_expect ((pd)->tid < 0, 0) -+#endif -+ -+ -+/* Cancellation test. */ -+#define CANCELLATION_P(self) \ -+ do { \ -+ int cancelhandling = THREAD_GETMEM (self, cancelhandling); \ -+ if (CANCEL_ENABLED_AND_CANCELED (cancelhandling)) \ -+ { \ -+ THREAD_SETMEM (self, result, PTHREAD_CANCELED); \ -+ __do_cancel (); \ -+ } \ -+ } while (0) -+ -+ -+extern void __pthread_unwind (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute __attribute ((__noreturn__)) -+ weak_function; -+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute __attribute ((__noreturn__)) -+ weak_function; -+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+hidden_proto (__pthread_unwind) -+hidden_proto (__pthread_unwind_next) -+hidden_proto (__pthread_register_cancel) -+hidden_proto (__pthread_unregister_cancel) -+# ifdef SHARED -+extern void attribute_hidden pthread_cancel_init (void); -+# endif -+extern void __nptl_unwind_freeres (void) attribute_hidden; -+ -+ -+/* Called when a thread reacts on a cancellation request. */ -+static inline void -+__attribute ((noreturn, always_inline)) -+__do_cancel (void) -+{ -+ struct pthread *self = THREAD_SELF; -+ -+ /* Make sure we get no more cancellations. */ -+ THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT); -+ -+ __pthread_unwind ((__pthread_unwind_buf_t *) -+ THREAD_GETMEM (self, cleanup_jmp_buf)); -+} -+ -+ -+/* Set cancellation mode to asynchronous. */ -+#define CANCEL_ASYNC() \ -+ __pthread_enable_asynccancel () -+/* Reset to previous cancellation mode. 
*/ -+#define CANCEL_RESET(oldtype) \ -+ __pthread_disable_asynccancel (oldtype) -+ -+# undef LIBC_CANCEL_ASYNC -+# define LIBC_CANCEL_ASYNC() CANCEL_ASYNC () -+ -+# undef LIBC_CANCEL_RESET -+# define LIBC_CANCEL_RESET(val) CANCEL_RESET (val) -+ -+# define LIBC_CANCEL_HANDLED() \ -+ __asm (".globl " __SYMBOL_PREFIX "__pthread_enable_asynccancel"); \ -+ __asm (".globl " __SYMBOL_PREFIX "__pthread_disable_asynccancel") -+ -+ -+/* Internal prototypes. */ -+ -+/* Thread list handling. */ -+extern struct pthread *__find_in_stack_list (struct pthread *pd) -+ attribute_hidden; -+ -+/* Deallocate a thread's stack after optionally making sure the thread -+ descriptor is still valid. */ -+extern void __free_tcb (struct pthread *pd) attribute_hidden; -+ -+/* Free allocated stack. */ -+extern void __deallocate_stack (struct pthread *pd) attribute_hidden; -+ -+/* Mark all the stacks except for the current one as available. This -+ function also re-initializes the lock for the stack cache. */ -+extern void __reclaim_stacks (void) attribute_hidden; -+ -+/* Make all threads's stacks executable. */ -+extern int __make_stacks_executable (void **stack_endp) attribute_hidden; -+ -+/* longjmp handling. */ -+extern void __pthread_cleanup_upto (__jmp_buf target, char *targetframe); -+hidden_proto (__pthread_cleanup_upto) -+ -+ -+/* Functions with versioned interfaces. */ -+extern int __pthread_create_2_1 (pthread_t *newthread, -+ const pthread_attr_t *attr, -+ void *(*start_routine) (void *), void *arg); -+extern int __pthread_create_2_0 (pthread_t *newthread, -+ const pthread_attr_t *attr, -+ void *(*start_routine) (void *), void *arg); -+extern int __pthread_attr_init_2_1 (pthread_attr_t *attr); -+extern int __pthread_attr_init_2_0 (pthread_attr_t *attr); -+ -+ -+/* Event handlers for libthread_db interface. */ -+extern void __nptl_create_event (void); -+extern void __nptl_death_event (void); -+hidden_proto (__nptl_create_event) -+hidden_proto (__nptl_death_event) -+ -+/* Register the generation counter in the libpthread with the libc. */ -+#ifdef TLS_MULTIPLE_THREADS_IN_TCB -+extern void __libc_pthread_init (unsigned long int *ptr, -+ void (*reclaim) (void), -+ const struct pthread_functions *functions); -+#else -+extern int *__libc_pthread_init (unsigned long int *ptr, -+ void (*reclaim) (void), -+ const struct pthread_functions *functions); -+ -+/* Variable set to a nonzero value either if more than one thread runs or ran, -+ or if a single-threaded process is trying to cancel itself. See -+ nptl/descr.h for more context on the single-threaded process case. */ -+extern int __pthread_multiple_threads attribute_hidden; -+/* Pointer to the corresponding variable in libc. */ -+extern int *__libc_multiple_threads_ptr attribute_hidden; -+#endif -+ -+/* Find a thread given its TID. */ -+extern struct pthread *__find_thread_by_id (pid_t tid) attribute_hidden -+#ifdef SHARED -+; -+#else -+weak_function; -+#define __find_thread_by_id(tid) \ -+ (__find_thread_by_id ? (__find_thread_by_id) (tid) : (struct pthread *) NULL) -+#endif -+ -+extern void __pthread_init_static_tls (struct link_map *) attribute_hidden; -+ -+extern size_t __pthread_get_minstack (const pthread_attr_t *attr); -+ -+/* Namespace save aliases. 
*/ -+extern int __pthread_getschedparam (pthread_t thread_id, int *policy, -+ struct sched_param *param); -+extern int __pthread_setschedparam (pthread_t thread_id, int policy, -+ const struct sched_param *param); -+extern int __pthread_setcancelstate (int state, int *oldstate); -+extern int __pthread_mutex_init (pthread_mutex_t *__mutex, -+ const pthread_mutexattr_t *__mutexattr); -+extern int __pthread_mutex_destroy (pthread_mutex_t *__mutex); -+extern int __pthread_mutex_trylock (pthread_mutex_t *_mutex); -+extern int __pthread_mutex_lock (pthread_mutex_t *__mutex); -+extern int __pthread_mutex_timedlock (pthread_mutex_t *__mutex, -+ const struct timespec *__abstime); -+extern int __pthread_mutex_cond_lock (pthread_mutex_t *__mutex) -+ attribute_hidden; -+extern void __pthread_mutex_cond_lock_adjust (pthread_mutex_t *__mutex) -+ attribute_hidden; -+extern int __pthread_mutex_unlock (pthread_mutex_t *__mutex); -+extern int __pthread_mutex_unlock_usercnt (pthread_mutex_t *__mutex, -+ int __decr) attribute_hidden; -+extern int __pthread_mutexattr_init (pthread_mutexattr_t *attr); -+extern int __pthread_mutexattr_destroy (pthread_mutexattr_t *attr); -+extern int __pthread_mutexattr_settype (pthread_mutexattr_t *attr, int kind); -+extern int __pthread_attr_destroy (pthread_attr_t *attr); -+extern int __pthread_attr_getdetachstate (const pthread_attr_t *attr, -+ int *detachstate); -+extern int __pthread_attr_setdetachstate (pthread_attr_t *attr, -+ int detachstate); -+extern int __pthread_attr_getinheritsched (const pthread_attr_t *attr, -+ int *inherit); -+extern int __pthread_attr_setinheritsched (pthread_attr_t *attr, int inherit); -+extern int __pthread_attr_getschedparam (const pthread_attr_t *attr, -+ struct sched_param *param); -+extern int __pthread_attr_setschedparam (pthread_attr_t *attr, -+ const struct sched_param *param); -+extern int __pthread_attr_getschedpolicy (const pthread_attr_t *attr, -+ int *policy); -+extern int __pthread_attr_setschedpolicy (pthread_attr_t *attr, int policy); -+extern int __pthread_attr_getscope (const pthread_attr_t *attr, int *scope); -+extern int __pthread_attr_setscope (pthread_attr_t *attr, int scope); -+extern int __pthread_attr_getstackaddr (const pthread_attr_t *__restrict -+ __attr, void **__restrict __stackaddr); -+extern int __pthread_attr_setstackaddr (pthread_attr_t *__attr, -+ void *__stackaddr); -+extern int __pthread_attr_getstacksize (const pthread_attr_t *__restrict -+ __attr, -+ size_t *__restrict __stacksize); -+extern int __pthread_attr_setstacksize (pthread_attr_t *__attr, -+ size_t __stacksize); -+extern int __pthread_attr_getstack (const pthread_attr_t *__restrict __attr, -+ void **__restrict __stackaddr, -+ size_t *__restrict __stacksize); -+extern int __pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr, -+ size_t __stacksize); -+extern int __pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock, -+ const pthread_rwlockattr_t *__restrict -+ __attr); -+extern int __pthread_rwlock_destroy (pthread_rwlock_t *__rwlock); -+extern int __pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock); -+extern int __pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock); -+extern int __pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock); -+extern int __pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock); -+extern int __pthread_rwlock_unlock (pthread_rwlock_t *__rwlock); -+extern int __pthread_cond_broadcast (pthread_cond_t *cond); -+extern int __pthread_cond_destroy (pthread_cond_t *cond); -+extern int __pthread_cond_init 
(pthread_cond_t *cond, -+ const pthread_condattr_t *cond_attr); -+extern int __pthread_cond_signal (pthread_cond_t *cond); -+extern int __pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex); -+extern int __pthread_cond_timedwait (pthread_cond_t *cond, -+ pthread_mutex_t *mutex, -+ const struct timespec *abstime); -+extern int __pthread_cond_clockwait (pthread_cond_t *cond, -+ pthread_mutex_t *mutex, -+ clockid_t clockid, -+ const struct timespec *abstime) -+ __nonnull ((1, 2, 4)); -+extern int __pthread_condattr_destroy (pthread_condattr_t *attr); -+extern int __pthread_condattr_init (pthread_condattr_t *attr); -+extern int __pthread_key_create (pthread_key_t *key, void (*destr) (void *)); -+extern int __pthread_key_delete (pthread_key_t key); -+extern void *__pthread_getspecific (pthread_key_t key); -+extern int __pthread_setspecific (pthread_key_t key, const void *value); -+extern int __pthread_once (pthread_once_t *once_control, -+ void (*init_routine) (void)); -+extern int __pthread_atfork (void (*prepare) (void), void (*parent) (void), -+ void (*child) (void)); -+extern pthread_t __pthread_self (void); -+extern int __pthread_equal (pthread_t thread1, pthread_t thread2); -+extern int __pthread_detach (pthread_t th); -+extern int __pthread_cancel (pthread_t th); -+extern int __pthread_kill (pthread_t threadid, int signo); -+extern void __pthread_exit (void *value) __attribute__ ((__noreturn__)); -+extern int __pthread_join (pthread_t threadid, void **thread_return); -+extern int __pthread_setcanceltype (int type, int *oldtype); -+extern int __pthread_enable_asynccancel (void) attribute_hidden; -+extern void __pthread_disable_asynccancel (int oldtype) attribute_hidden; -+extern void __pthread_testcancel (void); -+extern int __pthread_timedjoin_ex (pthread_t, void **, const struct timespec *, -+ bool); -+ -+hidden_proto (__pthread_mutex_init) -+hidden_proto (__pthread_mutex_destroy) -+hidden_proto (__pthread_mutex_lock) -+hidden_proto (__pthread_mutex_trylock) -+hidden_proto (__pthread_mutex_unlock) -+hidden_proto (__pthread_rwlock_rdlock) -+hidden_proto (__pthread_rwlock_wrlock) -+hidden_proto (__pthread_rwlock_unlock) -+hidden_proto (__pthread_key_create) -+hidden_proto (__pthread_getspecific) -+hidden_proto (__pthread_setspecific) -+hidden_proto (__pthread_once) -+hidden_proto (__pthread_setcancelstate) -+hidden_proto (__pthread_testcancel) -+hidden_proto (__pthread_mutexattr_init) -+hidden_proto (__pthread_mutexattr_settype) -+hidden_proto (__pthread_timedjoin_ex) -+ -+extern int __pthread_cond_broadcast_2_0 (pthread_cond_2_0_t *cond); -+extern int __pthread_cond_destroy_2_0 (pthread_cond_2_0_t *cond); -+extern int __pthread_cond_init_2_0 (pthread_cond_2_0_t *cond, -+ const pthread_condattr_t *cond_attr); -+extern int __pthread_cond_signal_2_0 (pthread_cond_2_0_t *cond); -+extern int __pthread_cond_timedwait_2_0 (pthread_cond_2_0_t *cond, -+ pthread_mutex_t *mutex, -+ const struct timespec *abstime); -+extern int __pthread_cond_wait_2_0 (pthread_cond_2_0_t *cond, -+ pthread_mutex_t *mutex); -+ -+extern int __pthread_getaffinity_np (pthread_t th, size_t cpusetsize, -+ cpu_set_t *cpuset); -+ -+/* The two functions are in libc.so and not exported. */ -+extern int __libc_enable_asynccancel (void) attribute_hidden; -+extern void __libc_disable_asynccancel (int oldtype) attribute_hidden; -+ -+ -+/* The two functions are in librt.so and not exported. 
*/ -+extern int __librt_enable_asynccancel (void) attribute_hidden; -+extern void __librt_disable_asynccancel (int oldtype) attribute_hidden; -+ -+/* Special versions which use non-exported functions. */ -+extern void __pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer, -+ void (*routine) (void *), void *arg) -+ attribute_hidden; -+ -+/* Replace cleanup macros defined in with internal -+ versions that don't depend on unwind info and better support -+ cancellation. */ -+# undef pthread_cleanup_push -+# define pthread_cleanup_push(routine,arg) \ -+ { struct _pthread_cleanup_buffer _buffer; \ -+ __pthread_cleanup_push (&_buffer, (routine), (arg)); -+ -+extern void __pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, -+ int execute) attribute_hidden; -+# undef pthread_cleanup_pop -+# define pthread_cleanup_pop(execute) \ -+ __pthread_cleanup_pop (&_buffer, (execute)); } -+ -+# if defined __EXCEPTIONS && !defined __cplusplus -+/* Structure to hold the cleanup handler information. */ -+struct __pthread_cleanup_combined_frame -+{ -+ void (*__cancel_routine) (void *); -+ void *__cancel_arg; -+ int __do_it; -+ struct _pthread_cleanup_buffer __buffer; -+}; -+ -+/* Special cleanup macros which register cleanup both using -+ __pthread_cleanup_{push,pop} and using cleanup attribute. This is needed -+ for pthread_once, so that it supports both throwing exceptions from the -+ pthread_once callback (only cleanup attribute works there) and cancellation -+ of the thread running the callback if the callback or some routines it -+ calls don't have unwind information. */ -+ -+static __always_inline void -+__pthread_cleanup_combined_routine (struct __pthread_cleanup_combined_frame -+ *__frame) -+{ -+ if (__frame->__do_it) -+ { -+ __frame->__cancel_routine (__frame->__cancel_arg); -+ __frame->__do_it = 0; -+ __pthread_cleanup_pop (&__frame->__buffer, 0); -+ } -+} -+ -+static inline void -+__pthread_cleanup_combined_routine_voidptr (void *__arg) -+{ -+ struct __pthread_cleanup_combined_frame *__frame -+ = (struct __pthread_cleanup_combined_frame *) __arg; -+ if (__frame->__do_it) -+ { -+ __frame->__cancel_routine (__frame->__cancel_arg); -+ __frame->__do_it = 0; -+ } -+} -+ -+# define pthread_cleanup_combined_push(routine, arg) \ -+ do { \ -+ void (*__cancel_routine) (void *) = (routine); \ -+ struct __pthread_cleanup_combined_frame __clframe \ -+ __attribute__ ((__cleanup__ (__pthread_cleanup_combined_routine))) \ -+ = { .__cancel_routine = __cancel_routine, .__cancel_arg = (arg), \ -+ .__do_it = 1 }; \ -+ __pthread_cleanup_push (&__clframe.__buffer, \ -+ __pthread_cleanup_combined_routine_voidptr, \ -+ &__clframe); -+ -+# define pthread_cleanup_combined_pop(execute) \ -+ __pthread_cleanup_pop (&__clframe.__buffer, 0); \ -+ __clframe.__do_it = 0; \ -+ if (execute) \ -+ __cancel_routine (__clframe.__cancel_arg); \ -+ } while (0) -+ -+# endif -+ -+extern void __pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer, -+ void (*routine) (void *), void *arg); -+extern void __pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer, -+ int execute); -+ -+/* Old cleanup interfaces, still used in libc.so. 
*/ -+extern void _pthread_cleanup_push (struct _pthread_cleanup_buffer *buffer, -+ void (*routine) (void *), void *arg); -+extern void _pthread_cleanup_pop (struct _pthread_cleanup_buffer *buffer, -+ int execute); -+extern void _pthread_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer, -+ void (*routine) (void *), void *arg); -+extern void _pthread_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer, -+ int execute); -+ -+extern void __nptl_deallocate_tsd (void) attribute_hidden; -+ -+extern void __nptl_setxid_error (struct xid_command *cmdp, int error) -+ attribute_hidden; -+extern int __nptl_setxid (struct xid_command *cmdp) attribute_hidden; -+#ifndef SHARED -+extern void __nptl_set_robust (struct pthread *self); -+#endif -+ -+extern void __nptl_stacks_freeres (void) attribute_hidden; -+extern void __shm_directory_freeres (void) attribute_hidden; -+ -+extern void __wait_lookup_done (void) attribute_hidden; -+ -+#ifdef SHARED -+# define PTHREAD_STATIC_FN_REQUIRE(name) -+#else -+# define PTHREAD_STATIC_FN_REQUIRE(name) __asm (".globl " #name); -+#endif -+ -+/* Test if the mutex is suitable for the FUTEX_WAIT_REQUEUE_PI operation. */ -+#if (defined lll_futex_wait_requeue_pi \ -+ && defined __ASSUME_REQUEUE_PI) -+# define USE_REQUEUE_PI(mut) \ -+ ((mut) && (mut) != (void *) ~0l \ -+ && (((mut)->__data.__kind \ -+ & (PTHREAD_MUTEX_PRIO_INHERIT_NP | PTHREAD_MUTEX_ROBUST_NORMAL_NP)) \ -+ == PTHREAD_MUTEX_PRIO_INHERIT_NP)) -+#else -+# define USE_REQUEUE_PI(mut) 0 -+#endif -+ -+ -+/* Returns 0 if POL is a valid scheduling policy. */ -+static inline int -+check_sched_policy_attr (int pol) -+{ -+ if (pol == SCHED_OTHER || pol == SCHED_FIFO || pol == SCHED_RR) -+ return 0; -+ -+ return EINVAL; -+} -+ -+/* Returns 0 if PR is within the accepted range of priority values for -+ the scheduling policy POL or EINVAL otherwise. */ -+static inline int -+check_sched_priority_attr (int pr, int pol) -+{ -+ int min = __sched_get_priority_min (pol); -+ int max = __sched_get_priority_max (pol); -+ -+ if (min >= 0 && max >= 0 && pr >= min && pr <= max) -+ return 0; -+ -+ return EINVAL; -+} -+ -+/* Returns 0 if ST is a valid stack size for a thread stack and EINVAL -+ otherwise. */ -+static inline int -+check_stacksize_attr (size_t st) -+{ -+ if (st >= PTHREAD_STACK_MIN) -+ return 0; -+ -+ return EINVAL; -+} -+ -+#define ASSERT_TYPE_SIZE(type, size) \ -+ _Static_assert (sizeof (type) == size, \ -+ "sizeof (" #type ") != " #size) -+ -+#define ASSERT_PTHREAD_INTERNAL_SIZE(type, internal) \ -+ _Static_assert (sizeof ((type) { { 0 } }).__size >= sizeof (internal),\ -+ "sizeof (" #type ".__size) < sizeof (" #internal ")") -+ -+#define ASSERT_PTHREAD_STRING(x) __STRING (x) -+#define ASSERT_PTHREAD_INTERNAL_OFFSET(type, member, offset) \ -+ _Static_assert (offsetof (type, member) == offset, \ -+ "offset of " #member " field of " #type " != " \ -+ ASSERT_PTHREAD_STRING (offset)) -+ -+#endif /* pthreadP.h */ -diff --git a/nptl_2_17/pthread_2_17.h b/nptl_2_17/pthread_2_17.h -new file mode 100644 -index 00000000..3cb871a2 ---- /dev/null -+++ b/nptl_2_17/pthread_2_17.h -@@ -0,0 +1,1175 @@ -+/* Copyright (C) 2002-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. 
-+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _PTHREAD_H -+#define _PTHREAD_H 1 -+ -+#include "bits/pthreadtypes_2_17.h" -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+ -+/* Detach state. */ -+enum -+{ -+ PTHREAD_CREATE_JOINABLE, -+#define PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_JOINABLE -+ PTHREAD_CREATE_DETACHED -+#define PTHREAD_CREATE_DETACHED PTHREAD_CREATE_DETACHED -+}; -+ -+ -+/* Mutex types. */ -+enum -+{ -+ PTHREAD_MUTEX_TIMED_NP, -+ PTHREAD_MUTEX_RECURSIVE_NP, -+ PTHREAD_MUTEX_ERRORCHECK_NP, -+ PTHREAD_MUTEX_ADAPTIVE_NP -+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8 -+ , -+ PTHREAD_MUTEX_NORMAL = PTHREAD_MUTEX_TIMED_NP, -+ PTHREAD_MUTEX_RECURSIVE = PTHREAD_MUTEX_RECURSIVE_NP, -+ PTHREAD_MUTEX_ERRORCHECK = PTHREAD_MUTEX_ERRORCHECK_NP, -+ PTHREAD_MUTEX_DEFAULT = PTHREAD_MUTEX_NORMAL -+#endif -+#ifdef __USE_GNU -+ /* For compatibility. */ -+ , PTHREAD_MUTEX_FAST_NP = PTHREAD_MUTEX_TIMED_NP -+#endif -+}; -+ -+ -+#ifdef __USE_XOPEN2K -+/* Robust mutex or not flags. */ -+enum -+{ -+ PTHREAD_MUTEX_STALLED, -+ PTHREAD_MUTEX_STALLED_NP = PTHREAD_MUTEX_STALLED, -+ PTHREAD_MUTEX_ROBUST, -+ PTHREAD_MUTEX_ROBUST_NP = PTHREAD_MUTEX_ROBUST -+}; -+#endif -+ -+ -+#if defined __USE_POSIX199506 || defined __USE_UNIX98 -+/* Mutex protocols. */ -+enum -+{ -+ PTHREAD_PRIO_NONE, -+ PTHREAD_PRIO_INHERIT, -+ PTHREAD_PRIO_PROTECT -+}; -+#endif -+ -+ -+#if __PTHREAD_MUTEX_HAVE_PREV -+# define PTHREAD_MUTEX_INITIALIZER \ -+ { { 0, 0, 0, 0, 0, __PTHREAD_SPINS, { 0, 0 } } } -+# ifdef __USE_GNU -+# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, __PTHREAD_SPINS, { 0, 0 } } } -+# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, __PTHREAD_SPINS, { 0, 0 } } } -+# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, __PTHREAD_SPINS, { 0, 0 } } } -+ -+# endif -+#else -+# define PTHREAD_MUTEX_INITIALIZER \ -+ { { 0, 0, 0, 0, 0, { __PTHREAD_SPINS } } } -+# ifdef __USE_GNU -+# define PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, PTHREAD_MUTEX_RECURSIVE_NP, 0, { __PTHREAD_SPINS } } } -+# define PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, 0, { __PTHREAD_SPINS } } } -+# define PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP \ -+ { { 0, 0, 0, PTHREAD_MUTEX_ADAPTIVE_NP, 0, { __PTHREAD_SPINS } } } -+ -+# endif -+#endif -+ -+ -+/* Read-write lock types. */ -+#if defined __USE_UNIX98 || defined __USE_XOPEN2K -+enum -+{ -+ PTHREAD_RWLOCK_PREFER_READER_NP, -+ PTHREAD_RWLOCK_PREFER_WRITER_NP, -+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, -+ PTHREAD_RWLOCK_DEFAULT_NP = PTHREAD_RWLOCK_PREFER_READER_NP -+}; -+ -+/* Define __PTHREAD_RWLOCK_INT_FLAGS_SHARED to 1 if pthread_rwlock_t -+ has the shared field. All 64-bit architectures have the shared field -+ in pthread_rwlock_t. */ -+#ifndef __PTHREAD_RWLOCK_INT_FLAGS_SHARED -+# if __WORDSIZE == 64 -+# define __PTHREAD_RWLOCK_INT_FLAGS_SHARED 1 -+# endif -+#endif -+ -+/* Read-write lock initializers. 
*/ -+# define PTHREAD_RWLOCK_INITIALIZER \ -+ { { 0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, 0 } } -+# ifdef __USE_GNU -+# ifdef __PTHREAD_RWLOCK_INT_FLAGS_SHARED -+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, 0, 0, 0, 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, \ -+ PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP } } -+# else -+# if __BYTE_ORDER == __LITTLE_ENDIAN -+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP, \ -+ 0, __PTHREAD_RWLOCK_ELISION_EXTRA, 0, 0 } } -+# else -+# define PTHREAD_RWLOCK_WRITER_NONRECURSIVE_INITIALIZER_NP \ -+ { { 0, 0, 0, 0, 0, 0, 0, 0, 0, PTHREAD_RWLOCK_PREFER_WRITER_NONRECURSIVE_NP,\ -+ 0 } } -+# endif -+# endif -+# endif -+#endif /* Unix98 or XOpen2K */ -+ -+ -+/* Scheduler inheritance. */ -+enum -+{ -+ PTHREAD_INHERIT_SCHED, -+#define PTHREAD_INHERIT_SCHED PTHREAD_INHERIT_SCHED -+ PTHREAD_EXPLICIT_SCHED -+#define PTHREAD_EXPLICIT_SCHED PTHREAD_EXPLICIT_SCHED -+}; -+ -+ -+/* Scope handling. */ -+enum -+{ -+ PTHREAD_SCOPE_SYSTEM, -+#define PTHREAD_SCOPE_SYSTEM PTHREAD_SCOPE_SYSTEM -+ PTHREAD_SCOPE_PROCESS -+#define PTHREAD_SCOPE_PROCESS PTHREAD_SCOPE_PROCESS -+}; -+ -+ -+/* Process shared or private flag. */ -+enum -+{ -+ PTHREAD_PROCESS_PRIVATE, -+#define PTHREAD_PROCESS_PRIVATE PTHREAD_PROCESS_PRIVATE -+ PTHREAD_PROCESS_SHARED -+#define PTHREAD_PROCESS_SHARED PTHREAD_PROCESS_SHARED -+}; -+ -+ -+ -+/* Conditional variable handling. */ -+#define PTHREAD_COND_INITIALIZER { { 0, 0, 0, 0, 0, (void *) 0, 0, 0 } } -+ -+/* Cleanup buffers */ -+struct _pthread_cleanup_buffer -+{ -+ void (*__routine) (void *); /* Function to call. */ -+ void *__arg; /* Its argument. */ -+ int __canceltype; /* Saved cancellation type. */ -+ struct _pthread_cleanup_buffer *__prev; /* Chaining of cleanup functions. */ -+}; -+ -+/* Cancellation */ -+enum -+{ -+ PTHREAD_CANCEL_ENABLE, -+#define PTHREAD_CANCEL_ENABLE PTHREAD_CANCEL_ENABLE -+ PTHREAD_CANCEL_DISABLE -+#define PTHREAD_CANCEL_DISABLE PTHREAD_CANCEL_DISABLE -+}; -+enum -+{ -+ PTHREAD_CANCEL_DEFERRED, -+#define PTHREAD_CANCEL_DEFERRED PTHREAD_CANCEL_DEFERRED -+ PTHREAD_CANCEL_ASYNCHRONOUS -+#define PTHREAD_CANCEL_ASYNCHRONOUS PTHREAD_CANCEL_ASYNCHRONOUS -+}; -+#define PTHREAD_CANCELED ((void *) -1) -+ -+ -+/* Single execution handling. */ -+#define PTHREAD_ONCE_INIT 0 -+ -+ -+#ifdef __USE_XOPEN2K -+/* Value returned by 'pthread_barrier_wait' for one of the threads after -+ the required number of threads have called this function. -+ -1 is distinct from 0 and all errno constants */ -+# define PTHREAD_BARRIER_SERIAL_THREAD -1 -+#endif -+ -+ -+__BEGIN_DECLS -+ -+/* Create a new thread, starting with execution of START-ROUTINE -+ getting passed ARG. Creation attributed come from ATTR. The new -+ handle is stored in *NEWTHREAD. */ -+extern int pthread_create (pthread_t *__restrict __newthread, -+ const pthread_attr_t *__restrict __attr, -+ void *(*__start_routine) (void *), -+ void *__restrict __arg) __THROWNL __nonnull ((1, 3)); -+ -+/* Terminate calling thread. -+ -+ The registered cleanup handlers are called via exception handling -+ so we cannot mark this function with __THROW.*/ -+extern void pthread_exit (void *__retval) __attribute__ ((__noreturn__)); -+ -+/* Make calling thread wait for termination of the thread TH. The -+ exit status of the thread is stored in *THREAD_RETURN, if THREAD_RETURN -+ is not NULL. -+ -+ This function is a cancellation point and therefore not marked with -+ __THROW. 
*/ -+extern int pthread_join (pthread_t __th, void **__thread_return); -+ -+#ifdef __USE_GNU -+/* Check whether thread TH has terminated. If yes return the status of -+ the thread in *THREAD_RETURN, if THREAD_RETURN is not NULL. */ -+extern int pthread_tryjoin_np (pthread_t __th, void **__thread_return) __THROW; -+ -+/* Make calling thread wait for termination of the thread TH, but only -+ until TIMEOUT. The exit status of the thread is stored in -+ *THREAD_RETURN, if THREAD_RETURN is not NULL. -+ -+ This function is a cancellation point and therefore not marked with -+ __THROW. */ -+extern int pthread_timedjoin_np (pthread_t __th, void **__thread_return, -+ const struct timespec *__abstime); -+#endif -+ -+/* Indicate that the thread TH is never to be joined with PTHREAD_JOIN. -+ The resources of TH will therefore be freed immediately when it -+ terminates, instead of waiting for another thread to perform PTHREAD_JOIN -+ on it. */ -+extern int pthread_detach (pthread_t __th) __THROW; -+ -+ -+/* Obtain the identifier of the current thread. */ -+extern pthread_t pthread_self (void) __THROW __attribute__ ((__const__)); -+ -+/* Compare two thread identifiers. */ -+extern int pthread_equal (pthread_t __thread1, pthread_t __thread2) -+ __THROW __attribute__ ((__const__)); -+ -+ -+/* Thread attribute handling. */ -+ -+/* Initialize thread attribute *ATTR with default attributes -+ (detachstate is PTHREAD_JOINABLE, scheduling policy is SCHED_OTHER, -+ no user-provided stack). */ -+extern int pthread_attr_init (pthread_attr_t *__attr) __THROW __nonnull ((1)); -+ -+/* Destroy thread attribute *ATTR. */ -+extern int pthread_attr_destroy (pthread_attr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Get detach state attribute. */ -+extern int pthread_attr_getdetachstate (const pthread_attr_t *__attr, -+ int *__detachstate) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set detach state attribute. */ -+extern int pthread_attr_setdetachstate (pthread_attr_t *__attr, -+ int __detachstate) -+ __THROW __nonnull ((1)); -+ -+ -+/* Get the size of the guard area created for stack overflow protection. */ -+extern int pthread_attr_getguardsize (const pthread_attr_t *__attr, -+ size_t *__guardsize) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the size of the guard area created for stack overflow protection. */ -+extern int pthread_attr_setguardsize (pthread_attr_t *__attr, -+ size_t __guardsize) -+ __THROW __nonnull ((1)); -+ -+ -+/* Return in *PARAM the scheduling parameters of *ATTR. */ -+extern int pthread_attr_getschedparam (const pthread_attr_t *__restrict __attr, -+ struct sched_param *__restrict __param) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set scheduling parameters (priority, etc) in *ATTR according to PARAM. */ -+extern int pthread_attr_setschedparam (pthread_attr_t *__restrict __attr, -+ const struct sched_param *__restrict -+ __param) __THROW __nonnull ((1, 2)); -+ -+/* Return in *POLICY the scheduling policy of *ATTR. */ -+extern int pthread_attr_getschedpolicy (const pthread_attr_t *__restrict -+ __attr, int *__restrict __policy) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set scheduling policy in *ATTR according to POLICY. */ -+extern int pthread_attr_setschedpolicy (pthread_attr_t *__attr, int __policy) -+ __THROW __nonnull ((1)); -+ -+/* Return in *INHERIT the scheduling inheritance mode of *ATTR. 
*/ -+extern int pthread_attr_getinheritsched (const pthread_attr_t *__restrict -+ __attr, int *__restrict __inherit) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set scheduling inheritance mode in *ATTR according to INHERIT. */ -+extern int pthread_attr_setinheritsched (pthread_attr_t *__attr, -+ int __inherit) -+ __THROW __nonnull ((1)); -+ -+ -+/* Return in *SCOPE the scheduling contention scope of *ATTR. */ -+extern int pthread_attr_getscope (const pthread_attr_t *__restrict __attr, -+ int *__restrict __scope) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set scheduling contention scope in *ATTR according to SCOPE. */ -+extern int pthread_attr_setscope (pthread_attr_t *__attr, int __scope) -+ __THROW __nonnull ((1)); -+ -+/* Return the previously set address for the stack. */ -+extern int pthread_attr_getstackaddr (const pthread_attr_t *__restrict -+ __attr, void **__restrict __stackaddr) -+ __THROW __nonnull ((1, 2)) __attribute_deprecated__; -+ -+/* Set the starting address of the stack of the thread to be created. -+ Depending on whether the stack grows up or down the value must either -+ be higher or lower than all the address in the memory block. The -+ minimal size of the block must be PTHREAD_STACK_MIN. */ -+extern int pthread_attr_setstackaddr (pthread_attr_t *__attr, -+ void *__stackaddr) -+ __THROW __nonnull ((1)) __attribute_deprecated__; -+ -+/* Return the currently used minimal stack size. */ -+extern int pthread_attr_getstacksize (const pthread_attr_t *__restrict -+ __attr, size_t *__restrict __stacksize) -+ __THROW __nonnull ((1, 2)); -+ -+/* Add information about the minimum stack size needed for the thread -+ to be started. This size must never be less than PTHREAD_STACK_MIN -+ and must also not exceed the system limits. */ -+extern int pthread_attr_setstacksize (pthread_attr_t *__attr, -+ size_t __stacksize) -+ __THROW __nonnull ((1)); -+ -+#ifdef __USE_XOPEN2K -+/* Return the previously set address for the stack. */ -+extern int pthread_attr_getstack (const pthread_attr_t *__restrict __attr, -+ void **__restrict __stackaddr, -+ size_t *__restrict __stacksize) -+ __THROW __nonnull ((1, 2, 3)); -+ -+/* The following two interfaces are intended to replace the last two. They -+ require setting the address as well as the size since only setting the -+ address will make the implementation on some architectures impossible. */ -+extern int pthread_attr_setstack (pthread_attr_t *__attr, void *__stackaddr, -+ size_t __stacksize) __THROW __nonnull ((1)); -+#endif -+ -+#ifdef __USE_GNU -+/* Thread created with attribute ATTR will be limited to run only on -+ the processors represented in CPUSET. */ -+extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr, -+ size_t __cpusetsize, -+ const cpu_set_t *__cpuset) -+ __THROW __nonnull ((1, 3)); -+ -+/* Get bit set in CPUSET representing the processors threads created with -+ ATTR can run on. */ -+extern int pthread_attr_getaffinity_np (const pthread_attr_t *__attr, -+ size_t __cpusetsize, -+ cpu_set_t *__cpuset) -+ __THROW __nonnull ((1, 3)); -+ -+/* Get the default attributes used by pthread_create in this process. */ -+extern int pthread_getattr_default_np (pthread_attr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Set the default attributes to be used by pthread_create in this -+ process. */ -+extern int pthread_setattr_default_np (const pthread_attr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Initialize thread attribute *ATTR with attributes corresponding to the -+ already running thread TH. 
It shall be called on uninitialized ATTR -+ and destroyed with pthread_attr_destroy when no longer needed. */ -+extern int pthread_getattr_np (pthread_t __th, pthread_attr_t *__attr) -+ __THROW __nonnull ((2)); -+#endif -+ -+ -+/* Functions for scheduling control. */ -+ -+/* Set the scheduling parameters for TARGET_THREAD according to POLICY -+ and *PARAM. */ -+extern int pthread_setschedparam (pthread_t __target_thread, int __policy, -+ const struct sched_param *__param) -+ __THROW __nonnull ((3)); -+ -+/* Return in *POLICY and *PARAM the scheduling parameters for TARGET_THREAD. */ -+extern int pthread_getschedparam (pthread_t __target_thread, -+ int *__restrict __policy, -+ struct sched_param *__restrict __param) -+ __THROW __nonnull ((2, 3)); -+ -+/* Set the scheduling priority for TARGET_THREAD. */ -+extern int pthread_setschedprio (pthread_t __target_thread, int __prio) -+ __THROW; -+ -+ -+#ifdef __USE_GNU -+/* Get thread name visible in the kernel and its interfaces. */ -+extern int pthread_getname_np (pthread_t __target_thread, char *__buf, -+ size_t __buflen) -+ __THROW __nonnull ((2)); -+ -+/* Set thread name visible in the kernel and its interfaces. */ -+extern int pthread_setname_np (pthread_t __target_thread, const char *__name) -+ __THROW __nonnull ((2)); -+#endif -+ -+ -+#ifdef __USE_UNIX98 -+/* Determine level of concurrency. */ -+extern int pthread_getconcurrency (void) __THROW; -+ -+/* Set new concurrency level to LEVEL. */ -+extern int pthread_setconcurrency (int __level) __THROW; -+#endif -+ -+#ifdef __USE_GNU -+/* Yield the processor to another thread or process. -+ This function is similar to the POSIX `sched_yield' function but -+ might be differently implemented in the case of a m-on-n thread -+ implementation. */ -+extern int pthread_yield (void) __THROW; -+ -+ -+/* Limit specified thread TH to run only on the processors represented -+ in CPUSET. */ -+extern int pthread_setaffinity_np (pthread_t __th, size_t __cpusetsize, -+ const cpu_set_t *__cpuset) -+ __THROW __nonnull ((3)); -+ -+/* Get bit set in CPUSET representing the processors TH can run on. */ -+extern int pthread_getaffinity_np (pthread_t __th, size_t __cpusetsize, -+ cpu_set_t *__cpuset) -+ __THROW __nonnull ((3)); -+#endif -+ -+ -+/* Functions for handling initialization. */ -+ -+/* Guarantee that the initialization function INIT_ROUTINE will be called -+ only once, even if pthread_once is executed several times with the -+ same ONCE_CONTROL argument. ONCE_CONTROL must point to a static or -+ extern variable initialized to PTHREAD_ONCE_INIT. -+ -+ The initialization functions might throw exception which is why -+ this function is not marked with __THROW. */ -+extern int pthread_once (pthread_once_t *__once_control, -+ void (*__init_routine) (void)) __nonnull ((1, 2)); -+ -+ -+/* Functions for handling cancellation. -+ -+ Note that these functions are explicitly not marked to not throw an -+ exception in C++ code. If cancellation is implemented by unwinding -+ this is necessary to have the compiler generate the unwind information. */ -+ -+/* Set cancelability state of current thread to STATE, returning old -+ state in *OLDSTATE if OLDSTATE is not NULL. */ -+extern int pthread_setcancelstate (int __state, int *__oldstate); -+ -+/* Set cancellation state of current thread to TYPE, returning the old -+ type in *OLDTYPE if OLDTYPE is not NULL. */ -+extern int pthread_setcanceltype (int __type, int *__oldtype); -+ -+/* Cancel THREAD immediately or at the next possibility. 
*/ -+extern int pthread_cancel (pthread_t __th); -+ -+/* Test for pending cancellation for the current thread and terminate -+ the thread as per pthread_exit(PTHREAD_CANCELED) if it has been -+ cancelled. */ -+extern void pthread_testcancel (void); -+ -+ -+/* Cancellation handling with integration into exception handling. */ -+ -+typedef struct -+{ -+ struct -+ { -+ __jmp_buf __cancel_jmp_buf; -+ int __mask_was_saved; -+ } __cancel_jmp_buf[1]; -+ void *__pad[4]; -+} __pthread_unwind_buf_t __attribute__ ((__aligned__)); -+ -+/* No special attributes by default. */ -+#ifndef __cleanup_fct_attribute -+# define __cleanup_fct_attribute -+#endif -+ -+ -+/* Structure to hold the cleanup handler information. */ -+struct __pthread_cleanup_frame -+{ -+ void (*__cancel_routine) (void *); -+ void *__cancel_arg; -+ int __do_it; -+ int __cancel_type; -+}; -+ -+#if defined __GNUC__ && defined __EXCEPTIONS -+# ifdef __cplusplus -+/* Class to handle cancellation handler invocation. */ -+class __pthread_cleanup_class -+{ -+ void (*__cancel_routine) (void *); -+ void *__cancel_arg; -+ int __do_it; -+ int __cancel_type; -+ -+ public: -+ __pthread_cleanup_class (void (*__fct) (void *), void *__arg) -+ : __cancel_routine (__fct), __cancel_arg (__arg), __do_it (1) { } -+ ~__pthread_cleanup_class () { if (__do_it) __cancel_routine (__cancel_arg); } -+ void __setdoit (int __newval) { __do_it = __newval; } -+ void __defer () { pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, -+ &__cancel_type); } -+ void __restore () const { pthread_setcanceltype (__cancel_type, 0); } -+}; -+ -+/* Install a cleanup handler: ROUTINE will be called with arguments ARG -+ when the thread is canceled or calls pthread_exit. ROUTINE will also -+ be called with arguments ARG when the matching pthread_cleanup_pop -+ is executed with non-zero EXECUTE argument. -+ -+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always -+ be used in matching pairs at the same nesting level of braces. */ -+# define pthread_cleanup_push(routine, arg) \ -+ do { \ -+ __pthread_cleanup_class __clframe (routine, arg) -+ -+/* Remove a cleanup handler installed by the matching pthread_cleanup_push. -+ If EXECUTE is non-zero, the handler function is called. */ -+# define pthread_cleanup_pop(execute) \ -+ __clframe.__setdoit (execute); \ -+ } while (0) -+ -+# ifdef __USE_GNU -+/* Install a cleanup handler as pthread_cleanup_push does, but also -+ saves the current cancellation type and sets it to deferred -+ cancellation. */ -+# define pthread_cleanup_push_defer_np(routine, arg) \ -+ do { \ -+ __pthread_cleanup_class __clframe (routine, arg); \ -+ __clframe.__defer () -+ -+/* Remove a cleanup handler as pthread_cleanup_pop does, but also -+ restores the cancellation type that was in effect when the matching -+ pthread_cleanup_push_defer was called. */ -+# define pthread_cleanup_pop_restore_np(execute) \ -+ __clframe.__restore (); \ -+ __clframe.__setdoit (execute); \ -+ } while (0) -+# endif -+# else -+/* Function called to call the cleanup handler. As an extern inline -+ function the compiler is free to decide inlining the change when -+ needed or fall back on the copy which must exist somewhere -+ else. */ -+__extern_inline void -+__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame) -+{ -+ if (__frame->__do_it) -+ __frame->__cancel_routine (__frame->__cancel_arg); -+} -+ -+/* Install a cleanup handler: ROUTINE will be called with arguments ARG -+ when the thread is canceled or calls pthread_exit. 
ROUTINE will also -+ be called with arguments ARG when the matching pthread_cleanup_pop -+ is executed with non-zero EXECUTE argument. -+ -+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always -+ be used in matching pairs at the same nesting level of braces. */ -+# define pthread_cleanup_push(routine, arg) \ -+ do { \ -+ struct __pthread_cleanup_frame __clframe \ -+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \ -+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \ -+ .__do_it = 1 }; -+ -+/* Remove a cleanup handler installed by the matching pthread_cleanup_push. -+ If EXECUTE is non-zero, the handler function is called. */ -+# define pthread_cleanup_pop(execute) \ -+ __clframe.__do_it = (execute); \ -+ } while (0) -+ -+# ifdef __USE_GNU -+/* Install a cleanup handler as pthread_cleanup_push does, but also -+ saves the current cancellation type and sets it to deferred -+ cancellation. */ -+# define pthread_cleanup_push_defer_np(routine, arg) \ -+ do { \ -+ struct __pthread_cleanup_frame __clframe \ -+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \ -+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \ -+ .__do_it = 1 }; \ -+ (void) pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, \ -+ &__clframe.__cancel_type) -+ -+/* Remove a cleanup handler as pthread_cleanup_pop does, but also -+ restores the cancellation type that was in effect when the matching -+ pthread_cleanup_push_defer was called. */ -+# define pthread_cleanup_pop_restore_np(execute) \ -+ (void) pthread_setcanceltype (__clframe.__cancel_type, NULL); \ -+ __clframe.__do_it = (execute); \ -+ } while (0) -+# endif -+# endif -+#else -+/* Install a cleanup handler: ROUTINE will be called with arguments ARG -+ when the thread is canceled or calls pthread_exit. ROUTINE will also -+ be called with arguments ARG when the matching pthread_cleanup_pop -+ is executed with non-zero EXECUTE argument. -+ -+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always -+ be used in matching pairs at the same nesting level of braces. */ -+# define pthread_cleanup_push(routine, arg) \ -+ do { \ -+ __pthread_unwind_buf_t __cancel_buf; \ -+ void (*__cancel_routine) (void *) = (routine); \ -+ void *__cancel_arg = (arg); \ -+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \ -+ __cancel_buf.__cancel_jmp_buf, 0); \ -+ if (__glibc_unlikely (__not_first_call)) \ -+ { \ -+ __cancel_routine (__cancel_arg); \ -+ __pthread_unwind_next (&__cancel_buf); \ -+ /* NOTREACHED */ \ -+ } \ -+ \ -+ __pthread_register_cancel (&__cancel_buf); \ -+ do { -+extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+ -+/* Remove a cleanup handler installed by the matching pthread_cleanup_push. -+ If EXECUTE is non-zero, the handler function is called. */ -+# define pthread_cleanup_pop(execute) \ -+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\ -+ } while (0); \ -+ __pthread_unregister_cancel (&__cancel_buf); \ -+ if (execute) \ -+ __cancel_routine (__cancel_arg); \ -+ } while (0) -+extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+ -+# ifdef __USE_GNU -+/* Install a cleanup handler as pthread_cleanup_push does, but also -+ saves the current cancellation type and sets it to deferred -+ cancellation. 
*/ -+# define pthread_cleanup_push_defer_np(routine, arg) \ -+ do { \ -+ __pthread_unwind_buf_t __cancel_buf; \ -+ void (*__cancel_routine) (void *) = (routine); \ -+ void *__cancel_arg = (arg); \ -+ int __not_first_call = __sigsetjmp ((struct __jmp_buf_tag *) (void *) \ -+ __cancel_buf.__cancel_jmp_buf, 0); \ -+ if (__glibc_unlikely (__not_first_call)) \ -+ { \ -+ __cancel_routine (__cancel_arg); \ -+ __pthread_unwind_next (&__cancel_buf); \ -+ /* NOTREACHED */ \ -+ } \ -+ \ -+ __pthread_register_cancel_defer (&__cancel_buf); \ -+ do { -+extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+ -+/* Remove a cleanup handler as pthread_cleanup_pop does, but also -+ restores the cancellation type that was in effect when the matching -+ pthread_cleanup_push_defer was called. */ -+# define pthread_cleanup_pop_restore_np(execute) \ -+ do { } while (0);/* Empty to allow label before pthread_cleanup_pop. */\ -+ } while (0); \ -+ __pthread_unregister_cancel_restore (&__cancel_buf); \ -+ if (execute) \ -+ __cancel_routine (__cancel_arg); \ -+ } while (0) -+extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute; -+# endif -+ -+/* Internal interface to initiate cleanup. */ -+extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf) -+ __cleanup_fct_attribute __attribute__ ((__noreturn__)) -+# ifndef SHARED -+ __attribute__ ((__weak__)) -+# endif -+ ; -+#endif -+ -+/* Function used in the macros. */ -+struct __jmp_buf_tag; -+extern int __sigsetjmp (struct __jmp_buf_tag *__env, int __savemask) __THROWNL; -+ -+ -+/* Mutex handling. */ -+ -+/* Initialize a mutex. */ -+extern int pthread_mutex_init (pthread_mutex_t *__mutex, -+ const pthread_mutexattr_t *__mutexattr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy a mutex. */ -+extern int pthread_mutex_destroy (pthread_mutex_t *__mutex) -+ __THROW __nonnull ((1)); -+ -+/* Try locking a mutex. */ -+extern int pthread_mutex_trylock (pthread_mutex_t *__mutex) -+ __THROWNL __nonnull ((1)); -+ -+/* Lock a mutex. */ -+extern int pthread_mutex_lock (pthread_mutex_t *__mutex) -+ __THROWNL __nonnull ((1)); -+ -+#ifdef __USE_XOPEN2K -+/* Wait until lock becomes available, or specified time passes. */ -+extern int pthread_mutex_timedlock (pthread_mutex_t *__restrict __mutex, -+ const struct timespec *__restrict -+ __abstime) __THROWNL __nonnull ((1, 2)); -+#endif -+ -+/* Unlock a mutex. */ -+extern int pthread_mutex_unlock (pthread_mutex_t *__mutex) -+ __THROWNL __nonnull ((1)); -+ -+ -+/* Get the priority ceiling of MUTEX. */ -+extern int pthread_mutex_getprioceiling (const pthread_mutex_t * -+ __restrict __mutex, -+ int *__restrict __prioceiling) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the priority ceiling of MUTEX to PRIOCEILING, return old -+ priority ceiling value in *OLD_CEILING. */ -+extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex, -+ int __prioceiling, -+ int *__restrict __old_ceiling) -+ __THROW __nonnull ((1, 3)); -+ -+ -+#ifdef __USE_XOPEN2K8 -+/* Declare the state protected by MUTEX as consistent. */ -+extern int pthread_mutex_consistent (pthread_mutex_t *__mutex) -+ __THROW __nonnull ((1)); -+# ifdef __USE_GNU -+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mutex) -+ __THROW __nonnull ((1)); -+# endif -+#endif -+ -+ -+/* Functions for handling mutex attributes. */ -+ -+/* Initialize mutex attribute object ATTR with default attributes -+ (kind is PTHREAD_MUTEX_TIMED_NP). 
*/ -+extern int pthread_mutexattr_init (pthread_mutexattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy mutex attribute object ATTR. */ -+extern int pthread_mutexattr_destroy (pthread_mutexattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Get the process-shared flag of the mutex attribute ATTR. */ -+extern int pthread_mutexattr_getpshared (const pthread_mutexattr_t * -+ __restrict __attr, -+ int *__restrict __pshared) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the process-shared flag of the mutex attribute ATTR. */ -+extern int pthread_mutexattr_setpshared (pthread_mutexattr_t *__attr, -+ int __pshared) -+ __THROW __nonnull ((1)); -+ -+#if defined __USE_UNIX98 || defined __USE_XOPEN2K8 -+/* Return in *KIND the mutex kind attribute in *ATTR. */ -+extern int pthread_mutexattr_gettype (const pthread_mutexattr_t *__restrict -+ __attr, int *__restrict __kind) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the mutex kind attribute in *ATTR to KIND (either PTHREAD_MUTEX_NORMAL, -+ PTHREAD_MUTEX_RECURSIVE, PTHREAD_MUTEX_ERRORCHECK, or -+ PTHREAD_MUTEX_DEFAULT). */ -+extern int pthread_mutexattr_settype (pthread_mutexattr_t *__attr, int __kind) -+ __THROW __nonnull ((1)); -+#endif -+ -+/* Return in *PROTOCOL the mutex protocol attribute in *ATTR. */ -+extern int pthread_mutexattr_getprotocol (const pthread_mutexattr_t * -+ __restrict __attr, -+ int *__restrict __protocol) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the mutex protocol attribute in *ATTR to PROTOCOL (either -+ PTHREAD_PRIO_NONE, PTHREAD_PRIO_INHERIT, or PTHREAD_PRIO_PROTECT). */ -+extern int pthread_mutexattr_setprotocol (pthread_mutexattr_t *__attr, -+ int __protocol) -+ __THROW __nonnull ((1)); -+ -+/* Return in *PRIOCEILING the mutex prioceiling attribute in *ATTR. */ -+extern int pthread_mutexattr_getprioceiling (const pthread_mutexattr_t * -+ __restrict __attr, -+ int *__restrict __prioceiling) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the mutex prioceiling attribute in *ATTR to PRIOCEILING. */ -+extern int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *__attr, -+ int __prioceiling) -+ __THROW __nonnull ((1)); -+ -+#ifdef __USE_XOPEN2K -+/* Get the robustness flag of the mutex attribute ATTR. */ -+extern int pthread_mutexattr_getrobust (const pthread_mutexattr_t *__attr, -+ int *__robustness) -+ __THROW __nonnull ((1, 2)); -+# ifdef __USE_GNU -+extern int pthread_mutexattr_getrobust_np (const pthread_mutexattr_t *__attr, -+ int *__robustness) -+ __THROW __nonnull ((1, 2)); -+# endif -+ -+/* Set the robustness flag of the mutex attribute ATTR. */ -+extern int pthread_mutexattr_setrobust (pthread_mutexattr_t *__attr, -+ int __robustness) -+ __THROW __nonnull ((1)); -+# ifdef __USE_GNU -+extern int pthread_mutexattr_setrobust_np (pthread_mutexattr_t *__attr, -+ int __robustness) -+ __THROW __nonnull ((1)); -+# endif -+#endif -+ -+ -+#if defined __USE_UNIX98 || defined __USE_XOPEN2K -+/* Functions for handling read-write locks. */ -+ -+/* Initialize read-write lock RWLOCK using attributes ATTR, or use -+ the default values if later is NULL. */ -+extern int pthread_rwlock_init (pthread_rwlock_t *__restrict __rwlock, -+ const pthread_rwlockattr_t *__restrict -+ __attr) __THROW __nonnull ((1)); -+ -+/* Destroy read-write lock RWLOCK. */ -+extern int pthread_rwlock_destroy (pthread_rwlock_t *__rwlock) -+ __THROW __nonnull ((1)); -+ -+/* Acquire read lock for RWLOCK. */ -+extern int pthread_rwlock_rdlock (pthread_rwlock_t *__rwlock) -+ __THROWNL __nonnull ((1)); -+ -+/* Try to acquire read lock for RWLOCK. 
*/ -+extern int pthread_rwlock_tryrdlock (pthread_rwlock_t *__rwlock) -+ __THROWNL __nonnull ((1)); -+ -+# ifdef __USE_XOPEN2K -+/* Try to acquire read lock for RWLOCK or return after specfied time. */ -+extern int pthread_rwlock_timedrdlock (pthread_rwlock_t *__restrict __rwlock, -+ const struct timespec *__restrict -+ __abstime) __THROWNL __nonnull ((1, 2)); -+# endif -+ -+/* Acquire write lock for RWLOCK. */ -+extern int pthread_rwlock_wrlock (pthread_rwlock_t *__rwlock) -+ __THROWNL __nonnull ((1)); -+ -+/* Try to acquire write lock for RWLOCK. */ -+extern int pthread_rwlock_trywrlock (pthread_rwlock_t *__rwlock) -+ __THROWNL __nonnull ((1)); -+ -+# ifdef __USE_XOPEN2K -+/* Try to acquire write lock for RWLOCK or return after specfied time. */ -+extern int pthread_rwlock_timedwrlock (pthread_rwlock_t *__restrict __rwlock, -+ const struct timespec *__restrict -+ __abstime) __THROWNL __nonnull ((1, 2)); -+# endif -+ -+/* Unlock RWLOCK. */ -+extern int pthread_rwlock_unlock (pthread_rwlock_t *__rwlock) -+ __THROWNL __nonnull ((1)); -+ -+ -+/* Functions for handling read-write lock attributes. */ -+ -+/* Initialize attribute object ATTR with default values. */ -+extern int pthread_rwlockattr_init (pthread_rwlockattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy attribute object ATTR. */ -+extern int pthread_rwlockattr_destroy (pthread_rwlockattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Return current setting of process-shared attribute of ATTR in PSHARED. */ -+extern int pthread_rwlockattr_getpshared (const pthread_rwlockattr_t * -+ __restrict __attr, -+ int *__restrict __pshared) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set process-shared attribute of ATTR to PSHARED. */ -+extern int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *__attr, -+ int __pshared) -+ __THROW __nonnull ((1)); -+ -+/* Return current setting of reader/writer preference. */ -+extern int pthread_rwlockattr_getkind_np (const pthread_rwlockattr_t * -+ __restrict __attr, -+ int *__restrict __pref) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set reader/write preference. */ -+extern int pthread_rwlockattr_setkind_np (pthread_rwlockattr_t *__attr, -+ int __pref) __THROW __nonnull ((1)); -+#endif -+ -+ -+/* Functions for handling conditional variables. */ -+ -+/* Initialize condition variable COND using attributes ATTR, or use -+ the default values if later is NULL. */ -+extern int pthread_cond_init (pthread_cond_t *__restrict __cond, -+ const pthread_condattr_t *__restrict __cond_attr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy condition variable COND. */ -+extern int pthread_cond_destroy (pthread_cond_t *__cond) -+ __THROW __nonnull ((1)); -+ -+/* Wake up one thread waiting for condition variable COND. */ -+extern int pthread_cond_signal (pthread_cond_t *__cond) -+ __THROWNL __nonnull ((1)); -+ -+/* Wake up all threads waiting for condition variables COND. */ -+extern int pthread_cond_broadcast (pthread_cond_t *__cond) -+ __THROWNL __nonnull ((1)); -+ -+/* Wait for condition variable COND to be signaled or broadcast. -+ MUTEX is assumed to be locked before. -+ -+ This function is a cancellation point and therefore not marked with -+ __THROW. */ -+extern int pthread_cond_wait (pthread_cond_t *__restrict __cond, -+ pthread_mutex_t *__restrict __mutex) -+ __nonnull ((1, 2)); -+ -+/* Wait for condition variable COND to be signaled or broadcast until -+ ABSTIME. MUTEX is assumed to be locked before. ABSTIME is an -+ absolute time specification; zero is the beginning of the epoch -+ (00:00:00 GMT, January 1, 1970). 
-+ -+ This function is a cancellation point and therefore not marked with -+ __THROW. */ -+extern int pthread_cond_timedwait (pthread_cond_t *__restrict __cond, -+ pthread_mutex_t *__restrict __mutex, -+ const struct timespec *__restrict __abstime) -+ __nonnull ((1, 2, 3)); -+ -+/* Wait for condition variable COND to be signaled or broadcast until -+ ABSTIME measured by the specified clock. MUTEX is assumed to be -+ locked before. CLOCK is the clock to use. ABSTIME is an absolute -+ time specification against CLOCK's epoch. -+ -+ This function is a cancellation point and therefore not marked with -+ __THROW. */ -+extern int pthread_cond_clockwait (pthread_cond_t *__restrict __cond, -+ pthread_mutex_t *__restrict __mutex, -+ __clockid_t __clock_id, -+ const struct timespec *__restrict __abstime) -+ __nonnull ((1, 2, 4)); -+ -+/* Functions for handling condition variable attributes. */ -+ -+/* Initialize condition variable attribute ATTR. */ -+extern int pthread_condattr_init (pthread_condattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy condition variable attribute ATTR. */ -+extern int pthread_condattr_destroy (pthread_condattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Get the process-shared flag of the condition variable attribute ATTR. */ -+extern int pthread_condattr_getpshared (const pthread_condattr_t * -+ __restrict __attr, -+ int *__restrict __pshared) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the process-shared flag of the condition variable attribute ATTR. */ -+extern int pthread_condattr_setpshared (pthread_condattr_t *__attr, -+ int __pshared) __THROW __nonnull ((1)); -+ -+#ifdef __USE_XOPEN2K -+/* Get the clock selected for the condition variable attribute ATTR. */ -+extern int pthread_condattr_getclock (const pthread_condattr_t * -+ __restrict __attr, -+ __clockid_t *__restrict __clock_id) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the clock selected for the condition variable attribute ATTR. */ -+extern int pthread_condattr_setclock (pthread_condattr_t *__attr, -+ __clockid_t __clock_id) -+ __THROW __nonnull ((1)); -+#endif -+ -+ -+#ifdef __USE_XOPEN2K -+/* Functions to handle spinlocks. */ -+ -+/* Initialize the spinlock LOCK. If PSHARED is nonzero the spinlock can -+ be shared between different processes. */ -+extern int pthread_spin_init (pthread_spinlock_t *__lock, int __pshared) -+ __THROW __nonnull ((1)); -+ -+/* Destroy the spinlock LOCK. */ -+extern int pthread_spin_destroy (pthread_spinlock_t *__lock) -+ __THROW __nonnull ((1)); -+ -+/* Wait until spinlock LOCK is retrieved. */ -+extern int pthread_spin_lock (pthread_spinlock_t *__lock) -+ __THROWNL __nonnull ((1)); -+ -+/* Try to lock spinlock LOCK. */ -+extern int pthread_spin_trylock (pthread_spinlock_t *__lock) -+ __THROWNL __nonnull ((1)); -+ -+/* Release spinlock LOCK. */ -+extern int pthread_spin_unlock (pthread_spinlock_t *__lock) -+ __THROWNL __nonnull ((1)); -+ -+ -+/* Functions to handle barriers. */ -+ -+/* Initialize BARRIER with the attributes in ATTR. The barrier is -+ opened when COUNT waiters arrived. */ -+extern int pthread_barrier_init (pthread_barrier_t *__restrict __barrier, -+ const pthread_barrierattr_t *__restrict -+ __attr, unsigned int __count) -+ __THROW __nonnull ((1)); -+ -+/* Destroy a previously dynamically initialized barrier BARRIER. */ -+extern int pthread_barrier_destroy (pthread_barrier_t *__barrier) -+ __THROW __nonnull ((1)); -+ -+/* Wait on barrier BARRIER. 
*/ -+extern int pthread_barrier_wait (pthread_barrier_t *__barrier) -+ __THROWNL __nonnull ((1)); -+ -+ -+/* Initialize barrier attribute ATTR. */ -+extern int pthread_barrierattr_init (pthread_barrierattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Destroy previously dynamically initialized barrier attribute ATTR. */ -+extern int pthread_barrierattr_destroy (pthread_barrierattr_t *__attr) -+ __THROW __nonnull ((1)); -+ -+/* Get the process-shared flag of the barrier attribute ATTR. */ -+extern int pthread_barrierattr_getpshared (const pthread_barrierattr_t * -+ __restrict __attr, -+ int *__restrict __pshared) -+ __THROW __nonnull ((1, 2)); -+ -+/* Set the process-shared flag of the barrier attribute ATTR. */ -+extern int pthread_barrierattr_setpshared (pthread_barrierattr_t *__attr, -+ int __pshared) -+ __THROW __nonnull ((1)); -+#endif -+ -+ -+/* Functions for handling thread-specific data. */ -+ -+/* Create a key value identifying a location in the thread-specific -+ data area. Each thread maintains a distinct thread-specific data -+ area. DESTR_FUNCTION, if non-NULL, is called with the value -+ associated to that key when the key is destroyed. -+ DESTR_FUNCTION is not called if the value associated is NULL when -+ the key is destroyed. */ -+extern int pthread_key_create (pthread_key_t *__key, -+ void (*__destr_function) (void *)) -+ __THROW __nonnull ((1)); -+ -+/* Destroy KEY. */ -+extern int pthread_key_delete (pthread_key_t __key) __THROW; -+ -+/* Return current value of the thread-specific data slot identified by KEY. */ -+extern void *pthread_getspecific (pthread_key_t __key) __THROW; -+ -+/* Store POINTER in the thread-specific data slot identified by KEY. */ -+extern int pthread_setspecific (pthread_key_t __key, -+ const void *__pointer) __THROW ; -+ -+ -+#ifdef __USE_XOPEN2K -+/* Get ID of CPU-time clock for thread THREAD_ID. */ -+extern int pthread_getcpuclockid (pthread_t __thread_id, -+ __clockid_t *__clock_id) -+ __THROW __nonnull ((2)); -+#endif -+ -+ -+/* Install handlers to be called when a new process is created with FORK. -+ The PREPARE handler is called in the parent process just before performing -+ FORK. The PARENT handler is called in the parent process just after FORK. -+ The CHILD handler is called in the child process. Each of the three -+ handlers can be NULL, meaning that no handler needs to be called at that -+ point. -+ PTHREAD_ATFORK can be called several times, in which case the PREPARE -+ handlers are called in LIFO order (last added with PTHREAD_ATFORK, -+ first called before FORK), and the PARENT and CHILD handlers are called -+ in FIFO (first added, first called). */ -+ -+extern int pthread_atfork (void (*__prepare) (void), -+ void (*__parent) (void), -+ void (*__child) (void)) __THROW; -+ -+ -+#ifdef __USE_EXTERN_INLINES -+/* Optimizations. 
*/ -+__extern_inline int -+__NTH (pthread_equal (pthread_t __thread1, pthread_t __thread2)) -+{ -+ return __thread1 == __thread2; -+} -+#endif -+ -+__END_DECLS -+ -+#endif /* pthread.h */ --- -2.30.0 - diff --git a/0003-add-build-script-and-files-of-libpthread_2_17_so.patch b/0003-add-build-script-and-files-of-libpthread_2_17_so.patch deleted file mode 100644 index 75ca704..0000000 --- a/0003-add-build-script-and-files-of-libpthread_2_17_so.patch +++ /dev/null @@ -1,135 +0,0 @@ -From 7cb15fbef45361db6ad718077a4f0a6d2dc845f2 Mon Sep 17 00:00:00 2001 -From: Yang Yanchao -Date: Wed Nov 24 09:31:31 2021 +0800 -Subject: [PATCH 3/9] build extra lipthreadcond so - -Add the build script and file of libpthread-2.17.so - ---- - nptl_2_17/Makefile | 52 +++++++++++++++++++++++++++ - nptl_2_17/build_libpthread-2.17.so.sh | 10 ++++++ - nptl_2_17/libpthread-2.17-aarch64.map | 14 ++++++++ - nptl_2_17/libpthread-2.17-x86_64.map | 14 ++++++++ - 4 files changed, 90 insertions(+) - create mode 100644 nptl_2_17/Makefile - create mode 100644 nptl_2_17/build_libpthread-2.17.so.sh - create mode 100644 nptl_2_17/libpthread-2.17-aarch64.map - create mode 100644 nptl_2_17/libpthread-2.17-x86_64.map - -diff --git a/nptl_2_17/Makefile b/nptl_2_17/Makefile -new file mode 100644 -index 00000000..f248ce56 ---- /dev/null -+++ b/nptl_2_17/Makefile -@@ -0,0 +1,52 @@ -+include libpthread-2.17_config -+subdir=libpthread-2.17 -+objdir=../$(build_dir)/ -+ -+ -+ifdef subdir -+.. := ../ -+endif -+ -+objpfx := $(patsubst %//,%/,$(objdir)/$(subdir)/) -+common-objpfx = $(objdir)/ -+common-objdir = $(objdir) -+ -+sysdep_dir := $(..)sysdeps -+export sysdep_dir := $(sysdep_dir) -+ -+include $(common-objpfx)soversions.mk -+include $(common-objpfx)config.make -+ -+uses-callbacks = -fexceptions -+ -+sysdirs := $(foreach D,$(config-sysdirs),$(firstword $(filter /%,$D) $(..)$D)) -+ -++sysdep_dirs = $(sysdirs) -++sysdep_dirs := $(objdir) $(+sysdep_dirs) -+ -++sysdep-includes := $(foreach dir,$(+sysdep_dirs), $(addprefix -I,$(wildcard $(dir)/include) $(dir))) -+ -+compile_obj = pthread_cond_wait_2_17.os pthread_cond_signal_2_17.os pthread_cond_broadcast_2_17.os pthread_cond_init_2_17.os pthread_cond_destroy_2_17.os pthread_condattr_getclock_2_17.os pthread_condattr_getpshared_2_17.os pthread_condattr_init_2_17.os pthread_condattr_setclock_2_17.os cleanup_compat_2_17.os pthread_mutex_lock_2_17.os pthread_mutex_unlock_2_17.os tpp_2_17.os vars_2_17.os pause_nocancel_2_17.os lll_timedlock_wait_2_17.os pthread_mutex_cond_lock_2_17.os cancellation_2_17.os lowlevellock_2_17.os unwind_2_17.os -+ -+ifeq (x86_64, $(arch)) -+compile_obj += elision-timed_2_17.os elision-trylock_2_17.os elision-lock_2_17.os elision-unlock_2_17.os -+endif -+ -+exist_obj_dir = $(foreach n,$(exist_obj),../$(build_dir)/nptl/$(n)) -+ -+compile_obj_dir = $(foreach n,$(compile_obj),../$(build_dir)/nptl/$(n)) -+ -+CFLAGS = -c -std=gnu11 -fgnu89-inline -fPIE -DNDEBUG -O2 -Wall -Werror -Wp,-D_GLIBCXX_ASSERTIONS -Wundef -Wwrite-strings -fasynchronous-unwind-tables -fmerge-all-constants -frounding-math -fstack-clash-protection -fstack-protector-strong -g -mtune=generic -Wstrict-prototypes -Wold-style-definition -fno-math-errno -fPIC -fexceptions -fasynchronous-unwind-tables -ftls-model=initial-exec -D_FORTIFY_SOURCE=2 -DSHARED -DTOP_NAMESPACE=glibc -+ -+Headers = -I../include -I../$(build_dir)/nptl $(+sysdep-includes) -I../nptl_2_17 -I../nptl -I../libio -I../. 
-D_LIBC_REENTRANT -include ../$(build_dir)/libc-modules.h -include include/libc-symbols.h -+ -+all: libpthread-2.17.so -+ -+libpthread-2.17.so : $(compile_obj) libpthread-2.17_pic.a -+ gcc -shared -static-libgcc -Wl,-O1 -Wl,-z,defs -Wl,-dynamic-linker=/usr/local/lib/$(ld.so-version) -B../$(build_dir)/csu/ -Wl,--version-script=libpthread-2.17-$(arch).map -Wl,-soname=libpthread-2.17.so.0 -Wl,-z,noexecstack -Wtrampolines -Wl,-z,combreloc -Wl,-z,relro -Wl,--hash-style=both -Wl,-z,now -Wl,--enable-new-dtags,-z,nodelete,-z,initfirst -L../$(build_dir) -L../$(build_dir)/math -L../$(build_dir)/elf -L../$(build_dir)/dlfcn -L../$(build_dir)/nss -L../$(build_dir)/nis -L../$(build_dir)/rt -L../$(build_dir)/resolv -L../$(build_dir)/mathvec -L../$(build_dir)/support -L../$(build_dir)/crypt -L../$(build_dir)/nptl -Wl,-rpath-link=../$(build_dir):../$(build_dir)/math:../$(build_dir)/elf:../$(build_dir)/dlfcn:../$(build_dir)/nss:../$(build_dir)/nis:../$(build_dir)/rt:../$(build_dir)/resolv:../$(build_dir)/mathvec:../$(build_dir)/support:../$(build_dir)/crypt:../$(build_dir)/nptl -o ../$(build_dir)/nptl/libpthread-2.17.so ../$(build_dir)/csu/abi-note.o -Wl,--whole-archive ../$(build_dir)/nptl/libpthread-2.17_pic.a -Wl,--no-whole-archive -Wl,--start-group ../$(build_dir)/libc.so ../$(build_dir)/libc_nonshared.a -Wl,--as-needed ../$(build_dir)/elf/ld.so -Wl,--no-as-needed -Wl,--end-group -+ -+libpthread-2.17_pic.a : $(compile_obj_dir) $(exist_obj_dir) -+ ar cruv ../$(build_dir)/nptl/$@ $^ -+ -+$(compile_obj) : %.os : %.c -+ gcc $< $(CFLAGS) $(Headers) -o ../$(build_dir)/nptl/$@ -MD -MP -MF ../$(build_dir)/nptl/$@.dt -MT ../$(build_dir)/nptl/$@ -diff --git a/nptl_2_17/build_libpthread-2.17.so.sh b/nptl_2_17/build_libpthread-2.17.so.sh -new file mode 100644 -index 00000000..bdb97d0f ---- /dev/null -+++ b/nptl_2_17/build_libpthread-2.17.so.sh -@@ -0,0 +1,10 @@ -+#!/bin/sh -+set -e -+build_arch=$1 -+build_dir=$2 -+config_dir=libpthread-2.17_config -+ -+echo arch=${build_arch} > ${config_dir} -+echo build_dir=${build_dir} >> ${config_dir} -+make -+rm -rf ${config_dir} -diff --git a/nptl_2_17/libpthread-2.17-aarch64.map b/nptl_2_17/libpthread-2.17-aarch64.map -new file mode 100644 -index 00000000..2c49fe17 ---- /dev/null -+++ b/nptl_2_17/libpthread-2.17-aarch64.map -@@ -0,0 +1,14 @@ -+GLIBC_2.17 { -+ global: -+ pthread_cond_init; pthread_cond_destroy; -+ pthread_cond_signal; pthread_cond_broadcast; -+ pthread_cond_wait; pthread_cond_timedwait; -+ local: -+ *; -+}; -+GLIBC_2.34 { -+ global: -+ pthread_cond_clockwait; -+ local: -+ *; -+}; -diff --git a/nptl_2_17/libpthread-2.17-x86_64.map b/nptl_2_17/libpthread-2.17-x86_64.map -new file mode 100644 -index 00000000..b01e7d0d ---- /dev/null -+++ b/nptl_2_17/libpthread-2.17-x86_64.map -@@ -0,0 +1,14 @@ -+GLIBC_2.3.2 { -+ global: -+ pthread_cond_init; pthread_cond_destroy; -+ pthread_cond_signal; pthread_cond_broadcast; -+ pthread_cond_wait; pthread_cond_timedwait; -+ local: -+ *; -+}; -+GLIBC_2.34 { -+ global: -+ pthread_cond_clockwait; -+ local: -+ *; -+}; --- -2.30.0 - diff --git a/0004-add-two-header-files-with-some-deleted-macros.patch b/0004-add-two-header-files-with-some-deleted-macros.patch deleted file mode 100644 index 7acb8e2..0000000 --- a/0004-add-two-header-files-with-some-deleted-macros.patch +++ /dev/null @@ -1,166 +0,0 @@ -From d6e6184b4f10ef2cbdec09eae60350ced71e3de7 Mon Sep 17 00:00:00 2001 -From: Yang Yanchao -Date: Wed Nov 24 09:31:31 2021 +0800 -Subject: [PATCH 4/9] build extra lipthreadcond so - -For compatibility with glibc2.17, two header files are 
added with some -deleted macros. - ---- - nptl_2_17/compat_pthread_2_17.h | 61 +++++++++++++++++++++++++++ - nptl_2_17/old_macros_2_17.h | 75 +++++++++++++++++++++++++++++++++ - 2 files changed, 136 insertions(+) - create mode 100644 nptl_2_17/compat_pthread_2_17.h - create mode 100644 nptl_2_17/old_macros_2_17.h - -diff --git a/nptl_2_17/compat_pthread_2_17.h b/nptl_2_17/compat_pthread_2_17.h -new file mode 100644 -index 00000000..d13051ba ---- /dev/null -+++ b/nptl_2_17/compat_pthread_2_17.h -@@ -0,0 +1,61 @@ -+#ifndef _COMPAT_PTHREAD_2_17_H -+#define _COMPAT_PTHREAD_2_17_H 1 -+ -+#include -+#include -+ -+#ifdef __x86_64__ -+#define __PTHREAD_COMPAT_PADDING_MID -+#define __PTHREAD_COMPAT_PADDING_END -+#define __PTHREAD_MUTEX_LOCK_ELISION 1 -+# define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0 -+# define __PTHREAD_MUTEX_USE_UNION 0 -+//# define ENABLE_ELISION_SUPPORT 1 -+#else -+#define __PTHREAD_COMPAT_PADDING_MID -+#define __PTHREAD_COMPAT_PADDING_END -+#define __PTHREAD_MUTEX_LOCK_ELISION 0 -+#define __PTHREAD_MUTEX_NUSERS_AFTER_KIND 0 -+#define __PTHREAD_MUTEX_USE_UNION 0 -+#endif -+ -+#define CANCELSTATE_BIT 0 -+#define CANCELSTATE_BITMASK (0x01 << CANCELSTATE_BIT) -+ /* Bit set if asynchronous cancellation mode is selected. */ -+#define CANCELTYPE_BIT 1 -+#define CANCELTYPE_BITMASK (0x01 << CANCELTYPE_BIT) -+ /* Bit set if canceling has been initiated. */ -+#define CANCELING_BIT 2 -+#define CANCELING_BITMASK (0x01 << CANCELING_BIT) -+ /* Bit set if canceled. */ -+#define CANCELED_BIT 3 -+#define CANCELED_BITMASK (0x01 << CANCELED_BIT) -+ /* Bit set if thread is exiting. */ -+#define EXITING_BIT 4 -+#define EXITING_BITMASK (0x01 << EXITING_BIT) -+ /* Bit set if thread terminated and TCB is freed. */ -+#define TERMINATED_BIT 5 -+#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT) -+ /* Bit set if thread is supposed to change XID. */ -+#define SETXID_BIT 6 -+#define SETXID_BITMASK (0x01 << SETXID_BIT) -+ /* Mask for the rest. Helps the compiler to optimize. */ -+#define CANCEL_RESTMASK 0xffffff80 -+ -+ -+#define CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS(value) \ -+ (((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK \ -+ | EXITING_BITMASK | CANCEL_RESTMASK | TERMINATED_BITMASK)) \ -+ == (CANCELTYPE_BITMASK | CANCELED_BITMASK)) -+ -+# define INTERNAL_SYSCALL_DECL(err) do { } while (0) -+ -+/* -+ * __pause_nocancel delete by fbb4a3143724ef3f044a4f05351,add it -+ */ -+ -+__typeof (pause) __pause_nocancel; -+hidden_proto (__pause_nocancel) -+ -+#endif -+ -diff --git a/nptl_2_17/old_macros_2_17.h b/nptl_2_17/old_macros_2_17.h -new file mode 100644 -index 00000000..334b2ab1 ---- /dev/null -+++ b/nptl_2_17/old_macros_2_17.h -@@ -0,0 +1,75 @@ -+#ifndef _OLD_MACROS_2_17_H -+#define _OLD_MACROS_2_17_H 1 -+ -+/* -+ * Contains macros that have been defined in glibc2.34. -+ * Cancel the definition and use the old version. -+ * This header file needs to be included at the end. -+ */ -+#undef __lll_unlock -+#define __lll_unlock(futex, private) \ -+ ((void) \ -+ ({ \ -+ int *__futex = (futex); \ -+ int __private = (private); \ -+ int __oldval = atomic_exchange_rel (__futex, 0); \ -+ if (__glibc_unlikely (__oldval > 1)) \ -+ lll_futex_wake (__futex, 1, __private); \ -+ })) -+ -+#undef lll_unlock -+#define lll_unlock(futex, private) \ -+ __lll_unlock (&(futex), private) -+ -+extern int __lll_timedlock_wait (int *futex, const struct timespec *, -+ int private) attribute_hidden; -+ -+ -+/* As __lll_lock, but with a timeout. If the timeout occurs then return -+ ETIMEDOUT. 
If ABSTIME is invalid, return EINVAL. */ -+#define __lll_timedlock(futex, abstime, private) \ -+ ({ \ -+ int *__futex = (futex); \ -+ int __val = 0; \ -+ \ -+ if (__glibc_unlikely \ -+ (atomic_compare_and_exchange_bool_acq (__futex, 1, 0))) \ -+ __val = __lll_timedlock_wait (__futex, abstime, private); \ -+ __val; \ -+ }) -+#define lll_timedlock(futex, abstime, private) \ -+ __lll_timedlock (&(futex), abstime, private) -+ -+/* Verify whether the supplied clockid is supported by -+ lll_futex_clock_wait_bitset. */ -+#define lll_futex_supported_clockid(clockid) \ -+ ((clockid) == CLOCK_REALTIME || (clockid) == CLOCK_MONOTONIC) -+ -+/* The kernel currently only supports CLOCK_MONOTONIC or -+ CLOCK_REALTIME timeouts for FUTEX_WAIT_BITSET. We could attempt to -+ convert others here but currently do not. */ -+#define lll_futex_clock_wait_bitset(futexp, val, clockid, timeout, private) \ -+ ({ \ -+ long int __ret; \ -+ if (lll_futex_supported_clockid (clockid)) \ -+ { \ -+ const unsigned int clockbit = \ -+ (clockid == CLOCK_REALTIME) ? FUTEX_CLOCK_REALTIME : 0; \ -+ const int op = \ -+ __lll_private_flag (FUTEX_WAIT_BITSET | clockbit, private); \ -+ \ -+ __ret = lll_futex_syscall (6, futexp, op, val, \ -+ timeout, NULL /* Unused. */, \ -+ FUTEX_BITSET_MATCH_ANY); \ -+ } \ -+ else \ -+ __ret = -EINVAL; \ -+ __ret; \ -+ }) -+ -+# undef INTERNAL_VSYSCALL -+# define INTERNAL_VSYSCALL INTERNAL_SYSCALL -+# undef INLINE_VSYSCALL -+# define INLINE_VSYSCALL INLINE_SYSCALL -+ -+#endif --- -2.30.0 - diff --git a/0005-add-pthread-functions_h.patch b/0005-add-pthread-functions_h.patch deleted file mode 100644 index f169140..0000000 --- a/0005-add-pthread-functions_h.patch +++ /dev/null @@ -1,140 +0,0 @@ -From 463dc947b4f9bc4137c9919ee72b896403926474 Mon Sep 17 00:00:00 2001 -From: Roland McGrath -Date: Thu Jun 12 13:48:47 2014 -0700 -Subject: [PATCH 5/9] build extra lipthreadcond so - -add pthread-functions.h which delete by 1d67cf9e8a0194588e66fb3b7afcbdc3bf836a - ---- - nptl_2_17/pthread-functions_2_17.h | 119 +++++++++++++++++++++++++++++ - 1 file changed, 119 insertions(+) - create mode 100644 nptl_2_17/pthread-functions_2_17.h - -diff --git a/nptl_2_17/pthread-functions_2_17.h b/nptl_2_17/pthread-functions_2_17.h -new file mode 100644 -index 00000000..07ca8e7e ---- /dev/null -+++ b/nptl_2_17/pthread-functions_2_17.h -@@ -0,0 +1,119 @@ -+/* Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#ifndef _PTHREAD_FUNCTIONS_H -+#define _PTHREAD_FUNCTIONS_H 1 -+ -+#include -+#include -+#include -+#include -+ -+struct xid_command; -+ -+/* Data type shared with libc. The libc uses it to pass on calls to -+ the thread functions. 
*/ -+struct pthread_functions -+{ -+ int (*ptr_pthread_attr_destroy) (pthread_attr_t *); -+ int (*ptr___pthread_attr_init_2_0) (pthread_attr_t *); -+ int (*ptr___pthread_attr_init_2_1) (pthread_attr_t *); -+ int (*ptr_pthread_attr_getdetachstate) (const pthread_attr_t *, int *); -+ int (*ptr_pthread_attr_setdetachstate) (pthread_attr_t *, int); -+ int (*ptr_pthread_attr_getinheritsched) (const pthread_attr_t *, int *); -+ int (*ptr_pthread_attr_setinheritsched) (pthread_attr_t *, int); -+ int (*ptr_pthread_attr_getschedparam) (const pthread_attr_t *, -+ struct sched_param *); -+ int (*ptr_pthread_attr_setschedparam) (pthread_attr_t *, -+ const struct sched_param *); -+ int (*ptr_pthread_attr_getschedpolicy) (const pthread_attr_t *, int *); -+ int (*ptr_pthread_attr_setschedpolicy) (pthread_attr_t *, int); -+ int (*ptr_pthread_attr_getscope) (const pthread_attr_t *, int *); -+ int (*ptr_pthread_attr_setscope) (pthread_attr_t *, int); -+ int (*ptr_pthread_condattr_destroy) (pthread_condattr_t *); -+ int (*ptr_pthread_condattr_init) (pthread_condattr_t *); -+ int (*ptr___pthread_cond_broadcast) (pthread_cond_t *); -+ int (*ptr___pthread_cond_destroy) (pthread_cond_t *); -+ int (*ptr___pthread_cond_init) (pthread_cond_t *, -+ const pthread_condattr_t *); -+ int (*ptr___pthread_cond_signal) (pthread_cond_t *); -+ int (*ptr___pthread_cond_wait) (pthread_cond_t *, pthread_mutex_t *); -+ int (*ptr___pthread_cond_timedwait) (pthread_cond_t *, pthread_mutex_t *, -+ const struct timespec *); -+ int (*ptr___pthread_cond_clockwait) (pthread_cond_t *, -+ pthread_mutex_t *, -+ clockid_t, -+ const struct timespec *); -+ int (*ptr___pthread_cond_broadcast_2_0) (pthread_cond_2_0_t *); -+ int (*ptr___pthread_cond_destroy_2_0) (pthread_cond_2_0_t *); -+ int (*ptr___pthread_cond_init_2_0) (pthread_cond_2_0_t *, -+ const pthread_condattr_t *); -+ int (*ptr___pthread_cond_signal_2_0) (pthread_cond_2_0_t *); -+ int (*ptr___pthread_cond_wait_2_0) (pthread_cond_2_0_t *, pthread_mutex_t *); -+ int (*ptr___pthread_cond_timedwait_2_0) (pthread_cond_2_0_t *, -+ pthread_mutex_t *, -+ const struct timespec *); -+ int (*ptr_pthread_equal) (pthread_t, pthread_t); -+ void (*ptr___pthread_exit) (void *) __attribute__ ((__noreturn__)); -+ int (*ptr_pthread_getschedparam) (pthread_t, int *, struct sched_param *); -+ int (*ptr_pthread_setschedparam) (pthread_t, int, -+ const struct sched_param *); -+ int (*ptr_pthread_mutex_destroy) (pthread_mutex_t *); -+ int (*ptr_pthread_mutex_init) (pthread_mutex_t *, -+ const pthread_mutexattr_t *); -+ int (*ptr_pthread_mutex_lock) (pthread_mutex_t *); -+ int (*ptr_pthread_mutex_unlock) (pthread_mutex_t *); -+ int (*ptr___pthread_setcancelstate) (int, int *); -+ int (*ptr_pthread_setcanceltype) (int, int *); -+ void (*ptr___pthread_cleanup_upto) (__jmp_buf, char *); -+ int (*ptr___pthread_once) (pthread_once_t *, void (*) (void)); -+ int (*ptr___pthread_rwlock_rdlock) (pthread_rwlock_t *); -+ int (*ptr___pthread_rwlock_wrlock) (pthread_rwlock_t *); -+ int (*ptr___pthread_rwlock_unlock) (pthread_rwlock_t *); -+ int (*ptr___pthread_key_create) (pthread_key_t *, void (*) (void *)); -+ void *(*ptr___pthread_getspecific) (pthread_key_t); -+ int (*ptr___pthread_setspecific) (pthread_key_t, const void *); -+ void (*ptr__pthread_cleanup_push_defer) (struct _pthread_cleanup_buffer *, -+ void (*) (void *), void *); -+ void (*ptr__pthread_cleanup_pop_restore) (struct _pthread_cleanup_buffer *, -+ int); -+#define HAVE_PTR_NTHREADS -+ unsigned int *ptr_nthreads; -+ void (*ptr___pthread_unwind) 
(__pthread_unwind_buf_t *) -+ __attribute ((noreturn)) __cleanup_fct_attribute; -+ void (*ptr__nptl_deallocate_tsd) (void); -+ int (*ptr__nptl_setxid) (struct xid_command *); -+ void (*ptr_set_robust) (struct pthread *); -+}; -+ -+/* Variable in libc.so. */ -+extern struct pthread_functions __libc_pthread_functions attribute_hidden; -+extern int __libc_pthread_functions_init attribute_hidden; -+ -+#ifdef PTR_DEMANGLE -+# define PTHFCT_CALL(fct, params) \ -+ ({ __typeof (__libc_pthread_functions.fct) __p; \ -+ __p = __libc_pthread_functions.fct; \ -+ PTR_DEMANGLE (__p); \ -+ __p params; }) -+#else -+# define PTHFCT_CALL(fct, params) \ -+ __libc_pthread_functions.fct params -+#endif -+ -+#endif /* pthread-functions.h */ --- -2.30.0 - diff --git a/0006-add-elsion-function-which-moved-to-libc-in-glibc-2.34.patch b/0006-add-elsion-function-which-moved-to-libc-in-glibc-2.34.patch deleted file mode 100644 index e60fc08..0000000 --- a/0006-add-elsion-function-which-moved-to-libc-in-glibc-2.34.patch +++ /dev/null @@ -1,587 +0,0 @@ -From 1cdbe579482c07e9f4bb3baa4864da2d3e7eb837 Mon Sep 17 00:00:00 2001 -From: Andi Kleen -Date: Sat, 10 Nov 2012 00:51:26 -0800i -Subject: [PATCH 6/9] build extra lipthreadcond so - -add elsion functions which moved to libc in glibc-2.34. -Some attributes are changed and cannot be directly referenced. - - ---- - nptl_2_17/lll_timedlock_wait_2_17.c | 59 +++++++++++++++++++++++++++++ - nptl_2_17/elision-conf_2_17.c | 138 +++++++++++++++++++++++++++++++ - nptl_2_17/elision-lock_2_17.c | 107 ++++++++++++++++++++++++ - nptl_2_17/elision-timed_2_17.c | 27 ++++++ - nptl_2_17/elision-trylock_2_17.c | 75 +++++++++++++++++ - nptl_2_17/elision-unlock_2_17.c | 34 ++++++++ - nptl_2_17/hle_2_17.h | 75 +++++++++++++++++ - 6 files changed, 515 - insertions(+) - create mode 100644 nptl_2_17/lll_timedlock_wait_2_17.c - create mode 100644 nptl_2_17/elision-conf_2_17.c - create mode 100644 nptl_2_17/elision-lock_2_17.c - create mode 100644 nptl_2_17/elision-timed_2_17.c - create mode 100644 nptl_2_17/elision-trylock_2_17.c - create mode 100644 nptl_2_17/elision-unlock_2_17.c - create mode 100644 nptl_2_17/hle_2_17.h - -diff --git a/nptl_2_17/lll_timedlock_wait_2_17.c b/nptl_2_17/lll_timedlock_wait_2_17.c -new file mode 100644 -index 00000000..91bf9637 ---- /dev/null -+++ b/nptl_2_17/lll_timedlock_wait_2_17.c -@@ -0,0 +1,59 @@ -+/* Timed low level locking for pthread library. Generic futex-using version. -+ Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Paul Mackerras , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include -+#include -+#include -+#include -+ -+ -+int -+__lll_timedlock_wait (int *futex, const struct timespec *abstime, int private) -+{ -+ /* Reject invalid timeouts. */ -+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) -+ return EINVAL; -+ -+ /* Try locking. 
*/ -+ while (atomic_exchange_acq (futex, 2) != 0) -+ { -+ struct timeval tv; -+ -+ /* Get the current time. */ -+ (void) __gettimeofday (&tv, NULL); -+ -+ /* Compute relative timeout. */ -+ struct timespec rt; -+ rt.tv_sec = abstime->tv_sec - tv.tv_sec; -+ rt.tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000; -+ if (rt.tv_nsec < 0) -+ { -+ rt.tv_nsec += 1000000000; -+ --rt.tv_sec; -+ } -+ -+ if (rt.tv_sec < 0) -+ return ETIMEDOUT; -+ -+ /* If *futex == 2, wait until woken or timeout. */ -+ lll_futex_timed_wait (futex, 2, &rt, private); -+ } -+ -+ return 0; -+} -diff --git a/nptl_2_17/elision-conf_2_17.c b/nptl_2_17/elision-conf_2_17.c -new file mode 100644 -index 00000000..22af2944 ---- /dev/null -+++ b/nptl_2_17/elision-conf_2_17.c -@@ -0,0 +1,138 @@ -+/* elision-conf.c: Lock elision tunable parameters. -+ Copyright (C) 2013-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "config.h" -+#include -+#include -+#include -+#include -+ -+#if HAVE_TUNABLES -+# define TUNABLE_NAMESPACE elision -+#endif -+#include -+ -+/* Reasonable initial tuning values, may be revised in the future. -+ This is a conservative initial value. */ -+ -+struct elision_config __elision_aconf = -+ { -+ /* How often to not attempt to use elision if a transaction aborted -+ because the lock is already acquired. Expressed in number of lock -+ acquisition attempts. */ -+ .skip_lock_busy = 3, -+ /* How often to not attempt to use elision if a transaction aborted due -+ to reasons other than other threads' memory accesses. Expressed in -+ number of lock acquisition attempts. */ -+ .skip_lock_internal_abort = 3, -+ /* How often we retry using elision if there is chance for the transaction -+ to finish execution (e.g., it wasn't aborted due to the lock being -+ already acquired. */ -+ .retry_try_xbegin = 3, -+ /* Same as SKIP_LOCK_INTERNAL_ABORT but for trylock. */ -+ .skip_trylock_internal_abort = 3, -+ }; -+ -+/* Force elision for all new locks. This is used to decide whether existing -+ DEFAULT locks should be automatically upgraded to elision in -+ pthread_mutex_lock(). Disabled for suid programs. Only used when elision -+ is available. */ -+ -+int __pthread_force_elision attribute_hidden = 0; -+ -+#if HAVE_TUNABLES -+static inline void -+__always_inline -+do_set_elision_enable (int32_t elision_enable) -+{ -+ /* Enable elision if it's avaliable in hardware. It's not necessary to check -+ if __libc_enable_secure isn't enabled since elision_enable will be set -+ according to the default, which is disabled. */ -+ if (elision_enable == 1) -+ __pthread_force_elision = HAS_CPU_FEATURE (RTM) ? 1 : 0; -+} -+ -+/* The pthread->elision_enable tunable is 0 or 1 indicating that elision -+ should be disabled or enabled respectively. The feature will only be used -+ if it's supported by the hardware. 
*/ -+ -+void -+TUNABLE_CALLBACK (set_elision_enable) (tunable_val_t *valp) -+{ -+ int32_t elision_enable = (int32_t) valp->numval; -+ do_set_elision_enable (elision_enable); -+} -+ -+#define TUNABLE_CALLBACK_FNDECL(__name, __type) \ -+static inline void \ -+__always_inline \ -+do_set_elision_ ## __name (__type value) \ -+{ \ -+ __elision_aconf.__name = value; \ -+} \ -+void \ -+TUNABLE_CALLBACK (set_elision_ ## __name) (tunable_val_t *valp) \ -+{ \ -+ __type value = (__type) (valp)->numval; \ -+ do_set_elision_ ## __name (value); \ -+} -+ -+TUNABLE_CALLBACK_FNDECL (skip_lock_busy, int32_t); -+TUNABLE_CALLBACK_FNDECL (skip_lock_internal_abort, int32_t); -+TUNABLE_CALLBACK_FNDECL (retry_try_xbegin, int32_t); -+TUNABLE_CALLBACK_FNDECL (skip_trylock_internal_abort, int32_t); -+#endif -+ -+/* Initialize elision. */ -+ -+static void -+elision_init (int argc __attribute__ ((unused)), -+ char **argv __attribute__ ((unused)), -+ char **environ) -+{ -+#if HAVE_TUNABLES -+ /* Elision depends on tunables and must be explicitly turned on by setting -+ the appropriate tunable on a supported platform. */ -+ -+ TUNABLE_GET (enable, int32_t, -+ TUNABLE_CALLBACK (set_elision_enable)); -+ TUNABLE_GET (skip_lock_busy, int32_t, -+ TUNABLE_CALLBACK (set_elision_skip_lock_busy)); -+ TUNABLE_GET (skip_lock_internal_abort, int32_t, -+ TUNABLE_CALLBACK (set_elision_skip_lock_internal_abort)); -+ TUNABLE_GET (tries, int32_t, -+ TUNABLE_CALLBACK (set_elision_retry_try_xbegin)); -+ TUNABLE_GET (skip_trylock_internal_abort, int32_t, -+ TUNABLE_CALLBACK (set_elision_skip_trylock_internal_abort)); -+#endif -+ -+ if (!__pthread_force_elision) -+ __elision_aconf.retry_try_xbegin = 0; /* Disable elision on rwlocks. */ -+} -+ -+#ifdef SHARED -+# define INIT_SECTION ".init_array" -+#else -+# define INIT_SECTION ".preinit_array" -+#endif -+ -+void (*const __pthread_init_array []) (int, char **, char **) -+ __attribute__ ((section (INIT_SECTION), aligned (sizeof (void *)))) = -+{ -+ &elision_init -+}; -diff --git a/nptl_2_17/elision-lock_2_17.c b/nptl_2_17/elision-lock_2_17.c -new file mode 100644 -index 00000000..e6dbbc21 ---- /dev/null -+++ b/nptl_2_17/elision-lock_2_17.c -@@ -0,0 +1,107 @@ -+/* elision-lock.c: Elided pthread mutex lock. -+ Copyright (C) 2011-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include -+#include "pthreadP_2_17.h" -+#include "lowlevellock.h" -+#include "hle_2_17.h" -+#include -+ -+#if !defined(LLL_LOCK) && !defined(EXTRAARG) -+/* Make sure the configuration code is always linked in for static -+ libraries. */ -+#include "elision-conf_2_17.c" -+#endif -+ -+#ifndef EXTRAARG -+#define EXTRAARG -+#endif -+#ifndef LLL_LOCK -+#define LLL_LOCK(a,b) lll_lock(a,b), 0 -+#endif -+ -+#define aconf __elision_aconf -+ -+/* Adaptive lock using transactions. 
-+ By default the lock region is run as a transaction, and when it -+ aborts or the lock is busy the lock adapts itself. */ -+ -+int -+__lll_lock_elision (int *futex, short *adapt_count, EXTRAARG int private) -+{ -+ /* adapt_count can be accessed concurrently; these accesses can be both -+ inside of transactions (if critical sections are nested and the outer -+ critical section uses lock elision) and outside of transactions. Thus, -+ we need to use atomic accesses to avoid data races. However, the -+ value of adapt_count is just a hint, so relaxed MO accesses are -+ sufficient. */ -+ if (atomic_load_relaxed (adapt_count) <= 0) -+ { -+ unsigned status; -+ int try_xbegin; -+ -+ for (try_xbegin = aconf.retry_try_xbegin; -+ try_xbegin > 0; -+ try_xbegin--) -+ { -+ if ((status = _xbegin()) == _XBEGIN_STARTED) -+ { -+ if (*futex == 0) -+ return 0; -+ -+ /* Lock was busy. Fall back to normal locking. -+ Could also _xend here but xabort with 0xff code -+ is more visible in the profiler. */ -+ _xabort (_ABORT_LOCK_BUSY); -+ } -+ -+ if (!(status & _XABORT_RETRY)) -+ { -+ if ((status & _XABORT_EXPLICIT) -+ && _XABORT_CODE (status) == _ABORT_LOCK_BUSY) -+ { -+ /* Right now we skip here. Better would be to wait a bit -+ and retry. This likely needs some spinning. See -+ above for why relaxed MO is sufficient. */ -+ if (atomic_load_relaxed (adapt_count) -+ != aconf.skip_lock_busy) -+ atomic_store_relaxed (adapt_count, aconf.skip_lock_busy); -+ } -+ /* Internal abort. There is no chance for retry. -+ Use the normal locking and next time use lock. -+ Be careful to avoid writing to the lock. See above for why -+ relaxed MO is sufficient. */ -+ else if (atomic_load_relaxed (adapt_count) -+ != aconf.skip_lock_internal_abort) -+ atomic_store_relaxed (adapt_count, -+ aconf.skip_lock_internal_abort); -+ break; -+ } -+ } -+ } -+ else -+ { -+ /* Use a normal lock until the threshold counter runs out. -+ Lost updates possible. */ -+ atomic_store_relaxed (adapt_count, -+ atomic_load_relaxed (adapt_count) - 1); -+ } -+ -+ /* Use a normal lock here. */ -+ return LLL_LOCK ((*futex), private); -+} -diff --git a/nptl_2_17/elision-timed_2_17.c b/nptl_2_17/elision-timed_2_17.c -new file mode 100644 -index 00000000..5050f2d1 ---- /dev/null -+++ b/nptl_2_17/elision-timed_2_17.c -@@ -0,0 +1,27 @@ -+/* elision-timed.c: Lock elision timed lock. -+ Copyright (C) 2013-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . 
*/ -+ -+#include -+#include -+#include "lowlevellock.h" -+#include -+#define __lll_lock_elision __lll_timedlock_elision -+#define EXTRAARG const struct timespec *t, -+#undef LLL_LOCK -+#define LLL_LOCK(a, b) lll_timedlock(a, t, b) -+#include "elision-lock_2_17.c" -diff --git a/nptl_2_17/elision-trylock_2_17.c b/nptl_2_17/elision-trylock_2_17.c -new file mode 100644 -index 00000000..70d8f8b9 ---- /dev/null -+++ b/nptl_2_17/elision-trylock_2_17.c -@@ -0,0 +1,75 @@ -+/* elision-trylock.c: Lock eliding trylock for pthreads. -+ Copyright (C) 2013-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include -+ -+#include -+#include "hle_2_17.h" -+#include -+ -+#define aconf __elision_aconf -+ -+/* Try to elide a futex trylock. FUTEX is the futex variable. ADAPT_COUNT is -+ the adaptation counter in the mutex. */ -+ -+int -+__lll_trylock_elision (int *futex, short *adapt_count) -+{ -+ /* Implement POSIX semantics by forbiding nesting -+ trylock. Sorry. After the abort the code is re-executed -+ non transactional and if the lock was already locked -+ return an error. */ -+ _xabort (_ABORT_NESTED_TRYLOCK); -+ -+ /* Only try a transaction if it's worth it. See __lll_lock_elision for -+ why we need atomic accesses. Relaxed MO is sufficient because this is -+ just a hint. */ -+ if (atomic_load_relaxed (adapt_count) <= 0) -+ { -+ unsigned status; -+ -+ if ((status = _xbegin()) == _XBEGIN_STARTED) -+ { -+ if (*futex == 0) -+ return 0; -+ -+ /* Lock was busy. Fall back to normal locking. -+ Could also _xend here but xabort with 0xff code -+ is more visible in the profiler. */ -+ _xabort (_ABORT_LOCK_BUSY); -+ } -+ -+ if (!(status & _XABORT_RETRY)) -+ { -+ /* Internal abort. No chance for retry. For future -+ locks don't try speculation for some time. See above for MO. */ -+ if (atomic_load_relaxed (adapt_count) -+ != aconf.skip_lock_internal_abort) -+ atomic_store_relaxed (adapt_count, aconf.skip_lock_internal_abort); -+ } -+ /* Could do some retries here. */ -+ } -+ else -+ { -+ /* Lost updates are possible but harmless (see above). */ -+ atomic_store_relaxed (adapt_count, -+ atomic_load_relaxed (adapt_count) - 1); -+ } -+ -+ return lll_trylock (*futex); -+} -diff --git a/nptl_2_17/elision-unlock_2_17.c b/nptl_2_17/elision-unlock_2_17.c -new file mode 100644 -index 00000000..b5d38c5f ---- /dev/null -+++ b/nptl_2_17/elision-unlock_2_17.c -@@ -0,0 +1,34 @@ -+/* elision-unlock.c: Commit an elided pthread lock. -+ Copyright (C) 2013-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. 
-+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include "lowlevellock.h" -+#include "hle_2_17.h" -+#include -+ -+int -+__lll_unlock_elision(int *lock, int private) -+{ -+ /* When the lock was free we're in a transaction. -+ When you crash here you unlocked a free lock. */ -+ if (*lock == 0) -+ _xend(); -+ else -+ lll_unlock ((*lock), private); -+ return 0; -+} -diff --git a/nptl_2_17/hle_2_17.h b/nptl_2_17/hle_2_17.h -new file mode 100644 -index 00000000..4a7b9e3b ---- /dev/null -+++ b/nptl_2_17/hle_2_17.h -@@ -0,0 +1,75 @@ -+/* Shared RTM header. Emulate TSX intrinsics for compilers and assemblers -+ that do not support the intrinsics and instructions yet. */ -+#ifndef _HLE_H -+#define _HLE_H 1 -+ -+#ifdef __ASSEMBLER__ -+ -+.macro XBEGIN target -+ .byte 0xc7,0xf8 -+ .long \target-1f -+1: -+.endm -+ -+.macro XEND -+ .byte 0x0f,0x01,0xd5 -+.endm -+ -+.macro XABORT code -+ .byte 0xc6,0xf8,\code -+.endm -+ -+.macro XTEST -+ .byte 0x0f,0x01,0xd6 -+.endm -+ -+#endif -+ -+/* Official RTM intrinsics interface matching gcc/icc, but works -+ on older gcc compatible compilers and binutils. -+ We should somehow detect if the compiler supports it, because -+ it may be able to generate slightly better code. */ -+ -+#define _XBEGIN_STARTED (~0u) -+#define _XABORT_EXPLICIT (1 << 0) -+#define _XABORT_RETRY (1 << 1) -+#define _XABORT_CONFLICT (1 << 2) -+#define _XABORT_CAPACITY (1 << 3) -+#define _XABORT_DEBUG (1 << 4) -+#define _XABORT_NESTED (1 << 5) -+#define _XABORT_CODE(x) (((x) >> 24) & 0xff) -+ -+#define _ABORT_LOCK_BUSY 0xff -+#define _ABORT_LOCK_IS_LOCKED 0xfe -+#define _ABORT_NESTED_TRYLOCK 0xfd -+ -+#ifndef __ASSEMBLER__ -+ -+#define __force_inline __attribute__((__always_inline__)) inline -+ -+static __force_inline int _xbegin(void) -+{ -+ int ret = _XBEGIN_STARTED; -+ asm volatile (".byte 0xc7,0xf8 ; .long 0" : "+a" (ret) :: "memory"); -+ return ret; -+} -+ -+static __force_inline void _xend(void) -+{ -+ asm volatile (".byte 0x0f,0x01,0xd5" ::: "memory"); -+} -+ -+static __force_inline void _xabort(const unsigned int status) -+{ -+ asm volatile (".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); -+} -+ -+static __force_inline int _xtest(void) -+{ -+ unsigned char out; -+ asm volatile (".byte 0x0f,0x01,0xd6 ; setnz %0" : "=r" (out) :: "memory"); -+ return out; -+} -+ -+#endif -+#endif --- -2.30.0 - diff --git a/0007-add-lowlevellock_2_17_c.patch b/0007-add-lowlevellock_2_17_c.patch deleted file mode 100644 index b516e77..0000000 --- a/0007-add-lowlevellock_2_17_c.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 3df6f22e5fde470a6e0242e582e58919493bdd54 Mon Sep 17 00:00:00 2001 -From: Roland McGrath -Date: Tue, 15 Jul 2014 15:23:06 -0700 -Subject: [PATCH 7/9] build extra lipthreadcond so - -since 78fe624d44b8f6489b2d0de9bfdc09290a719a7, lowlevellock.c depends futex-internal.h which uses the private symbol __GI___libc_fatal of glibc. -We can't reference it in libpthread-2.17.so. 
Therefore, recompile in libphtread-2.17.so - ---- - nptl_2_17/lowlevellock_2_17.c | 46 ++++++++++++++++++++++ - 2 files changed, 46 insertions(+) - create mode 100644 nptl_2_17/lowlevellock_2_17.c - -diff --git a/nptl_2_17/lowlevellock_2_17.c b/nptl_2_17/lowlevellock_2_17.c -new file mode 100644 -index 00000000..bf1ca6b9 ---- /dev/null -+++ b/nptl_2_17/lowlevellock_2_17.c -@@ -0,0 +1,46 @@ -+/* low level locking for pthread library. Generic futex-using version. -+ Copyright (C) 2003-2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Paul Mackerras , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+void -+__lll_lock_wait_private (int *futex) -+{ -+ if (*futex == 2) -+ lll_futex_wait (futex, 2, LLL_PRIVATE); /* Wait if *futex == 2. */ -+ -+ while (atomic_exchange_acq (futex, 2) != 0) -+ lll_futex_wait (futex, 2, LLL_PRIVATE); /* Wait if *futex == 2. */ -+} -+ -+ -+/* This function doesn't get included in libc. */ -+void -+__lll_lock_wait (int *futex, int private) -+{ -+ if (*futex == 2) -+ lll_futex_wait (futex, 2, private); /* Wait if *futex == 2. */ -+ -+ while (atomic_exchange_acq (futex, 2) != 0) -+ lll_futex_wait (futex, 2, private); /* Wait if *futex == 2. */ -+} --- -2.30.0 - diff --git a/0008-add-pause_nocancel_2_17.patch b/0008-add-pause_nocancel_2_17.patch deleted file mode 100644 index 8d7ec88..0000000 --- a/0008-add-pause_nocancel_2_17.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 329ea513b451ae8322aa7a24ed84da13992af2dd Mon Sep 17 00:00:00 2001 -From: Zack Weinberg -Date: Tue, 3 Apr 2018 18:26:44 -0400 -Subject: [PATCH 8/9] build extra lipthreadcond so - -since bb4a3143724ef3f044a4f05351fe041300ee382, Remove pause and nanosleep not cancel wrappers -To build libtphread-2.17.so, we added it back. - ---- - nptl_2_17/pause_nocancel_2_17.c | 34 +++++++++++++++++++++++++++++++++ - 1 file changed, 34 insertions(+) - create mode 100644 nptl_2_17/pause_nocancel_2_17.c - -diff --git a/nptl_2_17/pause_nocancel_2_17.c b/nptl_2_17/pause_nocancel_2_17.c -new file mode 100644 -index 00000000..ab8e78d2 ---- /dev/null -+++ b/nptl_2_17/pause_nocancel_2_17.c -@@ -0,0 +1,34 @@ -+/* Linux pause syscall implementation -- non-cancellable. -+ Copyright (C) 2018 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. 
-+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library. If not, see -+ . */ -+ -+#include -+#include -+#include -+#include -+#include -+ -+int -+__pause_nocancel (void) -+{ -+#ifdef __NR_pause -+ return INLINE_SYSCALL_CALL (pause); -+#else -+ return INLINE_SYSCALL_CALL (ppoll, NULL, 0, NULL, NULL); -+#endif -+} -+hidden_def (__pause_nocancel) --- -2.30.0 - diff --git a/0009-add-unwind-with-longjmp.patch b/0009-add-unwind-with-longjmp.patch deleted file mode 100644 index cfe1ecb..0000000 --- a/0009-add-unwind-with-longjmp.patch +++ /dev/null @@ -1,161 +0,0 @@ -From 09d65ff393e9183eecba1e5cb877e95dbdd3d4a4 Mon Sep 17 00:00:00 2001 -From: Ulrich Drepper -Date: Sat, 12 Apr 2003 00:58:26 +0000 -Subject: [PATCH 9/9] build extra lipthreadcond so - -since 6253bacdc00de132dec452ff7c6ce3ba7fa23d81, __libc_longjmp became a -private interface.We can't quote directly. -Change it to longjmp - ---- - nptl_2_17/unwind_2_17.c | 138 ++++++++++++++++++++++++++++++++++++++++ - 1 file changed, 138 insertions(+) - create mode 100644 nptl_2_17/unwind_2_17.c - -diff --git a/nptl_2_17/unwind_2_17.c b/nptl_2_17/unwind_2_17.c -new file mode 100644 -index 00000000..ada8f74d ---- /dev/null -+++ b/nptl_2_17/unwind_2_17.c -@@ -0,0 +1,138 @@ -+/* Copyright (C) 2003-2016 Free Software Foundation, Inc. -+ This file is part of the GNU C Library. -+ Contributed by Ulrich Drepper -+ and Richard Henderson , 2003. -+ -+ The GNU C Library is free software; you can redistribute it and/or -+ modify it under the terms of the GNU Lesser General Public -+ License as published by the Free Software Foundation; either -+ version 2.1 of the License, or (at your option) any later version. -+ -+ The GNU C Library is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ Lesser General Public License for more details. -+ -+ You should have received a copy of the GNU Lesser General Public -+ License along with the GNU C Library; if not, see -+ . */ -+ -+#include "pthreadP_2_17.h" -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#ifdef _STACK_GROWS_DOWN -+# define FRAME_LEFT(frame, other, adj) \ -+ ((uintptr_t) frame - adj >= (uintptr_t) other - adj) -+#elif _STACK_GROWS_UP -+# define FRAME_LEFT(frame, other, adj) \ -+ ((uintptr_t) frame - adj <= (uintptr_t) other - adj) -+#else -+# error "Define either _STACK_GROWS_DOWN or _STACK_GROWS_UP" -+#endif -+ -+static _Unwind_Reason_Code -+unwind_stop (int version, _Unwind_Action actions, -+ _Unwind_Exception_Class exc_class, -+ struct _Unwind_Exception *exc_obj, -+ struct _Unwind_Context *context, void *stop_parameter) -+{ -+ struct pthread_unwind_buf *buf = stop_parameter; -+ struct pthread *self = THREAD_SELF; -+ struct _pthread_cleanup_buffer *curp = THREAD_GETMEM (self, cleanup); -+ int do_longjump = 0; -+ -+ /* Adjust all pointers used in comparisons, so that top of thread's -+ stack is at the top of address space. Without that, things break -+ if stack is allocated above the main stack. */ -+ uintptr_t adj = (uintptr_t) self->stackblock + self->stackblock_size; -+ -+ /* Do longjmp if we're at "end of stack", aka "end of unwind data". -+ We assume there are only C frame without unwind data in between -+ here and the jmp_buf target. Otherwise simply note that the CFA -+ of a function is NOT within it's stack frame; it's the SP of the -+ previous frame. 
*/ -+ if ((actions & _UA_END_OF_STACK) -+ || ! _JMPBUF_CFA_UNWINDS_ADJ (buf->cancel_jmp_buf[0].jmp_buf, context, -+ adj)) -+ do_longjump = 1; -+ -+ if (__glibc_unlikely (curp != NULL)) -+ { -+ /* Handle the compatibility stuff. Execute all handlers -+ registered with the old method which would be unwound by this -+ step. */ -+ struct _pthread_cleanup_buffer *oldp = buf->priv.data.cleanup; -+ void *cfa = (void *) (_Unwind_Ptr) _Unwind_GetCFA (context); -+ -+ if (curp != oldp && (do_longjump || FRAME_LEFT (cfa, curp, adj))) -+ { -+ do -+ { -+ /* Pointer to the next element. */ -+ struct _pthread_cleanup_buffer *nextp = curp->__prev; -+ -+ /* Call the handler. */ -+ curp->__routine (curp->__arg); -+ -+ /* To the next. */ -+ curp = nextp; -+ } -+ while (curp != oldp -+ && (do_longjump || FRAME_LEFT (cfa, curp, adj))); -+ -+ /* Mark the current element as handled. */ -+ THREAD_SETMEM (self, cleanup, curp); -+ } -+ } -+ -+ if (do_longjump) -+ longjmp ((struct __jmp_buf_tag *) buf->cancel_jmp_buf, 1); -+ -+ return _URC_NO_REASON; -+} -+ -+ -+static void -+unwind_cleanup (_Unwind_Reason_Code reason, struct _Unwind_Exception *exc) -+{ -+ /* When we get here a C++ catch block didn't rethrow the object. We -+ cannot handle this case and therefore abort. */ -+ __libc_fatal ("FATAL: exception not rethrown\n"); -+} -+ -+ -+void -+__cleanup_fct_attribute __attribute ((noreturn)) -+__pthread_unwind (__pthread_unwind_buf_t *buf) -+{ -+ struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf; -+ struct pthread *self = THREAD_SELF; -+ -+ /* This is not a catchable exception, so don't provide any details about -+ the exception type. We do need to initialize the field though. */ -+ THREAD_SETMEM (self, exc.exception_class, 0); -+ THREAD_SETMEM (self, exc.exception_cleanup, &unwind_cleanup); -+ -+ _Unwind_ForcedUnwind (&self->exc, unwind_stop, ibuf); -+ /* NOTREACHED */ -+ -+ /* We better do not get here. */ -+ abort (); -+} -+hidden_def (__pthread_unwind) -+ -+ -+void -+__cleanup_fct_attribute __attribute ((noreturn)) -+__pthread_unwind_next (__pthread_unwind_buf_t *buf) -+{ -+ struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf; -+ -+ __pthread_unwind ((__pthread_unwind_buf_t *) ibuf->priv.data.prev); -+} -+hidden_def (__pthread_unwind_next) --- -2.30.0 - -- Gitee
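
The version scripts removed above (libpthread-2.17-aarch64.map and libpthread-2.17-x86_64.map) pin the condition-variable family (pthread_cond_init, pthread_cond_destroy, pthread_cond_signal, pthread_cond_broadcast, pthread_cond_wait, pthread_cond_timedwait, plus pthread_cond_clockwait at GLIBC_2.34) to the old symbol versions. Below is a minimal smoke test that exercises exactly those calls. It is a sketch for illustration only: the file name, the compile command and the idea of running it against the rebuilt compatibility library are assumptions, not part of the removed patches.

/* cond_smoke.c - minimal exercise of the pthread_cond_* symbols that the
   libpthread-2.17-*.map version scripts export.  Illustrative only; the
   file name and build/run commands are assumptions, not taken from the
   removed patches. */

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int ready = 0;

static void *
producer (void *arg)
{
  pthread_mutex_lock (&lock);
  ready = 1;
  pthread_cond_signal (&cond);	/* Wake the waiter in main.  */
  pthread_mutex_unlock (&lock);
  return NULL;
}

int
main (void)
{
  pthread_t thr;
  struct timespec abstime;

  /* pthread_cond_timedwait takes an absolute CLOCK_REALTIME deadline;
     use "now plus two seconds".  */
  clock_gettime (CLOCK_REALTIME, &abstime);
  abstime.tv_sec += 2;

  pthread_create (&thr, NULL, producer, NULL);

  pthread_mutex_lock (&lock);
  while (!ready)
    {
      int rc = pthread_cond_timedwait (&cond, &lock, &abstime);
      if (rc == ETIMEDOUT)
	{
	  puts ("timed out");
	  break;
	}
    }
  pthread_mutex_unlock (&lock);

  pthread_join (thr, NULL);
  puts (ready ? "signaled" : "not signaled");
  return 0;
}

One possible way to use it, assuming the compatibility library has been built as the Makefile above describes: compile with "gcc -pthread cond_smoke.c -o cond_smoke", run it once against the system libpthread and once with the compatibility library preloaded, and compare which implementation is bound, for example with "LD_DEBUG=bindings ./cond_smoke 2>&1 | grep pthread_cond".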