diff -pruN 1:3.8.1-27/compiler-rt/lib/asan/asan_linux.cc 1:3.8.1-27ubuntu1/compiler-rt/lib/asan/asan_linux.cc
--- 1:3.8.1-27/compiler-rt/lib/asan/asan_linux.cc	2015-12-03 10:39:43.000000000 +0000
+++ 1:3.8.1-27ubuntu1/compiler-rt/lib/asan/asan_linux.cc	2017-12-27 02:41:48.000000000 +0000
@@ -31,6 +31,7 @@
 #include <dlfcn.h>
 #include <fcntl.h>
 #include <pthread.h>
+#include <signal.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <unwind.h>
diff -pruN 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
--- 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc	2016-06-01 08:17:03.000000000 +0000
+++ 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc	2017-12-27 02:41:48.000000000 +0000
@@ -565,8 +565,7 @@ uptr internal_prctl(int option, uptr arg
 }
 #endif
 
-uptr internal_sigaltstack(const struct sigaltstack *ss,
-                         struct sigaltstack *oss) {
+uptr internal_sigaltstack(const void *ss, void *oss) {
   return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
 }
 
diff -pruN 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_linux.h 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
--- 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_linux.h	2015-12-08 21:54:39.000000000 +0000
+++ 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_linux.h	2017-12-27 02:41:48.000000000 +0000
@@ -21,7 +21,6 @@
 #include "sanitizer_platform_limits_posix.h"
 
 struct link_map;  // Opaque type returned by dlopen().
-struct sigaltstack;
 
 namespace __sanitizer {
 // Dirent structure for getdents(). Note that this structure is different from
@@ -30,8 +29,7 @@ struct linux_dirent;
 
 // Syscall wrappers.
 uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
-uptr internal_sigaltstack(const struct sigaltstack* ss,
-                          struct sigaltstack* oss);
+uptr internal_sigaltstack(const void* ss, void* oss);
 uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
     __sanitizer_sigset_t *oldset);
 void internal_sigfillset(__sanitizer_sigset_t *set);
diff -pruN 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
--- 1:3.8.1-27/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc	2016-06-01 08:17:03.000000000 +0000
+++ 1:3.8.1-27ubuntu1/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc	2017-12-27 02:41:48.000000000 +0000
@@ -272,7 +272,7 @@ static int TracerThread(void* argument)
 
   // Alternate stack for signal handling.
   InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
-  struct sigaltstack handler_stack;
+  stack_t handler_stack;
   internal_memset(&handler_stack, 0, sizeof(handler_stack));
   handler_stack.ss_sp = handler_stack_memory.data();
   handler_stack.ss_size = kHandlerStackSize;
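For reference, a minimal standalone sketch of the same alternate-stack setup, assuming glibc >= 2.26 headers (where the 'struct sigaltstack' tag is gone and only the POSIX typedef stack_t remains); the helper name and buffer handling below are illustrative, not part of the patch:

    #include <signal.h>   // stack_t, sigaltstack()
    #include <string.h>   // memset

    // Hypothetical helper: registers an alternate signal stack. Declaring the
    // local as the POSIX typedef 'stack_t' compiles against both pre- and
    // post-2.26 glibc, while 'struct sigaltstack' only compiles pre-2.26.
    static void install_alt_stack(char *buffer, size_t size) {
      stack_t alt;
      memset(&alt, 0, sizeof(alt));
      alt.ss_sp = buffer;          // caller keeps this buffer alive
      alt.ss_size = size;
      alt.ss_flags = 0;
      sigaltstack(&alt, nullptr);  // previous stack not needed, pass nullptr
    }
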
diff -pruN 1:3.8.1-27/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc 1:3.8.1-27ubuntu1/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
--- 1:3.8.1-27/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc	2015-12-14 16:26:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc	2017-12-27 02:41:48.000000000 +0000
@@ -311,7 +311,7 @@ bool IsGlobalVar(uptr addr) {
 int ExtractResolvFDs(void *state, int *fds, int nfd) {
 #if SANITIZER_LINUX && !SANITIZER_ANDROID
   int cnt = 0;
-  __res_state *statp = (__res_state*)state;
+  struct __res_state *statp = (struct __res_state*)state;
   for (int i = 0; i < MAXNS && cnt < nfd; i++) {
     if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
       fds[cnt++] = statp->_u._ext.nssocks[i];
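A small illustration of why the cast changes, assuming glibc 2.26 <resolv.h>, where the unqualified name '__res_state' no longer resolves to the struct type in C++ and the elaborated 'struct __res_state' form is required; the helper and field use below are illustrative only:

    #include <resolv.h>   // struct __res_state, MAXNS

    // Hypothetical helper: spelling out the 'struct' tag compiles against
    // both older glibc headers and glibc 2.26.
    static int resolver_ns_count(void *state) {
      struct __res_state *statp = (struct __res_state *)state;
      return statp->nscount;  // number of configured nameservers
    }
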
diff -pruN 1:3.8.1-27/debian/changelog 1:3.8.1-27ubuntu1/debian/changelog
--- 1:3.8.1-27/debian/changelog	2017-12-23 22:00:21.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/changelog	2017-12-26 17:26:32.000000000 +0000
@@ -1,3 +1,18 @@
+llvm-toolchain-3.8 (1:3.8.1-27ubuntu1) bionic; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Fix sanitizer build failure with glibc-2.26.
+    - Link with --no-keep-files-mapped --no-map-whole-files when using
+      gold.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - build using gold on arm64 and s390x. For backports, arm64 might still
+      need the BFD linker, and building with only one or two processes in
+      parallel.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Tue, 26 Dec 2017 18:26:32 +0100
+
 llvm-toolchain-3.8 (1:3.8.1-27) unstable; urgency=medium
 
   * Really fix the latomic link on armel (called armv7l on cmake)
@@ -12,6 +27,21 @@ llvm-toolchain-3.8 (1:3.8.1-26) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Fri, 22 Dec 2017 09:48:48 +0100
 
+llvm-toolchain-3.8 (1:3.8.1-25ubuntu1) bionic; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Fix sanitizer build failure with glibc-2.26.
+    - Link with --no-keep-files-mapped --no-map-whole-files when using
+      gold.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - build using gold on arm64 and s390x. For backports, arm64 might still
+      need the BFD linker, and building with only one or two processes in
+      parallel.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Fri, 27 Oct 2017 13:47:23 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-25) unstable; urgency=medium
 
   * Fix the detection of gcc (Closes: #853523)
@@ -35,6 +65,59 @@ llvm-toolchain-3.8 (1:3.8.1-25) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Sat, 21 Oct 2017 13:43:31 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu7) artful; urgency=medium
+
+  * Do not build with -g1 on armhf, use NDEBUG.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Mon, 25 Sep 2017 10:23:31 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu6) artful; urgency=medium
+
+  * Build with -g1 instead of -g on armhf and s390x as well.
+
+ -- Matthias Klose <doko@ubuntu.com>  Fri, 15 Sep 2017 20:15:28 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu5) artful; urgency=medium
+
+  * Try to build with -g1 instead of -g on amd64 as well.
+
+ -- Matthias Klose <doko@ubuntu.com>  Thu, 14 Sep 2017 22:10:21 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu4) artful; urgency=medium
+
+  * build using gold on arm64 and s390x. For backports, arm64 might still
+    need the BFD linker, and building with only one or two processes in
+    parallel.
+  * On arm64 and ppc64el, build with -g1 instead of -g.
+  * Set CMAKE_CXX_FLAGS_RELWITHDEBINFO and pass opt_flags.
+  * Fix sanitizer build failure with glibc-2.26.
+  * Link with --no-keep-files-mapped --no-map-whole-files when using
+    gold.
+  * Fix build failure with gcc-7.
+
+ -- Matthias Klose <doko@ubuntu.com>  Thu, 14 Sep 2017 10:17:17 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu3) artful; urgency=medium
+
+  * Enable ocaml on all arches.
+
+ -- Dimitri John Ledkov <xnox@ubuntu.com>  Sat, 15 Jul 2017 14:21:04 +0100
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu2) artful; urgency=high
+
+  * No change rebuild against ocaml 4.04.
+
+ -- Dimitri John Ledkov <xnox@ubuntu.com>  Mon, 03 Jul 2017 15:21:03 +0100
+
+llvm-toolchain-3.8 (1:3.8.1-24ubuntu1) artful; urgency=medium
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Fri, 02 Jun 2017 15:38:09 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-24) unstable; urgency=medium
 
   * Team upload
@@ -46,6 +129,28 @@ llvm-toolchain-3.8 (1:3.8.1-24) unstable
 
  -- Gianfranco Costamagna <locutusofborg@debian.org>  Fri, 02 Jun 2017 15:11:29 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-23ubuntu3) artful; urgency=medium
+
+  * Fix R_AARCH64_MOVW_UABS_G3 relocation, thanks Yichao Yu and
+    Edmund Grimley Evans (Closes: #862360)
+
+ -- Graham Inggs <ginggs@ubuntu.com>  Fri, 12 May 2017 13:55:28 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-23ubuntu2) artful; urgency=medium
+
+  * Disable sanitizer testsuite on arm64, it hangs the buildd.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Sat, 29 Apr 2017 14:07:07 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-23ubuntu1) artful; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Wed, 26 Apr 2017 10:39:35 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-23) unstable; urgency=medium
 
   * Oups, same player try again (wrong package name, sorry)
@@ -108,12 +213,30 @@ llvm-toolchain-3.8 (1:3.8.1-19~exp1) exp
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Sun, 19 Mar 2017 22:09:04 +0100
 
+llvm-toolchain-3.8 (1:3.8.1-18ubuntu1) zesty; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Mon, 13 Mar 2017 17:52:28 +0100
+
 llvm-toolchain-3.8 (1:3.8.1-18) unstable; urgency=medium
 
   * Fix the broken liblldb-3.8-dev links (Closes: #856864)
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Sun, 12 Mar 2017 09:49:18 +0100
 
+llvm-toolchain-3.8 (1:3.8.1-17ubuntu1) zesty; urgency=medium
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Bhavani Shankar <bhavi@ubuntu.com>  Tue, 10 Jan 2017 12:56:13 +0530
+
 llvm-toolchain-3.8 (1:3.8.1-17) unstable; urgency=medium
 
   * Disable NEON generation on armhf (Closes: #841474, #842142)
@@ -127,6 +250,15 @@ llvm-toolchain-3.8 (1:3.8.1-17) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Mon, 09 Jan 2017 21:24:17 +0100
 
+llvm-toolchain-3.8 (1:3.8.1-16ubuntu1) zesty; urgency=medium
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Fri, 25 Nov 2016 17:48:03 +0100
+
 llvm-toolchain-3.8 (1:3.8.1-16) unstable; urgency=medium
 
   * Fix segfaults in the memory sanitizers (Closes: #842642)
@@ -135,6 +267,15 @@ llvm-toolchain-3.8 (1:3.8.1-16) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Fri, 11 Nov 2016 16:14:31 +0100
 
+llvm-toolchain-3.8 (1:3.8.1-15ubuntu1) zesty; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Thu, 10 Nov 2016 16:17:45 +0100
+
 llvm-toolchain-3.8 (1:3.8.1-15) unstable; urgency=medium
 
   * Limit build-deps on g++-multilib where it is available:
@@ -162,6 +303,15 @@ llvm-toolchain-3.8 (1:3.8.1-13) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Tue, 25 Oct 2016 10:53:49 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-12ubuntu1) yakkety; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Tue, 13 Sep 2016 15:40:48 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-12) unstable; urgency=medium
 
   * Fix a ftbfs on lldb on arm64 (Closes: #836591) (Closes: #836335)
@@ -189,6 +339,15 @@ llvm-toolchain-3.8 (1:3.8.1-10) unstable
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Sat, 27 Aug 2016 17:36:06 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-9ubuntu1) yakkety; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386/amd64, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Mon, 29 Aug 2016 08:56:55 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-9) unstable; urgency=medium
 
   * Generate manpages for lli, lldb-mi & git-clang-format
@@ -201,6 +360,21 @@ llvm-toolchain-3.8 (1:3.8.1-9) unstable;
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Mon, 08 Aug 2016 18:50:13 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-8ubuntu2) yakkety; urgency=medium
+
+  * Ignore test results on amd64 too, because AVX512 breaks also here.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Thu, 11 Aug 2016 15:13:54 +0200
+
+llvm-toolchain-3.8 (1:3.8.1-8ubuntu1) yakkety; urgency=low
+
+  * Merge from Debian unstable.  Remaining changes:
+    - Ignore test results on i386, as done on any other architecture.
+    - drop-avx512-from-skylake.diff: Don't enable AVX512 on Skylake, as it's
+      a server cpu feature and breaks llvmpipe on workstations.
+
+ -- Gianfranco Costamagna <locutusofborg@debian.org>  Thu, 11 Aug 2016 11:57:12 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-8) unstable; urgency=medium
 
   * Disable the usage of ld gold on powerpc (Closes: #833583)
@@ -214,6 +388,13 @@ llvm-toolchain-3.8 (1:3.8.1-8) unstable;
 
  -- Sylvestre Ledru <sylvestre@debian.org>  Sun, 07 Aug 2016 14:10:09 +0200
 
+llvm-toolchain-3.8 (1:3.8.1-7ubuntu2) yakkety; urgency=medium
+
+  * Ignore test results on i386, as done on any other architecture.
+  * Don't use gold on powerpc, not supporting secure-plt.
+
+ -- Matthias Klose <doko@ubuntu.com>  Sun, 07 Aug 2016 12:45:46 +0200
+
 llvm-toolchain-3.8 (1:3.8.1-7) unstable; urgency=medium
 
   * Fix the detection of gcc. This broke the build on the latest unstable
diff -pruN 1:3.8.1-27/debian/control 1:3.8.1-27ubuntu1/debian/control
--- 1:3.8.1-27/debian/control	2017-10-21 13:28:48.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/control	2017-12-24 22:52:12.000000000 +0000
@@ -1,7 +1,8 @@
 Source: llvm-toolchain-3.8
 Section: devel
 Priority: optional
-Maintainer: LLVM Packaging Team <pkg-llvm-team@lists.alioth.debian.org>
+Maintainer: Ubuntu Developers <ubuntu-devel-discuss@lists.ubuntu.com>
+XSBC-Original-Maintainer: LLVM Packaging Team <pkg-llvm-team@lists.alioth.debian.org>
 Uploaders: Sylvestre Ledru <sylvestre@debian.org>
 Build-Depends: debhelper (>= 9.0), flex, bison, dejagnu, tcl, expect,
     cmake, perl, libtool, chrpath, texinfo, sharutils, libffi-dev (>= 3.0.9),
diff -pruN 1:3.8.1-27/debian/patches/drop-avx512-from-skylake.diff 1:3.8.1-27ubuntu1/debian/patches/drop-avx512-from-skylake.diff
--- 1:3.8.1-27/debian/patches/drop-avx512-from-skylake.diff	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/patches/drop-avx512-from-skylake.diff	2017-06-02 13:38:05.000000000 +0000
@@ -0,0 +1,16 @@
+--- a/lib/Target/X86/X86.td
++++ b/lib/Target/X86/X86.td
+@@ -487,13 +487,7 @@ def : KnightsLandingProc<"knl">;
+ // FIXME: define SKX model
+ class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel, [
+   FeatureMMX,
+-  FeatureAVX512,
+   FeatureFXSR,
+-  FeatureCDI,
+-  FeatureDQI,
+-  FeatureBWI,
+-  FeatureVLX,
+-  FeaturePKU,
+   FeatureCMPXCHG16B,
+   FeatureSlowBTMem,
+   FeaturePOPCNT,
diff -pruN 1:3.8.1-27/debian/patches/pr81066.diff 1:3.8.1-27ubuntu1/debian/patches/pr81066.diff
--- 1:3.8.1-27/debian/patches/pr81066.diff	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/patches/pr81066.diff	2017-09-14 08:18:49.000000000 +0000
@@ -0,0 +1,89 @@
+# DP; Fix PR sanitizer/81066, taken from the gcc-7-branch.
+
+libsanitizer/
+
+2017-07-17  Jakub Jelinek  <jakub@redhat.com>
+
+	Backported from mainline
+	2017-07-14  Jakub Jelinek  <jakub@redhat.com>
+
+	PR sanitizer/81066
+	* sanitizer_common/sanitizer_linux.h: Cherry-pick upstream r307969.
+	* sanitizer_common/sanitizer_linux.cc: Likewise.
+	* sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc: Likewise.
+	* tsan/tsan_platform_linux.cc: Likewise.
+
+Index: b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
+===================================================================
+--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
++++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
+@@ -565,8 +565,7 @@ uptr internal_prctl(int option, uptr arg
+ }
+ #endif
+ 
+-uptr internal_sigaltstack(const struct sigaltstack *ss,
+-                         struct sigaltstack *oss) {
++uptr internal_sigaltstack(const void *ss, void *oss) {
+   return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
+ }
+ 
+Index: b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+===================================================================
+--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
++++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
+@@ -21,7 +21,6 @@
+ #include "sanitizer_platform_limits_posix.h"
+ 
+ struct link_map;  // Opaque type returned by dlopen().
+-struct sigaltstack;
+ 
+ namespace __sanitizer {
+ // Dirent structure for getdents(). Note that this structure is different from
+@@ -30,8 +29,7 @@ struct linux_dirent;
+ 
+ // Syscall wrappers.
+ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
+-uptr internal_sigaltstack(const struct sigaltstack* ss,
+-                          struct sigaltstack* oss);
++uptr internal_sigaltstack(const void* ss, void* oss);
+ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+     __sanitizer_sigset_t *oldset);
+ void internal_sigfillset(__sanitizer_sigset_t *set);
+Index: b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+===================================================================
+--- a/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
++++ b/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+@@ -272,7 +272,7 @@ static int TracerThread(void* argument)
+ 
+   // Alternate stack for signal handling.
+   InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
+-  struct sigaltstack handler_stack;
++  stack_t handler_stack;
+   internal_memset(&handler_stack, 0, sizeof(handler_stack));
+   handler_stack.ss_sp = handler_stack_memory.data();
+   handler_stack.ss_size = kHandlerStackSize;
+Index: b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
+===================================================================
+--- a/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
++++ b/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
+@@ -311,7 +311,7 @@ bool IsGlobalVar(uptr addr) {
+ int ExtractResolvFDs(void *state, int *fds, int nfd) {
+ #if SANITIZER_LINUX && !SANITIZER_ANDROID
+   int cnt = 0;
+-  __res_state *statp = (__res_state*)state;
++  struct __res_state *statp = (struct __res_state*)state;
+   for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+     if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+       fds[cnt++] = statp->_u._ext.nssocks[i];
+Index: b/compiler-rt/lib/asan/asan_linux.cc
+===================================================================
+--- a/compiler-rt/lib/asan/asan_linux.cc
++++ b/compiler-rt/lib/asan/asan_linux.cc
+@@ -31,6 +31,7 @@
+ #include <dlfcn.h>
+ #include <fcntl.h>
+ #include <pthread.h>
++#include <signal.h>
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <unwind.h>
diff -pruN 1:3.8.1-27/debian/patches/series 1:3.8.1-27ubuntu1/debian/patches/series
--- 1:3.8.1-27/debian/patches/series	2017-12-22 09:24:04.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/patches/series	2017-12-26 17:26:32.000000000 +0000
@@ -61,3 +61,5 @@ fix-R_AARCH64_MOVW_UABS_G3-relocation.pa
 asan-48bit-VMA-aarch64.patch
 ftfbs-gcc.diff
 test-keep-alive.diff
+drop-avx512-from-skylake.diff
+pr81066.diff
diff -pruN 1:3.8.1-27/debian/rules 1:3.8.1-27ubuntu1/debian/rules
--- 1:3.8.1-27/debian/rules	2017-10-21 13:28:48.000000000 +0000
+++ 1:3.8.1-27ubuntu1/debian/rules	2017-12-24 22:52:12.000000000 +0000
@@ -16,6 +16,9 @@ SONAME_EXT      := 1
 DEBIAN_REVISION := $(shell dpkg-parsechangelog |  sed -rne "s,^Version: 1:([0-9.]+)(~|-)(.*),\3,p")
 ifneq (,$(filter parallel=%,$(subst $(COMMA), ,$(DEB_BUILD_OPTIONS))))
   NJOBS := -j $(subst parallel=,,$(filter parallel=%,$(subst $(COMMA), ,$(DEB_BUILD_OPTIONS))))
+  #ifneq (,$(filter $(DEB_HOST_ARCH),arm64))
+  #  NJOBS := -j 2
+  #endif
 endif
 
 VENDOR=$(shell lsb_release -is)
@@ -95,12 +98,12 @@ else
 	control_vars = '-Vdep:devlibs=libstdc++6-$(GCC_VERSION)-dev'
 endif
 
-BINUTILS_GOLD_ARCHS := amd64 armhf i386 powerpcspe ppc64 ppc64el sparc sparc64 x32
+BINUTILS_GOLD_ARCHS := amd64 arm64 armhf i386 powerpcspe ppc64 ppc64el sparc sparc64 x32 s390x
 ifeq ($(shell dpkg --compare-versions $(shell dpkg-query -W -f '$${Version}' binutils) ge 2.23.1-1~exp3 ; echo $$?),0)
 ifneq (,$(filter $(DEB_HOST_ARCH),$(BINUTILS_GOLD_ARCHS)))
 # -fused-ld=gold enables the gold linker (but is not supported by all archs / distro)
-	LDFLAGS_EXTRA += -fuse-ld=gold
-	CXXFLAGS_EXTRA += -Wl,-fuse-ld=gold
+	LDFLAGS_EXTRA += -fuse-ld=gold --no-keep-files-mapped --no-map-whole-files
+	CXXFLAGS_EXTRA += -Wl,-fuse-ld=gold -Wl,--no-keep-files-mapped -Wl,--no-map-whole-files
 	CMAKE_EXTRA += -DLLVM_BINUTILS_INCDIR=/usr/include/
 endif
 endif
@@ -539,7 +542,7 @@ override_dh_installdeb:
 
 ifeq (,$(filter nocheck, $(DEB_BUILD_OPTIONS)))
 # List of the archs we know we have 100 % tests working
-ARCH_LLVM_TEST_OK := i386 amd64
+ARCH_LLVM_TEST_OK := #i386 amd64
 
 override_dh_auto_test:
 
diff -pruN 1:3.8.1-27/lib/Target/X86/X86.td 1:3.8.1-27ubuntu1/lib/Target/X86/X86.td
--- 1:3.8.1-27/lib/Target/X86/X86.td	2015-12-29 07:03:27.000000000 +0000
+++ 1:3.8.1-27ubuntu1/lib/Target/X86/X86.td	2017-12-27 02:41:48.000000000 +0000
@@ -487,13 +487,7 @@ def : KnightsLandingProc<"knl">;
 // FIXME: define SKX model
 class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel, [
   FeatureMMX,
-  FeatureAVX512,
   FeatureFXSR,
-  FeatureCDI,
-  FeatureDQI,
-  FeatureBWI,
-  FeatureVLX,
-  FeaturePKU,
   FeatureCMPXCHG16B,
   FeatureSlowBTMem,
   FeaturePOPCNT,
diff -pruN 1:3.8.1-27/.pc/applied-patches 1:3.8.1-27ubuntu1/.pc/applied-patches
--- 1:3.8.1-27/.pc/applied-patches	2017-12-27 02:41:37.529141075 +0000
+++ 1:3.8.1-27ubuntu1/.pc/applied-patches	2017-12-27 02:41:48.449447519 +0000
@@ -60,3 +60,5 @@ fix-R_AARCH64_MOVW_UABS_G3-relocation.pa
 asan-48bit-VMA-aarch64.patch
 ftfbs-gcc.diff
 test-keep-alive.diff
+drop-avx512-from-skylake.diff
+pr81066.diff
diff -pruN 1:3.8.1-27/.pc/drop-avx512-from-skylake.diff/lib/Target/X86/X86.td 1:3.8.1-27ubuntu1/.pc/drop-avx512-from-skylake.diff/lib/Target/X86/X86.td
--- 1:3.8.1-27/.pc/drop-avx512-from-skylake.diff/lib/Target/X86/X86.td	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/drop-avx512-from-skylake.diff/lib/Target/X86/X86.td	2015-12-29 07:03:27.000000000 +0000
@@ -0,0 +1,787 @@
+//===-- X86.td - Target definition file for the Intel X86 --*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a target description file for the Intel i386 architecture, referred
+// to here as the "X86" architecture.
+//
+//===----------------------------------------------------------------------===//
+
+// Get the target-independent interfaces which we are implementing...
+//
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// X86 Subtarget state
+//
+
+def Mode64Bit : SubtargetFeature<"64bit-mode", "In64BitMode", "true",
+                                  "64-bit mode (x86_64)">;
+def Mode32Bit : SubtargetFeature<"32bit-mode", "In32BitMode", "true",
+                                  "32-bit mode (80386)">;
+def Mode16Bit : SubtargetFeature<"16bit-mode", "In16BitMode", "true",
+                                  "16-bit mode (i8086)">;
+
+//===----------------------------------------------------------------------===//
+// X86 Subtarget features
+//===----------------------------------------------------------------------===//
+
+def FeatureCMOV    : SubtargetFeature<"cmov","HasCMov", "true",
+                                      "Enable conditional move instructions">;
+
+def FeaturePOPCNT   : SubtargetFeature<"popcnt", "HasPOPCNT", "true",
+                                       "Support POPCNT instruction">;
+
+def FeatureFXSR    : SubtargetFeature<"fxsr", "HasFXSR", "true",
+                                      "Support fxsave/fxrestore instructions">;
+
+def FeatureXSAVE   : SubtargetFeature<"xsave", "HasXSAVE", "true",
+                                       "Support xsave instructions">;
+
+def FeatureXSAVEOPT: SubtargetFeature<"xsaveopt", "HasXSAVEOPT", "true",
+                                       "Support xsaveopt instructions">;
+
+def FeatureXSAVEC  : SubtargetFeature<"xsavec", "HasXSAVEC", "true",
+                                       "Support xsavec instructions">;
+
+def FeatureXSAVES  : SubtargetFeature<"xsaves", "HasXSAVES", "true",
+                                       "Support xsaves instructions">;
+
+def FeatureSSE1    : SubtargetFeature<"sse", "X86SSELevel", "SSE1",
+                                      "Enable SSE instructions",
+                                      // SSE codegen depends on cmovs, and all
+                                      // SSE1+ processors support them.
+                                      [FeatureCMOV]>;
+def FeatureSSE2    : SubtargetFeature<"sse2", "X86SSELevel", "SSE2",
+                                      "Enable SSE2 instructions",
+                                      [FeatureSSE1]>;
+def FeatureSSE3    : SubtargetFeature<"sse3", "X86SSELevel", "SSE3",
+                                      "Enable SSE3 instructions",
+                                      [FeatureSSE2]>;
+def FeatureSSSE3   : SubtargetFeature<"ssse3", "X86SSELevel", "SSSE3",
+                                      "Enable SSSE3 instructions",
+                                      [FeatureSSE3]>;
+def FeatureSSE41   : SubtargetFeature<"sse4.1", "X86SSELevel", "SSE41",
+                                      "Enable SSE 4.1 instructions",
+                                      [FeatureSSSE3]>;
+def FeatureSSE42   : SubtargetFeature<"sse4.2", "X86SSELevel", "SSE42",
+                                      "Enable SSE 4.2 instructions",
+                                      [FeatureSSE41]>;
+// The MMX subtarget feature is separate from the rest of the SSE features
+// because it's important (for odd compatibility reasons) to be able to
+// turn it off explicitly while allowing SSE+ to be on.
+def FeatureMMX     : SubtargetFeature<"mmx","X863DNowLevel", "MMX",
+                                      "Enable MMX instructions">;
+def Feature3DNow   : SubtargetFeature<"3dnow", "X863DNowLevel", "ThreeDNow",
+                                      "Enable 3DNow! instructions",
+                                      [FeatureMMX]>;
+def Feature3DNowA  : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
+                                      "Enable 3DNow! Athlon instructions",
+                                      [Feature3DNow]>;
+// All x86-64 hardware has SSE2, but we don't mark SSE2 as an implied
+// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
+// without disabling 64-bit mode.
+def Feature64Bit   : SubtargetFeature<"64bit", "HasX86_64", "true",
+                                      "Support 64-bit instructions",
+                                      [FeatureCMOV]>;
+def FeatureCMPXCHG16B : SubtargetFeature<"cx16", "HasCmpxchg16b", "true",
+                                      "64-bit with cmpxchg16b",
+                                      [Feature64Bit]>;
+def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
+                                       "Bit testing of memory is slow">;
+def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
+                                       "SHLD instruction is slow">;
+// FIXME: This should not apply to CPUs that do not have SSE.
+def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
+                                "IsUAMem16Slow", "true",
+                                "Slow unaligned 16-byte memory access">;
+def FeatureSlowUAMem32 : SubtargetFeature<"slow-unaligned-mem-32",
+                                "IsUAMem32Slow", "true",
+                                "Slow unaligned 32-byte memory access">;
+def FeatureSSE4A   : SubtargetFeature<"sse4a", "HasSSE4A", "true",
+                                      "Support SSE 4a instructions",
+                                      [FeatureSSE3]>;
+
+def FeatureAVX     : SubtargetFeature<"avx", "X86SSELevel", "AVX",
+                                      "Enable AVX instructions",
+                                      [FeatureSSE42]>;
+def FeatureAVX2    : SubtargetFeature<"avx2", "X86SSELevel", "AVX2",
+                                      "Enable AVX2 instructions",
+                                      [FeatureAVX]>;
+def FeatureAVX512   : SubtargetFeature<"avx512f", "X86SSELevel", "AVX512F",
+                                      "Enable AVX-512 instructions",
+                                      [FeatureAVX2]>;
+def FeatureERI      : SubtargetFeature<"avx512er", "HasERI", "true",
+                      "Enable AVX-512 Exponential and Reciprocal Instructions",
+                                      [FeatureAVX512]>;
+def FeatureCDI      : SubtargetFeature<"avx512cd", "HasCDI", "true",
+                      "Enable AVX-512 Conflict Detection Instructions",
+                                      [FeatureAVX512]>;
+def FeaturePFI      : SubtargetFeature<"avx512pf", "HasPFI", "true",
+                      "Enable AVX-512 PreFetch Instructions",
+                                      [FeatureAVX512]>;
+def FeatureDQI     : SubtargetFeature<"avx512dq", "HasDQI", "true",
+                      "Enable AVX-512 Doubleword and Quadword Instructions",
+                                      [FeatureAVX512]>;
+def FeatureBWI     : SubtargetFeature<"avx512bw", "HasBWI", "true",
+                      "Enable AVX-512 Byte and Word Instructions",
+                                      [FeatureAVX512]>;
+def FeatureVLX     : SubtargetFeature<"avx512vl", "HasVLX", "true",
+                      "Enable AVX-512 Vector Length eXtensions",
+                                      [FeatureAVX512]>;
+def FeaturePKU   : SubtargetFeature<"pku", "HasPKU", "true",
+                      "Enable protection keys">;
+def FeaturePCLMUL  : SubtargetFeature<"pclmul", "HasPCLMUL", "true",
+                         "Enable packed carry-less multiplication instructions",
+                               [FeatureSSE2]>;
+def FeatureFMA     : SubtargetFeature<"fma", "HasFMA", "true",
+                                      "Enable three-operand fused multiple-add",
+                                      [FeatureAVX]>;
+def FeatureFMA4    : SubtargetFeature<"fma4", "HasFMA4", "true",
+                                      "Enable four-operand fused multiple-add",
+                                      [FeatureAVX, FeatureSSE4A]>;
+def FeatureXOP     : SubtargetFeature<"xop", "HasXOP", "true",
+                                      "Enable XOP instructions",
+                                      [FeatureFMA4]>;
+def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem",
+                                          "HasSSEUnalignedMem", "true",
+                      "Allow unaligned memory operands with SSE instructions">;
+def FeatureAES     : SubtargetFeature<"aes", "HasAES", "true",
+                                      "Enable AES instructions",
+                                      [FeatureSSE2]>;
+def FeatureTBM     : SubtargetFeature<"tbm", "HasTBM", "true",
+                                      "Enable TBM instructions">;
+def FeatureMOVBE   : SubtargetFeature<"movbe", "HasMOVBE", "true",
+                                      "Support MOVBE instruction">;
+def FeatureRDRAND  : SubtargetFeature<"rdrnd", "HasRDRAND", "true",
+                                      "Support RDRAND instruction">;
+def FeatureF16C    : SubtargetFeature<"f16c", "HasF16C", "true",
+                       "Support 16-bit floating point conversion instructions",
+                       [FeatureAVX]>;
+def FeatureFSGSBase : SubtargetFeature<"fsgsbase", "HasFSGSBase", "true",
+                                       "Support FS/GS Base instructions">;
+def FeatureLZCNT   : SubtargetFeature<"lzcnt", "HasLZCNT", "true",
+                                      "Support LZCNT instruction">;
+def FeatureBMI     : SubtargetFeature<"bmi", "HasBMI", "true",
+                                      "Support BMI instructions">;
+def FeatureBMI2    : SubtargetFeature<"bmi2", "HasBMI2", "true",
+                                      "Support BMI2 instructions">;
+def FeatureRTM     : SubtargetFeature<"rtm", "HasRTM", "true",
+                                      "Support RTM instructions">;
+def FeatureHLE     : SubtargetFeature<"hle", "HasHLE", "true",
+                                      "Support HLE">;
+def FeatureADX     : SubtargetFeature<"adx", "HasADX", "true",
+                                      "Support ADX instructions">;
+def FeatureSHA     : SubtargetFeature<"sha", "HasSHA", "true",
+                                      "Enable SHA instructions",
+                                      [FeatureSSE2]>;
+def FeaturePRFCHW  : SubtargetFeature<"prfchw", "HasPRFCHW", "true",
+                                      "Support PRFCHW instructions">;
+def FeatureRDSEED  : SubtargetFeature<"rdseed", "HasRDSEED", "true",
+                                      "Support RDSEED instruction">;
+def FeatureLAHFSAHF : SubtargetFeature<"sahf", "HasLAHFSAHF", "true",
+                                       "Support LAHF and SAHF instructions">;
+def FeatureMPX     : SubtargetFeature<"mpx", "HasMPX", "true",
+                                      "Support MPX instructions">;
+def FeatureLEAForSP : SubtargetFeature<"lea-sp", "UseLeaForSP", "true",
+                                     "Use LEA for adjusting the stack pointer">;
+def FeatureSlowDivide32 : SubtargetFeature<"idivl-to-divb",
+                                     "HasSlowDivide32", "true",
+                                     "Use 8-bit divide for positive values less than 256">;
+def FeatureSlowDivide64 : SubtargetFeature<"idivq-to-divw",
+                                     "HasSlowDivide64", "true",
+                                     "Use 16-bit divide for positive values less than 65536">;
+def FeaturePadShortFunctions : SubtargetFeature<"pad-short-functions",
+                                     "PadShortFunctions", "true",
+                                     "Pad short functions">;
+// TODO: This feature ought to be renamed.
+// What it really refers to are CPUs for which certain instructions
+// (which ones besides the example below?) are microcoded.
+// The best examples of this are the memory forms of CALL and PUSH
+// instructions, which should be avoided in favor of a MOV + register CALL/PUSH.
+def FeatureCallRegIndirect : SubtargetFeature<"call-reg-indirect",
+                                     "CallRegIndirect", "true",
+                                     "Call register indirect">;
+def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true",
+                                   "LEA instruction needs inputs at AG stage">;
+def FeatureSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true",
+                                   "LEA instruction with certain arguments is slow">;
+def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true",
+                                   "INC and DEC instructions are slower than ADD and SUB">;
+def FeatureSoftFloat
+    : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
+                       "Use software floating point features.">;
+
+//===----------------------------------------------------------------------===//
+// X86 processors supported.
+//===----------------------------------------------------------------------===//
+
+include "X86Schedule.td"
+
+def ProcIntelAtom : SubtargetFeature<"atom", "X86ProcFamily", "IntelAtom",
+                    "Intel Atom processors">;
+def ProcIntelSLM  : SubtargetFeature<"slm", "X86ProcFamily", "IntelSLM",
+                    "Intel Silvermont processors">;
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : ProcessorModel<Name, GenericModel, Features>;
+
+def : Proc<"generic",         [FeatureSlowUAMem16]>;
+def : Proc<"i386",            [FeatureSlowUAMem16]>;
+def : Proc<"i486",            [FeatureSlowUAMem16]>;
+def : Proc<"i586",            [FeatureSlowUAMem16]>;
+def : Proc<"pentium",         [FeatureSlowUAMem16]>;
+def : Proc<"pentium-mmx",     [FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"i686",            [FeatureSlowUAMem16]>;
+def : Proc<"pentiumpro",      [FeatureSlowUAMem16, FeatureCMOV]>;
+def : Proc<"pentium2",        [FeatureSlowUAMem16, FeatureMMX, FeatureCMOV,
+                               FeatureFXSR]>;
+def : Proc<"pentium3",        [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
+                               FeatureFXSR]>;
+def : Proc<"pentium3m",       [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1,
+                               FeatureFXSR, FeatureSlowBTMem]>;
+def : Proc<"pentium-m",       [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
+                               FeatureFXSR, FeatureSlowBTMem]>;
+def : Proc<"pentium4",        [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
+                               FeatureFXSR]>;
+def : Proc<"pentium4m",       [FeatureSlowUAMem16, FeatureMMX, FeatureSSE2,
+                               FeatureFXSR, FeatureSlowBTMem]>;
+
+// Intel Core Duo.
+def : ProcessorModel<"yonah", SandyBridgeModel,
+                     [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
+                      FeatureSlowBTMem]>;
+
+// NetBurst.
+def : Proc<"prescott",
+           [FeatureSlowUAMem16, FeatureMMX, FeatureSSE3, FeatureFXSR,
+            FeatureSlowBTMem]>;
+def : Proc<"nocona", [
+  FeatureSlowUAMem16,
+  FeatureMMX,
+  FeatureSSE3,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem
+]>;
+
+// Intel Core 2 Solo/Duo.
+def : ProcessorModel<"core2", SandyBridgeModel, [
+  FeatureSlowUAMem16,
+  FeatureMMX,
+  FeatureSSSE3,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeatureLAHFSAHF
+]>;
+def : ProcessorModel<"penryn", SandyBridgeModel, [
+  FeatureSlowUAMem16,
+  FeatureMMX,
+  FeatureSSE41,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeatureLAHFSAHF
+]>;
+
+// Atom CPUs.
+class BonnellProc<string Name> : ProcessorModel<Name, AtomModel, [
+  ProcIntelAtom,
+  FeatureSlowUAMem16,
+  FeatureMMX,
+  FeatureSSSE3,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureMOVBE,
+  FeatureSlowBTMem,
+  FeatureLEAForSP,
+  FeatureSlowDivide32,
+  FeatureSlowDivide64,
+  FeatureCallRegIndirect,
+  FeatureLEAUsesAG,
+  FeaturePadShortFunctions,
+  FeatureLAHFSAHF
+]>;
+def : BonnellProc<"bonnell">;
+def : BonnellProc<"atom">; // Pin the generic name to the baseline.
+
+class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
+  ProcIntelSLM,
+  FeatureMMX,
+  FeatureSSE42,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureMOVBE,
+  FeaturePOPCNT,
+  FeaturePCLMUL,
+  FeatureAES,
+  FeatureSlowDivide64,
+  FeatureCallRegIndirect,
+  FeaturePRFCHW,
+  FeatureSlowLEA,
+  FeatureSlowIncDec,
+  FeatureSlowBTMem,
+  FeatureLAHFSAHF
+]>;
+def : SilvermontProc<"silvermont">;
+def : SilvermontProc<"slm">; // Legacy alias.
+
+// "Arrandale" along with corei3 and corei5
+class NehalemProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+  FeatureMMX,
+  FeatureSSE42,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeaturePOPCNT,
+  FeatureLAHFSAHF
+]>;
+def : NehalemProc<"nehalem">;
+def : NehalemProc<"corei7">;
+
+// Westmere is a similar machine to nehalem with some additional features.
+// Westmere is the corei3/i5/i7 path from nehalem to sandybridge
+class WestmereProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+  FeatureMMX,
+  FeatureSSE42,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureLAHFSAHF
+]>;
+def : WestmereProc<"westmere">;
+
+// SSE is not listed here since llvm treats AVX as a reimplementation of SSE,
+// rather than a superset.
+class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeatureSlowUAMem32,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureLAHFSAHF
+]>;
+def : SandyBridgeProc<"sandybridge">;
+def : SandyBridgeProc<"corei7-avx">; // Legacy alias.
+
+class IvyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeatureSlowUAMem32,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureRDRAND,
+  FeatureF16C,
+  FeatureFSGSBase,
+  FeatureLAHFSAHF
+]>;
+def : IvyBridgeProc<"ivybridge">;
+def : IvyBridgeProc<"core-avx-i">; // Legacy alias.
+
+class HaswellProc<string Name> : ProcessorModel<Name, HaswellModel, [
+  FeatureMMX,
+  FeatureAVX2,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureRDRAND,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureF16C,
+  FeatureFSGSBase,
+  FeatureMOVBE,
+  FeatureLZCNT,
+  FeatureBMI,
+  FeatureBMI2,
+  FeatureFMA,
+  FeatureRTM,
+  FeatureHLE,
+  FeatureSlowIncDec,
+  FeatureLAHFSAHF
+]>;
+def : HaswellProc<"haswell">;
+def : HaswellProc<"core-avx2">; // Legacy alias.
+
+class BroadwellProc<string Name> : ProcessorModel<Name, HaswellModel, [
+  FeatureMMX,
+  FeatureAVX2,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureRDRAND,
+  FeatureF16C,
+  FeatureFSGSBase,
+  FeatureMOVBE,
+  FeatureLZCNT,
+  FeatureBMI,
+  FeatureBMI2,
+  FeatureFMA,
+  FeatureRTM,
+  FeatureHLE,
+  FeatureADX,
+  FeatureRDSEED,
+  FeatureSlowIncDec,
+  FeatureLAHFSAHF
+]>;
+def : BroadwellProc<"broadwell">;
+
+// FIXME: define KNL model
+class KnightsLandingProc<string Name> : ProcessorModel<Name, HaswellModel, [
+  FeatureMMX,
+  FeatureAVX512,
+  FeatureFXSR,
+  FeatureERI,
+  FeatureCDI,
+  FeaturePFI,
+  FeatureCMPXCHG16B,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureRDRAND,
+  FeatureF16C,
+  FeatureFSGSBase,
+  FeatureMOVBE,
+  FeatureLZCNT,
+  FeatureBMI,
+  FeatureBMI2,
+  FeatureFMA,
+  FeatureRTM,
+  FeatureHLE,
+  FeatureSlowIncDec,
+  FeatureMPX,
+  FeatureLAHFSAHF
+]>;
+def : KnightsLandingProc<"knl">;
+
+// FIXME: define SKX model
+class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel, [
+  FeatureMMX,
+  FeatureAVX512,
+  FeatureFXSR,
+  FeatureCDI,
+  FeatureDQI,
+  FeatureBWI,
+  FeatureVLX,
+  FeaturePKU,
+  FeatureCMPXCHG16B,
+  FeatureSlowBTMem,
+  FeaturePOPCNT,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureRDRAND,
+  FeatureF16C,
+  FeatureFSGSBase,
+  FeatureMOVBE,
+  FeatureLZCNT,
+  FeatureBMI,
+  FeatureBMI2,
+  FeatureFMA,
+  FeatureRTM,
+  FeatureHLE,
+  FeatureADX,
+  FeatureRDSEED,
+  FeatureSlowIncDec,
+  FeatureMPX,
+  FeatureXSAVEC,
+  FeatureXSAVES,
+  FeatureLAHFSAHF
+]>;
+def : SkylakeProc<"skylake">;
+def : SkylakeProc<"skx">; // Legacy alias.
+
+
+// AMD CPUs.
+
+def : Proc<"k6",              [FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"k6-2",            [FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"k6-3",            [FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"athlon",          [FeatureSlowUAMem16, Feature3DNowA,
+                               FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-tbird",    [FeatureSlowUAMem16, Feature3DNowA,
+                               FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-4",        [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
+                               FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-xp",       [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
+                               FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"athlon-mp",       [FeatureSlowUAMem16, FeatureSSE1, Feature3DNowA,
+                               FeatureFXSR, FeatureSlowBTMem, FeatureSlowSHLD]>;
+def : Proc<"k8",              [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
+                               FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"opteron",         [FeatureSlowUAMem16, FeatureSSE2, Feature3DNowA,
+                               FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"athlon64",        [FeatureSlowUAMem16, FeatureSSE2,   Feature3DNowA,
+                               FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"athlon-fx",       [FeatureSlowUAMem16, FeatureSSE2,   Feature3DNowA,
+                               FeatureFXSR, Feature64Bit, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"k8-sse3",         [FeatureSlowUAMem16, FeatureSSE3,   Feature3DNowA,
+                               FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"opteron-sse3",    [FeatureSlowUAMem16, FeatureSSE3,   Feature3DNowA,
+                               FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"athlon64-sse3",   [FeatureSlowUAMem16, FeatureSSE3,   Feature3DNowA,
+                               FeatureFXSR, FeatureCMPXCHG16B, FeatureSlowBTMem,
+                               FeatureSlowSHLD]>;
+def : Proc<"amdfam10",        [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
+                               FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
+                               FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
+def : Proc<"barcelona",       [FeatureSSE4A, Feature3DNowA, FeatureFXSR,
+                               FeatureCMPXCHG16B, FeatureLZCNT, FeaturePOPCNT,
+                               FeatureSlowBTMem, FeatureSlowSHLD, FeatureLAHFSAHF]>;
+
+// Bobcat
+def : Proc<"btver1", [
+  FeatureMMX,
+  FeatureSSSE3,
+  FeatureSSE4A,
+  FeatureFXSR,
+  FeatureCMPXCHG16B,
+  FeaturePRFCHW,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureSlowSHLD,
+  FeatureLAHFSAHF
+]>;
+
+// Jaguar
+def : ProcessorModel<"btver2", BtVer2Model, [
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureSSE4A,
+  FeatureCMPXCHG16B,
+  FeaturePRFCHW,
+  FeatureAES,
+  FeaturePCLMUL,
+  FeatureBMI,
+  FeatureF16C,
+  FeatureMOVBE,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureXSAVEOPT,
+  FeatureSlowSHLD,
+  FeatureLAHFSAHF
+]>;
+
+// Bulldozer
+def : Proc<"bdver1", [
+  FeatureXOP,
+  FeatureFMA4,
+  FeatureCMPXCHG16B,
+  FeatureAES,
+  FeaturePRFCHW,
+  FeaturePCLMUL,
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureSSE4A,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureSlowSHLD,
+  FeatureLAHFSAHF
+]>;
+// Piledriver
+def : Proc<"bdver2", [
+  FeatureXOP,
+  FeatureFMA4,
+  FeatureCMPXCHG16B,
+  FeatureAES,
+  FeaturePRFCHW,
+  FeaturePCLMUL,
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureSSE4A,
+  FeatureF16C,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureBMI,
+  FeatureTBM,
+  FeatureFMA,
+  FeatureSlowSHLD,
+  FeatureLAHFSAHF
+]>;
+
+// Steamroller
+def : Proc<"bdver3", [
+  FeatureXOP,
+  FeatureFMA4,
+  FeatureCMPXCHG16B,
+  FeatureAES,
+  FeaturePRFCHW,
+  FeaturePCLMUL,
+  FeatureMMX,
+  FeatureAVX,
+  FeatureFXSR,
+  FeatureSSE4A,
+  FeatureF16C,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureBMI,
+  FeatureTBM,
+  FeatureFMA,
+  FeatureXSAVEOPT,
+  FeatureSlowSHLD,
+  FeatureFSGSBase,
+  FeatureLAHFSAHF
+]>;
+
+// Excavator
+def : Proc<"bdver4", [
+  FeatureMMX,
+  FeatureAVX2,
+  FeatureFXSR,
+  FeatureXOP,
+  FeatureFMA4,
+  FeatureCMPXCHG16B,
+  FeatureAES,
+  FeaturePRFCHW,
+  FeaturePCLMUL,
+  FeatureF16C,
+  FeatureLZCNT,
+  FeaturePOPCNT,
+  FeatureXSAVE,
+  FeatureBMI,
+  FeatureBMI2,
+  FeatureTBM,
+  FeatureFMA,
+  FeatureXSAVEOPT,
+  FeatureFSGSBase,
+  FeatureLAHFSAHF
+]>;
+
+def : Proc<"geode",           [FeatureSlowUAMem16, Feature3DNowA]>;
+
+def : Proc<"winchip-c6",      [FeatureSlowUAMem16, FeatureMMX]>;
+def : Proc<"winchip2",        [FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3",              [FeatureSlowUAMem16, Feature3DNow]>;
+def : Proc<"c3-2", [FeatureSlowUAMem16, FeatureMMX, FeatureSSE1, FeatureFXSR]>;
+
+// We also provide a generic 64-bit specific x86 processor model which tries to
+// be good for modern chips without enabling instruction set encodings past the
+// basic SSE2 and 64-bit ones. It disables slow things from any mainstream and
+// modern 64-bit x86 chip, and enables features that are generally beneficial.
+//
+// We currently use the Sandy Bridge model as the default scheduling model as
+// we use it across Nehalem, Westmere, Sandy Bridge, and Ivy Bridge which
+// covers a huge swath of x86 processors. If there are specific scheduling
+// knobs which need to be tuned differently for AMD chips, we might consider
+// forming a common base for them.
+def : ProcessorModel<"x86-64", SandyBridgeModel,
+                     [FeatureMMX, FeatureSSE2, FeatureFXSR, Feature64Bit,
+                      FeatureSlowBTMem ]>;
+
+//===----------------------------------------------------------------------===//
+// Register File Description
+//===----------------------------------------------------------------------===//
+
+include "X86RegisterInfo.td"
+
+//===----------------------------------------------------------------------===//
+// Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "X86InstrInfo.td"
+
+def X86InstrInfo : InstrInfo;
+
+//===----------------------------------------------------------------------===//
+// Calling Conventions
+//===----------------------------------------------------------------------===//
+
+include "X86CallingConv.td"
+
+
+//===----------------------------------------------------------------------===//
+// Assembly Parser
+//===----------------------------------------------------------------------===//
+
+def ATTAsmParserVariant : AsmParserVariant {
+  int Variant = 0;
+
+  // Variant name.
+  string Name = "att";
+
+  // Discard comments in assembly strings.
+  string CommentDelimiter = "#";
+
+  // Recognize hard coded registers.
+  string RegisterPrefix = "%";
+}
+
+def IntelAsmParserVariant : AsmParserVariant {
+  int Variant = 1;
+
+  // Variant name.
+  string Name = "intel";
+
+  // Discard comments in assembly strings.
+  string CommentDelimiter = ";";
+
+  // Recognize hard coded registers.
+  string RegisterPrefix = "";
+}
+
+//===----------------------------------------------------------------------===//
+// Assembly Printers
+//===----------------------------------------------------------------------===//
+
+// The X86 target supports two different syntaxes for emitting machine code.
+// This is controlled by the -x86-asm-syntax={att|intel}
+def ATTAsmWriter : AsmWriter {
+  string AsmWriterClassName  = "ATTInstPrinter";
+  int Variant = 0;
+}
+def IntelAsmWriter : AsmWriter {
+  string AsmWriterClassName  = "IntelInstPrinter";
+  int Variant = 1;
+}
+
+def X86 : Target {
+  // Information about the instructions...
+  let InstructionSet = X86InstrInfo;
+  let AssemblyParserVariants = [ATTAsmParserVariant, IntelAsmParserVariant];
+  let AssemblyWriters = [ATTAsmWriter, IntelAsmWriter];
+}
diff -pruN 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/asan/asan_linux.cc 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/asan/asan_linux.cc
--- 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/asan/asan_linux.cc	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/asan/asan_linux.cc	2015-12-03 10:39:43.000000000 +0000
@@ -0,0 +1,171 @@
+//===-- asan_linux.cc -----------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
+#endif
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD
+#include <ucontext.h>
+extern "C" void* _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <link.h>
+#endif
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+  __FreeBSD_version <= 902001  // v9.2
+#define ucontext_t xucontext_t
+#endif
+
+typedef enum {
+  ASAN_RT_VERSION_UNDEFINED = 0,
+  ASAN_RT_VERSION_DYNAMIC,
+  ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t  __asan_rt_version;
+}
+
+namespace __asan {
+
+void InitializePlatformInterceptors() {}
+
+void *AsanDoesNotSupportStaticLinkage() {
+  // This will fail to link with -static.
+  return &_DYNAMIC;  // defined in link.h
+}
+
+#if SANITIZER_ANDROID
+// FIXME: should we do anything for Android?
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+#else
+static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
+                                void *data) {
+  // Continue until the first dynamic library is found
+  if (!info->dlpi_name || info->dlpi_name[0] == 0)
+    return 0;
+
+  // Ignore vDSO
+  if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
+    return 0;
+
+  *(const char **)data = info->dlpi_name;
+  return 1;
+}
+
+static bool IsDynamicRTName(const char *libname) {
+  return internal_strstr(libname, "libclang_rt.asan") ||
+    internal_strstr(libname, "libasan.so");
+}
+
+static void ReportIncompatibleRT() {
+  Report("Your application is linked against incompatible ASan runtimes.\n");
+  Die();
+}
+
+void AsanCheckDynamicRTPrereqs() {
+  if (!ASAN_DYNAMIC)
+    return;
+
+  // Ensure that dynamic RT is the first DSO in the list
+  const char *first_dso_name = nullptr;
+  dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
+  if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+    Report("ASan runtime does not come first in initial library list; "
+           "you should either link runtime to your application or "
+           "manually preload it with LD_PRELOAD.\n");
+    Die();
+  }
+}
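+
+// Hedged illustration of the advice in the message above (not part of the
+// original file): when the dynamic runtime is not linked into the binary, it
+// can be preloaded by hand. The path below is only an example; the real file
+// name depends on the clang version and target, e.g.:
+//
+//   LD_PRELOAD=/usr/lib/clang/3.8.1/lib/linux/libclang_rt.asan-x86_64.so ./app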
+
+void AsanCheckIncompatibleRT() {
+  if (ASAN_DYNAMIC) {
+    if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+      __asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
+    } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
+      ReportIncompatibleRT();
+    }
+  } else {
+    if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+      // Ensure that dynamic runtime is not present. We should detect it
+      // as early as possible, otherwise ASan interceptors could bind to
+      // the functions in dynamic ASan runtime instead of the functions in
+      // system libraries, causing crashes later in ASan initialization.
+      MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+      char filename[128];
+      while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
+                            sizeof(filename), nullptr)) {
+        if (IsDynamicRTName(filename)) {
+          Report("Your application is linked against "
+                 "incompatible ASan runtimes.\n");
+          Die();
+        }
+      }
+      __asan_rt_version = ASAN_RT_VERSION_STATIC;
+    } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
+      ReportIncompatibleRT();
+    }
+  }
+}
+#endif // SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+  ucontext_t *ucp = (ucontext_t*)context;
+  *stack = (uptr)ucp->uc_stack.ss_sp;
+  *ssize = ucp->uc_stack.ss_size;
+}
+#else
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+  UNIMPLEMENTED();
+}
+#endif
+
+void *AsanDlSymNext(const char *sym) {
+  return dlsym(RTLD_NEXT, sym);
+}
+
+} // namespace __asan
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff -pruN 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc
--- 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.cc	2016-06-01 08:17:03.000000000 +0000
@@ -0,0 +1,1252 @@
+//===-- sanitizer_linux.cc ------------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+
+#if !SANITIZER_FREEBSD
+#include <asm/param.h>
+#endif
+
+// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
+// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
+// access stat from asm/stat.h, without conflicting with definition in
+// sys/stat.h, we use this trick.
+#if defined(__mips64)
+#include <asm/unistd.h>
+#include <sys/types.h>
+#define stat kernel_stat
+#include <asm/stat.h>
+#undef stat
+#endif
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/mman.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+extern char **environ;  // provided by crt1
+#endif  // SANITIZER_FREEBSD
+
+#if !SANITIZER_ANDROID
+#include <sys/signal.h>
+#endif
+
+#if SANITIZER_LINUX
+// <linux/time.h>
+struct kernel_timeval {
+  long tv_sec;
+  long tv_usec;
+};
+
+// <linux/futex.h> is broken on some linux distributions.
+const int FUTEX_WAIT = 0;
+const int FUTEX_WAKE = 1;
+#endif  // SANITIZER_LINUX
+
+// Are we using 32-bit or 64-bit Linux syscalls?
+// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
+// but it still needs to use 64-bit syscalls.
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+    SANITIZER_WORDSIZE == 64)
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+#else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+#endif
+
+namespace __sanitizer {
+
+#if SANITIZER_LINUX && defined(__x86_64__)
+#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && defined(__aarch64__)
+#include "sanitizer_syscall_linux_aarch64.inc"
+#else
+#include "sanitizer_syscall_generic.inc"
+#endif
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+                   OFF_T offset) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+  return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
+                          offset);
+#else
+  // mmap2 specifies file offset in 4096-byte units.
+  CHECK(IsAligned(offset, 4096));
+  return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+                          offset / 4096);
+#endif
+}
+
+uptr internal_munmap(void *addr, uptr length) {
+  return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+  return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
+}
+
+uptr internal_close(fd_t fd) {
+  return internal_syscall(SYSCALL(close), fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+  return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+                          mode);
+#else
+  return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+  sptr res;
+  HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf,
+               count));
+  return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+  sptr res;
+  HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf,
+               count));
+  return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+  sptr res;
+  HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
+               (OFF_T)size));
+  return res;
+}
+
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && !SANITIZER_FREEBSD
+static void stat64_to_stat(struct stat64 *in, struct stat *out) {
+  internal_memset(out, 0, sizeof(*out));
+  out->st_dev = in->st_dev;
+  out->st_ino = in->st_ino;
+  out->st_mode = in->st_mode;
+  out->st_nlink = in->st_nlink;
+  out->st_uid = in->st_uid;
+  out->st_gid = in->st_gid;
+  out->st_rdev = in->st_rdev;
+  out->st_size = in->st_size;
+  out->st_blksize = in->st_blksize;
+  out->st_blocks = in->st_blocks;
+  out->st_atime = in->st_atime;
+  out->st_mtime = in->st_mtime;
+  out->st_ctime = in->st_ctime;
+  out->st_ino = in->st_ino;
+}
+#endif
+
+#if defined(__mips64)
+static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
+  internal_memset(out, 0, sizeof(*out));
+  out->st_dev = in->st_dev;
+  out->st_ino = in->st_ino;
+  out->st_mode = in->st_mode;
+  out->st_nlink = in->st_nlink;
+  out->st_uid = in->st_uid;
+  out->st_gid = in->st_gid;
+  out->st_rdev = in->st_rdev;
+  out->st_size = in->st_size;
+  out->st_blksize = in->st_blksize;
+  out->st_blocks = in->st_blocks;
+  out->st_atime = in->st_atime_nsec;
+  out->st_mtime = in->st_mtime_nsec;
+  out->st_ctime = in->st_ctime_nsec;
+  out->st_ino = in->st_ino;
+}
+#endif
+
+uptr internal_stat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+  return internal_syscall(SYSCALL(stat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+                          (uptr)buf, 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if defined(__mips64)
+  // For mips64, stat syscall fills buffer in the format of kernel_stat
+  struct kernel_stat kbuf;
+  int res = internal_syscall(SYSCALL(stat), path, &kbuf);
+  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+  return res;
+# else
+  return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
+# endif
+#else
+  struct stat64 buf64;
+  int res = internal_syscall(SYSCALL(stat64), path, &buf64);
+  stat64_to_stat(&buf64, (struct stat *)buf);
+  return res;
+#endif
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD
+  return internal_syscall(SYSCALL(lstat), path, buf);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
+                         (uptr)buf, AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+  // For mips64, lstat syscall fills buffer in the format of kernel_stat
+  struct kernel_stat kbuf;
+  int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
+  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+  return res;
+# else
+  return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
+# endif
+#else
+  struct stat64 buf64;
+  int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
+  stat64_to_stat(&buf64, (struct stat *)buf);
+  return res;
+#endif
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+  // For mips64, fstat syscall fills buffer in the format of kernel_stat
+  struct kernel_stat kbuf;
+  int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
+  kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+  return res;
+# else
+  return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
+# endif
+#else
+  struct stat64 buf64;
+  int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
+  stat64_to_stat(&buf64, (struct stat *)buf);
+  return res;
+#endif
+}
+
+uptr internal_filesize(fd_t fd) {
+  struct stat st;
+  if (internal_fstat(fd, &st))
+    return -1;
+  return (uptr)st.st_size;
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+  return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(readlinkat), AT_FDCWD,
+                          (uptr)path, (uptr)buf, bufsize);
+#else
+  return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
+}
+
+uptr internal_unlink(const char *path) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+  return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+                          (uptr)newpath);
+#else
+  return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
+}
+
+uptr internal_sched_yield() {
+  return internal_syscall(SYSCALL(sched_yield));
+}
+
+void internal__exit(int exitcode) {
+#if SANITIZER_FREEBSD
+  internal_syscall(SYSCALL(exit), exitcode);
+#else
+  internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
+  Die();  // Unreachable.
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+                     char *const envp[]) {
+  return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+                          (uptr)envp);
+}
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+  struct stat st;
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+#else
+  if (internal_stat(filename, &st))
+#endif
+    return false;
+  // Sanity check: filename is a regular file.
+  return S_ISREG(st.st_mode);
+}
+
+uptr GetTid() {
+#if SANITIZER_FREEBSD
+  return (uptr)pthread_self();
+#else
+  return internal_syscall(SYSCALL(gettid));
+#endif
+}
+
+u64 NanoTime() {
+#if SANITIZER_FREEBSD
+  timeval tv;
+#else
+  kernel_timeval tv;
+#endif
+  internal_memset(&tv, 0, sizeof(tv));
+  internal_syscall(SYSCALL(gettimeofday), (uptr)&tv, 0);
+  return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+}
+
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on FreeBSD) and does not use libc. This function should be
+// called first inside __asan_init.
+const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD
+  if (::environ != 0) {
+    uptr NameLen = internal_strlen(name);
+    for (char **Env = ::environ; *Env != 0; Env++) {
+      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+        return (*Env) + NameLen + 1;
+    }
+  }
+  return 0;  // Not found.
+#elif SANITIZER_LINUX
+  static char *environ;
+  static uptr len;
+  static bool inited;
+  if (!inited) {
+    inited = true;
+    uptr environ_size;
+    if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
+      environ = nullptr;
+  }
+  if (!environ || len == 0) return nullptr;
+  uptr namelen = internal_strlen(name);
+  const char *p = environ;
+  while (*p != '\0') {  // will happen at the \0\0 that terminates the buffer
+    // proc file has the format NAME=value\0NAME=value\0NAME=value\0...
+    const char* endp =
+        (char*)internal_memchr(p, '\0', len - (p - environ));
+    if (!endp)  // this entry isn't NUL terminated
+      return nullptr;
+    else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=')  // Match.
+      return p + namelen + 1;  // point after =
+    p = endp + 1;
+  }
+  return nullptr;  // Not found.
+#else
+#error "Unsupported platform"
+#endif
+}
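+
+// Hedged usage sketch (not part of the original file): early-init code is
+// expected to read its options through GetEnv() rather than getenv(), e.g.:
+//
+//   const char *options = GetEnv("ASAN_OPTIONS");
+//   if (options) Printf("ASAN_OPTIONS=%s\n", options);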
+
+extern "C" {
+  SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
+}
+
+#if !SANITIZER_GO
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+                                   int arr_size) {
+  char *buff;
+  uptr buff_size;
+  uptr buff_len;
+  *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+  if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) {
+    (*arr)[0] = nullptr;
+    return;
+  }
+  (*arr)[0] = buff;
+  int count, i;
+  for (count = 1, i = 1; ; i++) {
+    if (buff[i] == 0) {
+      if (buff[i+1] == 0) break;
+      (*arr)[count] = &buff[i+1];
+      CHECK_LE(count, arr_size - 1);  // FIXME: make this more flexible.
+      count++;
+    }
+  }
+  (*arr)[count] = nullptr;
+}
+#endif
+
+static void GetArgsAndEnv(char*** argv, char*** envp) {
+#if !SANITIZER_GO
+  if (&__libc_stack_end) {
+#endif
+    uptr* stack_end = (uptr*)__libc_stack_end;
+    int argc = *stack_end;
+    *argv = (char**)(stack_end + 1);
+    *envp = (char**)(stack_end + argc + 2);
+#if !SANITIZER_GO
+  } else {
+    static const int kMaxArgv = 2000, kMaxEnvp = 2000;
+    ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
+    ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
+  }
+#endif
+}
+
+void ReExec() {
+  char **argv, **envp;
+  GetArgsAndEnv(&argv, &envp);
+  uptr rv = internal_execve("/proc/self/exe", argv, envp);
+  int rverrno;
+  CHECK_EQ(internal_iserror(rv, &rverrno), true);
+  Printf("execve failed, errno %d\n", rverrno);
+  Die();
+}
+
+enum MutexState {
+  MtxUnlocked = 0,
+  MtxLocked = 1,
+  MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex() {
+  internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+  CHECK_EQ(owner_, 0);
+  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+    return;
+  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+    _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#else
+    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT, MtxSleeping, 0, 0, 0);
+#endif
+  }
+}
+
+void BlockingMutex::Unlock() {
+  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_relaxed);
+  CHECK_NE(v, MtxUnlocked);
+  if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+    _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#else
+    internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE, 1, 0, 0, 0);
+#endif
+  }
+}
+
+void BlockingMutex::CheckLocked() {
+  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+// ----------------- sanitizer_linux.h
+// The actual size of this structure is specified by d_reclen.
+// Note that getdents64 uses a different structure format. We only provide the
+// 32-bit syscall here.
+struct linux_dirent {
+#if SANITIZER_X32 || defined(__aarch64__)
+  u64 d_ino;
+  u64 d_off;
+#else
+  unsigned long      d_ino;
+  unsigned long      d_off;
+#endif
+  unsigned short     d_reclen;
+#ifdef __aarch64__
+  unsigned char      d_type;
+#endif
+  char               d_name[256];
+};
+
+// Syscall wrappers.
+uptr internal_ptrace(int request, int pid, void *addr, void *data) {
+  return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+                          (uptr)data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+  return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
+                          0 /* rusage */);
+}
+
+uptr internal_getpid() {
+  return internal_syscall(SYSCALL(getpid));
+}
+
+uptr internal_getppid() {
+  return internal_syscall(SYSCALL(getppid));
+}
+
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+  return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+  return internal_syscall(SYSCALL(lseek), fd, offset, whence);
+}
+
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+  return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
+}
+#endif
+
+uptr internal_sigaltstack(const struct sigaltstack *ss,
+                         struct sigaltstack *oss) {
+  return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
+}
+
+int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+  return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
+  return internal_syscall(SYSCALL(fork));
+#endif
+}
+
+#if SANITIZER_LINUX
+#define SA_RESTORER 0x04000000
+// Doesn't set sa_restorer, use with caution (see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+  __sanitizer_kernel_sigaction_t k_act, k_oldact;
+  internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+  internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+  const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act;
+  __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+  if (u_act) {
+    k_act.handler = u_act->handler;
+    k_act.sigaction = u_act->sigaction;
+    internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+                    sizeof(__sanitizer_kernel_sigset_t));
+    // Without SA_RESTORER the kernel ignores the calls (probably returns EINVAL).
+    k_act.sa_flags = u_act->sa_flags | SA_RESTORER;
+    // FIXME: most often sa_restorer is unset; however, the kernel requires it
+    // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+    // If the sa_restorer passed to the kernel is NULL, the program may crash
+    // upon signal delivery or fail to unwind the stack in the signal handler.
+    // The libc implementation of sigaction() passes its own restorer to
+    // rt_sigaction, so we need to do the same (we'll need to reimplement the
+    // restorers; for x86_64 the restorer address can be obtained from
+    // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact)).
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+    k_act.sa_restorer = u_act->sa_restorer;
+#endif
+  }
+
+  uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+      (uptr)(u_act ? &k_act : nullptr),
+      (uptr)(u_oldact ? &k_oldact : nullptr),
+      (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+  if ((result == 0) && u_oldact) {
+    u_oldact->handler = k_oldact.handler;
+    u_oldact->sigaction = k_oldact.sigaction;
+    internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+                    sizeof(__sanitizer_kernel_sigset_t));
+    u_oldact->sa_flags = k_oldact.sa_flags;
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+    u_oldact->sa_restorer = k_oldact.sa_restorer;
+#endif
+  }
+  return result;
+}
+#endif  // SANITIZER_LINUX
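+
+// Hedged sketch of the FIXME above (illustrative only, not upstream code): on
+// x86_64 a usable restorer could in principle be recovered from a handler
+// that libc has already installed, along the lines of:
+//
+//   struct sigaction libc_old;
+//   if (sigaction(SIGSEGV, nullptr, &libc_old) == 0)
+//     k_act.sa_restorer = libc_old.sa_restorer;  // reuse libc's restorer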
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+    __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD
+  return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+  __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+  __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+  return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
+                          (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
+                          sizeof(__sanitizer_kernel_sigset_t));
+#endif
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
+  internal_memset(set, 0xff, sizeof(*set));
+}
+
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
+  signum -= 1;
+  CHECK_GE(signum, 0);
+  CHECK_LT(signum, sizeof(*set) * 8);
+  __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+  const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+  const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+  k_set->sig[idx] &= ~(1 << bit);
+}
+#endif  // SANITIZER_LINUX
+
+// ThreadLister implementation.
+ThreadLister::ThreadLister(int pid)
+  : pid_(pid),
+    descriptor_(-1),
+    buffer_(4096),
+    error_(true),
+    entry_((struct linux_dirent *)buffer_.data()),
+    bytes_read_(0) {
+  char task_directory_path[80];
+  internal_snprintf(task_directory_path, sizeof(task_directory_path),
+                    "/proc/%d/task/", pid);
+  uptr openrv = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
+  if (internal_iserror(openrv)) {
+    error_ = true;
+    Report("Can't open /proc/%d/task for reading.\n", pid);
+  } else {
+    error_ = false;
+    descriptor_ = openrv;
+  }
+}
+
+int ThreadLister::GetNextTID() {
+  int tid = -1;
+  do {
+    if (error_)
+      return -1;
+    if ((char *)entry_ >= &buffer_[bytes_read_] && !GetDirectoryEntries())
+      return -1;
+    if (entry_->d_ino != 0 && entry_->d_name[0] >= '0' &&
+        entry_->d_name[0] <= '9') {
+      // Found a valid tid.
+      tid = (int)internal_atoll(entry_->d_name);
+    }
+    entry_ = (struct linux_dirent *)(((char *)entry_) + entry_->d_reclen);
+  } while (tid < 0);
+  return tid;
+}
+
+void ThreadLister::Reset() {
+  if (error_ || descriptor_ < 0)
+    return;
+  internal_lseek(descriptor_, 0, SEEK_SET);
+}
+
+ThreadLister::~ThreadLister() {
+  if (descriptor_ >= 0)
+    internal_close(descriptor_);
+}
+
+bool ThreadLister::error() { return error_; }
+
+bool ThreadLister::GetDirectoryEntries() {
+  CHECK_GE(descriptor_, 0);
+  CHECK_NE(error_, true);
+  bytes_read_ = internal_getdents(descriptor_,
+                                  (struct linux_dirent *)buffer_.data(),
+                                  buffer_.size());
+  if (internal_iserror(bytes_read_)) {
+    Report("Can't read directory entries from /proc/%d/task.\n", pid_);
+    error_ = true;
+    return false;
+  } else if (bytes_read_ == 0) {
+    return false;
+  }
+  entry_ = (struct linux_dirent *)buffer_.data();
+  return true;
+}
+
+uptr GetPageSize() {
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
+  return EXEC_PAGESIZE;
+#else
+  return sysconf(_SC_PAGESIZE);  // EXEC_PAGESIZE may not be trustworthy.
+#endif
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+#if SANITIZER_FREEBSD
+  const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };
+  const char *default_module_name = "kern.proc.pathname";
+  size_t Size = buf_len;
+  bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
+  int readlink_error = IsErr ? errno : 0;
+  uptr module_name_len = Size;
+#else
+  const char *default_module_name = "/proc/self/exe";
+  uptr module_name_len = internal_readlink(
+      default_module_name, buf, buf_len);
+  int readlink_error;
+  bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif
+  if (IsErr) {
+    // We can't read binary name for some reason, assume it's unknown.
+    Report("WARNING: reading executable name failed with errno %d, "
+           "some stack frames may not be symbolized\n", readlink_error);
+    module_name_len = internal_snprintf(buf, buf_len, "%s",
+                                        default_module_name);
+    CHECK_LT(module_name_len, buf_len);
+  }
+  return module_name_len;
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+#if SANITIZER_LINUX
+  char *tmpbuf;
+  uptr tmpsize;
+  uptr tmplen;
+  if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen,
+                       1024 * 1024)) {
+    internal_strncpy(buf, tmpbuf, buf_len);
+    UnmapOrDie(tmpbuf, tmpsize);
+    return internal_strlen(buf);
+  }
+#endif
+  return ReadBinaryName(buf, buf_len);
+}
+
+// Match full names of the form /path/to/base_name{-,.}*
+bool LibraryNameIs(const char *full_name, const char *base_name) {
+  const char *name = full_name;
+  // Strip path.
+  while (*name != '\0') name++;
+  while (name > full_name && *name != '/') name--;
+  if (*name == '/') name++;
+  uptr base_name_length = internal_strlen(base_name);
+  if (internal_strncmp(name, base_name, base_name_length)) return false;
+  return (name[base_name_length] == '-' || name[base_name_length] == '.');
+}
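+
+// Hedged examples of the matching rule above (file names are illustrative):
+//   LibraryNameIs("/usr/lib/libclang_rt.asan-x86_64.so", "libclang_rt.asan")
+//       -> true  ('-' follows the base name)
+//   LibraryNameIs("/usr/lib/libasan.so.3", "libasan")  -> true  ('.' follows)
+//   LibraryNameIs("/usr/lib/libasan2.so", "libasan")   -> false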
+
+#if !SANITIZER_ANDROID
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+  CHECK_NE(map, nullptr);
+#if !SANITIZER_FREEBSD
+  typedef ElfW(Phdr) Elf_Phdr;
+  typedef ElfW(Ehdr) Elf_Ehdr;
+#endif  // !SANITIZER_FREEBSD
+  char *base = (char *)map->l_addr;
+  Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
+  char *phdrs = base + ehdr->e_phoff;
+  char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize;
+
+  // Find the segment with the minimum base so we can "relocate" the p_vaddr
+  // fields.  Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC
+  // objects have a non-zero base.
+  uptr preferred_base = (uptr)-1;
+  for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+    Elf_Phdr *phdr = (Elf_Phdr *)iter;
+    if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr)
+      preferred_base = (uptr)phdr->p_vaddr;
+  }
+
+  // Compute the delta from the real base to get a relocation delta.
+  sptr delta = (uptr)base - preferred_base;
+  // Now we can figure out what the loader really mapped.
+  for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+    Elf_Phdr *phdr = (Elf_Phdr *)iter;
+    if (phdr->p_type == PT_LOAD) {
+      uptr seg_start = phdr->p_vaddr + delta;
+      uptr seg_end = seg_start + phdr->p_memsz;
+      // None of these values are aligned.  We consider the ragged edges of the
+      // load command as defined, since they are mapped from the file.
+      seg_start = RoundDownTo(seg_start, GetPageSizeCached());
+      seg_end = RoundUpTo(seg_end, GetPageSizeCached());
+      cb((void *)seg_start, seg_end - seg_start);
+    }
+  }
+}
+#endif
+
+#if defined(__x86_64__) && SANITIZER_LINUX
+// We cannot use glibc's clone wrapper, because it messes with the child
+// task's TLS. It writes the PID and TID of the child task to its thread
+// descriptor, but in our case the child task shares the thread descriptor with
+// the parent (because we don't know how to allocate a new thread
+// descriptor to keep glibc happy). So the stock version of clone(), when
+// used with CLONE_VM, would end up corrupting the parent's thread descriptor.
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                    int *parent_tidptr, void *newtls, int *child_tidptr) {
+  long long res;
+  if (!fn || !child_stack)
+    return -EINVAL;
+  CHECK_EQ(0, (uptr)child_stack % 16);
+  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+  ((unsigned long long *)child_stack)[0] = (uptr)fn;
+  ((unsigned long long *)child_stack)[1] = (uptr)arg;
+  register void *r8 __asm__("r8") = newtls;
+  register int *r10 __asm__("r10") = child_tidptr;
+  __asm__ __volatile__(
+                       /* %rax = syscall(%rax = SYSCALL(clone),
+                        *                %rdi = flags,
+                        *                %rsi = child_stack,
+                        *                %rdx = parent_tidptr,
+                        *                %r8  = new_tls,
+                        *                %r10 = child_tidptr)
+                        */
+                       "syscall\n"
+
+                       /* if (%rax != 0)
+                        *   return;
+                        */
+                       "testq  %%rax,%%rax\n"
+                       "jnz    1f\n"
+
+                       /* In the child. Terminate unwind chain. */
+                       // XXX: We should also terminate the CFI unwind chain
+                       // here. Unfortunately clang 3.2 doesn't support the
+                       // necessary CFI directives, so we skip that part.
+                       "xorq   %%rbp,%%rbp\n"
+
+                       /* Call "fn(arg)". */
+                       "popq   %%rax\n"
+                       "popq   %%rdi\n"
+                       "call   *%%rax\n"
+
+                       /* Call _exit(%rax). */
+                       "movq   %%rax,%%rdi\n"
+                       "movq   %2,%%rax\n"
+                       "syscall\n"
+
+                       /* Return to parent. */
+                     "1:\n"
+                       : "=a" (res)
+                       : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+                         "S"(child_stack),
+                         "D"(flags),
+                         "d"(parent_tidptr),
+                         "r"(r8),
+                         "r"(r10)
+                       : "rsp", "memory", "r11", "rcx");
+  return res;
+}
+#elif defined(__mips__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                    int *parent_tidptr, void *newtls, int *child_tidptr) {
+  long long res;
+  if (!fn || !child_stack)
+    return -EINVAL;
+  CHECK_EQ(0, (uptr)child_stack % 16);
+  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+  ((unsigned long long *)child_stack)[0] = (uptr)fn;
+  ((unsigned long long *)child_stack)[1] = (uptr)arg;
+  register void *a3 __asm__("$7") = newtls;
+  register int *a4 __asm__("$8") = child_tidptr;
+  // We don't have proper CFI directives here because they require a lot of
+  // code for very marginal benefits.
+  __asm__ __volatile__(
+                       /* $v0 = syscall($v0 = __NR_clone,
+                        * $a0 = flags,
+                        * $a1 = child_stack,
+                        * $a2 = parent_tidptr,
+                        * $a3 = new_tls,
+                        * $a4 = child_tidptr)
+                        */
+                       ".cprestore 16;\n"
+                       "move $4,%1;\n"
+                       "move $5,%2;\n"
+                       "move $6,%3;\n"
+                       "move $7,%4;\n"
+                       /* Store the fifth argument on the stack
+                        * if we are using the 32-bit ABI.
+                        */
+#if SANITIZER_WORDSIZE == 32
+                       "lw %5,16($29);\n"
+#else
+                       "move $8,%5;\n"
+#endif
+                       "li $2,%6;\n"
+                       "syscall;\n"
+
+                       /* if ($v0 != 0)
+                        * return;
+                        */
+                       "bnez $2,1f;\n"
+
+                       /* Call "fn(arg)". */
+                       "ld $25,0($29);\n"
+                       "ld $4,8($29);\n"
+                       "jal $25;\n"
+
+                       /* Call _exit($v0). */
+                       "move $4,$2;\n"
+                       "li $2,%7;\n"
+                       "syscall;\n"
+
+                       /* Return to parent. */
+                     "1:\n"
+                       : "=r" (res)
+                       : "r"(flags),
+                         "r"(child_stack),
+                         "r"(parent_tidptr),
+                         "r"(a3),
+                         "r"(a4),
+                         "i"(__NR_clone),
+                         "i"(__NR_exit)
+                       : "memory", "$29" );
+  return res;
+}
+#elif defined(__aarch64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                    int *parent_tidptr, void *newtls, int *child_tidptr) {
+  long long res;
+  if (!fn || !child_stack)
+    return -EINVAL;
+  CHECK_EQ(0, (uptr)child_stack % 16);
+  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+  ((unsigned long long *)child_stack)[0] = (uptr)fn;
+  ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+  register int (*__fn)(void *)  __asm__("x0") = fn;
+  register void *__stack __asm__("x1") = child_stack;
+  register int   __flags __asm__("x2") = flags;
+  register void *__arg   __asm__("x3") = arg;
+  register int  *__ptid  __asm__("x4") = parent_tidptr;
+  register void *__tls   __asm__("x5") = newtls;
+  register int  *__ctid  __asm__("x6") = child_tidptr;
+
+  __asm__ __volatile__(
+                       "mov x0,x2\n" /* flags  */
+                       "mov x2,x4\n" /* ptid  */
+                       "mov x3,x5\n" /* tls  */
+                       "mov x4,x6\n" /* ctid  */
+                       "mov x8,%9\n" /* clone  */
+
+                       "svc 0x0\n"
+
+                       /* if (%r0 != 0)
+                        *   return %r0;
+                        */
+                       "cmp x0, #0\n"
+                       "bne 1f\n"
+
+                       /* In the child, now. Call "fn(arg)". */
+                       "ldp x1, x0, [sp], #16\n"
+                       "blr x1\n"
+
+                       /* Call _exit(%r0).  */
+                       "mov x8, %10\n"
+                       "svc 0x0\n"
+                     "1:\n"
+
+                       : "=r" (res)
+                       : "i"(-EINVAL),
+                         "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+                         "r"(__ptid), "r"(__tls), "r"(__ctid),
+                         "i"(__NR_clone), "i"(__NR_exit)
+                       : "x30", "memory");
+  return res;
+}
+#elif defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                   int *parent_tidptr, void *newtls, int *child_tidptr) {
+  long long res;
+/* Stack frame offsets.  */
+#if _CALL_ELF != 2
+#define FRAME_MIN_SIZE         112
+#define FRAME_TOC_SAVE         40
+#else
+#define FRAME_MIN_SIZE         32
+#define FRAME_TOC_SAVE         24
+#endif
+  if (!fn || !child_stack)
+    return -EINVAL;
+  CHECK_EQ(0, (uptr)child_stack % 16);
+  child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+  ((unsigned long long *)child_stack)[0] = (uptr)fn;
+  ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+  register int (*__fn)(void *) __asm__("r3") = fn;
+  register void *__cstack      __asm__("r4") = child_stack;
+  register int __flags         __asm__("r5") = flags;
+  register void * __arg        __asm__("r6") = arg;
+  register int * __ptidptr     __asm__("r7") = parent_tidptr;
+  register void * __newtls     __asm__("r8") = newtls;
+  register int * __ctidptr     __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+           /* fn, arg, child_stack are saved across the syscall */
+           "mr 28, %5\n\t"
+           "mr 29, %6\n\t"
+           "mr 27, %8\n\t"
+
+           /* syscall
+             r3 == flags
+             r4 == child_stack
+             r5 == parent_tidptr
+             r6 == newtls
+             r7 == child_tidptr */
+           "mr 3, %7\n\t"
+           "mr 5, %9\n\t"
+           "mr 6, %10\n\t"
+           "mr 7, %11\n\t"
+           "li 0, %3\n\t"
+           "sc\n\t"
+
+           /* Test if syscall was successful */
+           "cmpdi  cr1, 3, 0\n\t"
+           "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+           "bne-   cr1, 1f\n\t"
+
+           /* Do the function call */
+           "std   2, %13(1)\n\t"
+#if _CALL_ELF != 2
+           "ld    0, 0(28)\n\t"
+           "ld    2, 8(28)\n\t"
+           "mtctr 0\n\t"
+#else
+           "mr    12, 28\n\t"
+           "mtctr 12\n\t"
+#endif
+           "mr    3, 27\n\t"
+           "bctrl\n\t"
+           "ld    2, %13(1)\n\t"
+
+           /* Call _exit(r3) */
+           "li 0, %4\n\t"
+           "sc\n\t"
+
+           /* Return to parent */
+           "1:\n\t"
+           "mr %0, 3\n\t"
+             : "=r" (res)
+             : "0" (-1), "i" (EINVAL),
+               "i" (__NR_clone), "i" (__NR_exit),
+               "r" (__fn), "r" (__cstack), "r" (__flags),
+               "r" (__arg), "r" (__ptidptr), "r" (__newtls),
+               "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
+             : "cr0", "cr1", "memory", "ctr",
+               "r0", "r29", "r27", "r28");
+  return res;
+}
+#endif  // defined(__x86_64__) && SANITIZER_LINUX
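+
+// Hedged usage sketch (illustrative, not upstream code): the stop-the-world
+// machinery spawns its tracer task with internal_clone() roughly as follows;
+// the stack handling and flag set here are simplified.
+//
+//   static int MyTracer(void *arg) { return 0; }
+//   ...
+//   uptr stack_size = 64 << 10;
+//   uptr stack = (uptr)MmapOrDie(stack_size, "tracer stack");
+//   uptr stack_top = RoundDownTo(stack + stack_size, 16);
+//   uptr pid = internal_clone(MyTracer, (void *)stack_top,
+//                             CLONE_VM | CLONE_FS | CLONE_FILES,
+//                             /*arg=*/nullptr, /*parent_tidptr=*/nullptr,
+//                             /*newtls=*/nullptr, /*child_tidptr=*/nullptr);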
+
+#if SANITIZER_ANDROID
+#if __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+    int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
+                                   void *data) {
+  // Any name starting with "lib" indicates a bug in L where library base names
+  // are returned instead of paths.
+  if (info->dlpi_name && info->dlpi_name[0] == 'l' &&
+      info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {
+    *(bool *)data = true;
+    return 1;
+  }
+  return 0;
+}
+
+static atomic_uint32_t android_api_level;
+
+static AndroidApiLevel AndroidDetectApiLevel() {
+  if (!&dl_iterate_phdr)
+    return ANDROID_KITKAT; // K or lower
+  bool base_name_seen = false;
+  dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
+  if (base_name_seen)
+    return ANDROID_LOLLIPOP_MR1; // L MR1
+  return ANDROID_POST_LOLLIPOP;   // post-L
+  // Plain L (API level 21) is completely broken wrt ASan and not very
+  // interesting to detect.
+}
+
+AndroidApiLevel AndroidGetApiLevel() {
+  AndroidApiLevel level =
+      (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
+  if (level) return level;
+  level = AndroidDetectApiLevel();
+  atomic_store(&android_api_level, level, memory_order_relaxed);
+  return level;
+}
+
+#endif
+
+bool IsDeadlySignal(int signum) {
+  if (common_flags()->handle_abort && signum == SIGABRT)
+    return true;
+  if (common_flags()->handle_sigill && signum == SIGILL)
+    return true;
+  if (common_flags()->handle_sigfpe && signum == SIGFPE)
+    return true;
+  return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
+}
+
+#ifndef SANITIZER_GO
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+  // Start the thread with signals blocked, otherwise it can steal user signals.
+  __sanitizer_sigset_t set, old;
+  internal_sigfillset(&set);
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+  // Glibc uses the SIGSETXID signal during setuid calls. If this signal is
+  // blocked on any thread, the setuid call hangs (see test/tsan/setuid.c).
+  internal_sigdelset(&set, 33);
+#endif
+  internal_sigprocmask(SIG_SETMASK, &set, &old);
+  void *th;
+  real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg);
+  internal_sigprocmask(SIG_SETMASK, &old, nullptr);
+  return th;
+}
+
+void internal_join_thread(void *th) {
+  real_pthread_join(th, nullptr);
+}
+#else
+void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
+
+void internal_join_thread(void *th) {}
+#endif
+
+void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if defined(__arm__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.arm_pc;
+  *bp = ucontext->uc_mcontext.arm_fp;
+  *sp = ucontext->uc_mcontext.arm_sp;
+#elif defined(__aarch64__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.pc;
+  *bp = ucontext->uc_mcontext.regs[29];
+  *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.sc_iaoq[0];
+  /* GCC uses %r3 whenever a frame pointer is needed.  */
+  *bp = ucontext->uc_mcontext.sc_gr[3];
+  *sp = ucontext->uc_mcontext.sc_gr[30];
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.mc_rip;
+  *bp = ucontext->uc_mcontext.mc_rbp;
+  *sp = ucontext->uc_mcontext.mc_rsp;
+# else
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.gregs[REG_RIP];
+  *bp = ucontext->uc_mcontext.gregs[REG_RBP];
+  *sp = ucontext->uc_mcontext.gregs[REG_RSP];
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.mc_eip;
+  *bp = ucontext->uc_mcontext.mc_ebp;
+  *sp = ucontext->uc_mcontext.mc_esp;
+# else
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.gregs[REG_EIP];
+  *bp = ucontext->uc_mcontext.gregs[REG_EBP];
+  *sp = ucontext->uc_mcontext.gregs[REG_ESP];
+# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.regs->nip;
+  *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+  // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+  // pointer, but GCC always uses r31 when we need a frame pointer.
+  *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
+#elif defined(__sparc__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  uptr *stk_ptr;
+# if defined (__arch64__)
+  *pc = ucontext->uc_mcontext.mc_gregs[MC_PC];
+  *sp = ucontext->uc_mcontext.mc_gregs[MC_O6];
+  stk_ptr = (uptr *) (*sp + 2047);
+  *bp = stk_ptr[15];
+# else
+  *pc = ucontext->uc_mcontext.gregs[REG_PC];
+  *sp = ucontext->uc_mcontext.gregs[REG_O6];
+  stk_ptr = (uptr *) *sp;
+  *bp = stk_ptr[15];
+# endif
+#elif defined(__mips__)
+  ucontext_t *ucontext = (ucontext_t*)context;
+  *pc = ucontext->uc_mcontext.pc;
+  *bp = ucontext->uc_mcontext.gregs[30];
+  *sp = ucontext->uc_mcontext.gregs[29];
+#else
+# error "Unsupported arch"
+#endif
+}
+
+void DisableReexec() {
+  // No need to re-exec on Linux.
+}
+
+void MaybeReexec() {
+  // No need to re-exec on Linux.
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff -pruN 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.h 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.h
--- 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.h	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_linux.h	2015-12-08 21:54:39.000000000 +0000
@@ -0,0 +1,90 @@
+//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Linux-specific syscall wrappers and classes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_H
+#define SANITIZER_LINUX_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_platform_limits_posix.h"
+
+struct link_map;  // Opaque type returned by dlopen().
+struct sigaltstack;
+
+namespace __sanitizer {
+// Dirent structure for getdents(). Note that this structure is different from
+// the one in <dirent.h>, which is used by readdir().
+struct linux_dirent;
+
+// Syscall wrappers.
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
+uptr internal_sigaltstack(const struct sigaltstack* ss,
+                          struct sigaltstack* oss);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+    __sanitizer_sigset_t *oldset);
+void internal_sigfillset(__sanitizer_sigset_t *set);
+
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
+  || defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+                    int *parent_tidptr, void *newtls, int *child_tidptr);
+#endif
+#endif  // SANITIZER_LINUX
+
+// This class reads thread IDs from /proc/<pid>/task using only syscalls.
+class ThreadLister {
+ public:
+  explicit ThreadLister(int pid);
+  ~ThreadLister();
+  // GetNextTID returns -1 if the list of threads is exhausted, or if there has
+  // been an error.
+  int GetNextTID();
+  void Reset();
+  bool error();
+
+ private:
+  bool GetDirectoryEntries();
+
+  int pid_;
+  int descriptor_;
+  InternalScopedBuffer<char> buffer_;
+  bool error_;
+  struct linux_dirent* entry_;
+  int bytes_read_;
+};
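+
+// Hedged usage sketch (illustrative): enumerating the threads of a process
+// with nothing but syscalls, the way sanitizer_stoptheworld does:
+//
+//   ThreadLister lister(pid);
+//   for (int tid = lister.GetNextTID(); tid >= 0; tid = lister.GetNextTID())
+//     VReport(2, "found thread %d\n", tid);
+//   if (lister.error()) {
+//     // /proc/<pid>/task could not be read.
+//   }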
+
+// Exposed for testing.
+uptr ThreadDescriptorSize();
+uptr ThreadSelf();
+uptr ThreadSelfOffset();
+
+// Matches a library's file name against a base name (stripping path and version
+// information).
+bool LibraryNameIs(const char *full_name, const char *base_name);
+
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
+}  // namespace __sanitizer
+
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif  // SANITIZER_LINUX_H
diff -pruN 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
--- 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc	2016-06-01 08:17:03.000000000 +0000
@@ -0,0 +1,522 @@
+//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
+                        defined(__aarch64__) || defined(__powerpc64__))
+
+#include "sanitizer_stoptheworld.h"
+
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_atomic.h"
+
+#include <errno.h>
+#include <sched.h> // for CLONE_* definitions
+#include <stddef.h>
+#include <sys/prctl.h> // for PR_* definitions
+#include <sys/ptrace.h> // for PTRACE_* definitions
+#include <sys/types.h> // for pid_t
+#include <sys/uio.h> // for iovec
+#include <elf.h> // for NT_PRSTATUS
+#if SANITIZER_ANDROID && defined(__arm__)
+# include <linux/user.h>  // for pt_regs
+#else
+# ifdef __aarch64__
+// GLIBC 2.20+ sys/user does not include asm/ptrace.h
+#  include <asm/ptrace.h>
+# endif
+# include <sys/user.h>  // for user_regs_struct
+# if SANITIZER_ANDROID && SANITIZER_MIPS
+#   include <asm/reg.h>  // for mips SP register in sys/user.h
+# endif
+#endif
+#include <sys/wait.h> // for signal-related stuff
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+
+// This module works by spawning a Linux task which then attaches to every
+// thread in the caller process with ptrace. This suspends the threads, and
+// PTRACE_GETREGS can then be used to obtain their register state. The callback
+// supplied to StopTheWorld() is run in the tracer task while the threads are
+// suspended.
+// The tracer task must be placed in a different thread group for ptrace to
+// work, so it cannot be spawned as a pthread. Instead, we use the low-level
+// clone() interface (we want to share the address space with the caller
+// process, so we prefer clone() over fork()).
+//
+// We don't use any libc functions, relying instead on direct syscalls. There
+// are two reasons for this:
+// 1. calling a library function while threads are suspended could cause a
+// deadlock, if one of the threads happens to be holding a libc lock;
+// 2. it's generally not safe to call libc functions from the tracer task,
+// because clone() does not set up thread-local storage for it. Any
+// thread-local variables used by libc will be shared between the tracer task
+// and the thread which spawned it.
+
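+// Hedged usage sketch (illustrative, not upstream code): a client such as the
+// leak checker drives this module roughly as follows; the callback name is
+// hypothetical and its signature is inferred from how the tracer invokes it
+// below.
+//
+//   static void InspectThreads(const SuspendedThreadsList &threads,
+//                              void *argument) {
+//     // Registers and stacks of the suspended threads can be examined here.
+//   }
+//   ...
+//   StopTheWorld(InspectThreads, /*argument=*/nullptr);
+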
+COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+
+namespace __sanitizer {
+
+// Structure for passing arguments into the tracer thread.
+struct TracerThreadArgument {
+  StopTheWorldCallback callback;
+  void *callback_argument;
+  // The tracer thread waits on this mutex while the parent finishes its
+  // preparations.
+  BlockingMutex mutex;
+  // Tracer thread signals its completion by setting done.
+  atomic_uintptr_t done;
+  uptr parent_pid;
+};
+
+// This class handles thread suspending/unsuspending in the tracer thread.
+class ThreadSuspender {
+ public:
+  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
+    : arg(arg)
+    , pid_(pid) {
+      CHECK_GE(pid, 0);
+    }
+  bool SuspendAllThreads();
+  void ResumeAllThreads();
+  void KillAllThreads();
+  SuspendedThreadsList &suspended_threads_list() {
+    return suspended_threads_list_;
+  }
+  TracerThreadArgument *arg;
+ private:
+  SuspendedThreadsList suspended_threads_list_;
+  pid_t pid_;
+  bool SuspendThread(SuspendedThreadID thread_id);
+};
+
+bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) {
+  // Are we already attached to this thread?
+  // Currently this check takes linear time; however, the number of threads is
+  // usually small.
+  if (suspended_threads_list_.Contains(tid))
+    return false;
+  int pterrno;
+  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
+                       &pterrno)) {
+    // Either the thread is dead, or something prevented us from attaching.
+    // Log this event and move on.
+    VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno);
+    return false;
+  } else {
+    VReport(2, "Attached to thread %d.\n", tid);
+    // The thread is not guaranteed to stop before ptrace returns, so we must
+    // wait on it. Note: if the thread receives a signal concurrently,
+    // we can get notification about the signal before notification about stop.
+    // In such case we need to forward the signal to the thread, otherwise
+    // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
+    // any logic relying on signals will break. After forwarding we need to
+    // continue to wait for stopping, because the thread is not stopped yet.
+    // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
+    // as invisible as possible.
+    for (;;) {
+      int status;
+      uptr waitpid_status;
+      HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
+      int wperrno;
+      if (internal_iserror(waitpid_status, &wperrno)) {
+        // Got an ECHILD error. I don't think this situation is possible, but it
+        // doesn't hurt to report it.
+        VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n",
+                tid, wperrno);
+        internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
+        return false;
+      }
+      if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
+        internal_ptrace(PTRACE_CONT, tid, nullptr,
+                        (void*)(uptr)WSTOPSIG(status));
+        continue;
+      }
+      break;
+    }
+    suspended_threads_list_.Append(tid);
+    return true;
+  }
+}
+
+void ThreadSuspender::ResumeAllThreads() {
+  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
+    pid_t tid = suspended_threads_list_.GetThreadID(i);
+    int pterrno;
+    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
+                          &pterrno)) {
+      VReport(2, "Detached from thread %d.\n", tid);
+    } else {
+      // Either the thread is dead, or we are already detached.
+      // The latter case is possible, for instance, if this function was called
+      // from a signal handler.
+      VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
+    }
+  }
+}
+
+void ThreadSuspender::KillAllThreads() {
+  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
+    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
+                    nullptr, nullptr);
+}
+
+bool ThreadSuspender::SuspendAllThreads() {
+  ThreadLister thread_lister(pid_);
+  bool added_threads;
+  do {
+    // Run through the directory entries once.
+    added_threads = false;
+    pid_t tid = thread_lister.GetNextTID();
+    while (tid >= 0) {
+      if (SuspendThread(tid))
+        added_threads = true;
+      tid = thread_lister.GetNextTID();
+    }
+    if (thread_lister.error()) {
+      // Detach threads and fail.
+      ResumeAllThreads();
+      return false;
+    }
+    thread_lister.Reset();
+  } while (added_threads);
+  return true;
+}
+
+// Pointer to the ThreadSuspender instance for use in signal handler.
+static ThreadSuspender *thread_suspender_instance = nullptr;
+
+// Synchronous signals that should not be blocked.
+static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
+                                    SIGXCPU, SIGXFSZ };
+
+static void TracerThreadDieCallback() {
+  // Generally a call to Die() in the tracer thread should be fatal to the
+  // parent process as well, because they share the address space.
+  // This really only works correctly if all the threads are suspended at this
+  // point. So we correctly handle calls to Die() from within the callback, but
+  // not those that happen before or after the callback. Hopefully there aren't
+  // a lot of opportunities for that to happen...
+  ThreadSuspender *inst = thread_suspender_instance;
+  if (inst && stoptheworld_tracer_pid == internal_getpid()) {
+    inst->KillAllThreads();
+    thread_suspender_instance = nullptr;
+  }
+}
+
+// Signal handler to wake up suspended threads when the tracer thread dies.
+static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
+  SignalContext ctx = SignalContext::Create(siginfo, uctx);
+  VPrintf(1, "Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n",
+      signum, ctx.addr, ctx.pc, ctx.sp);
+  ThreadSuspender *inst = thread_suspender_instance;
+  if (inst) {
+    if (signum == SIGABRT)
+      inst->KillAllThreads();
+    else
+      inst->ResumeAllThreads();
+    RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+    thread_suspender_instance = nullptr;
+    atomic_store(&inst->arg->done, 1, memory_order_relaxed);
+  }
+  internal__exit((signum == SIGABRT) ? 1 : 2);
+}
+
+// Size of alternative stack for signal handlers in the tracer thread.
+static const int kHandlerStackSize = 4096;
+
+// This function will be run as a cloned task.
+static int TracerThread(void* argument) {
+  TracerThreadArgument *tracer_thread_argument =
+      (TracerThreadArgument *)argument;
+
+  internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+  // Check if parent is already dead.
+  if (internal_getppid() != tracer_thread_argument->parent_pid)
+    internal__exit(4);
+
+  // Wait for the parent thread to finish preparations.
+  tracer_thread_argument->mutex.Lock();
+  tracer_thread_argument->mutex.Unlock();
+
+  RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
+
+  ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
+  // Global pointer for the signal handler.
+  thread_suspender_instance = &thread_suspender;
+
+  // Alternate stack for signal handling.
+  InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
+  struct sigaltstack handler_stack;
+  internal_memset(&handler_stack, 0, sizeof(handler_stack));
+  handler_stack.ss_sp = handler_stack_memory.data();
+  handler_stack.ss_size = kHandlerStackSize;
+  internal_sigaltstack(&handler_stack, nullptr);
+
+  // Install our handler for synchronous signals. Other signals should be
+  // blocked by the mask we inherited from the parent thread.
+  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
+    __sanitizer_sigaction act;
+    internal_memset(&act, 0, sizeof(act));
+    act.sigaction = TracerThreadSignalHandler;
+    act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+    internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
+  }
+
+  int exit_code = 0;
+  if (!thread_suspender.SuspendAllThreads()) {
+    VReport(1, "Failed suspending threads.\n");
+    exit_code = 3;
+  } else {
+    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
+                                     tracer_thread_argument->callback_argument);
+    thread_suspender.ResumeAllThreads();
+    exit_code = 0;
+  }
+  RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+  thread_suspender_instance = nullptr;
+  atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
+  return exit_code;
+}
+
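+// Stack memory for the tracer task, with an inaccessible guard page placed
+// below the stack to catch overflows.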
+class ScopedStackSpaceWithGuard {
+ public:
+  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
+    stack_size_ = stack_size;
+    guard_size_ = GetPageSizeCached();
+    // FIXME: Omitting MAP_STACK here works in current kernels but might break
+    // in the future.
+    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
+                                   "ScopedStackWithGuard");
+    CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
+  }
+  ~ScopedStackSpaceWithGuard() {
+    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
+  }
+  void *Bottom() const {
+    return (void *)(guard_start_ + stack_size_ + guard_size_);
+  }
+
+ private:
+  uptr stack_size_;
+  uptr guard_size_;
+  uptr guard_start_;
+};
+
+// We have a limitation on the stack frame size, so some stuff had to be moved
+// into globals.
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
+
+class StopTheWorldScope {
+ public:
+  StopTheWorldScope() {
+    // Make this process dumpable. Processes that are not dumpable cannot be
+    // attached to.
+    process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
+    if (!process_was_dumpable_)
+      internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+  }
+
+  ~StopTheWorldScope() {
+    // Restore the dumpable flag.
+    if (!process_was_dumpable_)
+      internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
+  }
+
+ private:
+  int process_was_dumpable_;
+};
+
+// When sanitizer output is being redirected to a file (e.g. by using
+// log_path), the tracer should write to the parent's log instead of trying to
+// open a new file. Alert the logging code to the fact that we have a tracer.
+struct ScopedSetTracerPID {
+  explicit ScopedSetTracerPID(uptr tracer_pid) {
+    stoptheworld_tracer_pid = tracer_pid;
+    stoptheworld_tracer_ppid = internal_getpid();
+  }
+  ~ScopedSetTracerPID() {
+    stoptheworld_tracer_pid = 0;
+    stoptheworld_tracer_ppid = 0;
+  }
+};
+
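+// Suspend all threads of the current process, run |callback| on the list of
+// suspended threads, and then resume them. The work is delegated to a tracer
+// task created with clone(), which ptrace-attaches to the caller's threads.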
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+  StopTheWorldScope in_stoptheworld;
+  // Prepare the arguments for TracerThread.
+  struct TracerThreadArgument tracer_thread_argument;
+  tracer_thread_argument.callback = callback;
+  tracer_thread_argument.callback_argument = argument;
+  tracer_thread_argument.parent_pid = internal_getpid();
+  atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
+  const uptr kTracerStackSize = 2 * 1024 * 1024;
+  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
+  // Block the execution of TracerThread until after we have set ptrace
+  // permissions.
+  tracer_thread_argument.mutex.Lock();
+  // Signal handling story.
+  // We don't want async signals to be delivered to the tracer thread,
+  // so we block all async signals before creating the thread. An async signal
+    // handler can temporarily modify errno, which is shared with this thread.
+  // We ought to use pthread_sigmask here, because sigprocmask has undefined
+  // behavior in multithreaded programs. However, on linux sigprocmask is
+  // equivalent to pthread_sigmask with the exception that pthread_sigmask
+    // does not allow blocking some signals used internally in the pthread
+    // implementation. We are fine with blocking them here, as we are really
+    // not going to pthread_cancel the thread.
+  // The tracer thread should not raise any synchronous signals. But in case it
+    // does, we set up a special handler for sync signals that properly kills
+    // the parent as well. Note: we don't pass CLONE_SIGHAND to clone, so
+    // handlers in the tracer thread won't interfere with the user program.
+    // Double note: if a user does something along the lines of 'kill -11 pid',
+    // that can kill the process even if the user has set up their own handler
+    // for SEGV.
+  // Thing to watch out for: this code should not change behavior of user code
+  // in any observable way. In particular it should not override user signal
+  // handlers.
+  internal_sigfillset(&blocked_sigset);
+  for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
+    internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
+  int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+  CHECK_EQ(rv, 0);
+  uptr tracer_pid = internal_clone(
+      TracerThread, tracer_stack.Bottom(),
+      CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
+      &tracer_thread_argument, nullptr /* parent_tidptr */,
+      nullptr /* newtls */, nullptr /* child_tidptr */);
+  internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
+  int local_errno = 0;
+  if (internal_iserror(tracer_pid, &local_errno)) {
+    VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
+    tracer_thread_argument.mutex.Unlock();
+  } else {
+    ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
+    // On some systems we have to explicitly declare that we want to be traced
+    // by the tracer thread.
+#ifdef PR_SET_PTRACER
+    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
+#endif
+    // Allow the tracer thread to start.
+    tracer_thread_argument.mutex.Unlock();
+    // NOTE: errno is shared between this thread and the tracer thread.
+    // internal_waitpid() may call syscall() which can access/spoil errno,
+    // so we can't call it now. Instead we wait for the tracer thread to finish using
+    // the spin loop below. Man page for sched_yield() says "In the Linux
+    // implementation, sched_yield() always succeeds", so let's hope it does not
+    // spoil errno. Note that this spin loop runs only for brief periods before
+    // the tracer thread has suspended us and when it starts unblocking threads.
+    while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
+      sched_yield();
+    // Now the tracer thread is about to exit and will not touch errno again,
+    // so it is safe to wait for it.
+    for (;;) {
+      uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
+      if (!internal_iserror(waitpid_status, &local_errno))
+        break;
+      if (local_errno == EINTR)
+        continue;
+      VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+              local_errno);
+      break;
+    }
+  }
+}
+
+// Platform-specific methods from SuspendedThreadsList.
+#if SANITIZER_ANDROID && defined(__arm__)
+typedef pt_regs regs_struct;
+#define REG_SP ARM_sp
+
+#elif SANITIZER_LINUX && defined(__arm__)
+typedef user_regs regs_struct;
+#define REG_SP uregs[13]
+
+#elif defined(__i386__) || defined(__x86_64__)
+typedef user_regs_struct regs_struct;
+#if defined(__i386__)
+#define REG_SP esp
+#else
+#define REG_SP rsp
+#endif
+
+#elif defined(__powerpc__) || defined(__powerpc64__)
+typedef pt_regs regs_struct;
+#define REG_SP gpr[PT_R1]
+
+#elif defined(__mips__)
+typedef struct user regs_struct;
+# if SANITIZER_ANDROID
+#  define REG_SP regs[EF_R29]
+# else
+#  define REG_SP regs[EF_REG29]
+# endif
+
+#elif defined(__aarch64__)
+typedef struct user_pt_regs regs_struct;
+#define REG_SP sp
+#define ARCH_IOVEC_FOR_GETREGSET
+
+#else
+#error "Unsupported architecture"
+#endif // SANITIZER_ANDROID && defined(__arm__)
+
+int SuspendedThreadsList::GetRegistersAndSP(uptr index,
+                                            uptr *buffer,
+                                            uptr *sp) const {
+  pid_t tid = GetThreadID(index);
+  regs_struct regs;
+  int pterrno;
+#ifdef ARCH_IOVEC_FOR_GETREGSET
+  struct iovec regset_io;
+  regset_io.iov_base = &regs;
+  regset_io.iov_len = sizeof(regs_struct);
+  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
+                                (void*)NT_PRSTATUS, (void*)&regset_io),
+                                &pterrno);
+#else
+  bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
+                                &regs), &pterrno);
+#endif
+  if (isErr) {
+    VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
+            pterrno);
+    return -1;
+  }
+
+  *sp = regs.REG_SP;
+  internal_memcpy(buffer, &regs, sizeof(regs));
+  return 0;
+}
+
+uptr SuspendedThreadsList::RegisterCount() {
+  return sizeof(regs_struct) / sizeof(uptr);
+}
+} // namespace __sanitizer
+
+#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
+        // || defined(__aarch64__) || defined(__powerpc64__))
diff -pruN 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc
--- 1:3.8.1-27/.pc/pr81066.diff/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc	1970-01-01 00:00:00.000000000 +0000
+++ 1:3.8.1-27ubuntu1/.pc/pr81066.diff/compiler-rt/lib/tsan/rtl/tsan_platform_linux.cc	2015-12-14 16:26:00.000000000 +0000
@@ -0,0 +1,366 @@
+//===-- tsan_platform_linux.cc --------------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Linux- and FreeBSD-specific code.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+#include <dlfcn.h>
+#if SANITIZER_LINUX
+#define __need_res_state
+#include <resolv.h>
+#endif
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
+
+namespace __tsan {
+
+static uptr g_data_start;
+static uptr g_data_end;
+
+#ifdef TSAN_RUNTIME_VMA
+// Runtime detected VMA size.
+uptr vmaSize;
+#endif
+
+enum {
+  MemTotal  = 0,
+  MemShadow = 1,
+  MemMeta   = 2,
+  MemFile   = 3,
+  MemMmap   = 4,
+  MemTrace  = 5,
+  MemHeap   = 6,
+  MemOther  = 7,
+  MemCount  = 8,
+};
+
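+// Attribute the resident memory of the page at |p| to one of the Mem*
+// categories defined above.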
+void FillProfileCallback(uptr p, uptr rss, bool file,
+                         uptr *mem, uptr stats_size) {
+  mem[MemTotal] += rss;
+  if (p >= ShadowBeg() && p < ShadowEnd())
+    mem[MemShadow] += rss;
+  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
+    mem[MemMeta] += rss;
+#ifndef SANITIZER_GO
+  else if (p >= HeapMemBeg() && p < HeapMemEnd())
+    mem[MemHeap] += rss;
+  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
+    mem[file ? MemFile : MemMmap] += rss;
+  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
+    mem[file ? MemFile : MemMmap] += rss;
+#else
+  else if (p >= AppMemBeg() && p < AppMemEnd())
+    mem[file ? MemFile : MemMmap] += rss;
+#endif
+  else if (p >= TraceMemBeg() && p < TraceMemEnd())
+    mem[MemTrace] += rss;
+  else
+    mem[MemOther] += rss;
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+  uptr mem[MemCount];
+  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
+  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+  StackDepotStats *stacks = StackDepotGetStats();
+  internal_snprintf(buf, buf_size,
+      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+      mem[MemHeap] >> 20, mem[MemOther] >> 20,
+      stacks->allocated >> 20, stacks->n_uniq_ids,
+      nlive, nthread);
+}
+
+#if SANITIZER_LINUX
+void FlushShadowMemoryCallback(
+    const SuspendedThreadsList &suspended_threads_list,
+    void *argument) {
+  FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
+}
+#endif
+
+void FlushShadowMemory() {
+#if SANITIZER_LINUX
+  StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
+}
+
+#ifndef SANITIZER_GO
+// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Accesses to .rodata can't race, so this saves time, memory and trace space.
+static void MapRodata() {
+  // First create temp file.
+  const char *tmpdir = GetEnv("TMPDIR");
+  if (tmpdir == 0)
+    tmpdir = GetEnv("TEST_TMPDIR");
+#ifdef P_tmpdir
+  if (tmpdir == 0)
+    tmpdir = P_tmpdir;
+#endif
+  if (tmpdir == 0)
+    return;
+  char name[256];
+  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
+                    tmpdir, (int)internal_getpid());
+  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+  if (internal_iserror(openrv))
+    return;
+  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
+  fd_t fd = openrv;
+  // Fill the file with kShadowRodata.
+  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
+  InternalScopedBuffer<u64> marker(kMarkerSize);
+  // volatile to prevent insertion of memset
+  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+    *p = kShadowRodata;
+  internal_write(fd, marker.data(), marker.size());
+  // Map the file into memory.
+  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
+                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
+  if (internal_iserror(page)) {
+    internal_close(fd);
+    return;
+  }
+  // Map the file into shadow of .rodata sections.
+  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+  uptr start, end, offset, prot;
+  // Reusing the buffer 'name'.
+  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
+    if (name[0] != 0 && name[0] != '['
+        && (prot & MemoryMappingLayout::kProtectionRead)
+        && (prot & MemoryMappingLayout::kProtectionExecute)
+        && !(prot & MemoryMappingLayout::kProtectionWrite)
+        && IsAppMem(start)) {
+      // Assume it's .rodata
+      char *shadow_start = (char*)MemToShadow(start);
+      char *shadow_end = (char*)MemToShadow(end);
+      for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
+        internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
+                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+      }
+    }
+  }
+  internal_close(fd);
+}
+
+void InitializeShadowMemoryPlatform() {
+  MapRodata();
+}
+
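+// Guess the extent of the data segment (globals) from /proc/self/maps so that
+// IsGlobalVar() can answer address-range queries.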
+static void InitDataSeg() {
+  MemoryMappingLayout proc_maps(true);
+  uptr start, end, offset;
+  char name[128];
+#if SANITIZER_FREEBSD
+  // On FreeBSD BSS is usually the last block allocated within the
+  // low range and heap is the last block allocated within the range
+  // 0x800000000-0x8ffffffff.
+  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+                        /*protection*/ 0)) {
+    DPrintf("%p-%p %p %s\n", start, end, offset, name);
+    if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
+        name[0] == '\0') {
+      g_data_start = start;
+      g_data_end = end;
+    }
+  }
+#else
+  bool prev_is_data = false;
+  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
+                        /*protection*/ 0)) {
+    DPrintf("%p-%p %p %s\n", start, end, offset, name);
+    bool is_data = offset != 0 && name[0] != 0;
+    // BSS may get merged with [heap] in /proc/self/maps. This is not very
+    // reliable.
+    bool is_bss = offset == 0 &&
+      (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
+    if (g_data_start == 0 && is_data)
+      g_data_start = start;
+    if (is_bss)
+      g_data_end = end;
+    prev_is_data = is_data;
+  }
+#endif
+  DPrintf("guessed data_start=%p data_end=%p\n",  g_data_start, g_data_end);
+  CHECK_LT(g_data_start, g_data_end);
+  CHECK_GE((uptr)&g_data_start, g_data_start);
+  CHECK_LT((uptr)&g_data_start, g_data_end);
+}
+
+#endif  // #ifndef SANITIZER_GO
+
+void InitializePlatformEarly() {
+#ifdef TSAN_RUNTIME_VMA
+  vmaSize =
+    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+#if defined(__aarch64__)
+  if (vmaSize != 39 && vmaSize != 42) {
+    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+    Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+    Die();
+  }
+#elif defined(__powerpc64__)
+  if (vmaSize != 44 && vmaSize != 46) {
+    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+    Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize);
+    Die();
+  }
+#endif
+#endif
+}
+
+void InitializePlatform() {
+  DisableCoreDumperIfNecessary();
+
+  // Go maps shadow memory lazily and works fine with limited address space.
+  // Unlimited stack is not a problem either, because the executable
+  // is not compiled with -pie.
+  if (kCppMode) {
+    bool reexec = false;
+    // TSan doesn't play well with unlimited stack size (as stack
+    // overlaps with shadow memory). If we detect unlimited stack size,
+    // we re-exec the program with limited stack size as a best effort.
+    if (StackSizeIsUnlimited()) {
+      const uptr kMaxStackSize = 32 * 1024 * 1024;
+      VReport(1, "Program is run with unlimited stack size, which wouldn't "
+                 "work with ThreadSanitizer.\n"
+                 "Re-execing with stack size limited to %zd bytes.\n",
+              kMaxStackSize);
+      SetStackSizeLimitInBytes(kMaxStackSize);
+      reexec = true;
+    }
+
+    if (!AddressSpaceIsUnlimited()) {
+      Report("WARNING: Program is run with limited virtual address space,"
+             " which wouldn't work with ThreadSanitizer.\n");
+      Report("Re-execing with unlimited virtual address space.\n");
+      SetAddressSpaceUnlimited();
+      reexec = true;
+    }
+    if (reexec)
+      ReExec();
+  }
+
+#ifndef SANITIZER_GO
+  CheckAndProtect();
+  InitTlsSize();
+  InitDataSeg();
+#endif
+}
+
+bool IsGlobalVar(uptr addr) {
+  return g_data_start && addr >= g_data_start && addr < g_data_end;
+}
+
+#ifndef SANITIZER_GO
+// Extract file descriptors passed to glibc internal __res_iclose function.
+// This is required to properly "close" the fds, because we do not see internal
+// closes within glibc. The code is a pure hack.
+int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+  int cnt = 0;
+  __res_state *statp = (__res_state*)state;
+  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+      fds[cnt++] = statp->_u._ext.nssocks[i];
+  }
+  return cnt;
+#else
+  return 0;
+#endif
+}
+
+// Extract file descriptors passed via UNIX domain sockets.
+// This is required to properly handle "open" of these fds.
+// See 'man recvmsg' and 'man 3 cmsg'.
+int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
+  int res = 0;
+  msghdr *msg = (msghdr*)msgp;
+  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
+      continue;
+    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
+    for (int i = 0; i < n; i++) {
+      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
+      if (res == nfd)
+        return res;
+    }
+  }
+  return res;
+}
+
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+    void *abstime), void *c, void *m, void *abstime,
+    void(*cleanup)(void *arg), void *arg) {
+  // pthread_cleanup_push/pop are a hardcore macro mess.
+  // We can't intercept or call them without including pthread.h.
+  int res;
+  pthread_cleanup_push(cleanup, arg);
+  res = fn(c, m, abstime);
+  pthread_cleanup_pop(0);
+  return res;
+}
+#endif
+
+#ifndef SANITIZER_GO
+void ReplaceSystemMalloc() { }
+#endif
+
+}  // namespace __tsan
+
+#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD
