diff -pruN 1.6.3-1/api/go1.6.txt 1.6.3-1ubuntu1/api/go1.6.txt
--- 1.6.3-1/api/go1.6.txt 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/api/go1.6.txt 2016-07-21 13:36:09.000000000 +0000
@@ -273,3 +273,128 @@ pkg text/template, method (ExecError) Er
pkg text/template, type ExecError struct
pkg text/template, type ExecError struct, Err error
pkg text/template, type ExecError struct, Name string
+pkg debug/elf, const R_390_12 = 2
+pkg debug/elf, const R_390_12 R_390
+pkg debug/elf, const R_390_16 = 3
+pkg debug/elf, const R_390_16 R_390
+pkg debug/elf, const R_390_20 = 57
+pkg debug/elf, const R_390_20 R_390
+pkg debug/elf, const R_390_32 = 4
+pkg debug/elf, const R_390_32 R_390
+pkg debug/elf, const R_390_64 = 22
+pkg debug/elf, const R_390_64 R_390
+pkg debug/elf, const R_390_8 = 1
+pkg debug/elf, const R_390_8 R_390
+pkg debug/elf, const R_390_COPY = 9
+pkg debug/elf, const R_390_COPY R_390
+pkg debug/elf, const R_390_GLOB_DAT = 10
+pkg debug/elf, const R_390_GLOB_DAT R_390
+pkg debug/elf, const R_390_GOT12 = 6
+pkg debug/elf, const R_390_GOT12 R_390
+pkg debug/elf, const R_390_GOT16 = 15
+pkg debug/elf, const R_390_GOT16 R_390
+pkg debug/elf, const R_390_GOT20 = 58
+pkg debug/elf, const R_390_GOT20 R_390
+pkg debug/elf, const R_390_GOT32 = 7
+pkg debug/elf, const R_390_GOT32 R_390
+pkg debug/elf, const R_390_GOT64 = 24
+pkg debug/elf, const R_390_GOT64 R_390
+pkg debug/elf, const R_390_GOTENT = 26
+pkg debug/elf, const R_390_GOTENT R_390
+pkg debug/elf, const R_390_GOTOFF = 13
+pkg debug/elf, const R_390_GOTOFF R_390
+pkg debug/elf, const R_390_GOTOFF16 = 27
+pkg debug/elf, const R_390_GOTOFF16 R_390
+pkg debug/elf, const R_390_GOTOFF64 = 28
+pkg debug/elf, const R_390_GOTOFF64 R_390
+pkg debug/elf, const R_390_GOTPC = 14
+pkg debug/elf, const R_390_GOTPC R_390
+pkg debug/elf, const R_390_GOTPCDBL = 21
+pkg debug/elf, const R_390_GOTPCDBL R_390
+pkg debug/elf, const R_390_GOTPLT12 = 29
+pkg debug/elf, const R_390_GOTPLT12 R_390
+pkg debug/elf, const R_390_GOTPLT16 = 30
+pkg debug/elf, const R_390_GOTPLT16 R_390
+pkg debug/elf, const R_390_GOTPLT20 = 59
+pkg debug/elf, const R_390_GOTPLT20 R_390
+pkg debug/elf, const R_390_GOTPLT32 = 31
+pkg debug/elf, const R_390_GOTPLT32 R_390
+pkg debug/elf, const R_390_GOTPLT64 = 32
+pkg debug/elf, const R_390_GOTPLT64 R_390
+pkg debug/elf, const R_390_GOTPLTENT = 33
+pkg debug/elf, const R_390_GOTPLTENT R_390
+pkg debug/elf, const R_390_GOTPLTOFF16 = 34
+pkg debug/elf, const R_390_GOTPLTOFF16 R_390
+pkg debug/elf, const R_390_GOTPLTOFF32 = 35
+pkg debug/elf, const R_390_GOTPLTOFF32 R_390
+pkg debug/elf, const R_390_GOTPLTOFF64 = 36
+pkg debug/elf, const R_390_GOTPLTOFF64 R_390
+pkg debug/elf, const R_390_JMP_SLOT = 11
+pkg debug/elf, const R_390_JMP_SLOT R_390
+pkg debug/elf, const R_390_NONE = 0
+pkg debug/elf, const R_390_NONE R_390
+pkg debug/elf, const R_390_PC16 = 16
+pkg debug/elf, const R_390_PC16 R_390
+pkg debug/elf, const R_390_PC16DBL = 17
+pkg debug/elf, const R_390_PC16DBL R_390
+pkg debug/elf, const R_390_PC32 = 5
+pkg debug/elf, const R_390_PC32 R_390
+pkg debug/elf, const R_390_PC32DBL = 19
+pkg debug/elf, const R_390_PC32DBL R_390
+pkg debug/elf, const R_390_PC64 = 23
+pkg debug/elf, const R_390_PC64 R_390
+pkg debug/elf, const R_390_PLT16DBL = 18
+pkg debug/elf, const R_390_PLT16DBL R_390
+pkg debug/elf, const R_390_PLT32 = 8
+pkg debug/elf, const R_390_PLT32 R_390
+pkg debug/elf, const R_390_PLT32DBL = 20
+pkg debug/elf, const R_390_PLT32DBL R_390
+pkg debug/elf, const R_390_PLT64 = 25
+pkg debug/elf, const R_390_PLT64 R_390
+pkg debug/elf, const R_390_RELATIVE = 12
+pkg debug/elf, const R_390_RELATIVE R_390
+pkg debug/elf, const R_390_TLS_DTPMOD = 54
+pkg debug/elf, const R_390_TLS_DTPMOD R_390
+pkg debug/elf, const R_390_TLS_DTPOFF = 55
+pkg debug/elf, const R_390_TLS_DTPOFF R_390
+pkg debug/elf, const R_390_TLS_GD32 = 40
+pkg debug/elf, const R_390_TLS_GD32 R_390
+pkg debug/elf, const R_390_TLS_GD64 = 41
+pkg debug/elf, const R_390_TLS_GD64 R_390
+pkg debug/elf, const R_390_TLS_GDCALL = 38
+pkg debug/elf, const R_390_TLS_GDCALL R_390
+pkg debug/elf, const R_390_TLS_GOTIE12 = 42
+pkg debug/elf, const R_390_TLS_GOTIE12 R_390
+pkg debug/elf, const R_390_TLS_GOTIE20 = 60
+pkg debug/elf, const R_390_TLS_GOTIE20 R_390
+pkg debug/elf, const R_390_TLS_GOTIE32 = 43
+pkg debug/elf, const R_390_TLS_GOTIE32 R_390
+pkg debug/elf, const R_390_TLS_GOTIE64 = 44
+pkg debug/elf, const R_390_TLS_GOTIE64 R_390
+pkg debug/elf, const R_390_TLS_IE32 = 47
+pkg debug/elf, const R_390_TLS_IE32 R_390
+pkg debug/elf, const R_390_TLS_IE64 = 48
+pkg debug/elf, const R_390_TLS_IE64 R_390
+pkg debug/elf, const R_390_TLS_IEENT = 49
+pkg debug/elf, const R_390_TLS_IEENT R_390
+pkg debug/elf, const R_390_TLS_LDCALL = 39
+pkg debug/elf, const R_390_TLS_LDCALL R_390
+pkg debug/elf, const R_390_TLS_LDM32 = 45
+pkg debug/elf, const R_390_TLS_LDM32 R_390
+pkg debug/elf, const R_390_TLS_LDM64 = 46
+pkg debug/elf, const R_390_TLS_LDM64 R_390
+pkg debug/elf, const R_390_TLS_LDO32 = 52
+pkg debug/elf, const R_390_TLS_LDO32 R_390
+pkg debug/elf, const R_390_TLS_LDO64 = 53
+pkg debug/elf, const R_390_TLS_LDO64 R_390
+pkg debug/elf, const R_390_TLS_LE32 = 50
+pkg debug/elf, const R_390_TLS_LE32 R_390
+pkg debug/elf, const R_390_TLS_LE64 = 51
+pkg debug/elf, const R_390_TLS_LE64 R_390
+pkg debug/elf, const R_390_TLS_LOAD = 37
+pkg debug/elf, const R_390_TLS_LOAD R_390
+pkg debug/elf, const R_390_TLS_TPOFF = 56
+pkg debug/elf, const R_390_TLS_TPOFF R_390
+pkg debug/elf, method (R_390) GoString() string
+pkg debug/elf, method (R_390) String() string
+pkg debug/elf, type R_390 int
diff -pruN 1.6.3-1/debian/changelog 1.6.3-1ubuntu1/debian/changelog
--- 1.6.3-1/debian/changelog 2016-07-19 03:26:05.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/changelog 2016-07-20 10:09:44.000000000 +0000
@@ -1,3 +1,15 @@
+golang-1.6 (1.6.3-1ubuntu1) yakkety; urgency=medium
+
+ * Merge from Debian unstable. Remaining changes:
+ - s390x support from IBM (and related packaging changes).
+ - d/patches/0002-no-pie-when-race.patch to fix amd64 FTBFS.
+ - Do not distribute un-built from source race detector runtime files and
+ recommend golang-race-detector-runtime instead (see Debian bug #807455).
+ - Backport some shared library fixes from upstream tip.
+ - Build standard library into shared library.
+
+ -- Michael Hudson-Doyle Wed, 20 Jul 2016 14:09:10 +1200
+
golang-1.6 (1.6.3-1) unstable; urgency=medium
[ Michael Hudson-Doyle ]
@@ -19,6 +31,27 @@ golang-1.6 (1.6.2-2) unstable; urgency=m
-- Tianon Gravi Thu, 23 Jun 2016 20:01:00 -0700
+golang-1.6 (1.6.2-1ubuntu2) yakkety; urgency=medium
+
+ * Build standard library into shared library. (LP: #1508122)
+ * Add two patches with fixes for Go shared libraries on i386 and ppc64el:
+ - d/patches/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch
+ - d/patches/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch
+
+ -- Michael Hudson-Doyle Fri, 10 Jun 2016 13:02:04 +1200
+
+golang-1.6 (1.6.2-1ubuntu1) yakkety; urgency=medium
+
+ * Merge from Debian unstable. Remaining changes:
+ - s390x support from IBM (and related packaging changes).
+ - d/patches/0002-no-pie-when-race.patch to fix amd64 FTBFS.
+ - Do not distribute un-built from source race detector runtime files and
+ recommend golang-race-detector-runtime instead (see Debian bug #807455).
+ - debian/source/lintian-overrides: silence some extra source-missing false
+ positives.
+
+ -- Michael Hudson-Doyle Tue, 14 Jun 2016 14:02:55 +1200
+
golang-1.6 (1.6.2-1) unstable; urgency=medium
* Update to 1.6.2 upstream release (Closes: #825696)
@@ -59,6 +92,108 @@ golang (2:1.6.1-1) unstable; urgency=med
-- Tianon Gravi Tue, 12 Apr 2016 23:06:43 -0700
+golang-1.6 (1.6.2-0ubuntu5) yakkety; urgency=medium
+
+ * cgo and epoll fixes for s390x. (LP: #1591010)
+
+ -- Michael Hudson-Doyle Fri, 10 Jun 2016 13:02:04 +1200
+
+golang-1.6 (1.6.2-0ubuntu4) yakkety; urgency=medium
+
+ * Fix d/patches/0002-no-pie-when-race.patch.
+
+ -- Michael Hudson-Doyle Fri, 29 Apr 2016 10:45:00 +1200
+
+golang-1.6 (1.6.2-0ubuntu3) yakkety; urgency=medium
+
+ * Replace d/patches/0002-no-pie-when-race.patch with version that went
+ upstream.
+
+ -- Michael Hudson-Doyle Fri, 29 Apr 2016 09:05:51 +1200
+
+golang-1.6 (1.6.2-0ubuntu2) yakkety; urgency=medium
+
+ * Add d/patches/0002-no-pie-when-race.patch to fix amd64 FTBFS. (LP: #1574916)
+
+ -- Michael Hudson-Doyle Tue, 26 Apr 2016 13:57:00 +1200
+
+golang-1.6 (1.6.2-0ubuntu1) yakkety; urgency=medium
+
+ * New upstream release. (LP: #1567096)
+ * Drop d/patches/0002-maxcpus-fix.patch, included in release.
+
+ -- Michael Hudson-Doyle Tue, 26 Apr 2016 10:47:26 +1200
+
+golang-1.6 (1.6.1-0ubuntu1) xenial; urgency=medium
+
+ * New upstream release.
+ * Drop d/patches/0003-crypto-dsa-eliminate-invalid-PublicKey-early.patch,
+ included in release.
+
+ -- Michael Hudson-Doyle Wed, 13 Apr 2016 14:03:09 +1200
+
+golang-1.6 (1.6-0ubuntu5) xenial; urgency=medium
+
+ * Fix GOVER calculation in debian/rules to only pull out MAJOR.MINOR.
+ * Two patches backported from upstream tip:
+ - d/patches/0002-maxcpus-fix.patch (LP: #1565978)
+ - d/patches/0003-crypto-dsa-eliminate-invalid-PublicKey-early.patch
+
+ -- Michael Hudson-Doyle Fri, 08 Apr 2016 13:54:12 +1200
+
+golang-1.6 (1.6-0ubuntu4) xenial; urgency=medium
+
+ * Update d/patches/0001-s390x-port.patch from IBM, making syscall types
+ match those that will be in Go 1.7.
+
+ -- Michael Hudson-Doyle Mon, 04 Apr 2016 15:47:35 +1200
+
+golang-1.6 (1.6-0ubuntu3) xenial; urgency=medium
+
+ * Fix broken symlinks in golang-X.Y-doc.
+
+ -- Michael Hudson-Doyle Wed, 30 Mar 2016 14:04:04 +1300
+
+golang-1.6 (1.6-0ubuntu2) xenial; urgency=medium
+
+ * Update d/patches/0001-s390x-port.patch from IBM's repo. (LP: #1561271)
+ * Strip the binaries as it has worked for the last five years or so and
+ upstream sees no reason to disable it. (LP: #1561343)
+ * Remove Breaks/Replaces of trusty golang packages (moves to golang-defaults).
+ * Recommend golang-X.Y-race-detector-runtime rather than unversioned package.
+
+ -- Michael Hudson-Doyle Wed, 23 Mar 2016 20:10:13 +1300
+
+golang-1.6 (1.6-0ubuntu1) xenial; urgency=medium
+
+ * Build golang version-specific packages (LP: #1555856)
+ * Things that (conceptually at least) move to new golang version independent
+ golang-defaults source package:
+ - Drop "Build empty golang-go and golang-src packages on architectures without
+ golang".
+ - Remove man pages.
+ - Do not suggest golang-golang-x-tools.
+ - Stop using alternatives to manage /usr/bin/go.
+ * sed trickery in debian/rules to support easy changes to new golang versions.
+
+ -- Michael Hudson-Doyle Mon, 07 Mar 2016 17:01:40 +1300
+
+golang (2:1.6-1ubuntu1) xenial; urgency=medium
+
+ * Merge from Debian unstable (LP: #1551489). Remaining changes:
+ - Build empty golang-go and golang-src packages on architectures without
+ golang support and depend on gccgo instead (see Debian bug #780355).
+ - Do not distribute un-built from source race detector runtime files and
+ recommend golang-race-detector-runtime instead (see Debian bug #807455).
+ - debian/source/lintian-overrides: silence some extra source-missing false
+ positives.
+ - Breaks/Replaces: older golang-golang-x-tools, not Conflicts, to ensure
+ smooth upgrades.
+ - Add d/patches/0001-s390x-port.patch from IBM's repo.
+ - Updates for s390x support.
+
+ -- Michael Hudson-Doyle Tue, 01 Mar 2016 13:35:02 +1300
+
golang (2:1.6-1) unstable; urgency=medium
* Update to 1.6 upstream release (thanks Hilko!)
@@ -81,6 +216,66 @@ golang (2:1.6-1) unstable; urgency=mediu
-- Tianon Gravi Mon, 29 Feb 2016 16:10:32 -0800
+golang (2:1.6-0ubuntu3) xenial; urgency=medium
+
+ * Update d/patches/0001-s390x-port.patch from IBM's repo.
+
+ -- Matthias Klose Sun, 21 Feb 2016 16:50:57 +0100
+
+golang (2:1.6-0ubuntu1) xenial; urgency=medium
+
+ * Go 1.6 release.
+
+ -- Matthias Klose Sat, 20 Feb 2016 18:56:52 +0100
+
+golang (2:1.6~rc2-0ubuntu1) xenial; urgency=medium
+
+ * New upstream version. (LP: #1541660)
+ * Update d/patches/0001-s390x-port.patch from IBM's repo.
+
+ -- Michael Hudson-Doyle Thu, 04 Feb 2016 14:06:36 +1300
+
+golang (2:1.6~rc1-0ubuntu1) xenial; urgency=medium
+
+ * New upstream version.
+ * Update d/patches/0001-s390x-port.patch from IBM's repo.
+
+ -- Michael Hudson-Doyle Fri, 29 Jan 2016 10:22:32 +1300
+
+golang (2:1.6~beta2-0ubuntu1) xenial; urgency=medium
+
+ * New upstream version.
+ - Update debian/rules clean to new location of generated file.
+ * Dropped changes:
+ - d/patches/armhf-elf-header.patch (fixed properly upstream).
+ - d/patches/skip-userns-tests-when-chrooted.patch (included upstream).
+ * Add:
+ - d/patches/0001-s390x-port.patch
+ * Updates for s390x support.
+
+ -- Michael Hudson-Doyle Sat, 16 Jan 2016 19:59:59 +1300
+
+golang (2:1.5.3-1ubuntu1) xenial; urgency=low
+
+ * Merge from Debian unstable. Remaining changes:
+ - d/patches/armhf-elf-header.patch (see upstream bug
+ https://github.com/golang/go/issues/7094)
+ - debian/copyright: updated copyright file to fix some lintian warnings
+ (see Debian bug #807304)
+ - Build empty golang-go and golang-src packages on architectures without
+ golang support and depend on gccgo instead (see Debian bug #780355)
+ - Do not distribute un-built from source race detector runtime files and
+ recommend golang-race-detector-runtime instead (see Debian bug #807455)
+ - debian/source/lintian-overrides: silence some extra source-missing false
+ positives.
+ - Breaks/Replaces: older golang-golang-x-tools, not Conflicts, to ensure
+ smooth upgrades.
+ - d/patches/skip-userns-tests-when-chrooted.patch (see Debian bug #807303)
+ - Do not include race enabled packages in golang-go deb (see Debian bug
+ #807294)
+
+ -- Steve Langasek Fri, 15 Jan 2016 14:04:35 -0800
+
golang (2:1.5.3-1) unstable; urgency=high
* Update to 1.5.3 upstream release
@@ -90,6 +285,31 @@ golang (2:1.5.3-1) unstable; urgency=hig
-- Tianon Gravi Thu, 14 Jan 2016 07:41:44 -0800
+golang (2:1.5.2-1ubuntu1) xenial; urgency=low
+
+ * Merge from Debian unstable (LP: #1524165). Remaining changes:
+ - d/patches/armhf-elf-header.patch (see upstream bug
+ https://github.com/golang/go/issues/7094)
+ - debian/copyright: updated copyright file to fix some lintian warnings
+ (see Debian bug #807304)
+ - Build empty golang-go and golang-src packages on architectures without
+ golang support and depend on gccgo instead (see Debian bug #780355)
+ - Do not distribute un-built from source race detector runtime files and
+ recommend golang-race-detector-runtime instead (see Debian bug #807455)
+ - debian/source/lintian-overrides: silence some extra source-missing false
+ positives.
+ - Breaks/Replaces: older golang-golang-x-tools, not Conflicts, to ensure
+ smooth upgrades.
+ * Dropped changes, included upstream:
+ - d/patches/qemu-compat.patch
+ - d/patches/support-new-relocations.patch
+ * Add a patch to avoid build failures:
+ - d/patches/skip-userns-tests-when-chrooted.patch (see Debian bug #807303)
+ * Do not include race enabled packages in golang-go deb (see Debian bug
+ #807294)
+
+ -- Michael Hudson-Doyle Thu, 07 Jan 2016 10:11:50 +1300
+
golang (2:1.5.2-1) unstable; urgency=medium
* Update to 1.5.2 upstream release (Closes: #807136)
@@ -124,6 +344,73 @@ golang (2:1.5.1-1) unstable; urgency=med
-- Tianon Gravi Sat, 24 Oct 2015 10:22:02 -0700
+golang (2:1.5.1-0ubuntu4) xenial; urgency=medium
+
+ * Add d/patches/support-new-relocations.patch to fix ftbfs on xenial.
+
+ -- Michael Hudson-Doyle Mon, 16 Nov 2015 09:43:57 +1300
+
+golang (2:1.5.1-0ubuntu3) xenial; urgency=medium
+
+ * Add d/patches/qemu-compat.patch (LP: #1501651)
+
+ -- Michael Hudson-Doyle Fri, 13 Nov 2015 10:26:17 +1300
+
+golang (2:1.5.1-0ubuntu2) wily; urgency=medium
+
+ * Recommends: golang-race-detector-runtime on amd64. (LP: #1506393)
+
+ -- Michael Hudson-Doyle Thu, 15 Oct 2015 22:35:25 +1300
+
+golang (2:1.5.1-0ubuntu1) wily; urgency=medium
+
+ * New upstream release.
+
+ -- Michael Hudson-Doyle Wed, 09 Sep 2015 21:11:47 +1200
+
+golang (2:1.5-0ubuntu1) wily; urgency=medium
+
+ * New upstream release.
+ - Drop debian/patches/disable-duffzero-ppc64el.patch
+ * Breaks/Replaces: older golang-go.tools (LP: 1486560)
+
+ -- Michael Hudson-Doyle Fri, 21 Aug 2015 11:48:02 +1200
+
+golang (2:1.5~rc1-0ubuntu2) wily; urgency=medium
+
+ * debian/control: Breaks/Replaces golang-go.tools, so as to permit proper
+ upgrade.
+
+ -- Mathieu Trudel-Lapierre Fri, 28 Aug 2015 09:26:01 -0400
+
+golang (2:1.5~rc1-0ubuntu1) wily; urgency=low
+
+ [ Tianon Gravi ]
+ * Upload to experimental.
+ * Update to 1.5rc1 upstream release.
+ - Compiler and runtime written entirely in Go.
+ - Concurrent garbage collector.
+ - GOMAXPROCS=runtime.NumCPU() by default.
+ - "internal" packages for all, not just core.
+ - Experimental "vendoring" support.
+ - Cross-compilation no longer requires a complete rebuild of the stdlib in
+ GOROOT, and thus the golang-go-GOHOST-GOARCH packages are removed.
+ * Sync debian/copyright with the Ubuntu delta. (thanks doko!)
+
+ [ Michael Hudson-Doyle ]
+ * Update GO{HOST,}ARCH computation
+ * Breaks/Replaces: older golang-golang-x-tools
+ * Two patches:
+ - correct ELF header on armhf
+ - disable duffzero on ppc64el
+
+ [ Mathieu Trudel-Lapierre ]
+ * debian/copyright: updated copyright file to fix some lintian warnings.
+ * debian/source/lintian-overrides: silence some extra source-missing false
+ positives.
+
+ -- Michael Hudson-Doyle Tue, 18 Aug 2015 16:05:37 +1200
+
golang (2:1.4.3-3) unstable; urgency=medium
* Fix FTBFS for non-amd64 architectures due to handling of "-race".
@@ -187,6 +474,24 @@ golang (2:1.4.2-4) unstable; urgency=hig
-- Tianon Gravi Mon, 14 Sep 2015 12:27:57 -0700
+golang (2:1.4.2-3ubuntu2) wily; urgency=medium
+
+ * Fix installation of fake golang-go package.
+
+ -- Matthias Klose Wed, 27 May 2015 12:54:55 +0200
+
+golang (2:1.4.2-3ubuntu1) wily; urgency=medium
+
+ * Merge with Debian; remaining changes:
+ - Build empty golang-go and golang-src packages on architectures
+ without golang support, and depend on gccgo instead.
+ - golang-go: Conflict with golang-go (<< 2:1.3.3-1ubuntu2).
+ - 016-armhf-elf-header.patch: Use correct ELF header for armhf binaries.
+ - d/copyright: Amendments for full compliance with copyright format.
+ * Remove generated override files.
+
+ -- Matthias Klose Tue, 26 May 2015 15:05:11 +0200
+
golang (2:1.4.2-3) unstable; urgency=medium
* Add missing "prerm" for our new alternatives (thanks piuparts).
@@ -220,6 +525,41 @@ golang (2:1.4.1-1~exp1) experimental; ur
-- Tianon Gravi Fri, 16 Jan 2015 00:52:10 -0500
+golang (2:1.3.3-1ubuntu4) vivid; urgency=medium
+
+ * Build empty golang-go and golang-src packages on architectures
+ without golang support, and depend on gccgo instead.
+
+ -- Matthias Klose Tue, 10 Mar 2015 21:46:25 +0100
+
+golang (2:1.3.3-1ubuntu3) vivid; urgency=medium
+
+ * Regenerate the control file.
+
+ -- Matthias Klose Tue, 03 Mar 2015 14:08:51 +0100
+
+golang (2:1.3.3-1ubuntu2) vivid; urgency=medium
+
+ * Install gofmt as an alternative.
+ * Update the update-alternatives dance.
+ * golang-go: Conflict with golang-go (<< 2:1.3.3-1ubuntu2).
+ * Bump the alternative priority to 100, see Debian #779503.
+
+ -- Matthias Klose Tue, 03 Mar 2015 13:29:34 +0100
+
+golang (2:1.3.3-1ubuntu1) vivid; urgency=low
+
+ * Merge from Debian unstable. (LP: #1407409) Remaining changes:
+ - 016-armhf-elf-header.patch: Use correct ELF header for armhf binaries.
+ - Support co-installability with gccgo-go tool:
+ - d/rules,golang-go.install: Rename bin/go -> bin/golang-go
+ - d/golang-go.{postinst,prerm}: Install/remove /usr/bin/go using
+ alternatives.
+ - d/copyright: Amendments for full compliance with copyright format.
+ - d/control: Demote golang-go.tools to Suggests to support Ubuntu MIR.
+
+ -- Gianfranco Costamagna Sun, 04 Jan 2015 12:18:38 +0100
+
golang (2:1.3.3-1) unstable; urgency=medium
* New upstream version (https://code.google.com/p/go/source/list?name=go1.3.3)
@@ -235,6 +575,23 @@ golang (2:1.3.3-1) unstable; urgency=med
-- Tianon Gravi Fri, 12 Dec 2014 16:11:02 -0500
+golang (2:1.3.2-1ubuntu1) vivid; urgency=medium
+
+ * Merge from Debian unstable. Remaining changes:
+ - 016-armhf-elf-header.patch: Use correct ELF header for armhf binaries.
+ - Support co-installability with gccgo-go tool:
+ - d/rules,golang-go.install: Rename bin/go -> bin/golang-go
+ - d/golang-go.{postinst,prerm}: Install/remove /usr/bin/go using
+ alternatives.
+ - d/copyright: Amendments for full compliance with copyright format.
+ - d/control: Demote golang-go.tools to Suggests to support Ubuntu MIR.
+ - dropped patches (now upstream):
+ - d/p/issue27650045_40001_50001.diff
+ - d/p/issue28050043_60001_70001.diff
+ - d/p/issue54790044_100001_110001.diff
+
+ -- Serge Hallyn Tue, 18 Nov 2014 15:12:26 -0600
+
golang (2:1.3.2-1) unstable; urgency=medium
* New upstream version
diff -pruN 1.6.3-1/debian/control 1.6.3-1ubuntu1/debian/control
--- 1.6.3-1/debian/control 2016-07-19 03:26:22.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/control 2016-07-20 10:12:59.000000000 +0000
@@ -5,7 +5,8 @@
Source: golang-1.6
Section: devel
Priority: optional
-Maintainer: Go Compiler Team
+Maintainer: Ubuntu Developers
+XSBC-Original-Maintainer: Go Compiler Team
Uploaders: Michael Stapelberg ,
Paul Tagliamonte ,
Tianon Gravi ,
@@ -19,12 +20,16 @@ Standards-Version: 3.9.6
Homepage: https://golang.org
Package: golang-1.6-go
-Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el
+Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el s390x
Depends: golang-1.6-src (>= ${source:Version}),
${misc:Depends},
${perl:Depends},
${shlibs:Depends}
-Recommends: g++, gcc, libc6-dev, pkg-config
+Recommends: g++,
+ gcc,
+ golang-1.6-race-detector-runtime [amd64],
+ libc6-dev,
+ pkg-config
Suggests: bzr, ca-certificates, git, mercurial, subversion
Description: Go programming language compiler, linker, compiled stdlib
The Go programming language is an open source project to make programmers more
@@ -43,7 +48,7 @@ Description: Go programming language com
pre-compile the standard library inside GOROOT for cross-compilation to work.
Package: golang-1.6-src
-Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el
+Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el s390x
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: Go programming language - source files
The Go programming language is an open source project to make programmers more
@@ -96,3 +101,25 @@ Description: Go programming language com
.
This package is a metapackage that, when installed, guarantees
that (most of) a full Go development environment is installed.
+
+Package: libgolang-1.6-std1
+Architecture: amd64 arm64 armhf i386 ppc64el s390x
+Pre-Depends: ${misc:Pre-Depends}
+Provides: ${golang:Provides}
+Depends: ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
+Description: Go standard shared library
+ This package contains the Go standard library built as a shared library.
+ Packages should not depend on this package directly but rather the ABI-stamped
+ value in Provides:. dpkg-shlibdeps will do this automatically.
+
+Package: golang-1.6-go-shared-dev
+Architecture: amd64 arm64 armhf i386 ppc64el s390x
+Pre-Depends: ${misc:Pre-Depends}
+Depends: golang-1.6-go (= ${binary:Version}),
+ libgolang-1.6-std1 (= ${binary:Version}),
+ ${misc:Depends},
+ ${perl:Depends},
+ ${shlibs:Depends}
+Description: Go standard shared library support files
+ This package contains the files necessary to link against the shared
+ library packaged in libgolang-1.6-std1.
diff -pruN 1.6.3-1/debian/control.in 1.6.3-1ubuntu1/debian/control.in
--- 1.6.3-1/debian/control.in 2016-07-19 02:52:34.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/control.in 2016-07-20 09:45:02.000000000 +0000
@@ -1,7 +1,8 @@
Source: golang-X.Y
Section: devel
Priority: optional
-Maintainer: Go Compiler Team
+Maintainer: Ubuntu Developers
+XSBC-Original-Maintainer: Go Compiler Team
Uploaders: Michael Stapelberg ,
Paul Tagliamonte ,
Tianon Gravi ,
@@ -15,12 +16,16 @@ Standards-Version: 3.9.6
Homepage: https://golang.org
Package: golang-X.Y-go
-Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el
+Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el s390x
Depends: golang-X.Y-src (>= ${source:Version}),
${misc:Depends},
${perl:Depends},
${shlibs:Depends}
-Recommends: g++, gcc, libc6-dev, pkg-config
+Recommends: g++,
+ gcc,
+ golang-X.Y-race-detector-runtime [amd64],
+ libc6-dev,
+ pkg-config
Suggests: bzr, ca-certificates, git, mercurial, subversion
Description: Go programming language compiler, linker, compiled stdlib
The Go programming language is an open source project to make programmers more
@@ -39,7 +44,7 @@ Description: Go programming language com
pre-compile the standard library inside GOROOT for cross-compilation to work.
Package: golang-X.Y-src
-Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el
+Architecture: amd64 arm64 armel armhf i386 ppc64 ppc64el s390x
Depends: ${misc:Depends}, ${shlibs:Depends}
Description: Go programming language - source files
The Go programming language is an open source project to make programmers more
@@ -92,3 +97,25 @@ Description: Go programming language com
.
This package is a metapackage that, when installed, guarantees
that (most of) a full Go development environment is installed.
+
+Package: libgolang-X.Y-std1
+Architecture: amd64 arm64 armhf i386 ppc64el s390x
+Pre-Depends: ${misc:Pre-Depends}
+Provides: ${golang:Provides}
+Depends: ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
+Description: Go standard shared library
+ This package contains the Go standard library built as a shared library.
+ Packages should not depend on this package directly but rather the ABI-stamped
+ value in Provides:. dpkg-shlibdeps will do this automatically.
+
+Package: golang-X.Y-go-shared-dev
+Architecture: amd64 arm64 armhf i386 ppc64el s390x
+Pre-Depends: ${misc:Pre-Depends}
+Depends: golang-X.Y-go (= ${binary:Version}),
+ libgolang-X.Y-std1 (= ${binary:Version}),
+ ${misc:Depends},
+ ${perl:Depends},
+ ${shlibs:Depends}
+Description: Go standard shared library support files
+ This package contains the files necessary to link against the shared
+ library packaged in libgolang-X.Y-std1.
diff -pruN 1.6.3-1/debian/helpers/goenv.sh 1.6.3-1ubuntu1/debian/helpers/goenv.sh
--- 1.6.3-1/debian/helpers/goenv.sh 2016-07-19 02:52:34.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/helpers/goenv.sh 2016-07-20 09:44:24.000000000 +0000
@@ -11,7 +11,7 @@ __goos__deb_arch_os() {
__goarch__deb_arch_cpu() {
case "$1" in
- amd64|arm|arm64|ppc64) echo "$1" ;;
+ amd64|arm|arm64|ppc64|s390x) echo "$1" ;;
i386) echo 386 ;;
ppc64el) echo ppc64le ;;
mips64el) echo mips64le ;;
diff -pruN 1.6.3-1/debian/helpers/installshlib.sh 1.6.3-1ubuntu1/debian/helpers/installshlib.sh
--- 1.6.3-1/debian/helpers/installshlib.sh 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/helpers/installshlib.sh 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -eux
+GOARCH=$(./bin/go env GOARCH)
+
+./bin/go build -o ./bin/readabihash -linkshared -ldflags="-r ''" debian/helpers/readabihash.go
+
+TRIPLET=$(dpkg-architecture -qDEB_HOST_MULTIARCH)
+
+mkdir -p debian/libgolang-${GOVER}-std1/usr/lib/${TRIPLET}
+mv pkg/linux_${GOARCH}_dynlink/libstd.so debian/libgolang-${GOVER}-std1/usr/lib/${TRIPLET}/libgolang-${GOVER}-std.so.1
+
+ln -s ../../../${TRIPLET}/libgolang-${GOVER}-std.so.1 pkg/linux_${GOARCH}_dynlink/libstd.so
+
+mkdir -p debian/golang-${GOVER}-go-shared-dev/usr/lib/go-${GOVER}/pkg/
+mv pkg/linux_${GOARCH}_dynlink/ debian/golang-${GOVER}-go-shared-dev/usr/lib/go-${GOVER}/pkg/
+
+cp bin/readabihash debian/golang-${GOVER}-go-shared-dev/usr/lib/go-${GOVER}/pkg/
diff -pruN 1.6.3-1/debian/helpers/readabihash.go 1.6.3-1ubuntu1/debian/helpers/readabihash.go
--- 1.6.3-1/debian/helpers/readabihash.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/helpers/readabihash.go 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,85 @@
+package main
+
+import (
+ "debug/elf"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+func rnd(v int32, r int32) int32 {
+ if r <= 0 {
+ return v
+ }
+ v += r - 1
+ c := v % r
+ if c < 0 {
+ c += r
+ }
+ v -= c
+ return v
+}
+
+func readwithpad(r io.Reader, sz int32) ([]byte, error) {
+ full := rnd(sz, 4)
+ data := make([]byte, full)
+ _, err := io.ReadFull(r, data)
+ if err != nil {
+ return nil, err
+ }
+ data = data[:sz]
+ return data, nil
+}
+
+func readnote(filename, name string, type_ int32) ([]byte, error) {
+ f, err := elf.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ for _, sect := range f.Sections {
+ if sect.Type != elf.SHT_NOTE {
+ continue
+ }
+ r := sect.Open()
+ for {
+ var namesize, descsize, nt_type int32
+ err = binary.Read(r, f.ByteOrder, &namesize)
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("read namesize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, &descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read descsize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, &nt_type)
+ if err != nil {
+ return nil, fmt.Errorf("read type failed: %v", err)
+ }
+ nt_name, err := readwithpad(r, namesize)
+ if err != nil {
+ return nil, fmt.Errorf("read name failed: %v", err)
+ }
+ desc, err := readwithpad(r, descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read desc failed: %v", err)
+ }
+ if name == string(nt_name) && type_ == nt_type {
+ return desc, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+func main() {
+ desc, err := readnote(os.Args[1], "Go\x00\x00", 2)
+ if err != nil {
+ log.Fatalf("readnote failed: %v", err)
+ }
+ fmt.Printf("%x\n", desc)
+}
diff -pruN 1.6.3-1/debian/libgolang-X.Y-std1.lintian-overrides 1.6.3-1ubuntu1/debian/libgolang-X.Y-std1.lintian-overrides
--- 1.6.3-1/debian/libgolang-X.Y-std1.lintian-overrides 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/libgolang-X.Y-std1.lintian-overrides 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,3 @@
+# Go shared libraries are not linked by the system linker so
+# the .so symlinks are not where lintian expects.
+libgolang-X.Y-std1: dev-pkg-without-shlib-symlink
diff -pruN 1.6.3-1/debian/patches/0001-s390x-port.patch 1.6.3-1ubuntu1/debian/patches/0001-s390x-port.patch
--- 1.6.3-1/debian/patches/0001-s390x-port.patch 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/patches/0001-s390x-port.patch 2016-07-20 09:44:37.000000000 +0000
@@ -0,0 +1,25427 @@
+Subject: [PATCH] [s390x] Add support for Linux on IBM z architecture (s390x)
+
+--- a/api/go1.6.txt
++++ b/api/go1.6.txt
+@@ -273,3 +273,128 @@
+ pkg text/template, type ExecError struct
+ pkg text/template, type ExecError struct, Err error
+ pkg text/template, type ExecError struct, Name string
++pkg debug/elf, const R_390_12 = 2
++pkg debug/elf, const R_390_12 R_390
++pkg debug/elf, const R_390_16 = 3
++pkg debug/elf, const R_390_16 R_390
++pkg debug/elf, const R_390_20 = 57
++pkg debug/elf, const R_390_20 R_390
++pkg debug/elf, const R_390_32 = 4
++pkg debug/elf, const R_390_32 R_390
++pkg debug/elf, const R_390_64 = 22
++pkg debug/elf, const R_390_64 R_390
++pkg debug/elf, const R_390_8 = 1
++pkg debug/elf, const R_390_8 R_390
++pkg debug/elf, const R_390_COPY = 9
++pkg debug/elf, const R_390_COPY R_390
++pkg debug/elf, const R_390_GLOB_DAT = 10
++pkg debug/elf, const R_390_GLOB_DAT R_390
++pkg debug/elf, const R_390_GOT12 = 6
++pkg debug/elf, const R_390_GOT12 R_390
++pkg debug/elf, const R_390_GOT16 = 15
++pkg debug/elf, const R_390_GOT16 R_390
++pkg debug/elf, const R_390_GOT20 = 58
++pkg debug/elf, const R_390_GOT20 R_390
++pkg debug/elf, const R_390_GOT32 = 7
++pkg debug/elf, const R_390_GOT32 R_390
++pkg debug/elf, const R_390_GOT64 = 24
++pkg debug/elf, const R_390_GOT64 R_390
++pkg debug/elf, const R_390_GOTENT = 26
++pkg debug/elf, const R_390_GOTENT R_390
++pkg debug/elf, const R_390_GOTOFF = 13
++pkg debug/elf, const R_390_GOTOFF R_390
++pkg debug/elf, const R_390_GOTOFF16 = 27
++pkg debug/elf, const R_390_GOTOFF16 R_390
++pkg debug/elf, const R_390_GOTOFF64 = 28
++pkg debug/elf, const R_390_GOTOFF64 R_390
++pkg debug/elf, const R_390_GOTPC = 14
++pkg debug/elf, const R_390_GOTPC R_390
++pkg debug/elf, const R_390_GOTPCDBL = 21
++pkg debug/elf, const R_390_GOTPCDBL R_390
++pkg debug/elf, const R_390_GOTPLT12 = 29
++pkg debug/elf, const R_390_GOTPLT12 R_390
++pkg debug/elf, const R_390_GOTPLT16 = 30
++pkg debug/elf, const R_390_GOTPLT16 R_390
++pkg debug/elf, const R_390_GOTPLT20 = 59
++pkg debug/elf, const R_390_GOTPLT20 R_390
++pkg debug/elf, const R_390_GOTPLT32 = 31
++pkg debug/elf, const R_390_GOTPLT32 R_390
++pkg debug/elf, const R_390_GOTPLT64 = 32
++pkg debug/elf, const R_390_GOTPLT64 R_390
++pkg debug/elf, const R_390_GOTPLTENT = 33
++pkg debug/elf, const R_390_GOTPLTENT R_390
++pkg debug/elf, const R_390_GOTPLTOFF16 = 34
++pkg debug/elf, const R_390_GOTPLTOFF16 R_390
++pkg debug/elf, const R_390_GOTPLTOFF32 = 35
++pkg debug/elf, const R_390_GOTPLTOFF32 R_390
++pkg debug/elf, const R_390_GOTPLTOFF64 = 36
++pkg debug/elf, const R_390_GOTPLTOFF64 R_390
++pkg debug/elf, const R_390_JMP_SLOT = 11
++pkg debug/elf, const R_390_JMP_SLOT R_390
++pkg debug/elf, const R_390_NONE = 0
++pkg debug/elf, const R_390_NONE R_390
++pkg debug/elf, const R_390_PC16 = 16
++pkg debug/elf, const R_390_PC16 R_390
++pkg debug/elf, const R_390_PC16DBL = 17
++pkg debug/elf, const R_390_PC16DBL R_390
++pkg debug/elf, const R_390_PC32 = 5
++pkg debug/elf, const R_390_PC32 R_390
++pkg debug/elf, const R_390_PC32DBL = 19
++pkg debug/elf, const R_390_PC32DBL R_390
++pkg debug/elf, const R_390_PC64 = 23
++pkg debug/elf, const R_390_PC64 R_390
++pkg debug/elf, const R_390_PLT16DBL = 18
++pkg debug/elf, const R_390_PLT16DBL R_390
++pkg debug/elf, const R_390_PLT32 = 8
++pkg debug/elf, const R_390_PLT32 R_390
++pkg debug/elf, const R_390_PLT32DBL = 20
++pkg debug/elf, const R_390_PLT32DBL R_390
++pkg debug/elf, const R_390_PLT64 = 25
++pkg debug/elf, const R_390_PLT64 R_390
++pkg debug/elf, const R_390_RELATIVE = 12
++pkg debug/elf, const R_390_RELATIVE R_390
++pkg debug/elf, const R_390_TLS_DTPMOD = 54
++pkg debug/elf, const R_390_TLS_DTPMOD R_390
++pkg debug/elf, const R_390_TLS_DTPOFF = 55
++pkg debug/elf, const R_390_TLS_DTPOFF R_390
++pkg debug/elf, const R_390_TLS_GD32 = 40
++pkg debug/elf, const R_390_TLS_GD32 R_390
++pkg debug/elf, const R_390_TLS_GD64 = 41
++pkg debug/elf, const R_390_TLS_GD64 R_390
++pkg debug/elf, const R_390_TLS_GDCALL = 38
++pkg debug/elf, const R_390_TLS_GDCALL R_390
++pkg debug/elf, const R_390_TLS_GOTIE12 = 42
++pkg debug/elf, const R_390_TLS_GOTIE12 R_390
++pkg debug/elf, const R_390_TLS_GOTIE20 = 60
++pkg debug/elf, const R_390_TLS_GOTIE20 R_390
++pkg debug/elf, const R_390_TLS_GOTIE32 = 43
++pkg debug/elf, const R_390_TLS_GOTIE32 R_390
++pkg debug/elf, const R_390_TLS_GOTIE64 = 44
++pkg debug/elf, const R_390_TLS_GOTIE64 R_390
++pkg debug/elf, const R_390_TLS_IE32 = 47
++pkg debug/elf, const R_390_TLS_IE32 R_390
++pkg debug/elf, const R_390_TLS_IE64 = 48
++pkg debug/elf, const R_390_TLS_IE64 R_390
++pkg debug/elf, const R_390_TLS_IEENT = 49
++pkg debug/elf, const R_390_TLS_IEENT R_390
++pkg debug/elf, const R_390_TLS_LDCALL = 39
++pkg debug/elf, const R_390_TLS_LDCALL R_390
++pkg debug/elf, const R_390_TLS_LDM32 = 45
++pkg debug/elf, const R_390_TLS_LDM32 R_390
++pkg debug/elf, const R_390_TLS_LDM64 = 46
++pkg debug/elf, const R_390_TLS_LDM64 R_390
++pkg debug/elf, const R_390_TLS_LDO32 = 52
++pkg debug/elf, const R_390_TLS_LDO32 R_390
++pkg debug/elf, const R_390_TLS_LDO64 = 53
++pkg debug/elf, const R_390_TLS_LDO64 R_390
++pkg debug/elf, const R_390_TLS_LE32 = 50
++pkg debug/elf, const R_390_TLS_LE32 R_390
++pkg debug/elf, const R_390_TLS_LE64 = 51
++pkg debug/elf, const R_390_TLS_LE64 R_390
++pkg debug/elf, const R_390_TLS_LOAD = 37
++pkg debug/elf, const R_390_TLS_LOAD R_390
++pkg debug/elf, const R_390_TLS_TPOFF = 56
++pkg debug/elf, const R_390_TLS_TPOFF R_390
++pkg debug/elf, method (R_390) GoString() string
++pkg debug/elf, method (R_390) String() string
++pkg debug/elf, type R_390 int
+--- a/doc/devel/release.html
++++ b/doc/devel/release.html
+@@ -61,6 +61,13 @@
+ 1.6.3 milestone on our issue tracker for details.
+
+
++go1.6 (released 2016/02/17)
++
++
++Go 1.6 is a major release of Go.
++Read the Go 1.6 Release Notes for more information.
++
++
+ go1.5 (released 2015/08/19)
+
+
+--- /dev/null
++++ b/misc/cgo/test/issue9400/asm_s390x.s
+@@ -0,0 +1,26 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build !gccgo
++
++#include "textflag.h"
++
++TEXT ·RewindAndSetgid(SB),NOSPLIT,$0-0
++ // Rewind stack pointer so anything that happens on the stack
++ // will clobber the test pattern created by the caller
++ ADD $(1024 * 8), R15
++
++ // Ask signaller to setgid
++ MOVD $·Baton(SB), R5
++ MOVW $1, 0(R5)
++
++ // Wait for setgid completion
++loop:
++ SYNC
++ MOVW ·Baton(SB), R3
++ CMPBNE R3, $0, loop
++
++ // Restore stack
++ SUB $(1024 * 8), R15
++ RET
+--- a/src/cmd/asm/internal/arch/arch.go
++++ b/src/cmd/asm/internal/arch/arch.go
+@@ -10,6 +10,7 @@
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/mips"
+ "cmd/internal/obj/ppc64"
++ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/x86"
+ "fmt"
+ "strings"
+@@ -82,6 +83,10 @@
+ a := archPPC64()
+ a.LinkArch = &ppc64.Linkppc64le
+ return a
++ case "s390x":
++ a := archS390x()
++ a.LinkArch = &s390x.Links390x
++ return a
+ }
+ return nil
+ }
+@@ -426,3 +431,56 @@
+ IsJump: jumpMIPS64,
+ }
+ }
++
++func archS390x() *Arch {
++ register := make(map[string]int16)
++ // Create maps for easy lookup of instruction names etc.
++ // Note that there is no list of names as there is for x86.
++ for i := s390x.REG_R0; i <= s390x.REG_R15; i++ {
++ register[obj.Rconv(i)] = int16(i)
++ }
++ for i := s390x.REG_F0; i <= s390x.REG_F15; i++ {
++ register[obj.Rconv(i)] = int16(i)
++ }
++ for i := s390x.REG_V0; i <= s390x.REG_V31; i++ {
++ register[obj.Rconv(i)] = int16(i)
++ }
++ for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ {
++ register[obj.Rconv(i)] = int16(i)
++ }
++ register["LR"] = s390x.REG_LR
++ // Pseudo-registers.
++ register["SB"] = RSB
++ register["FP"] = RFP
++ register["PC"] = RPC
++ // Avoid unintentionally clobbering g using R13.
++ delete(register, "R13")
++ register["g"] = s390x.REG_R13
++ registerPrefix := map[string]bool{
++ "AR": true,
++ "F": true,
++ "R": true,
++ }
++
++ instructions := make(map[string]int)
++ for i, s := range obj.Anames {
++ instructions[s] = i
++ }
++ for i, s := range s390x.Anames {
++ if i >= obj.A_ARCHSPECIFIC {
++ instructions[s] = i + obj.ABaseS390X
++ }
++ }
++ // Annoying aliases.
++ instructions["BR"] = s390x.ABR
++ instructions["BL"] = s390x.ABL
++
++ return &Arch{
++ LinkArch: &s390x.Links390x,
++ Instructions: instructions,
++ Register: register,
++ RegisterPrefix: registerPrefix,
++ RegisterNumber: s390xRegisterNumber,
++ IsJump: jumpS390x,
++ }
++}
+--- /dev/null
++++ b/src/cmd/asm/internal/arch/s390x.go
+@@ -0,0 +1,136 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// This file encapsulates some of the odd characteristics of the
++// s390x instruction set, to minimize its interaction
++// with the core of the assembler.
++
++package arch
++
++import "cmd/internal/obj/s390x"
++
++func jumpS390x(word string) bool {
++ switch word {
++ case "BC",
++ "BCL",
++ "BEQ",
++ "BGE",
++ "BGT",
++ "BL",
++ "BLE",
++ "BLT",
++ "BNE",
++ "BR",
++ "BVC",
++ "BVS",
++ "CMPBEQ",
++ "CMPBGE",
++ "CMPBGT",
++ "CMPBLE",
++ "CMPBLT",
++ "CMPBNE",
++ "CMPUBEQ",
++ "CMPUBGE",
++ "CMPUBGT",
++ "CMPUBLE",
++ "CMPUBLT",
++ "CMPUBNE",
++ "CALL",
++ "JMP":
++ return true
++ }
++ return false
++}
++
++// IsS390xRLD reports whether the op (as defined by an s390x.A* constant) is
++// one of the RLD-like instructions that require special handling.
++// The FMADD-like instructions behave similarly.
++func IsS390xRLD(op int) bool {
++ switch op {
++ case s390x.AFMADD,
++ s390x.AFMADDS,
++ s390x.AFMSUB,
++ s390x.AFMSUBS,
++ s390x.AFNMADD,
++ s390x.AFNMADDS,
++ s390x.AFNMSUB,
++ s390x.AFNMSUBS:
++ return true
++ }
++ return false
++}
++
++// IsS390xCMP reports whether the op (as defined by an s390x.A* constant) is
++// one of the CMP instructions that require special handling.
++func IsS390xCMP(op int) bool {
++ switch op {
++ case s390x.ACMP, s390x.ACMPU, s390x.ACMPW, s390x.ACMPWU:
++ return true
++ }
++ return false
++}
++
++// IsS390xNEG reports whether the op (as defined by an s390x.A* constant) is
++// one of the NEG-like instructions that require special handling.
++func IsS390xNEG(op int) bool {
++ switch op {
++ case s390x.AADDME,
++ s390x.AADDZE,
++ s390x.ANEG,
++ s390x.ASUBME,
++ s390x.ASUBZE:
++ return true
++ }
++ return false
++}
++
++// IsS390xWithLength reports whether the op (as defined by an s390x.A* constant)
++// refers to an instruction which takes a length as its first argument.
++func IsS390xWithLength(op int) bool {
++ switch op {
++ case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
++ return true
++ case s390x.AVLL, s390x.AVSTL:
++ return true
++ }
++ return false
++}
++
++// IsS390xWithIndex reports whether the op (as defined by an s390x.A* constant)
++// refers to an instruction which takes an index as its first argument.
++func IsS390xWithIndex(op int) bool {
++ switch op {
++ case s390x.AVSCEG, s390x.AVSCEF, s390x.AVGEG, s390x.AVGEF:
++ return true
++ case s390x.AVGMG, s390x.AVGMF, s390x.AVGMH, s390x.AVGMB:
++ return true
++ case s390x.AVLEIG, s390x.AVLEIF, s390x.AVLEIH, s390x.AVLEIB:
++ return true
++ case s390x.AVPDI:
++ return true
++ }
++ return false
++}
++
++func s390xRegisterNumber(name string, n int16) (int16, bool) {
++ switch name {
++ case "AR":
++ if 0 <= n && n <= 15 {
++ return s390x.REG_AR0 + n, true
++ }
++ case "F":
++ if 0 <= n && n <= 15 {
++ return s390x.REG_F0 + n, true
++ }
++ case "R":
++ if 0 <= n && n <= 15 {
++ return s390x.REG_R0 + n, true
++ }
++ case "V":
++ if 0 <= n && n <= 31 {
++ return s390x.REG_V0 + n, true
++ }
++ }
++ return 0, false
++}
+--- a/src/cmd/asm/internal/asm/asm.go
++++ b/src/cmd/asm/internal/asm/asm.go
+@@ -381,6 +381,20 @@
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ }
++ if p.arch.Thechar == 'z' {
++ // 3-operand jumps.
++ target = &a[2]
++ prog.From = a[0]
++ if a[1].Reg != 0 {
++ // compare two regs; jump.
++ prog.Reg = p.getRegister(prog, op, &a[1])
++ } else {
++ // compare reg with imm; jump.
++ prog.From3 = newAddr(a[1])
++ }
++ break
++ }
++
+ fallthrough
+ default:
+ p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
+@@ -593,6 +607,15 @@
+ p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
+ return
+ }
++ case 'z':
++ if arch.IsS390xWithLength(op) || arch.IsS390xWithIndex(op) {
++ prog.From = a[1]
++ prog.From3 = newAddr(a[0])
++ } else {
++ prog.Reg = p.getRegister(prog, op, &a[1])
++ prog.From = a[0]
++ }
++ prog.To = a[2]
+ default:
+ p.errorf("TODO: implement three-operand instructions for this architecture")
+ return
+@@ -628,6 +651,13 @@
+ prog.To = a[3]
+ break
+ }
++ if p.arch.Thechar == 'z' {
++ prog.From = a[1]
++ prog.Reg = p.getRegister(prog, op, &a[2])
++ prog.From3 = newAddr(a[0])
++ prog.To = a[3]
++ break
++ }
+ p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
+ return
+ case 5:
+--- a/src/cmd/asm/internal/asm/endtoend_test.go
++++ b/src/cmd/asm/internal/asm/endtoend_test.go
+@@ -389,3 +389,7 @@
+ func TestPPC64EndToEnd(t *testing.T) {
+ testEndToEnd(t, "ppc64", "ppc64")
+ }
++
++func TestS390XEndToEnd(t *testing.T) {
++ testEndToEnd(t, "s390x", "s390x")
++}
+--- a/src/cmd/asm/internal/asm/operand_test.go
++++ b/src/cmd/asm/internal/asm/operand_test.go
+@@ -70,6 +70,11 @@
+ testOperandParser(t, parser, mips64OperandTests)
+ }
+
++func TestS390XOperandParser(t *testing.T) {
++ parser := newParser("s390x")
++ testOperandParser(t, parser, s390xOperandTests)
++}
++
+ type operandTest struct {
+ input, output string
+ }
+@@ -518,6 +523,104 @@
+ {"a(FP)", "a(FP)"},
+ {"g", "g"},
+ {"ret+8(FP)", "ret+8(FP)"},
++ {"runtime·abort(SB)", "runtime.abort(SB)"},
++ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
++ {"·trunc(SB)", "\"\".trunc(SB)"},
++ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
++}
++
++var s390xOperandTests = []operandTest{
++ {"$((1<<63)-1)", "$9223372036854775807"},
++ {"$(-64*1024)", "$-65536"},
++ {"$(1024 * 8)", "$8192"},
++ {"$-1", "$-1"},
++ {"$-24(R4)", "$-24(R4)"},
++ {"$0", "$0"},
++ {"$0(R1)", "$(R1)"},
++ {"$0.5", "$(0.5)"},
++ {"$0x7000", "$28672"},
++ {"$0x88888eef", "$2290650863"},
++ {"$1", "$1"},
++ {"$_main<>(SB)", "$_main<>(SB)"},
++ {"$argframe(FP)", "$argframe(FP)"},
++ {"$~3", "$-4"},
++ {"(-288-3*8)(R1)", "-312(R1)"},
++ {"(16)(R7)", "16(R7)"},
++ {"(8)(g)", "8(g)"},
++ {"(R0)", "(R0)"},
++ {"(R3)", "(R3)"},
++ {"(R4)", "(R4)"},
++ {"(R5)", "(R5)"},
++ {"-1(R4)", "-1(R4)"},
++ {"-1(R5)", "-1(R5)"},
++ {"6(PC)", "6(PC)"},
++ {"R0", "R0"},
++ {"R1", "R1"},
++ {"R2", "R2"},
++ {"R3", "R3"},
++ {"R4", "R4"},
++ {"R5", "R5"},
++ {"R6", "R6"},
++ {"R7", "R7"},
++ {"R8", "R8"},
++ {"R9", "R9"},
++ {"R10", "R10"},
++ {"R11", "R11"},
++ {"R12", "R12"},
++ // {"R13", "R13"}, R13 is g
++ {"R14", "R14"},
++ {"R15", "R15"},
++ {"F0", "F0"},
++ {"F1", "F1"},
++ {"F2", "F2"},
++ {"F3", "F3"},
++ {"F4", "F4"},
++ {"F5", "F5"},
++ {"F6", "F6"},
++ {"F7", "F7"},
++ {"F8", "F8"},
++ {"F9", "F9"},
++ {"F10", "F10"},
++ {"F11", "F11"},
++ {"F12", "F12"},
++ {"F13", "F13"},
++ {"F14", "F14"},
++ {"F15", "F15"},
++ {"V0", "V0"},
++ {"V1", "V1"},
++ {"V2", "V2"},
++ {"V3", "V3"},
++ {"V4", "V4"},
++ {"V5", "V5"},
++ {"V6", "V6"},
++ {"V7", "V7"},
++ {"V8", "V8"},
++ {"V9", "V9"},
++ {"V10", "V10"},
++ {"V11", "V11"},
++ {"V12", "V12"},
++ {"V13", "V13"},
++ {"V14", "V14"},
++ {"V15", "V15"},
++ {"V16", "V16"},
++ {"V17", "V17"},
++ {"V18", "V18"},
++ {"V19", "V19"},
++ {"V20", "V20"},
++ {"V21", "V21"},
++ {"V22", "V22"},
++ {"V23", "V23"},
++ {"V24", "V24"},
++ {"V25", "V25"},
++ {"V26", "V26"},
++ {"V27", "V27"},
++ {"V28", "V28"},
++ {"V29", "V29"},
++ {"V30", "V30"},
++ {"V31", "V31"},
++ {"a(FP)", "a(FP)"},
++ {"g", "g"},
++ {"ret+8(FP)", "ret+8(FP)"},
+ {"runtime·abort(SB)", "runtime.abort(SB)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·trunc(SB)", "\"\".trunc(SB)"},
+--- /dev/null
++++ b/src/cmd/asm/internal/asm/testdata/s390x.s
+@@ -0,0 +1,215 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++TEXT main·foo(SB),7,$16-0 // TEXT main.foo(SB), 7, $16-0
++ MOVD R1, R2 // b9040021
++ MOVW R3, R4 // b9140043
++ MOVH R5, R6 // b9070065
++ MOVB R7, R8 // b9060087
++ MOVWZ R1, R2 // b9160021
++ MOVHZ R2, R3 // b9850032
++ MOVBZ R4, R5 // b9840054
++ MOVDBR R1, R2 // b90f0021
++ MOVWBR R3, R4 // b91f0043
++
++ MOVD (R15), R1 // e310f0000004
++ MOVW (R15), R2 // e320f0000014
++ MOVH (R15), R3 // e330f0000015
++ MOVB (R15), R4 // e340f0000077
++ MOVWZ (R15), R5 // e350f0000016
++ MOVHZ (R15), R6 // e360f0000091
++ MOVBZ (R15), R7 // e370f0000090
++ MOVDBR (R15), R8 // e380f000000f
++ MOVWBR (R15), R9 // e390f000001e
++
++ MOVD R1, n-8(SP) // e310f0100024
++ MOVW R2, n-8(SP) // e320f0100050
++ MOVH R3, n-8(SP) // e330f0100070
++ MOVB R4, n-8(SP) // e340f0100072
++ MOVWZ R5, n-8(SP) // e350f0100050
++ MOVHZ R6, n-8(SP) // e360f0100070
++ MOVBZ R7, n-8(SP) // e370f0100072
++ MOVDBR R8, n-8(SP) // e380f010002f
++ MOVWBR R9, n-8(SP) // e390f010003e
++
++ MOVD $-8589934592, R1 // c01efffffffe
++ MOVW $-131072, R2 // c021fffe0000
++ MOVH $-512, R3 // a739fe00
++ MOVB $-1, R4 // a749ffff
++
++ MOVD $-2147483648, n-8(SP) // c0b180000000e3b0f0100024
++ MOVW $-131072, n-8(SP) // c0b1fffe0000e3b0f0100050
++ MOVH $-512, n-8(SP) // e544f010fe00
++ MOVB $-1, n-8(SP) // 92fff010
++
++ ADD R1, R2 // b9e81022
++ ADD R1, R2, R3 // b9e81032
++ ADD $8192, R1 // c21800002000
++ ADD $8192, R1, R2 // ec21200000d9
++ ADDC R1, R2 // b9ea1022
++ ADDC $1, R1, R2 // b9040021c22a00000001
++ ADDC R1, R2, R3 // b9ea1032
++ SUB R3, R4 // b9090043
++ SUB R3, R4, R5 // b9e93054
++ SUB $8192, R3 // c238ffffe000
++ SUB $8192, R3, R4 // ec43e00000d9
++ SUBC R1, R2 // b90b0021
++ SUBC $1, R1, R2 // b9040021c22affffffff
++ SUBC R2, R3, R4 // b9eb2043
++ MULLW R6, R7 // b91c0076
++ MULLW R6, R7, R8 // b9040087b91c0086
++ MULLW $8192, R6 // c26000002000
++ MULLW $8192, R6, R7 // b9040076c27000002000
++ DIVD R1, R2 // b90400b2b90d00a1b904002b
++ DIVD R1, R2, R3 // b90400b2b90d00a1b904003b
++ DIVW R4, R5 // b90400b5b91d00a4b904005b
++ DIVW R4, R5, R6 // b90400b5b91d00a4b904006b
++ DIVDU R7, R8 // b90400a0b90400b8b98700a7b904008b
++ DIVDU R7, R8, R9 // b90400a0b90400b8b98700a7b904009b
++ DIVWU R1, R2 // b90400a0b90400b2b99700a1b904002b
++ DIVWU R1, R2, R3 // b90400a0b90400b2b99700a1b904003b
++
++ XC $8, (R15), n-8(SP) // XC (R15), $8, n-8(SP) // d707f010f000
++ NC $8, (R15), n-8(SP) // NC (R15), $8, n-8(SP) // d407f010f000
++ OC $8, (R15), n-8(SP) // OC (R15), $8, n-8(SP) // d607f010f000
++ MVC $8, (R15), n-8(SP) // MVC (R15), $8, n-8(SP) // d207f010f000
++ CLC $8, (R15), n-8(SP) // CLC (R15), $8, n-8(SP) // d507f000f010
++ XC $256, -8(R15), -8(R15) // XC -8(R15), $256, -8(R15) // b90400afc2a8fffffff8d7ffa000a000
++ MVC $256, 8192(R1), 8192(R2) // MVC 8192(R1), $256, 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000
++
++ CMP R1, R2 // b9200012
++ CMP R3, $-2147483648 // c23c80000000
++ CMPU R4, R5 // b9210045
++ CMPU R6, $4294967295 // c26effffffff
++ CMPW R7, R8 // 1978
++ CMPW R9, $-2147483648 // c29d80000000
++ CMPWU R1, R2 // 1512
++ CMPWU R3, $4294967295 // c23fffffffff
++
++ BNE 0(PC) // a7740000
++ BEQ 0(PC) // a7840000
++ BLT 0(PC) // a7440000
++ BLE 0(PC) // a7c40000
++ BGT 0(PC) // a7240000
++ BGE 0(PC) // a7a40000
++
++ CMPBNE R1, R2, 0(PC) // ec1200007064
++ CMPBEQ R3, R4, 0(PC) // ec3400008064
++ CMPBLT R5, R6, 0(PC) // ec5600004064
++ CMPBLE R7, R8, 0(PC) // ec780000c064
++ CMPBGT R9, R1, 0(PC) // ec9100002064
++ CMPBGE R2, R3, 0(PC) // ec230000a064
++
++ CMPBNE R1, $-127, 0(PC) // ec170000817c
++ CMPBEQ R3, $0, 0(PC) // ec380000007c
++ CMPBLT R5, $128, 0(PC) // ec540000807c
++ CMPBLE R7, $127, 0(PC) // ec7c00007f7c
++ CMPBGT R9, $0, 0(PC) // ec920000007c
++ CMPBGE R2, $128, 0(PC) // ec2a0000807c
++
++ CMPUBNE R1, R2, 0(PC) // ec1200007065
++ CMPUBEQ R3, R4, 0(PC) // ec3400008065
++ CMPUBLT R5, R6, 0(PC) // ec5600004065
++ CMPUBLE R7, R8, 0(PC) // ec780000c065
++ CMPUBGT R9, R1, 0(PC) // ec9100002065
++ CMPUBGE R2, R3, 0(PC) // ec230000a065
++
++ CMPUBNE R1, $256, 0(PC) // ec170000007d
++ CMPUBEQ R3, $0, 0(PC) // ec380000007d
++ CMPUBLT R5, $256, 0(PC) // ec540000007d
++ CMPUBLE R7, $0, 0(PC) // ec7c0000007d
++ CMPUBGT R9, $256, 0(PC) // ec920000007d
++ CMPUBGE R2, $0, 0(PC) // ec2a0000007d
++
++ CEFBRA R0, F15 // b39400f0
++ CDFBRA R1, F14 // b39500e1
++ CEGBRA R2, F13 // b3a400d2
++ CDGBRA R3, F12 // b3a500c3
++
++ CELFBR R0, F15 // b39000f0
++ CDLFBR R1, F14 // b39100e1
++ CELGBR R2, F13 // b3a000d2
++ CDLGBR R3, F12 // b3a100c3
++
++ CFEBRA F15, R1 // b398501f
++ CFDBRA F14, R2 // b399502e
++ CGEBRA F13, R3 // b3a8503d
++ CGDBRA F12, R4 // b3a9504c
++
++ CLFEBR F15, R1 // b39c501f
++ CLFDBR F14, R2 // b39d502e
++ CLGEBR F13, R3 // b3ac503d
++ CLGDBR F12, R4 // b3ad504c
++
++ FMOVS $0, F11 // b37400b0
++ FMOVD $0, F12 // b37500c0
++ FMOVS (R1)(R2*1), F0 // ed0210000064
++ FMOVS n-8(SP), F15 // edf0f0100064
++ FMOVD -9999999(R8)(R9*1), F8 // c0a1ff67698141aa9000ed8a80000065
++ FMOVD F4, F5 // 2854
++ FADDS F0, F15 // b30a00f0
++ FADD F1, F14 // b31a00e1
++ FSUBS F2, F13 // b30b00d2
++ FSUB F3, F12 // b31b00c3
++ FMULS F4, F11 // b31700b4
++ FMUL F5, F10 // b31c00a5
++ FDIVS F6, F9 // b30d0096
++ FDIV F7, F8 // b31d0087
++ FABS F1, F2 // b3100021
++ FSQRTS F3, F4 // b3140043
++ FSQRT F5, F15 // b31500f5
++
++ VL (R15), V1 // e710f0000006
++ VST V1, (R15) // e710f000000e
++ VL (R15), V31 // e7f0f0000806
++ VST V31, (R15) // e7f0f000080e
++ VESLB $5, V14 // e7ee00050030
++ VESRAG $0, V15, V16 // e70f0000383a
++ VLM (R15), V8, V23 // e787f0000436
++ VSTM V8, V23, (R15) // e787f000043e
++ VONE V1 // e710ffff0044
++ VZERO V16 // e70000000844
++ VGBM $52428, V31 // e7f0cccc0844
++ VREPIB $255, V4 // e74000ff0045
++ VREPG $1, V4, V16 // e7040001384d
++ VREPB $4, V31, V1 // e71f0004044d
++ VFTCIDB $4095, V1, V2 // e721fff0304a
++ WFTCIDB $3276, V15, V16 // e70fccc8384a
++ VPOPCT V8, V19 // e73800000850
++ VFEEZBS V1, V2, V31 // e7f120300880
++ WFCHDBS V22, V23, V4 // e746701836eb
++ VMNH V1, V2, V30 // e7e1200018fe
++ VO V2, V1, V0 // e7021000006a
++ VERLLVF V2, V30, V27 // e7be20002c73
++ VSCBIB V0, V23, V24 // e78700000cf5
++ VNOT V16, V1 // e7101000046b
++ VCLZF V16, V17 // e71000002c53
++ VLVGP R3, R4, V8 // e78340000062
++
++ // many vector instructions have their inputs reordered
++ // typically this is to put the length or index input into From3
++ VGEG $1, 8(R15)(V30*1), V31 // VGEG 8(R15)(V30*1), $1, V31 // e7fef0081c12
++ VSCEG $1, V31, 16(R15)(V30*1) // VSCEG V31, $1, 16(R15)(V30*1) // e7fef0101c1a
++ VGEF $0, 2048(R15)(V1*1), V2 // VGEF 2048(R15)(V1*1), $0, V2 // e721f8000013
++ VSCEF $0, V2, 4095(R15)(V1*1) // VSCEF V2, $0, 4095(R15)(V1*1) // e721ffff001b
++ VLL R0, (R15), V1 // VLL (R15), R0, V1 // e710f0000037
++ VSTL R0, V16, (R15) // VSTL V16, R0, (R15) // e700f000083f
++ VGMH $8, $16, V12 // VGMH $16, $8, V12 // e7c008101046
++ VLEIF $2, $-43, V16 // VLEIF $-43, $2, V16 // e700ffd52843
++ VSLDB $3, V1, V16, V18 // VSLDB V1, V16, $3, V18 // e72100030a77
++ VERIMB $2, V31, V1, V2 // VERIMB V31, V1, $2, V2 // e72f10020472
++ VSEL V1, V2, V3, V4 // VSEL V2, V3, V1, V4 // e7412000308d
++ VGFMAH V21, V31, V24, V0 // VGFMAH V31, V24, V21, V0 // e705f10087bc
++ WFMSDB V2, V25, V24, V31 // WFMSDB V25, V24, V2, V31 // e7f298038b8e
++ VPERM V31, V0, V2, V3 // VPERM V0, V2, V31, V3 // e73f0000248c
++ VPDI $1, V2, V31, V1 // VPDI V2, V31, $1, V1 // e712f0001284
++
++ RET
++
++TEXT main·init(SB),7,$0 // TEXT main.init(SB), 7, $0
++ RET
++
++TEXT main·main(SB),7,$0 // TEXT main.main(SB), 7, $0
++ BL main·foo(SB) // CALL main.foo(SB)
++ RET
+--- a/src/cmd/cgo/main.go
++++ b/src/cmd/cgo/main.go
+@@ -156,7 +156,7 @@
+ "ppc64": 8,
+ "ppc64le": 8,
+ "s390": 4,
+- "s390x": 4,
++ "s390x": 8,
+ }
+
+ var cPrefix string
+--- a/src/cmd/compile/internal/gc/cgen.go
++++ b/src/cmd/compile/internal/gc/cgen.go
+@@ -247,7 +247,7 @@
+ return
+ }
+
+- if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable {
++ if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' || Ctxt.Arch.Thechar == 'z') && n.Addable {
+ Thearch.Gmove(n, res)
+ return
+ }
+@@ -1832,7 +1832,7 @@
+ // but they don't support direct generation of a bool value yet.
+ // We can fix that as we go.
+ switch Ctxt.Arch.Thechar {
+- case '0', '5', '7', '9':
++ case '0', '5', '7', '9', 'z':
+ Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
+ }
+ Cgen(n, res)
+@@ -1842,7 +1842,7 @@
+ return
+ }
+
+- if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
++ if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' && Ctxt.Arch.Thechar != 'z' {
+ // no need for a temporary
+ bgenNonZero(n, nil, wantTrue, likely, to)
+ return
+@@ -2640,7 +2640,7 @@
+ // in peep and optoas in order to enable this.
+ // TODO(rsc): ppc64 needs to support the relevant instructions
+ // in peep and optoas in order to enable this.
+- if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
++ if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' || Ctxt.Arch.Thechar == 'z' {
+ goto longdiv
+ }
+ w = int(nl.Type.Width * 8)
+--- a/src/cmd/compile/internal/gc/gsubr.go
++++ b/src/cmd/compile/internal/gc/gsubr.go
+@@ -57,7 +57,7 @@
+ return true
+
+ case OADDR:
+- return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
++ return Thearch.Thechar == '6' || Thearch.Thechar == '9' || Ctxt.Arch.Thechar == 'z' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+ }
+
+ return false
+@@ -83,7 +83,7 @@
+ p := Prog(as)
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Val = nil
+- if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
++ if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' && Ctxt.Arch.Thechar != 'z' {
+ p.From.Type = obj.TYPE_CONST
+ if likely > 0 {
+ p.From.Offset = 1
+@@ -449,7 +449,7 @@
+ case OADDR:
+ Naddr(a, n.Left)
+ a.Etype = uint8(Tptr)
+- if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
++ if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' && Thearch.Thechar != 'z' { // TODO(rsc): Do this even for arm, ppc64.
+ a.Width = int64(Widthptr)
+ }
+ if a.Type != obj.TYPE_MEM {
+--- a/src/cmd/compile/internal/gc/lex.go
++++ b/src/cmd/compile/internal/gc/lex.go
+@@ -216,14 +216,14 @@
+ var flag_shared int
+ var flag_dynlink bool
+ switch Thearch.Thechar {
+- case '5', '6', '7', '8', '9':
++ case '5', '6', '7', '8', '9', 'z':
+ obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
+ }
+ if Thearch.Thechar == '6' {
+ obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+ }
+ switch Thearch.Thechar {
+- case '5', '6', '7', '8', '9':
++ case '5', '6', '7', '8', '9', 'z':
+ flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
+ }
+ obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
+--- a/src/cmd/compile/internal/gc/pgen.go
++++ b/src/cmd/compile/internal/gc/pgen.go
+@@ -293,7 +293,7 @@
+ if haspointers(n.Type) {
+ stkptrsize = Stksize
+ }
+- if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
++ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z' {
+ Stksize = Rnd(Stksize, int64(Widthptr))
+ }
+ if Stksize >= 1<<31 {
+@@ -330,7 +330,7 @@
+ Fatalf("bad checknil")
+ }
+
+- if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
++ if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+ var reg Node
+ Regalloc(®, Types[Tptr], n)
+ Cgen(n, ®)
+--- a/src/cmd/compile/internal/gc/reg.go
++++ b/src/cmd/compile/internal/gc/reg.go
+@@ -249,7 +249,7 @@
+ p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+
+ // TODO(rsc): Remove special case here.
+- if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
++ if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z') && v.etype == TBOOL {
+ p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ }
+ p1.From.Type = obj.TYPE_REG
+@@ -302,7 +302,7 @@
+ // TODO(rsc): Remove special case here.
+ case obj.TYPE_ADDR:
+ var bit Bits
+- if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
++ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z' {
+ goto memcase
+ }
+ a.Type = obj.TYPE_MEM
+@@ -1114,7 +1114,7 @@
+
+ // Currently we never generate three register forms.
+ // If we do, this will need to change.
+- if p.From3Type() != obj.TYPE_NONE {
++ if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
+ Fatalf("regopt not implemented for from3")
+ }
+
+--- a/src/cmd/compile/internal/gc/walk.go
++++ b/src/cmd/compile/internal/gc/walk.go
+@@ -617,7 +617,7 @@
+
+ if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" {
+ switch Thearch.Thechar {
+- case '5', '6', '7':
++ case '5', '6', '7', 'z':
+ n.Op = OSQRT
+ n.Left = n.List.N
+ n.List = nil
+@@ -3307,6 +3307,11 @@
+ // Constants adding to width?
+ w := int(l.Type.Width * 8)
+
++ if Thearch.Thechar == 'z' && w != 32 && w != 64 {
++ // only supports 32-bit and 64-bit rotates
++ return
++ }
++
+ if Smallintconst(l.Right) && Smallintconst(r.Right) {
+ sl := int(Mpgetfix(l.Right.Val().U.(*Mpint)))
+ if sl >= 0 {
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/cgen.go
+@@ -0,0 +1,178 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package s390x
++
++import (
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++)
++
++type direction int
++
++const (
++ _FORWARDS direction = iota
++ _BACKWARDS
++)
++
++// blockcopy copies w bytes from &n to &res
++func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
++ var dst gc.Node
++ var src gc.Node
++ if n.Ullman >= res.Ullman {
++ gc.Agenr(n, &dst, res) // temporarily use dst
++ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
++ gins(s390x.AMOVD, &dst, &src)
++ if res.Op == gc.ONAME {
++ gc.Gvardef(res)
++ }
++ gc.Agen(res, &dst)
++ } else {
++ if res.Op == gc.ONAME {
++ gc.Gvardef(res)
++ }
++ gc.Agenr(res, &dst, res)
++ gc.Agenr(n, &src, nil)
++ }
++ defer gc.Regfree(&src)
++ defer gc.Regfree(&dst)
++
++ var tmp gc.Node
++ gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
++ defer gc.Regfree(&tmp)
++
++ offset := int64(0)
++ dir := _FORWARDS
++ if osrc < odst && odst < osrc+w {
++ // Reverse. Can't use MVC, fall back onto basic moves.
++ dir = _BACKWARDS
++ const copiesPerIter = 2
++ if w >= 8*copiesPerIter {
++ cnt := w - (w % (8 * copiesPerIter))
++ ginscon(s390x.AADD, w, &src)
++ ginscon(s390x.AADD, w, &dst)
++
++ var end gc.Node
++ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
++ p := gins(s390x.ASUB, nil, &end)
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = cnt
++ p.Reg = src.Reg
++
++ var label *obj.Prog
++ for i := 0; i < copiesPerIter; i++ {
++ offset := int64(-8 * (i + 1))
++ p := gins(s390x.AMOVD, &src, &tmp)
++ p.From.Type = obj.TYPE_MEM
++ p.From.Offset = offset
++ if i == 0 {
++ label = p
++ }
++ p = gins(s390x.AMOVD, &tmp, &dst)
++ p.To.Type = obj.TYPE_MEM
++ p.To.Offset = offset
++ }
++
++ ginscon(s390x.ASUB, 8*copiesPerIter, &src)
++ ginscon(s390x.ASUB, 8*copiesPerIter, &dst)
++ gins(s390x.ACMP, &src, &end)
++ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label)
++ gc.Regfree(&end)
++
++ w -= cnt
++ } else {
++ offset = w
++ }
++ }
++
++ if dir == _FORWARDS && w > 1024 {
++ // Loop over MVCs
++ cnt := w - (w % 256)
++
++ var end gc.Node
++ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
++ add := gins(s390x.AADD, nil, &end)
++ add.From.Type = obj.TYPE_CONST
++ add.From.Offset = cnt
++ add.Reg = src.Reg
++
++ mvc := gins(s390x.AMVC, &src, &dst)
++ mvc.From.Type = obj.TYPE_MEM
++ mvc.From.Offset = 0
++ mvc.To.Type = obj.TYPE_MEM
++ mvc.To.Offset = 0
++ mvc.From3 = new(obj.Addr)
++ mvc.From3.Type = obj.TYPE_CONST
++ mvc.From3.Offset = 256
++
++ ginscon(s390x.AADD, 256, &src)
++ ginscon(s390x.AADD, 256, &dst)
++ gins(s390x.ACMP, &src, &end)
++ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc)
++ gc.Regfree(&end)
++
++ w -= cnt
++ }
++
++ for w > 0 {
++ cnt := w
++ // If in reverse we can only do 8, 4, 2 or 1 bytes at a time.
++ if dir == _BACKWARDS {
++ switch {
++ case cnt >= 8:
++ cnt = 8
++ case cnt >= 4:
++ cnt = 4
++ case cnt >= 2:
++ cnt = 2
++ }
++ } else if cnt > 256 {
++ cnt = 256
++ }
++
++ switch cnt {
++ case 8, 4, 2, 1:
++ op := s390x.AMOVB
++ switch cnt {
++ case 8:
++ op = s390x.AMOVD
++ case 4:
++ op = s390x.AMOVW
++ case 2:
++ op = s390x.AMOVH
++ }
++ load := gins(op, &src, &tmp)
++ load.From.Type = obj.TYPE_MEM
++ load.From.Offset = offset
++
++ store := gins(op, &tmp, &dst)
++ store.To.Type = obj.TYPE_MEM
++ store.To.Offset = offset
++
++ if dir == _BACKWARDS {
++ load.From.Offset -= cnt
++ store.To.Offset -= cnt
++ }
++
++ default:
++ p := gins(s390x.AMVC, &src, &dst)
++ p.From.Type = obj.TYPE_MEM
++ p.From.Offset = offset
++ p.To.Type = obj.TYPE_MEM
++ p.To.Offset = offset
++ p.From3 = new(obj.Addr)
++ p.From3.Type = obj.TYPE_CONST
++ p.From3.Offset = cnt
++ }
++
++ switch dir {
++ case _FORWARDS:
++ offset += cnt
++ case _BACKWARDS:
++ offset -= cnt
++ }
++ w -= cnt
++ }
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/galign.go
+@@ -0,0 +1,95 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package s390x
++
++import (
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++)
++
++var thechar int = 'z'
++
++var thestring string = "s390x"
++
++var thelinkarch *obj.LinkArch
++
++func linkarchinit() {
++ thestring = obj.Getgoarch()
++ gc.Thearch.Thestring = thestring
++ gc.Thearch.Thelinkarch = &s390x.Links390x
++}
++
++var MAXWIDTH int64 = 1 << 50
++
++/*
++ * go declares several platform-specific type aliases:
++ * int, uint, and uintptr
++ */
++var typedefs = []gc.Typedef{
++ gc.Typedef{"int", gc.TINT, gc.TINT64},
++ gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
++ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
++}
++
++func betypeinit() {
++ gc.Widthptr = 8
++ gc.Widthint = 8
++ gc.Widthreg = 8
++}
++
++func Main() {
++ gc.Thearch.Thechar = thechar
++ gc.Thearch.Thestring = thestring
++ gc.Thearch.Thelinkarch = thelinkarch
++ gc.Thearch.Typedefs = typedefs
++ gc.Thearch.REGSP = s390x.REGSP
++ gc.Thearch.REGCTXT = s390x.REGCTXT
++ gc.Thearch.REGCALLX = s390x.REG_R3
++ gc.Thearch.REGCALLX2 = s390x.REG_R4
++ gc.Thearch.REGRETURN = s390x.REG_R3
++ gc.Thearch.REGMIN = s390x.REG_R0
++ gc.Thearch.REGMAX = s390x.REG_R15
++ gc.Thearch.FREGMIN = s390x.REG_F0
++ gc.Thearch.FREGMAX = s390x.REG_F15
++ gc.Thearch.MAXWIDTH = MAXWIDTH
++ gc.Thearch.ReservedRegs = resvd
++
++ gc.Thearch.Betypeinit = betypeinit
++ gc.Thearch.Cgen_hmul = cgen_hmul
++ gc.Thearch.Cgen_shift = cgen_shift
++ gc.Thearch.Clearfat = clearfat
++ gc.Thearch.Defframe = defframe
++ gc.Thearch.Dodiv = dodiv
++ gc.Thearch.Excise = excise
++ gc.Thearch.Expandchecks = expandchecks
++ gc.Thearch.Getg = getg
++ gc.Thearch.Gins = gins
++ gc.Thearch.Ginscmp = ginscmp
++ gc.Thearch.Ginscon = ginscon
++ gc.Thearch.Ginsnop = ginsnop
++ gc.Thearch.Gmove = gmove
++ gc.Thearch.Linkarchinit = linkarchinit
++ gc.Thearch.Peep = peep
++ gc.Thearch.Proginfo = proginfo
++ gc.Thearch.Regtyp = regtyp
++ gc.Thearch.Sameaddr = sameaddr
++ gc.Thearch.Smallindir = smallindir
++ gc.Thearch.Stackaddr = stackaddr
++ gc.Thearch.Blockcopy = blockcopy
++ gc.Thearch.Sudoaddable = sudoaddable
++ gc.Thearch.Sudoclean = sudoclean
++ gc.Thearch.Excludedregs = excludedregs
++ gc.Thearch.RtoB = RtoB
++ gc.Thearch.FtoB = RtoB
++ gc.Thearch.BtoR = BtoR
++ gc.Thearch.BtoF = BtoF
++ gc.Thearch.Optoas = optoas
++ gc.Thearch.Doregbits = doregbits
++ gc.Thearch.Regnames = regnames
++
++ gc.Main()
++ gc.Exit(0)
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/ggen.go
+@@ -0,0 +1,578 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package s390x
++
++import (
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++ "fmt"
++)
++
++// clearLoopCutOff is the (somewhat arbitrary) value above which it is better
++// to have a loop of clear instructions (e.g. XCs) rather than just generating
++// multiple instructions (i.e. loop unrolling).
++// Must be between 256 and 4096.
++const clearLoopCutoff = 1024
++
++func defframe(ptxt *obj.Prog) {
++ var n *gc.Node
++
++ // fill in argument size, stack size
++ ptxt.To.Type = obj.TYPE_TEXTSIZE
++
++ ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
++ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
++ ptxt.To.Offset = int64(frame)
++
++ // insert code to zero ambiguously live variables
++ // so that the garbage collector only sees initialized values
++ // when it looks for pointers.
++ p := ptxt
++
++ hi := int64(0)
++ lo := hi
++
++ // iterate through declarations - they are sorted in decreasing xoffset order.
++ for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
++ n = l.N
++ if !n.Name.Needzero {
++ continue
++ }
++ if n.Class != gc.PAUTO {
++ gc.Fatalf("needzero class %d", n.Class)
++ }
++ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
++ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
++ }
++
++ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
++ // merge with range we already have
++ lo = n.Xoffset
++
++ continue
++ }
++
++ // zero old range
++ p = zerorange(p, int64(frame), lo, hi)
++
++ // set new range
++ hi = n.Xoffset + n.Type.Width
++
++ lo = n.Xoffset
++ }
++
++ // zero final range
++ zerorange(p, int64(frame), lo, hi)
++}
++
++// zerorange clears the stack in the given range.
++func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
++ cnt := hi - lo
++ if cnt == 0 {
++ return p
++ }
++
++ // Adjust the frame to account for LR.
++ frame += gc.Ctxt.FixedFrameSize()
++ offset := frame + lo
++ reg := s390x.REGSP
++
++ // If the offset cannot fit in a 12-bit unsigned displacement then we
++ // need to create a copy of the stack pointer that we can adjust.
++ // We also need to do this if we are going to loop.
++ if offset < 0 || offset > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
++ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset, obj.TYPE_REG, s390x.REGRT1, 0)
++ p.Reg = int16(s390x.REGSP)
++ reg = s390x.REGRT1
++ offset = 0
++ }
++
++ // Generate a loop of large clears.
++ if cnt > clearLoopCutoff {
++ n := cnt - (cnt % 256)
++ end := s390x.REGRT2
++ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset+n, obj.TYPE_REG, end, 0)
++ p.Reg = int16(reg)
++ p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
++ p.From3 = new(obj.Addr)
++ p.From3.Type = obj.TYPE_CONST
++ p.From3.Offset = 256
++ pl := p
++ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
++ p = appendpp(p, s390x.ACMP, obj.TYPE_REG, reg, 0, obj.TYPE_REG, end, 0)
++ p = appendpp(p, s390x.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
++ gc.Patch(p, pl)
++
++ cnt -= n
++ }
++
++ // Generate remaining clear instructions without a loop.
++ for cnt > 0 {
++ n := cnt
++
++ // Can clear at most 256 bytes per instruction.
++ if n > 256 {
++ n = 256
++ }
++
++ switch n {
++ // Handle very small clears with move instructions.
++ case 8, 4, 2, 1:
++ ins := s390x.AMOVB
++ switch n {
++ case 8:
++ ins = s390x.AMOVD
++ case 4:
++ ins = s390x.AMOVW
++ case 2:
++ ins = s390x.AMOVH
++ }
++ p = appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, offset)
++
++ // Handle clears that would require multiple move instructions with XC.
++ default:
++ p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
++ p.From3 = new(obj.Addr)
++ p.From3.Type = obj.TYPE_CONST
++ p.From3.Offset = n
++ }
++
++ cnt -= n
++ offset += n
++ }
++
++ return p
++}
++
++func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
++ q := gc.Ctxt.NewProg()
++ gc.Clearp(q)
++ q.As = int16(as)
++ q.Lineno = p.Lineno
++ q.From.Type = int16(ftype)
++ q.From.Reg = int16(freg)
++ q.From.Offset = foffset
++ q.To.Type = int16(ttype)
++ q.To.Reg = int16(treg)
++ q.To.Offset = toffset
++ q.Link = p.Link
++ p.Link = q
++ return q
++}
++
++func ginsnop() {
++ var reg gc.Node
++ gc.Nodreg(&reg, gc.Types[gc.TINT], s390x.REG_R0)
++ gins(s390x.AOR, &reg, &reg)
++}
++
++var panicdiv *gc.Node
++
++/*
++ * generate division.
++ * generates one of:
++ * res = nl / nr
++ * res = nl % nr
++ * according to op.
++ */
++func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
++ // Have to be careful about handling
++ // most negative int divided by -1 correctly.
++ // The hardware will generate undefined result.
++ // Also need to explicitly trap on division on zero,
++ // the hardware will silently generate undefined result.
++ // DIVW will leave unpredicable result in higher 32-bit,
++ // so always use DIVD/DIVDU.
++ t := nl.Type
++
++ t0 := t
++ check := 0
++ if gc.Issigned[t.Etype] {
++ check = 1
++ if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
++ check = 0
++ } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
++ check = 0
++ }
++ }
++
++ if t.Etype < gc.TINT64 {
++ if gc.Issigned[t.Etype] {
++ t = gc.Types[gc.TINT64]
++ } else {
++ t = gc.Types[gc.TUINT64]
++ }
++ check = 0
++ }
++
++ a := optoas(gc.ODIV, t)
++
++ var tl gc.Node
++ gc.Regalloc(&tl, t0, nil)
++ var tr gc.Node
++ gc.Regalloc(&tr, t0, nil)
++ if nl.Ullman >= nr.Ullman {
++ gc.Cgen(nl, &tl)
++ gc.Cgen(nr, &tr)
++ } else {
++ gc.Cgen(nr, &tr)
++ gc.Cgen(nl, &tl)
++ }
++
++ if t != t0 {
++ // Convert
++ tl2 := tl
++
++ tr2 := tr
++ tl.Type = t
++ tr.Type = t
++ gmove(&tl2, &tl)
++ gmove(&tr2, &tr)
++ }
++
++ // Handle divide-by-zero panic.
++ p1 := gins(optoas(gc.OCMP, t), &tr, nil)
++
++ p1.To.Type = obj.TYPE_REG
++ p1.To.Reg = s390x.REGZERO
++ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
++ if panicdiv == nil {
++ panicdiv = gc.Sysfunc("panicdivide")
++ }
++ gc.Ginscall(panicdiv, -1)
++ gc.Patch(p1, gc.Pc)
++
++ var p2 *obj.Prog
++ if check != 0 {
++ var nm1 gc.Node
++ gc.Nodconst(&nm1, t, -1)
++ gins(optoas(gc.OCMP, t), &tr, &nm1)
++ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
++ if op == gc.ODIV {
++ // a / (-1) is -a.
++ gins(optoas(gc.OMINUS, t), nil, &tl)
++
++ gmove(&tl, res)
++ } else {
++ // a % (-1) is 0.
++ var nz gc.Node
++ gc.Nodconst(&nz, t, 0)
++
++ gmove(&nz, res)
++ }
++
++ p2 = gc.Gbranch(obj.AJMP, nil, 0)
++ gc.Patch(p1, gc.Pc)
++ }
++
++ p1 = gins(a, &tr, &tl)
++ if op == gc.ODIV {
++ gc.Regfree(&tr)
++ gmove(&tl, res)
++ } else {
++ // A%B = A-(A/B*B)
++ var tm gc.Node
++ gc.Regalloc(&tm, t, nil)
++
++ // patch div to use the 3 register form
++ // TODO(minux): add gins3?
++ p1.Reg = p1.To.Reg
++
++ p1.To.Reg = tm.Reg
++ gins(optoas(gc.OMUL, t), &tr, &tm)
++ gc.Regfree(&tr)
++ gins(optoas(gc.OSUB, t), &tm, &tl)
++ gc.Regfree(&tm)
++ gmove(&tl, res)
++ }
++
++ gc.Regfree(&tl)
++ if check != 0 {
++ gc.Patch(p2, gc.Pc)
++ }
++}
++
++/*
++ * generate high multiply:
++ * res = (nl*nr) >> width
++ */
++func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
++ // largest ullman on left.
++ if nl.Ullman < nr.Ullman {
++ nl, nr = nr, nl
++ }
++
++ t := nl.Type
++ w := int(t.Width) * 8
++ var n1 gc.Node
++ gc.Cgenr(nl, &n1, res)
++ var n2 gc.Node
++ gc.Cgenr(nr, &n2, nil)
++ switch gc.Simtype[t.Etype] {
++ case gc.TINT8,
++ gc.TINT16,
++ gc.TINT32:
++ gins(optoas(gc.OMUL, t), &n2, &n1)
++ p := gins(s390x.ASRAD, nil, &n1)
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = int64(w)
++
++ case gc.TUINT8,
++ gc.TUINT16,
++ gc.TUINT32:
++ gins(optoas(gc.OMUL, t), &n2, &n1)
++ p := gins(s390x.ASRD, nil, &n1)
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = int64(w)
++
++ case gc.TINT64,
++ gc.TUINT64:
++ gins(s390x.AMULHDU, &n2, &n1)
++
++ default:
++ gc.Fatalf("cgen_hmul %v", t)
++ }
++
++ gc.Cgen(&n1, res)
++ gc.Regfree(&n1)
++ gc.Regfree(&n2)
++}
++
++/*
++ * generate shift according to op, one of:
++ * res = nl << nr
++ * res = nl >> nr
++ */
++func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
++ a := optoas(op, nl.Type)
++
++ if nr.Op == gc.OLITERAL {
++ var n1 gc.Node
++ gc.Regalloc(&n1, nl.Type, res)
++ gc.Cgen(nl, &n1)
++ sc := uint64(nr.Int())
++ if sc >= uint64(nl.Type.Width*8) {
++ // large shift gets 2 shifts by width-1
++ var n3 gc.Node
++ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
++
++ gins(a, &n3, &n1)
++ gins(a, &n3, &n1)
++ } else {
++ gins(a, nr, &n1)
++ }
++ gmove(&n1, res)
++ gc.Regfree(&n1)
++ return
++ }
++
++ if nl.Ullman >= gc.UINF {
++ var n4 gc.Node
++ gc.Tempname(&n4, nl.Type)
++ gc.Cgen(nl, &n4)
++ nl = &n4
++ }
++
++ if nr.Ullman >= gc.UINF {
++ var n5 gc.Node
++ gc.Tempname(&n5, nr.Type)
++ gc.Cgen(nr, &n5)
++ nr = &n5
++ }
++
++ // Allow either uint32 or uint64 as shift type,
++ // to avoid unnecessary conversion from uint32 to uint64
++ // just to do the comparison.
++ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
++
++ if tcount.Etype < gc.TUINT32 {
++ tcount = gc.Types[gc.TUINT32]
++ }
++
++ var n1 gc.Node
++ gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
++ var n3 gc.Node
++ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
++
++ var n2 gc.Node
++ gc.Regalloc(&n2, nl.Type, res)
++
++ if nl.Ullman >= nr.Ullman {
++ gc.Cgen(nl, &n2)
++ gc.Cgen(nr, &n1)
++ gmove(&n1, &n3)
++ } else {
++ gc.Cgen(nr, &n1)
++ gmove(&n1, &n3)
++ gc.Cgen(nl, &n2)
++ }
++
++ gc.Regfree(&n3)
++
++ // test and fix up large shifts
++ if !bounded {
++ gc.Nodconst(&n3, tcount, nl.Type.Width*8)
++ gins(optoas(gc.OCMP, tcount), &n1, &n3)
++ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, 1)
++ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
++ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
++ gins(a, &n3, &n2)
++ } else {
++ gc.Nodconst(&n3, nl.Type, 0)
++ gmove(&n3, &n2)
++ }
++
++ gc.Patch(p1, gc.Pc)
++ }
++
++ gins(a, &n1, &n2)
++
++ gmove(&n2, res)
++
++ gc.Regfree(&n1)
++ gc.Regfree(&n2)
++}
++
++// clearfat clears (i.e. replaces with zeros) the value pointed to by nl.
++func clearfat(nl *gc.Node) {
++ if gc.Debug['g'] != 0 {
++ fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
++ }
++
++ // Avoid taking the address for simple enough types.
++ if gc.Componentgen(nil, nl) {
++ return
++ }
++
++ var dst gc.Node
++ gc.Regalloc(&dst, gc.Types[gc.Tptr], nil)
++ gc.Agen(nl, &dst)
++
++ var boff int64
++ w := nl.Type.Width
++ if w > clearLoopCutoff {
++ // Generate a loop clearing 256 bytes per iteration using XCs.
++ var end gc.Node
++ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
++ p := gins(s390x.AMOVD, &dst, &end)
++ p.From.Type = obj.TYPE_ADDR
++ p.From.Offset = w - (w % 256)
++
++ p = gins(s390x.AXC, &dst, &dst)
++ p.From.Type = obj.TYPE_MEM
++ p.From.Offset = 0
++ p.To.Type = obj.TYPE_MEM
++ p.To.Offset = 0
++ p.From3 = new(obj.Addr)
++ p.From3.Offset = 256
++ p.From3.Type = obj.TYPE_CONST
++ pl := p
++
++ ginscon(s390x.AADD, 256, &dst)
++ gins(s390x.ACMP, &dst, &end)
++ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), pl)
++ gc.Regfree(&end)
++ w = w % 256
++ }
++
++ // Generate instructions to clear the remaining memory.
++ for w > 0 {
++ n := w
++
++ // Can clear at most 256 bytes per instruction.
++ if n > 256 {
++ n = 256
++ }
++
++ switch n {
++ // Handle very small clears using moves.
++ case 8, 4, 2, 1:
++ ins := s390x.AMOVB
++ switch n {
++ case 8:
++ ins = s390x.AMOVD
++ case 4:
++ ins = s390x.AMOVW
++ case 2:
++ ins = s390x.AMOVH
++ }
++ p := gins(ins, nil, &dst)
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = 0
++ p.To.Type = obj.TYPE_MEM
++ p.To.Offset = boff
++
++ // Handle clears that would require multiple moves with a XC.
++ default:
++ p := gins(s390x.AXC, &dst, &dst)
++ p.From.Type = obj.TYPE_MEM
++ p.From.Offset = boff
++ p.To.Type = obj.TYPE_MEM
++ p.To.Offset = boff
++ p.From3 = new(obj.Addr)
++ p.From3.Offset = n
++ p.From3.Type = obj.TYPE_CONST
++ }
++
++ boff += n
++ w -= n
++ }
++
++ gc.Regfree(&dst)
++}
++
++// Called after regopt and peep have run.
++// Expand CHECKNIL pseudo-op into actual nil pointer check.
++func expandchecks(firstp *obj.Prog) {
++ for p := firstp; p != nil; p = p.Link {
++ if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
++ fmt.Printf("expandchecks: %v\n", p)
++ }
++ if p.As != obj.ACHECKNIL {
++ continue
++ }
++ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
++ gc.Warnl(int(p.Lineno), "generated nil check")
++ }
++ if p.From.Type != obj.TYPE_REG {
++ gc.Fatalf("invalid nil check %v\n", p)
++ }
++
++ // check is
++ // CMPBNE arg, $0, 2(PC) [likely]
++ // MOVD R0, 0(R0)
++ p1 := gc.Ctxt.NewProg()
++
++ gc.Clearp(p1)
++ p1.Link = p.Link
++ p.Link = p1
++ p1.Lineno = p.Lineno
++ p1.Pc = 9999
++ p.As = s390x.ACMPBNE
++ p.From3 = new(obj.Addr)
++ p.From3.Type = obj.TYPE_CONST
++ p.From3.Offset = 0
++
++ p.To.Type = obj.TYPE_BRANCH
++ p.To.Val = p1.Link
++
++ // crash by write to memory address 0.
++ p1.As = s390x.AMOVD
++
++ p1.From.Type = obj.TYPE_REG
++ p1.From.Reg = s390x.REGZERO
++ p1.To.Type = obj.TYPE_MEM
++ p1.To.Reg = s390x.REGZERO
++ p1.To.Offset = 0
++ }
++}
++
++// res = runtime.getg()
++func getg(res *gc.Node) {
++ var n1 gc.Node
++ gc.Nodreg(&n1, res.Type, s390x.REGG)
++ gmove(&n1, res)
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/gsubr.go
+@@ -0,0 +1,1136 @@
++// Derived from Inferno utils/6c/txt.c
++// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/compile/internal/big"
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++ "fmt"
++)
++
++var resvd = []int{
++ s390x.REGZERO, // R0
++ s390x.REGTMP, // R10
++ s390x.REGTMP2, // R11
++ s390x.REGCTXT, // R12
++ s390x.REGG, // R13
++ s390x.REG_LR, // R14
++ s390x.REGSP, // R15
++}
++
++// generate
++// as $c, n
++func ginscon(as int, c int64, n2 *gc.Node) {
++ var n1 gc.Node
++
++ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
++
++ if as != s390x.AMOVD && (c < -s390x.BIG || c > s390x.BIG) || n2.Op != gc.OREGISTER || as == s390x.AMULLD {
++ // cannot have more than 16-bit of immediate in ADD, etc.
++ // instead, MOV into register first.
++ var ntmp gc.Node
++ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
++
++ rawgins(s390x.AMOVD, &n1, &ntmp)
++ rawgins(as, &ntmp, n2)
++ gc.Regfree(&ntmp)
++ return
++ }
++
++ rawgins(as, &n1, n2)
++}
++
++// generate
++// as n, $c (CMP/CMPU)
++func ginscon2(as int, n2 *gc.Node, c int64) {
++ var n1 gc.Node
++
++ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
++
++ switch as {
++ default:
++ gc.Fatalf("ginscon2")
++
++ case s390x.ACMP:
++ if -s390x.BIG <= c && c <= s390x.BIG {
++ rawgins(as, n2, &n1)
++ return
++ }
++
++ case s390x.ACMPU:
++ if 0 <= c && c <= 2*s390x.BIG {
++ rawgins(as, n2, &n1)
++ return
++ }
++ }
++
++ // MOV n1 into register first
++ var ntmp gc.Node
++ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
++
++ rawgins(s390x.AMOVD, &n1, &ntmp)
++ rawgins(as, n2, &ntmp)
++ gc.Regfree(&ntmp)
++}
++
++func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
++ if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
++ // Reverse comparison to place constant last.
++ op = gc.Brrev(op)
++ n1, n2 = n2, n1
++ }
++
++ var r1, r2, g1, g2 gc.Node
++ gc.Regalloc(&r1, t, n1)
++ gc.Regalloc(&g1, n1.Type, &r1)
++ gc.Cgen(n1, &g1)
++ gmove(&g1, &r1)
++ if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
++ ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
++ } else {
++ gc.Regalloc(&r2, t, n2)
++ gc.Regalloc(&g2, n1.Type, &r2)
++ gc.Cgen(n2, &g2)
++ gmove(&g2, &r2)
++ rawgins(optoas(gc.OCMP, t), &r1, &r2)
++ gc.Regfree(&g2)
++ gc.Regfree(&r2)
++ }
++ gc.Regfree(&g1)
++ gc.Regfree(&r1)
++ return gc.Gbranch(optoas(op, t), nil, likely)
++}
++
++// set up nodes representing 2^63
++var (
++ bigi gc.Node
++ bigf gc.Node
++ bignodes_did bool
++)
++
++func bignodes() {
++ if bignodes_did {
++ return
++ }
++ bignodes_did = true
++
++ var i big.Int
++ i.SetInt64(1)
++ i.Lsh(&i, 63)
++
++ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
++ bigi.SetBigInt(&i)
++
++ bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
++}
++
++// gmvc tries to move f to t using a mvc instruction.
++// If successful it returns true, otherwise it returns false.
++func gmvc(f, t *gc.Node) bool {
++ ft := int(gc.Simsimtype(f.Type))
++ tt := int(gc.Simsimtype(t.Type))
++
++ if ft != tt {
++ return false
++ }
++
++ if f.Op != gc.OINDREG || t.Op != gc.OINDREG {
++ return false
++ }
++
++ if f.Xoffset < 0 || f.Xoffset >= 4096-8 {
++ return false
++ }
++
++ if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
++ return false
++ }
++
++ var len int64
++ switch ft {
++ case gc.TUINT8, gc.TINT8, gc.TBOOL:
++ len = 1
++ case gc.TUINT16, gc.TINT16:
++ len = 2
++ case gc.TUINT32, gc.TINT32, gc.TFLOAT32:
++ len = 4
++ case gc.TUINT64, gc.TINT64, gc.TFLOAT64, gc.TPTR64:
++ len = 8
++ case gc.TUNSAFEPTR:
++ len = int64(gc.Widthptr)
++ default:
++ return false
++ }
++
++ p := gc.Prog(s390x.AMVC)
++ gc.Naddr(&p.From, f)
++ gc.Naddr(&p.To, t)
++ p.From3 = new(obj.Addr)
++ p.From3.Offset = len
++ p.From3.Type = obj.TYPE_CONST
++ return true
++}
++
++// generate move:
++// t = f
++// hard part is conversions.
++func gmove(f *gc.Node, t *gc.Node) {
++ if gc.Debug['M'] != 0 {
++ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
++ }
++
++ ft := int(gc.Simsimtype(f.Type))
++ tt := int(gc.Simsimtype(t.Type))
++ cvt := t.Type
++
++ if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
++ gc.Complexmove(f, t)
++ return
++ }
++
++ // cannot have two memory operands
++ var a int
++ if gc.Ismem(f) && gc.Ismem(t) {
++ if gmvc(f, t) {
++ return
++ }
++ goto hard
++ }
++
++ // convert constant to desired type
++ if f.Op == gc.OLITERAL {
++ var con gc.Node
++ f.Convconst(&con, t.Type)
++ f = &con
++ ft = tt // so big switch will choose a simple mov
++
++ // some constants can't move directly to memory.
++ if gc.Ismem(t) {
++ // float constants come from memory.
++ if gc.Isfloat[tt] {
++ goto hard
++ }
++
++ // all immediates are 16-bit sign-extended
++ // unless moving into a register.
++ if gc.Isint[tt] {
++ if i := con.Int(); int64(int16(i)) != i {
++ goto hard
++ }
++ }
++
++ // immediate moves to memory have a 12-bit unsigned displacement
++ if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
++ goto hard
++ }
++ }
++ }
++
++ // a float-to-int or int-to-float conversion requires the source operand in a register
++ if gc.Ismem(f) && ((gc.Isfloat[ft] && gc.Isint[tt]) || (gc.Isint[ft] && gc.Isfloat[tt])) {
++ cvt = f.Type
++ goto hard
++ }
++
++ // a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register
++ if gc.Ismem(f) && gc.Isfloat[ft] && gc.Isfloat[tt] && (ft != tt) {
++ cvt = f.Type
++ goto hard
++ }
++
++ // value -> value copy, only one memory operand.
++ // figure out the instruction to use.
++ // break out of switch for one-instruction gins.
++ // goto rdst for "destination must be register".
++ // goto hard for "convert to cvt type first".
++ // otherwise handle and return.
++ switch uint32(ft)<<16 | uint32(tt) {
++ default:
++ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
++
++ // integer copy and truncate
++ case gc.TINT8<<16 | gc.TINT8,
++ gc.TUINT8<<16 | gc.TINT8,
++ gc.TINT16<<16 | gc.TINT8,
++ gc.TUINT16<<16 | gc.TINT8,
++ gc.TINT32<<16 | gc.TINT8,
++ gc.TUINT32<<16 | gc.TINT8,
++ gc.TINT64<<16 | gc.TINT8,
++ gc.TUINT64<<16 | gc.TINT8:
++ a = s390x.AMOVB
++
++ case gc.TINT8<<16 | gc.TUINT8,
++ gc.TUINT8<<16 | gc.TUINT8,
++ gc.TINT16<<16 | gc.TUINT8,
++ gc.TUINT16<<16 | gc.TUINT8,
++ gc.TINT32<<16 | gc.TUINT8,
++ gc.TUINT32<<16 | gc.TUINT8,
++ gc.TINT64<<16 | gc.TUINT8,
++ gc.TUINT64<<16 | gc.TUINT8:
++ a = s390x.AMOVBZ
++
++ case gc.TINT16<<16 | gc.TINT16,
++ gc.TUINT16<<16 | gc.TINT16,
++ gc.TINT32<<16 | gc.TINT16,
++ gc.TUINT32<<16 | gc.TINT16,
++ gc.TINT64<<16 | gc.TINT16,
++ gc.TUINT64<<16 | gc.TINT16:
++ a = s390x.AMOVH
++
++ case gc.TINT16<<16 | gc.TUINT16,
++ gc.TUINT16<<16 | gc.TUINT16,
++ gc.TINT32<<16 | gc.TUINT16,
++ gc.TUINT32<<16 | gc.TUINT16,
++ gc.TINT64<<16 | gc.TUINT16,
++ gc.TUINT64<<16 | gc.TUINT16:
++ a = s390x.AMOVHZ
++
++ case gc.TINT32<<16 | gc.TINT32,
++ gc.TUINT32<<16 | gc.TINT32,
++ gc.TINT64<<16 | gc.TINT32,
++ gc.TUINT64<<16 | gc.TINT32:
++ a = s390x.AMOVW
++
++ case gc.TINT32<<16 | gc.TUINT32,
++ gc.TUINT32<<16 | gc.TUINT32,
++ gc.TINT64<<16 | gc.TUINT32,
++ gc.TUINT64<<16 | gc.TUINT32:
++ a = s390x.AMOVWZ
++
++ case gc.TINT64<<16 | gc.TINT64,
++ gc.TINT64<<16 | gc.TUINT64,
++ gc.TUINT64<<16 | gc.TINT64,
++ gc.TUINT64<<16 | gc.TUINT64:
++ a = s390x.AMOVD
++
++ // sign extend int8
++ case gc.TINT8<<16 | gc.TINT16,
++ gc.TINT8<<16 | gc.TUINT16,
++ gc.TINT8<<16 | gc.TINT32,
++ gc.TINT8<<16 | gc.TUINT32,
++ gc.TINT8<<16 | gc.TINT64,
++ gc.TINT8<<16 | gc.TUINT64:
++ a = s390x.AMOVB
++ goto rdst
++
++ // sign extend uint8
++ case gc.TUINT8<<16 | gc.TINT16,
++ gc.TUINT8<<16 | gc.TUINT16,
++ gc.TUINT8<<16 | gc.TINT32,
++ gc.TUINT8<<16 | gc.TUINT32,
++ gc.TUINT8<<16 | gc.TINT64,
++ gc.TUINT8<<16 | gc.TUINT64:
++ a = s390x.AMOVBZ
++ goto rdst
++
++ // sign extend int16
++ case gc.TINT16<<16 | gc.TINT32,
++ gc.TINT16<<16 | gc.TUINT32,
++ gc.TINT16<<16 | gc.TINT64,
++ gc.TINT16<<16 | gc.TUINT64:
++ a = s390x.AMOVH
++ goto rdst
++
++ // zero extend uint16
++ case gc.TUINT16<<16 | gc.TINT32,
++ gc.TUINT16<<16 | gc.TUINT32,
++ gc.TUINT16<<16 | gc.TINT64,
++ gc.TUINT16<<16 | gc.TUINT64:
++ a = s390x.AMOVHZ
++ goto rdst
++
++ // sign extend int32
++ case gc.TINT32<<16 | gc.TINT64,
++ gc.TINT32<<16 | gc.TUINT64:
++ a = s390x.AMOVW
++ goto rdst
++
++ // zero extend uint32
++ case gc.TUINT32<<16 | gc.TINT64,
++ gc.TUINT32<<16 | gc.TUINT64:
++ a = s390x.AMOVWZ
++ goto rdst
++
++ // float to integer
++ case gc.TFLOAT32<<16 | gc.TUINT8,
++ gc.TFLOAT32<<16 | gc.TUINT16:
++ cvt = gc.Types[gc.TUINT32]
++ goto hard
++
++ case gc.TFLOAT32<<16 | gc.TUINT32:
++ a = s390x.ACLFEBR
++ goto rdst
++
++ case gc.TFLOAT32<<16 | gc.TUINT64:
++ a = s390x.ACLGEBR
++ goto rdst
++
++ case gc.TFLOAT64<<16 | gc.TUINT8,
++ gc.TFLOAT64<<16 | gc.TUINT16:
++ cvt = gc.Types[gc.TUINT32]
++ goto hard
++
++ case gc.TFLOAT64<<16 | gc.TUINT32:
++ a = s390x.ACLFDBR
++ goto rdst
++
++ case gc.TFLOAT64<<16 | gc.TUINT64:
++ a = s390x.ACLGDBR
++ goto rdst
++
++ case gc.TFLOAT32<<16 | gc.TINT8,
++ gc.TFLOAT32<<16 | gc.TINT16:
++ cvt = gc.Types[gc.TINT32]
++ goto hard
++
++ case gc.TFLOAT32<<16 | gc.TINT32:
++ a = s390x.ACFEBRA
++ goto rdst
++
++ case gc.TFLOAT32<<16 | gc.TINT64:
++ a = s390x.ACGEBRA
++ goto rdst
++
++ case gc.TFLOAT64<<16 | gc.TINT8,
++ gc.TFLOAT64<<16 | gc.TINT16:
++ cvt = gc.Types[gc.TINT32]
++ goto hard
++
++ case gc.TFLOAT64<<16 | gc.TINT32:
++ a = s390x.ACFDBRA
++ goto rdst
++
++ case gc.TFLOAT64<<16 | gc.TINT64:
++ a = s390x.ACGDBRA
++ goto rdst
++
++ // integer to float
++ case gc.TUINT8<<16 | gc.TFLOAT32,
++ gc.TUINT16<<16 | gc.TFLOAT32:
++ cvt = gc.Types[gc.TUINT32]
++ goto hard
++
++ case gc.TUINT32<<16 | gc.TFLOAT32:
++ a = s390x.ACELFBR
++ goto rdst
++
++ case gc.TUINT64<<16 | gc.TFLOAT32:
++ a = s390x.ACELGBR
++ goto rdst
++
++ case gc.TUINT8<<16 | gc.TFLOAT64,
++ gc.TUINT16<<16 | gc.TFLOAT64:
++ cvt = gc.Types[gc.TUINT32]
++ goto hard
++
++ case gc.TUINT32<<16 | gc.TFLOAT64:
++ a = s390x.ACDLFBR
++ goto rdst
++
++ case gc.TUINT64<<16 | gc.TFLOAT64:
++ a = s390x.ACDLGBR
++ goto rdst
++
++ case gc.TINT8<<16 | gc.TFLOAT32,
++ gc.TINT16<<16 | gc.TFLOAT32:
++ cvt = gc.Types[gc.TINT32]
++ goto hard
++
++ case gc.TINT32<<16 | gc.TFLOAT32:
++ a = s390x.ACEFBRA
++ goto rdst
++
++ case gc.TINT64<<16 | gc.TFLOAT32:
++ a = s390x.ACEGBRA
++ goto rdst
++
++ case gc.TINT8<<16 | gc.TFLOAT64,
++ gc.TINT16<<16 | gc.TFLOAT64:
++ cvt = gc.Types[gc.TINT32]
++ goto hard
++
++ case gc.TINT32<<16 | gc.TFLOAT64:
++ a = s390x.ACDFBRA
++ goto rdst
++
++ case gc.TINT64<<16 | gc.TFLOAT64:
++ a = s390x.ACDGBRA
++ goto rdst
++
++ // float to float
++ case gc.TFLOAT32<<16 | gc.TFLOAT32:
++ a = s390x.AFMOVS
++
++ case gc.TFLOAT64<<16 | gc.TFLOAT64:
++ a = s390x.AFMOVD
++
++ case gc.TFLOAT32<<16 | gc.TFLOAT64:
++ a = s390x.ALDEBR
++ goto rdst
++
++ case gc.TFLOAT64<<16 | gc.TFLOAT32:
++ a = s390x.ALEDBR
++ goto rdst
++ }
++
++ gins(a, f, t)
++ return
++
++ // requires register destination
++rdst:
++ if t != nil && t.Op == gc.OREGISTER {
++ gins(a, f, t)
++ return
++ } else {
++ var r1 gc.Node
++ gc.Regalloc(&r1, t.Type, t)
++
++ gins(a, f, &r1)
++ gmove(&r1, t)
++ gc.Regfree(&r1)
++ return
++ }
++
++ // requires register intermediate
++hard:
++ var r1 gc.Node
++ gc.Regalloc(&r1, cvt, t)
++
++ gmove(f, &r1)
++ gmove(&r1, t)
++ gc.Regfree(&r1)
++ return
++}
++
++func intLiteral(n *gc.Node) (x int64, ok bool) {
++ switch {
++ case n == nil:
++ return
++ case gc.Isconst(n, gc.CTINT):
++ return n.Int(), true
++ case gc.Isconst(n, gc.CTBOOL):
++ return int64(obj.Bool2int(n.Bool())), true
++ }
++ return
++}
++
++// gins is called by the front end.
++// It synthesizes some multiple-instruction sequences
++// so the front end can stay simpler.
++func gins(as int, f, t *gc.Node) *obj.Prog {
++ if t != nil {
++ if as >= obj.A_ARCHSPECIFIC {
++ if x, ok := intLiteral(f); ok {
++ ginscon(as, x, t)
++ return nil // caller must not use
++ }
++ }
++ if as == s390x.ACMP || as == s390x.ACMPU {
++ if x, ok := intLiteral(t); ok {
++ ginscon2(as, f, x)
++ return nil // caller must not use
++ }
++ }
++ }
++ return rawgins(as, f, t)
++}
++
++// generate one instruction:
++// as f, t
++func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
++ // self move check
++ // TODO(mundaym): use sized math and extend to MOVB, MOVWZ etc.
++ switch as {
++ case s390x.AMOVD, s390x.AFMOVS, s390x.AFMOVD:
++ if f != nil && t != nil &&
++ f.Op == gc.OREGISTER && t.Op == gc.OREGISTER &&
++ f.Reg == t.Reg {
++ return nil
++ }
++ }
++
++ p := gc.Prog(as)
++ gc.Naddr(&p.From, f)
++ gc.Naddr(&p.To, t)
++
++ switch as {
++ // Bad things the front end has done to us. Crash to find call stack.
++ case s390x.AMULLD:
++ if p.From.Type == obj.TYPE_CONST {
++ gc.Debug['h'] = 1
++ gc.Fatalf("bad inst: %v", p)
++ }
++ case s390x.ACMP, s390x.ACMPU:
++ if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
++ gc.Debug['h'] = 1
++ gc.Fatalf("bad inst: %v", p)
++ }
++ }
++
++ if gc.Debug['g'] != 0 {
++ fmt.Printf("%v\n", p)
++ }
++
++ w := int32(0)
++ switch as {
++ case s390x.AMOVB, s390x.AMOVBZ:
++ w = 1
++
++ case s390x.AMOVH, s390x.AMOVHZ:
++ w = 2
++
++ case s390x.AMOVW, s390x.AMOVWZ:
++ w = 4
++
++ case s390x.AMOVD:
++ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
++ break
++ }
++ w = 8
++ }
++
++ if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
++ gc.Dump("f", f)
++ gc.Dump("t", t)
++ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
++ }
++
++ return p
++}
++
++// optoas returns the Axxx equivalent of Oxxx for type t
++func optoas(op gc.Op, t *gc.Type) int {
++ if t == nil {
++ gc.Fatalf("optoas: t is nil")
++ }
++
++ // avoid constant conversions in switches below
++ const (
++ OMINUS_ = uint32(gc.OMINUS) << 16
++ OLSH_ = uint32(gc.OLSH) << 16
++ ORSH_ = uint32(gc.ORSH) << 16
++ OADD_ = uint32(gc.OADD) << 16
++ OSUB_ = uint32(gc.OSUB) << 16
++ OMUL_ = uint32(gc.OMUL) << 16
++ ODIV_ = uint32(gc.ODIV) << 16
++ OOR_ = uint32(gc.OOR) << 16
++ OAND_ = uint32(gc.OAND) << 16
++ OXOR_ = uint32(gc.OXOR) << 16
++ OEQ_ = uint32(gc.OEQ) << 16
++ ONE_ = uint32(gc.ONE) << 16
++ OLT_ = uint32(gc.OLT) << 16
++ OLE_ = uint32(gc.OLE) << 16
++ OGE_ = uint32(gc.OGE) << 16
++ OGT_ = uint32(gc.OGT) << 16
++ OCMP_ = uint32(gc.OCMP) << 16
++ OAS_ = uint32(gc.OAS) << 16
++ OHMUL_ = uint32(gc.OHMUL) << 16
++ OSQRT_ = uint32(gc.OSQRT) << 16
++ OLROT_ = uint32(gc.OLROT) << 16
++ )
++
++ a := int(obj.AXXX)
++ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
++ default:
++ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
++
++ case OEQ_ | gc.TBOOL,
++ OEQ_ | gc.TINT8,
++ OEQ_ | gc.TUINT8,
++ OEQ_ | gc.TINT16,
++ OEQ_ | gc.TUINT16,
++ OEQ_ | gc.TINT32,
++ OEQ_ | gc.TUINT32,
++ OEQ_ | gc.TINT64,
++ OEQ_ | gc.TUINT64,
++ OEQ_ | gc.TPTR32,
++ OEQ_ | gc.TPTR64,
++ OEQ_ | gc.TFLOAT32,
++ OEQ_ | gc.TFLOAT64:
++ a = s390x.ABEQ
++
++ case ONE_ | gc.TBOOL,
++ ONE_ | gc.TINT8,
++ ONE_ | gc.TUINT8,
++ ONE_ | gc.TINT16,
++ ONE_ | gc.TUINT16,
++ ONE_ | gc.TINT32,
++ ONE_ | gc.TUINT32,
++ ONE_ | gc.TINT64,
++ ONE_ | gc.TUINT64,
++ ONE_ | gc.TPTR32,
++ ONE_ | gc.TPTR64,
++ ONE_ | gc.TFLOAT32,
++ ONE_ | gc.TFLOAT64:
++ a = s390x.ABNE
++
++ case OLT_ | gc.TINT8, // ACMP
++ OLT_ | gc.TINT16,
++ OLT_ | gc.TINT32,
++ OLT_ | gc.TINT64,
++ OLT_ | gc.TUINT8,
++ // ACMPU
++ OLT_ | gc.TUINT16,
++ OLT_ | gc.TUINT32,
++ OLT_ | gc.TUINT64,
++ OLT_ | gc.TFLOAT32,
++ // AFCMPU
++ OLT_ | gc.TFLOAT64:
++ a = s390x.ABLT
++
++ case OLE_ | gc.TINT8, // ACMP
++ OLE_ | gc.TINT16,
++ OLE_ | gc.TINT32,
++ OLE_ | gc.TINT64,
++ OLE_ | gc.TUINT8,
++ // ACMPU
++ OLE_ | gc.TUINT16,
++ OLE_ | gc.TUINT32,
++ OLE_ | gc.TUINT64,
++ OLE_ | gc.TFLOAT32,
++ OLE_ | gc.TFLOAT64:
++ a = s390x.ABLE
++
++ case OGT_ | gc.TINT8,
++ OGT_ | gc.TINT16,
++ OGT_ | gc.TINT32,
++ OGT_ | gc.TINT64,
++ OGT_ | gc.TUINT8,
++ OGT_ | gc.TUINT16,
++ OGT_ | gc.TUINT32,
++ OGT_ | gc.TUINT64,
++ OGT_ | gc.TFLOAT32,
++ OGT_ | gc.TFLOAT64:
++ a = s390x.ABGT
++
++ case OGE_ | gc.TINT8,
++ OGE_ | gc.TINT16,
++ OGE_ | gc.TINT32,
++ OGE_ | gc.TINT64,
++ OGE_ | gc.TUINT8,
++ OGE_ | gc.TUINT16,
++ OGE_ | gc.TUINT32,
++ OGE_ | gc.TUINT64,
++ OGE_ | gc.TFLOAT32,
++ OGE_ | gc.TFLOAT64:
++ a = s390x.ABGE
++
++ case OCMP_ | gc.TBOOL,
++ OCMP_ | gc.TINT8,
++ OCMP_ | gc.TINT16,
++ OCMP_ | gc.TINT32,
++ OCMP_ | gc.TPTR32,
++ OCMP_ | gc.TINT64:
++ a = s390x.ACMP
++
++ case OCMP_ | gc.TUINT8,
++ OCMP_ | gc.TUINT16,
++ OCMP_ | gc.TUINT32,
++ OCMP_ | gc.TUINT64,
++ OCMP_ | gc.TPTR64:
++ a = s390x.ACMPU
++
++ case OCMP_ | gc.TFLOAT32:
++ a = s390x.ACEBR
++
++ case OCMP_ | gc.TFLOAT64:
++ a = s390x.AFCMPU
++
++ case OAS_ | gc.TBOOL,
++ OAS_ | gc.TINT8:
++ a = s390x.AMOVB
++
++ case OAS_ | gc.TUINT8:
++ a = s390x.AMOVBZ
++
++ case OAS_ | gc.TINT16:
++ a = s390x.AMOVH
++
++ case OAS_ | gc.TUINT16:
++ a = s390x.AMOVHZ
++
++ case OAS_ | gc.TINT32:
++ a = s390x.AMOVW
++
++ case OAS_ | gc.TUINT32,
++ OAS_ | gc.TPTR32:
++ a = s390x.AMOVWZ
++
++ case OAS_ | gc.TINT64,
++ OAS_ | gc.TUINT64,
++ OAS_ | gc.TPTR64:
++ a = s390x.AMOVD
++
++ case OAS_ | gc.TFLOAT32:
++ a = s390x.AFMOVS
++
++ case OAS_ | gc.TFLOAT64:
++ a = s390x.AFMOVD
++
++ case OADD_ | gc.TINT8,
++ OADD_ | gc.TUINT8,
++ OADD_ | gc.TINT16,
++ OADD_ | gc.TUINT16,
++ OADD_ | gc.TINT32,
++ OADD_ | gc.TUINT32,
++ OADD_ | gc.TPTR32,
++ OADD_ | gc.TINT64,
++ OADD_ | gc.TUINT64,
++ OADD_ | gc.TPTR64:
++ a = s390x.AADD
++
++ case OADD_ | gc.TFLOAT32:
++ a = s390x.AFADDS
++
++ case OADD_ | gc.TFLOAT64:
++ a = s390x.AFADD
++
++ case OSUB_ | gc.TINT8,
++ OSUB_ | gc.TUINT8,
++ OSUB_ | gc.TINT16,
++ OSUB_ | gc.TUINT16,
++ OSUB_ | gc.TINT32,
++ OSUB_ | gc.TUINT32,
++ OSUB_ | gc.TPTR32,
++ OSUB_ | gc.TINT64,
++ OSUB_ | gc.TUINT64,
++ OSUB_ | gc.TPTR64:
++ a = s390x.ASUB
++
++ case OSUB_ | gc.TFLOAT32:
++ a = s390x.AFSUBS
++
++ case OSUB_ | gc.TFLOAT64:
++ a = s390x.AFSUB
++
++ case OMINUS_ | gc.TINT8,
++ OMINUS_ | gc.TUINT8,
++ OMINUS_ | gc.TINT16,
++ OMINUS_ | gc.TUINT16,
++ OMINUS_ | gc.TINT32,
++ OMINUS_ | gc.TUINT32,
++ OMINUS_ | gc.TPTR32,
++ OMINUS_ | gc.TINT64,
++ OMINUS_ | gc.TUINT64,
++ OMINUS_ | gc.TPTR64:
++ a = s390x.ANEG
++
++ case OAND_ | gc.TINT8,
++ OAND_ | gc.TUINT8,
++ OAND_ | gc.TINT16,
++ OAND_ | gc.TUINT16,
++ OAND_ | gc.TINT32,
++ OAND_ | gc.TUINT32,
++ OAND_ | gc.TPTR32,
++ OAND_ | gc.TINT64,
++ OAND_ | gc.TUINT64,
++ OAND_ | gc.TPTR64:
++ a = s390x.AAND
++
++ case OOR_ | gc.TINT8,
++ OOR_ | gc.TUINT8,
++ OOR_ | gc.TINT16,
++ OOR_ | gc.TUINT16,
++ OOR_ | gc.TINT32,
++ OOR_ | gc.TUINT32,
++ OOR_ | gc.TPTR32,
++ OOR_ | gc.TINT64,
++ OOR_ | gc.TUINT64,
++ OOR_ | gc.TPTR64:
++ a = s390x.AOR
++
++ case OXOR_ | gc.TINT8,
++ OXOR_ | gc.TUINT8,
++ OXOR_ | gc.TINT16,
++ OXOR_ | gc.TUINT16,
++ OXOR_ | gc.TINT32,
++ OXOR_ | gc.TUINT32,
++ OXOR_ | gc.TPTR32,
++ OXOR_ | gc.TINT64,
++ OXOR_ | gc.TUINT64,
++ OXOR_ | gc.TPTR64:
++ a = s390x.AXOR
++
++ case OLSH_ | gc.TINT8,
++ OLSH_ | gc.TUINT8,
++ OLSH_ | gc.TINT16,
++ OLSH_ | gc.TUINT16,
++ OLSH_ | gc.TINT32,
++ OLSH_ | gc.TUINT32,
++ OLSH_ | gc.TPTR32,
++ OLSH_ | gc.TINT64,
++ OLSH_ | gc.TUINT64,
++ OLSH_ | gc.TPTR64:
++ a = s390x.ASLD
++
++ case ORSH_ | gc.TUINT8,
++ ORSH_ | gc.TUINT16,
++ ORSH_ | gc.TUINT32,
++ ORSH_ | gc.TPTR32,
++ ORSH_ | gc.TUINT64,
++ ORSH_ | gc.TPTR64:
++ a = s390x.ASRD
++
++ case ORSH_ | gc.TINT8,
++ ORSH_ | gc.TINT16,
++ ORSH_ | gc.TINT32,
++ ORSH_ | gc.TINT64:
++ a = s390x.ASRAD
++
++ case OHMUL_ | gc.TINT64,
++ OHMUL_ | gc.TUINT64,
++ OHMUL_ | gc.TPTR64:
++ a = s390x.AMULHDU
++
++ case OMUL_ | gc.TINT8,
++ OMUL_ | gc.TINT16,
++ OMUL_ | gc.TINT32,
++ OMUL_ | gc.TINT64:
++ a = s390x.AMULLD
++
++ case OMUL_ | gc.TUINT8,
++ OMUL_ | gc.TUINT16,
++ OMUL_ | gc.TUINT32,
++ OMUL_ | gc.TPTR32,
++ // don't use word multiply, the high 32-bit are undefined.
++ OMUL_ | gc.TUINT64,
++ OMUL_ | gc.TPTR64:
++ // for 64-bit multiplies, signedness doesn't matter.
++ a = s390x.AMULLD
++
++ case OMUL_ | gc.TFLOAT32:
++ a = s390x.AFMULS
++
++ case OMUL_ | gc.TFLOAT64:
++ a = s390x.AFMUL
++
++ case ODIV_ | gc.TINT8,
++ ODIV_ | gc.TINT16,
++ ODIV_ | gc.TINT32,
++ ODIV_ | gc.TINT64:
++ a = s390x.ADIVD
++
++ case ODIV_ | gc.TUINT8,
++ ODIV_ | gc.TUINT16,
++ ODIV_ | gc.TUINT32,
++ ODIV_ | gc.TPTR32,
++ ODIV_ | gc.TUINT64,
++ ODIV_ | gc.TPTR64:
++ a = s390x.ADIVDU
++
++ case ODIV_ | gc.TFLOAT32:
++ a = s390x.AFDIVS
++
++ case ODIV_ | gc.TFLOAT64:
++ a = s390x.AFDIV
++
++ case OSQRT_ | gc.TFLOAT64:
++ a = s390x.AFSQRT
++
++ case OLROT_ | gc.TUINT32,
++ OLROT_ | gc.TPTR32,
++ OLROT_ | gc.TINT32:
++ a = s390x.ARLL
++
++ case OLROT_ | gc.TUINT64,
++ OLROT_ | gc.TPTR64,
++ OLROT_ | gc.TINT64:
++ a = s390x.ARLLG
++ }
++
++ return a
++}
++
++const (
++ ODynam = 1 << 0
++ OAddable = 1 << 1
++)
++
++var clean [20]gc.Node
++
++var cleani int = 0
++
++func sudoclean() {
++ if clean[cleani-1].Op != gc.OEMPTY {
++ gc.Regfree(&clean[cleani-1])
++ }
++ if clean[cleani-2].Op != gc.OEMPTY {
++ gc.Regfree(&clean[cleani-2])
++ }
++ cleani -= 2
++}
++
++/*
++ * generate code to compute address of n,
++ * a reference to a (perhaps nested) field inside
++ * an array or struct.
++ * return 0 on failure, 1 on success.
++ * on success, leaves usable address in a.
++ *
++ * caller is responsible for calling sudoclean
++ * after successful sudoaddable,
++ * to release the register used for a.
++ */
++func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
++ if n.Type == nil {
++ return false
++ }
++
++ *a = obj.Addr{}
++
++ switch n.Op {
++ case gc.OLITERAL:
++ if !gc.Isconst(n, gc.CTINT) {
++ return false
++ }
++ v := n.Int()
++ switch as {
++ default:
++ return false
++
++ // operations that can cope with a 32-bit immediate
++ // TODO(mundaym): logical operations can work on high bits
++ case s390x.AADD,
++ s390x.AADDC,
++ s390x.ASUB,
++ s390x.AMULLW,
++ s390x.AAND,
++ s390x.AOR,
++ s390x.AXOR,
++ s390x.ASLD,
++ s390x.ASLW,
++ s390x.ASRAW,
++ s390x.ASRAD,
++ s390x.ASRW,
++ s390x.ASRD,
++ s390x.AMOVB,
++ s390x.AMOVBZ,
++ s390x.AMOVH,
++ s390x.AMOVHZ,
++ s390x.AMOVW,
++ s390x.AMOVWZ,
++ s390x.AMOVD:
++ if int64(int32(v)) != v {
++ return false
++ }
++
++ // for comparisons avoid immediates unless they can
++ // fit into a int8/uint8
++ // this favours combined compare and branch instructions
++ case s390x.ACMP:
++ if int64(int8(v)) != v {
++ return false
++ }
++ case s390x.ACMPU:
++ if int64(uint8(v)) != v {
++ return false
++ }
++ }
++
++ cleani += 2
++ reg := &clean[cleani-1]
++ reg1 := &clean[cleani-2]
++ reg.Op = gc.OEMPTY
++ reg1.Op = gc.OEMPTY
++ gc.Naddr(a, n)
++ return true
++
++ case gc.ODOT,
++ gc.ODOTPTR:
++ cleani += 2
++ reg := &clean[cleani-1]
++ reg1 := &clean[cleani-2]
++ reg.Op = gc.OEMPTY
++ reg1.Op = gc.OEMPTY
++ var nn *gc.Node
++ var oary [10]int64
++ o := gc.Dotoffset(n, oary[:], &nn)
++ if nn == nil {
++ sudoclean()
++ return false
++ }
++
++ if nn.Addable && o == 1 && oary[0] >= 0 {
++ // directly addressable set of DOTs
++ n1 := *nn
++
++ n1.Type = n.Type
++ n1.Xoffset += oary[0]
++ // check that the offset fits into a 12-bit displacement
++ if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
++ sudoclean()
++ return false
++ }
++ gc.Naddr(a, &n1)
++ return true
++ }
++
++ gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
++ n1 := *reg
++ n1.Op = gc.OINDREG
++ if oary[0] >= 0 {
++ gc.Agen(nn, reg)
++ n1.Xoffset = oary[0]
++ } else {
++ gc.Cgen(nn, reg)
++ gc.Cgen_checknil(reg)
++ n1.Xoffset = -(oary[0] + 1)
++ }
++
++ for i := 1; i < o; i++ {
++ if oary[i] >= 0 {
++ gc.Fatalf("can't happen")
++ }
++ gins(s390x.AMOVD, &n1, reg)
++ gc.Cgen_checknil(reg)
++ n1.Xoffset = -(oary[i] + 1)
++ }
++
++ a.Type = obj.TYPE_NONE
++ a.Index = obj.TYPE_NONE
++ // check that the offset fits into a 12-bit displacement
++ if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
++ tmp := n1
++ tmp.Op = gc.OREGISTER
++ tmp.Type = gc.Types[gc.Tptr]
++ tmp.Xoffset = 0
++ gc.Cgen_checknil(&tmp)
++ ginscon(s390x.AADD, n1.Xoffset, &tmp)
++ n1.Xoffset = 0
++ }
++ gc.Naddr(a, &n1)
++ return true
++ }
++
++ return false
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/peep.go
+@@ -0,0 +1,1827 @@
++// Derived from Inferno utils/6c/peep.c
++// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++ "fmt"
++)
++
++var gactive uint32
++
++func peep(firstp *obj.Prog) {
++ g := gc.Flowstart(firstp, nil)
++ if g == nil {
++ return
++ }
++ gactive = 0
++
++ // promote zero moves to MOVD so that they are more likely to
++ // be optimized in later passes
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++ if isMove(p) && p.As != s390x.AMOVD && regzer(&p.From) != 0 && isGPR(&p.To) {
++ p.As = s390x.AMOVD
++ }
++ }
++
++ // constant propagation
++ // find MOV $con,R followed by
++ // another MOV $con,R without
++ // setting R in the interim
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++ switch p.As {
++ case s390x.AMOVD,
++ s390x.AMOVW, s390x.AMOVWZ,
++ s390x.AMOVH, s390x.AMOVHZ,
++ s390x.AMOVB, s390x.AMOVBZ,
++ s390x.AFMOVS, s390x.AFMOVD:
++ if regtyp(&p.To) {
++ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
++ conprop(r)
++ }
++ }
++ }
++ }
++
++ for {
++ changed := false
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++
++ // TODO(austin) Handle smaller moves. arm and amd64
++		// distinguish between moves that *must*
++ // sign/zero extend and moves that don't care so they
++ // can eliminate moves that don't care without
++ // breaking moves that do care. This might let us
++ // simplify or remove the next peep loop, too.
++ if p.As == s390x.AMOVD || p.As == s390x.AFMOVD || p.As == s390x.AFMOVS {
++ if regtyp(&p.To) {
++ // Convert uses to $0 to uses of R0 and
++ // propagate R0
++ if p.As == s390x.AMOVD && regzer(&p.From) != 0 {
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = s390x.REGZERO
++ }
++
++ // Try to eliminate reg->reg moves
++ if isGPR(&p.From) || isFPR(&p.From) {
++ if copyprop(r) || (subprop(r) && copyprop(r)) {
++ excise(r)
++ changed = true
++ }
++ }
++ }
++ }
++ }
++ if !changed {
++ break
++ }
++ }
++
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("pass7 copyprop", g.Start, 0)
++ }
++
++ /*
++ * For any kind of MOV in (AFMOVS, AMOVW, AMOVWZ, AMOVH, AMOVHZ, AMOVB, AMOVBZ)
++ * MOV Ra, Rb; ...; MOV Rb, Rc; -> MOV Ra, Rc;
++ */
++
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++
++ switch p.As {
++ case s390x.AMOVW, s390x.AMOVWZ,
++ s390x.AMOVH, s390x.AMOVHZ,
++ s390x.AMOVB, s390x.AMOVBZ:
++
++ if regzer(&p.From) == 1 && regtyp(&p.To) {
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = s390x.REGZERO
++ }
++
++ if ((regtyp(&p.From) || regzer(&p.From) == 1 ||
++ p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_SCONST) &&
++ regtyp(&p.To)) != true {
++ continue
++ }
++
++ default:
++ continue
++ }
++
++ r0 := r
++ p0 := r0.Prog
++ s0 := &p0.From
++ v0 := &p0.To
++ r1 := gc.Uniqs(r0)
++
++ // v0used: 0 means must not be used;
++ // 1 means didn't find, but can't decide;
++ // 2 means found a use, must be used;
++		// v0used is used as a tag to decide if r0 can be eliminated.
++ var v0used int = 1
++
++ for ; ; r1 = gc.Uniqs(r1) {
++ var p1 *obj.Prog
++
++ if r1 == nil || r1 == r0 {
++ break
++ }
++ if gc.Uniqp(r1) == nil {
++ break
++ }
++ breakloop := false
++ p1 = r1.Prog
++
++ if p1.As == p0.As && copyas(&p0.To, &p1.From) &&
++ (regtyp(&p0.From) || p0.From.Reg == s390x.REGZERO || regtyp(&p1.To) ||
++ (p0.From.Type != obj.TYPE_CONST && p0.From.Type != obj.TYPE_FCONST && p0.From.Type != obj.TYPE_SCONST && p1.To.Type == obj.TYPE_MEM)) {
++ if gc.Debug['D'] != 0 {
++ fmt.Printf("mov prop\n")
++ fmt.Printf("%v\n", p0)
++ fmt.Printf("%v\n", p1)
++ }
++ p1.From = p0.From
++ } else {
++ t := copyu(p1, v0, nil)
++ if gc.Debug['D'] != 0 {
++ fmt.Printf("try v0 mov prop t=%d\n", t)
++ fmt.Printf("%v\n", p0)
++ fmt.Printf("%v\n", p1)
++ }
++ switch t {
++ case 0: // miss
++ case 1: // use
++ v0used = 2
++ case 2, // rar
++ 4: // use and set
++ v0used = 2
++ breakloop = true
++ case 3: // set
++ if v0used != 2 {
++ v0used = 0
++ }
++ breakloop = true
++ default:
++ }
++
++ if regtyp(s0) {
++ t = copyu(p1, s0, nil)
++ if gc.Debug['D'] != 0 {
++ fmt.Printf("try s0 mov prop t=%d\n", t)
++ fmt.Printf("%v\n", p0)
++ fmt.Printf("%v\n", p1)
++ }
++ switch t {
++ case 0, // miss
++ 1: // use
++ case 2, // rar
++ 4: // use and set
++ breakloop = true
++ case 3: // set
++ breakloop = true
++ default:
++ }
++ }
++ }
++ if breakloop {
++ break
++ }
++ }
++ if v0used == 0 {
++ excise(r0)
++ }
++ }
++
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("pass 7 MOV copy propagation", g.Start, 0)
++ }
++
++ /*
++ * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
++ */
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++ switch p.As {
++ default:
++ continue
++
++ case s390x.AMOVH,
++ s390x.AMOVHZ,
++ s390x.AMOVB,
++ s390x.AMOVBZ,
++ s390x.AMOVW,
++ s390x.AMOVWZ:
++ if p.To.Type != obj.TYPE_REG {
++ continue
++ }
++ }
++
++ r1 := r.Link
++ if r1 == nil {
++ continue
++ }
++ // If this is a branch target then the cast might be needed
++ if gc.Uniqp(r1) == nil {
++ continue
++ }
++ p1 := r1.Prog
++ if p1.As != p.As {
++ continue
++ }
++ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
++ continue
++ }
++ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
++ continue
++ }
++ excise(r1)
++ }
++
++ // Remove redundant moves/casts
++ fuseMoveChains(g.Start)
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("fuse move chains", g.Start, 0)
++ }
++
++ // Fuse memory zeroing instructions into XC instructions
++ fuseClear(g.Start)
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("fuse clears", g.Start, 0)
++ }
++
++ // load pipelining
++ // push any load from memory as early as possible
++ // to give it time to complete before use.
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++ switch p.As {
++ case s390x.AMOVB,
++ s390x.AMOVW,
++ s390x.AMOVD:
++
++ if regtyp(&p.To) && !regconsttyp(&p.From) {
++ pushback(r)
++ }
++ }
++ }
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("pass8 push load as early as possible", g.Start, 0)
++ }
++
++ /*
++ * look for OP a, b, c; MOV c, d; -> OP a, b, d;
++ */
++
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++
++ switch p.As {
++ case s390x.AADD,
++ s390x.AADDC,
++ s390x.AADDME,
++ s390x.AADDE,
++ s390x.AADDZE,
++ s390x.AAND,
++ s390x.AANDN,
++ s390x.ADIVW,
++ s390x.ADIVWU,
++ s390x.ADIVD,
++ s390x.ADIVDU,
++ s390x.AMULLW,
++ s390x.AMULHDU,
++ s390x.AMULLD,
++ s390x.ANAND,
++ s390x.ANOR,
++ s390x.AOR,
++ s390x.AORN,
++ s390x.ASLW,
++ s390x.ASRAW,
++ s390x.ASRW,
++ s390x.ASLD,
++ s390x.ASRAD,
++ s390x.ASRD,
++ s390x.ARLL,
++ s390x.ARLLG,
++ s390x.ASUB,
++ s390x.ASUBC,
++ s390x.ASUBME,
++ s390x.ASUBE,
++ s390x.ASUBZE,
++ s390x.AXOR:
++ if p.To.Type != obj.TYPE_REG {
++ continue
++ }
++ if p.Reg == 0 { // Only for 3 ops instruction
++ continue
++ }
++ default:
++ continue
++ }
++
++ r1 := r.Link
++ for ; r1 != nil; r1 = r1.Link {
++ if r1.Prog.As != obj.ANOP {
++ break
++ }
++ }
++
++ if r1 == nil {
++ continue
++ }
++
++ p1 := r1.Prog
++ switch p1.As {
++ case s390x.AMOVD:
++ if p1.To.Type != obj.TYPE_REG {
++ continue
++ }
++
++ default:
++ continue
++ }
++ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
++ continue
++ }
++
++ if trymergeopmv(r1) {
++ p.To = p1.To
++ excise(r1)
++ }
++ }
++
++ if gc.Debug['v'] != 0 {
++ gc.Dumpit("Merge operation and move", g.Start, 0)
++ }
++
++ /*
++ * look for CMP x, y; Branch -> Compare and branch
++ */
++ for r := g.Start; r != nil; r = r.Link {
++ p := r.Prog
++ r1 := gc.Uniqs(r)
++ if r1 == nil {
++ continue
++ }
++ p1 := r1.Prog
++
++ var ins int16
++ switch p.As {
++ case s390x.ACMP:
++ switch p1.As {
++ case s390x.ABCL, s390x.ABC:
++ continue
++ case s390x.ABEQ:
++ ins = s390x.ACMPBEQ
++ case s390x.ABGE:
++ ins = s390x.ACMPBGE
++ case s390x.ABGT:
++ ins = s390x.ACMPBGT
++ case s390x.ABLE:
++ ins = s390x.ACMPBLE
++ case s390x.ABLT:
++ ins = s390x.ACMPBLT
++ case s390x.ABNE:
++ ins = s390x.ACMPBNE
++ default:
++ continue
++ }
++
++ case s390x.ACMPU:
++ switch p1.As {
++ case s390x.ABCL, s390x.ABC:
++ continue
++ case s390x.ABEQ:
++ ins = s390x.ACMPUBEQ
++ case s390x.ABGE:
++ ins = s390x.ACMPUBGE
++ case s390x.ABGT:
++ ins = s390x.ACMPUBGT
++ case s390x.ABLE:
++ ins = s390x.ACMPUBLE
++ case s390x.ABLT:
++ ins = s390x.ACMPUBLT
++ case s390x.ABNE:
++ ins = s390x.ACMPUBNE
++ default:
++ continue
++ }
++
++ case s390x.ACMPW, s390x.ACMPWU:
++ continue
++
++ default:
++ continue
++ }
++
++ if gc.Debug['D'] != 0 {
++ fmt.Printf("cnb %v; %v -> ", p, p1)
++ }
++
++ if p1.To.Sym != nil {
++ continue
++ }
++
++ if p.To.Type == obj.TYPE_REG {
++ p1.As = ins
++ p1.From = p.From
++ p1.Reg = p.To.Reg
++ p1.From3 = nil
++ } else if p.To.Type == obj.TYPE_CONST {
++ switch p.As {
++ case s390x.ACMP, s390x.ACMPW:
++ if (p.To.Offset < -(1 << 7)) || (p.To.Offset >= ((1 << 7) - 1)) {
++ continue
++ }
++ case s390x.ACMPU, s390x.ACMPWU:
++ if p.To.Offset >= (1 << 8) {
++ continue
++ }
++ default:
++ }
++ p1.As = ins
++ p1.From = p.From
++ p1.Reg = 0
++ p1.From3 = new(obj.Addr)
++ *(p1.From3) = p.To
++ } else {
++ continue
++ }
++
++ if gc.Debug['D'] != 0 {
++ fmt.Printf("%v\n", p1)
++ }
++ excise(r)
++ }
++
++ if gc.Debug['v'] != 0 {
++ gc.Dumpit("compare and branch", g.Start, 0)
++ }
++
++ // Fuse LOAD/STORE instructions into LOAD/STORE MULTIPLE instructions
++ fuseMultiple(g.Start)
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ gc.Dumpit("pass 7 fuse load/store instructions", g.Start, 0)
++ }
++
++ gc.Flowend(g)
++}
++
++func conprop(r0 *gc.Flow) {
++ p0 := r0.Prog
++ v0 := &p0.To
++ r := r0
++ for {
++ r = gc.Uniqs(r)
++ if r == nil || r == r0 {
++ return
++ }
++ if gc.Uniqp(r) == nil {
++ return
++ }
++
++ p := r.Prog
++ t := copyu(p, v0, nil)
++ switch t {
++ case 0, // miss
++ 1: // use
++ continue
++ case 3: // set
++ if p.As == p0.As && p.From.Type == p0.From.Type && p.From.Reg == p0.From.Reg && p.From.Node == p0.From.Node &&
++ p.From.Offset == p0.From.Offset && p.From.Scale == p0.From.Scale && p.From.Index == p0.From.Index {
++ if p.From.Val == p0.From.Val {
++ excise(r)
++ continue
++ }
++ }
++ }
++ break
++ }
++}
++
++// is 'a' a register or constant?
++func regconsttyp(a *obj.Addr) bool {
++ if regtyp(a) {
++ return true
++ }
++ switch a.Type {
++ case obj.TYPE_CONST,
++ obj.TYPE_FCONST,
++ obj.TYPE_SCONST,
++ obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
++ return true
++ }
++
++ return false
++}
++
++func pushback(r0 *gc.Flow) {
++ var r *gc.Flow
++
++ var b *gc.Flow
++ p0 := r0.Prog
++ for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
++ p := r.Prog
++ if p.As != obj.ANOP {
++ if !regconsttyp(&p.From) || !regtyp(&p.To) {
++ break
++ }
++ if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
++ break
++ }
++ }
++
++ if p.As == obj.ACALL {
++ break
++ }
++ b = r
++ }
++
++ if b == nil {
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ fmt.Printf("no pushback: %v\n", r0.Prog)
++ if r != nil {
++ fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
++ }
++ }
++
++ return
++ }
++
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ fmt.Printf("pushback\n")
++ for r := b; ; r = r.Link {
++ fmt.Printf("\t%v\n", r.Prog)
++ if r == r0 {
++ break
++ }
++ }
++ }
++
++ t := obj.Prog(*r0.Prog)
++ for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
++ p0 = r.Link.Prog
++ p := r.Prog
++ p0.As = p.As
++ p0.Lineno = p.Lineno
++ p0.From = p.From
++ p0.To = p.To
++ p0.From3 = p.From3
++ p0.Reg = p.Reg
++ p0.RegTo2 = p.RegTo2
++ if r == b {
++ break
++ }
++ }
++
++ p0 = r.Prog
++ p0.As = t.As
++ p0.Lineno = t.Lineno
++ p0.From = t.From
++ p0.To = t.To
++ p0.From3 = t.From3
++ p0.Reg = t.Reg
++ p0.RegTo2 = t.RegTo2
++
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ fmt.Printf("\tafter\n")
++ for r := (*gc.Flow)(b); ; r = r.Link {
++ fmt.Printf("\t%v\n", r.Prog)
++ if r == r0 {
++ break
++ }
++ }
++ }
++}
++
++func excise(r *gc.Flow) {
++ p := r.Prog
++ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
++ fmt.Printf("%v ===delete===\n", p)
++ }
++ obj.Nopout(p)
++ gc.Ostats.Ndelmov++
++}
++
++/*
++ * regzer returns 1 if a's value is 0 (a is R0 or $0)
++ */
++func regzer(a *obj.Addr) int {
++ if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
++ if a.Sym == nil && a.Reg == 0 {
++ if a.Offset == 0 {
++ return 1
++ }
++ }
++ }
++ if a.Type == obj.TYPE_REG {
++ if a.Reg == s390x.REGZERO {
++ return 1
++ }
++ }
++ return 0
++}
++
++func regtyp(a *obj.Addr) bool {
++ // TODO(rsc): Floating point register exclusions?
++ return a.Type == obj.TYPE_REG && s390x.REG_R0 <= a.Reg && a.Reg <= s390x.REG_F15 && a.Reg != s390x.REGZERO
++}
++
++// isGPR returns true if a refers to a general purpose register (GPR).
++// R0/REGZERO is treated as a GPR.
++func isGPR(a *obj.Addr) bool {
++ return a.Type == obj.TYPE_REG &&
++ s390x.REG_R0 <= a.Reg &&
++ a.Reg <= s390x.REG_R15
++}
++
++func isFPR(a *obj.Addr) bool {
++ return a.Type == obj.TYPE_REG &&
++ s390x.REG_F0 <= a.Reg &&
++ a.Reg <= s390x.REG_F15
++}
++
++func isConst(a *obj.Addr) bool {
++ return a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_FCONST
++}
++
++// isIndirectMem returns true if a refers to a memory location addressable by a
++// register and an offset, such as:
++// x+8(R1)
++// and
++// 0(R10)
++// It returns false if the address contains an index register such as:
++// 16(R1)(R2*1)
++func isIndirectMem(a *obj.Addr) bool {
++ return a.Type == obj.TYPE_MEM &&
++ a.Index == 0 &&
++ (a.Name == obj.NAME_NONE || a.Name == obj.NAME_AUTO || a.Name == obj.NAME_PARAM)
++}
++
++/*
++ * the idea is to substitute
++ * one register for another
++ * from one MOV to another
++ * MOV a, R1
++ * ADD b, R1 / no use of R2
++ * MOV R1, R2
++ * would be converted to
++ * MOV a, R2
++ * ADD b, R2
++ * MOV R2, R1
++ * hopefully, then the former or latter MOV
++ * will be eliminated by copy propagation.
++ *
++ * r0 (the argument, not the register) is the MOV at the end of the
++ * above sequences. This returns 1 if it modified any instructions.
++ */
++func subprop(r0 *gc.Flow) bool {
++ p := r0.Prog
++ v1 := &p.From
++ if !regtyp(v1) {
++ return false
++ }
++ v2 := &p.To
++ if !regtyp(v2) {
++ return false
++ }
++ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
++ if gc.Uniqs(r) == nil {
++ break
++ }
++ p = r.Prog
++ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
++ continue
++ }
++ if p.Info.Flags&gc.Call != 0 {
++ return false
++ }
++
++ if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
++ if p.To.Type == v1.Type {
++ if p.To.Reg == v1.Reg {
++ copysub(&p.To, v1, v2)
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
++ if p.From.Type == v2.Type {
++ fmt.Printf(" excise")
++ }
++ fmt.Printf("\n")
++ }
++
++ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
++ p = r.Prog
++ copysub(&p.From, v1, v2)
++ copysub1(p, v1, v2)
++ copysub(&p.To, v1, v2)
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("%v\n", r.Prog)
++ }
++ }
++
++ v1.Reg, v2.Reg = v2.Reg, v1.Reg
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("%v last\n", r.Prog)
++ }
++ return true
++ }
++ }
++ }
++
++ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
++ break
++ }
++ }
++
++ return false
++}
++
++/*
++ * The idea is to remove redundant copies.
++ * v1->v2 F=0
++ * (use v2 s/v2/v1/)*
++ * set v1 F=1
++ * use v2 return fail (v1->v2 move must remain)
++ * -----------------
++ * v1->v2 F=0
++ * (use v2 s/v2/v1/)*
++ * set v1 F=1
++ * set v2 return success (caller can remove v1->v2 move)
++ */
++func copyprop(r0 *gc.Flow) bool {
++ p := r0.Prog
++ v1 := &p.From
++ v2 := &p.To
++ if copyas(v1, v2) {
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("eliminating self-move: %v\n", r0.Prog)
++ }
++ return true
++ }
++
++ gactive++
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
++ }
++ return copy1(v1, v2, r0.S1, 0)
++}
++
++// copy1 replaces uses of v2 with v1 starting at r and returns true if
++// all uses were rewritten.
++func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
++ if uint32(r.Active) == gactive {
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("act set; return true\n")
++ }
++ return true
++ }
++
++ r.Active = int32(gactive)
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
++ }
++ var t int
++ var p *obj.Prog
++ for ; r != nil; r = r.S1 {
++ p = r.Prog
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("%v", p)
++ }
++ if f == 0 && gc.Uniqp(r) == nil {
++ // Multiple predecessors; conservatively
++ // assume v1 was set on other path
++ f = 1
++
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; merge; f=%d", f)
++ }
++ }
++
++ t = copyu(p, v2, nil)
++ switch t {
++ case 2: /* rar, can't split */
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
++ }
++ return false
++
++ case 3: /* set */
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
++ }
++ return true
++
++ case 1, /* used, substitute */
++ 4: /* use and set */
++ if f != 0 {
++ if gc.Debug['P'] == 0 {
++ return false
++ }
++ if t == 4 {
++ fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
++ } else {
++ fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
++ }
++ return false
++ }
++
++ if copyu(p, v2, v1) != 0 {
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; sub fail; return 0\n")
++ }
++ return false
++ }
++
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
++ }
++ if t == 4 {
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
++ }
++ return true
++ }
++ }
++
++ if f == 0 {
++ t = copyu(p, v1, nil)
++ if f == 0 && (t == 2 || t == 3 || t == 4) {
++ f = 1
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
++ }
++ }
++ }
++
++ if gc.Debug['P'] != 0 {
++ fmt.Printf("\n")
++ }
++ if r.S2 != nil {
++ if !copy1(v1, v2, r.S2, f) {
++ return false
++ }
++ }
++ }
++
++ return true
++}
++
++// If s==nil, copyu returns the set/use of v in p; otherwise, it
++// modifies p to replace reads of v with reads of s and returns 0 for
++// success or non-zero for failure.
++//
++// If s==nil, copyu returns one of the following values:
++// 1 if v only used
++// 2 if v is set and used in one address (read-alter-rewrite;
++// can't substitute)
++// 3 if v is only set
++// 4 if v is set in one address and used in another (so addresses
++// can be rewritten independently)
++// 0 otherwise (not touched)
++func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
++ if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
++ // Currently we never generate a From3 with anything other than a constant in it.
++ fmt.Printf("copyu: From3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
++ }
++
++ switch p.As {
++ default:
++ fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
++ return 2
++
++ case /* read p->from, write p->to */
++ s390x.AMOVH,
++ s390x.AMOVHZ,
++ s390x.AMOVB,
++ s390x.AMOVBZ,
++ s390x.AMOVW,
++ s390x.AMOVWZ,
++ s390x.AMOVD,
++ s390x.ANEG,
++ s390x.AADDME,
++ s390x.AADDZE,
++ s390x.ASUBME,
++ s390x.ASUBZE,
++ s390x.AFMOVS,
++ s390x.AFMOVD,
++ s390x.ALEDBR,
++ s390x.AFNEG,
++ s390x.ALDEBR,
++ s390x.ACLFEBR,
++ s390x.ACLGEBR,
++ s390x.ACLFDBR,
++ s390x.ACLGDBR,
++ s390x.ACFEBRA,
++ s390x.ACGEBRA,
++ s390x.ACFDBRA,
++ s390x.ACGDBRA,
++ s390x.ACELFBR,
++ s390x.ACELGBR,
++ s390x.ACDLFBR,
++ s390x.ACDLGBR,
++ s390x.ACEFBRA,
++ s390x.ACEGBRA,
++ s390x.ACDFBRA,
++ s390x.ACDGBRA,
++ s390x.AFSQRT:
++
++ if s != nil {
++ copysub(&p.From, v, s)
++
++ // Update only indirect uses of v in p->to
++ if !copyas(&p.To, v) {
++ copysub(&p.To, v, s)
++ }
++ return 0
++ }
++
++ if copyas(&p.To, v) {
++ // Fix up implicit from
++ if p.From.Type == obj.TYPE_NONE {
++ p.From = p.To
++ }
++ if copyau(&p.From, v) {
++ return 4
++ }
++ return 3
++ }
++
++ if copyau(&p.From, v) {
++ return 1
++ }
++ if copyau(&p.To, v) {
++ // p->to only indirectly uses v
++ return 1
++ }
++
++ return 0
++
++ // read p->from, read p->reg, write p->to
++ case s390x.AADD,
++ s390x.AADDC,
++ s390x.AADDE,
++ s390x.ASUB,
++ s390x.ASLW,
++ s390x.ASRW,
++ s390x.ASRAW,
++ s390x.ASLD,
++ s390x.ASRD,
++ s390x.ASRAD,
++ s390x.ARLL,
++ s390x.ARLLG,
++ s390x.AOR,
++ s390x.AORN,
++ s390x.AAND,
++ s390x.AANDN,
++ s390x.ANAND,
++ s390x.ANOR,
++ s390x.AXOR,
++ s390x.AMULLW,
++ s390x.AMULLD,
++ s390x.ADIVW,
++ s390x.ADIVD,
++ s390x.ADIVWU,
++ s390x.ADIVDU,
++ s390x.AFADDS,
++ s390x.AFADD,
++ s390x.AFSUBS,
++ s390x.AFSUB,
++ s390x.AFMULS,
++ s390x.AFMUL,
++ s390x.AFDIVS,
++ s390x.AFDIV:
++ if s != nil {
++ copysub(&p.From, v, s)
++ copysub1(p, v, s)
++
++ // Update only indirect uses of v in p->to
++ if !copyas(&p.To, v) {
++ copysub(&p.To, v, s)
++ }
++ }
++
++ if copyas(&p.To, v) {
++ if p.Reg == 0 {
++ // Fix up implicit reg (e.g., ADD
++ // R3,R4 -> ADD R3,R4,R4) so we can
++ // update reg and to separately.
++ p.Reg = p.To.Reg
++ }
++
++ if copyau(&p.From, v) {
++ return 4
++ }
++ if copyau1(p, v) {
++ return 4
++ }
++ return 3
++ }
++
++ if copyau(&p.From, v) {
++ return 1
++ }
++ if copyau1(p, v) {
++ return 1
++ }
++ if copyau(&p.To, v) {
++ return 1
++ }
++ return 0
++
++ case s390x.ABEQ,
++ s390x.ABGT,
++ s390x.ABGE,
++ s390x.ABLT,
++ s390x.ABLE,
++ s390x.ABNE,
++ s390x.ABVC,
++ s390x.ABVS:
++ return 0
++
++ case obj.ACHECKNIL, /* read p->from */
++ s390x.ACMP, /* read p->from, read p->to */
++ s390x.ACMPU,
++ s390x.ACMPW,
++ s390x.ACMPWU,
++ s390x.AFCMPO,
++ s390x.AFCMPU,
++ s390x.ACEBR,
++ s390x.AMVC,
++ s390x.ACLC,
++ s390x.AXC,
++ s390x.AOC,
++ s390x.ANC:
++ if s != nil {
++ copysub(&p.From, v, s)
++ copysub(&p.To, v, s)
++ return 0
++ }
++
++ if copyau(&p.From, v) {
++ return 1
++ }
++ if copyau(&p.To, v) {
++ return 1
++ }
++ return 0
++
++ case s390x.ACMPBNE, s390x.ACMPBEQ,
++ s390x.ACMPBLT, s390x.ACMPBLE,
++ s390x.ACMPBGT, s390x.ACMPBGE,
++ s390x.ACMPUBNE, s390x.ACMPUBEQ,
++ s390x.ACMPUBLT, s390x.ACMPUBLE,
++ s390x.ACMPUBGT, s390x.ACMPUBGE:
++ if s != nil {
++ copysub(&p.From, v, s)
++ copysub1(p, v, s)
++ return 0
++ }
++ if copyau(&p.From, v) {
++ return 1
++ }
++ if copyau1(p, v) {
++ return 1
++ }
++ return 0
++
++ case s390x.ACLEAR:
++ if s != nil {
++ copysub(&p.To, v, s)
++ return 0
++ }
++ if copyau(&p.To, v) {
++ return 1
++ }
++ return 0
++
++ // go never generates a branch to a GPR
++ // read p->to
++ case s390x.ABR:
++ if s != nil {
++ copysub(&p.To, v, s)
++ return 0
++ }
++
++ if copyau(&p.To, v) {
++ return 1
++ }
++ return 0
++
++ case obj.ARET, obj.AUNDEF:
++ if s != nil {
++ return 0
++ }
++
++ // All registers die at this point, so claim
++ // everything is set (and not used).
++ return 3
++
++ case s390x.ABL:
++ if v.Type == obj.TYPE_REG {
++ if s390x.REGARG != -1 && v.Reg == s390x.REGARG {
++ return 2
++ }
++
++ if p.From.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
++ return 2
++ }
++ }
++
++ if s != nil {
++ copysub(&p.To, v, s)
++ return 0
++ }
++
++ if copyau(&p.To, v) {
++ return 4
++ }
++ return 3
++
++ case obj.ATEXT:
++ if v.Type == obj.TYPE_REG {
++ if v.Reg == s390x.REGARG {
++ return 3
++ }
++ }
++ return 0
++
++ case obj.APCDATA,
++ obj.AFUNCDATA,
++ obj.AVARDEF,
++ obj.AVARKILL,
++ obj.AVARLIVE,
++ obj.AUSEFIELD,
++ obj.ANOP:
++ return 0
++ }
++}
++
++// copyas returns true if a and v address the same register.
++//
++// If a is the from operand, this means this operation reads the
++// register in v. If a is the to operand, this means this operation
++// writes the register in v.
++func copyas(a *obj.Addr, v *obj.Addr) bool {
++ if regtyp(v) {
++ if a.Type == v.Type {
++ if a.Reg == v.Reg {
++ return true
++ }
++ }
++ }
++ return false
++}
++
++// copyau returns true if a either directly or indirectly addresses the
++// same register as v.
++//
++// If a is the from operand, this means this operation reads the
++// register in v. If a is the to operand, this means the operation
++// either reads or writes the register in v (if !copyas(a, v), then
++// the operation reads the register in v).
++func copyau(a *obj.Addr, v *obj.Addr) bool {
++ if copyas(a, v) {
++ return true
++ }
++ if v.Type == obj.TYPE_REG {
++ if a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0) {
++ if v.Reg == a.Reg {
++ return true
++ }
++ }
++ }
++ return false
++}
++
++// copyau1 returns true if p->reg references the same register as v and v
++// is a direct reference.
++func copyau1(p *obj.Prog, v *obj.Addr) bool {
++ if regtyp(v) && v.Reg != 0 {
++ if p.Reg == v.Reg {
++ return true
++ }
++ }
++ return false
++}
++
++// copysub replaces v with s in a
++func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr) {
++ if copyau(a, v) {
++ a.Reg = s.Reg
++ }
++}
++
++// copysub1 replaces v with s in p
++func copysub1(p *obj.Prog, v *obj.Addr, s *obj.Addr) {
++ if copyau1(p, v) {
++ p.Reg = s.Reg
++ }
++}
++
++func sameaddr(a *obj.Addr, v *obj.Addr) bool {
++ if a.Type != v.Type {
++ return false
++ }
++ if regtyp(v) && a.Reg == v.Reg {
++ return true
++ }
++ if v.Type == obj.NAME_AUTO || v.Type == obj.NAME_PARAM {
++ if v.Offset == a.Offset {
++ return true
++ }
++ }
++ return false
++}
++
++func smallindir(a *obj.Addr, reg *obj.Addr) bool {
++ return reg.Type == obj.TYPE_REG && a.Type == obj.TYPE_MEM && a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
++}
++
++func stackaddr(a *obj.Addr) bool {
++ return a.Type == obj.TYPE_REG && a.Reg == s390x.REGSP
++}
++
++func trymergeopmv(r *gc.Flow) bool {
++ p := r.Prog
++ reg := p.From.Reg
++ r2 := gc.Uniqs(r)
++
++ for ; r2 != nil; r2 = gc.Uniqs(r2) {
++ p2 := r2.Prog
++ switch p2.As {
++ case obj.ANOP:
++ continue
++
++ case s390x.AEXRL,
++ s390x.ASYSCALL,
++ s390x.ABR,
++ s390x.ABC,
++ s390x.ABEQ,
++ s390x.ABGE,
++ s390x.ABGT,
++ s390x.ABLE,
++ s390x.ABLT,
++ s390x.ABNE,
++ s390x.ACMPBEQ,
++ s390x.ACMPBGE,
++ s390x.ACMPBGT,
++ s390x.ACMPBLE,
++ s390x.ACMPBLT,
++ s390x.ACMPBNE:
++ return false
++
++ case s390x.ACMP,
++ s390x.ACMPU,
++ s390x.ACMPW,
++ s390x.ACMPWU:
++ if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
++ return false
++ }
++ if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
++ //different from other instructions, To.Reg is a source register in CMP
++ return false
++ }
++ continue
++
++ case s390x.AMOVD,
++ s390x.AMOVW, s390x.AMOVWZ,
++ s390x.AMOVH, s390x.AMOVHZ,
++ s390x.AMOVB, s390x.AMOVBZ:
++ if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.From.Type == obj.TYPE_ADDR && p2.From.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.To.Type == obj.TYPE_ADDR && p2.To.Reg == reg {
++ //For store operations
++ //also use; can't change
++ return false
++ }
++ if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
++ return true
++ }
++ continue
++
++ case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
++ if p2.From.Type == obj.TYPE_MEM && p2.From.Reg == reg {
++ return false
++ }
++ if p2.To.Type == obj.TYPE_MEM && p2.To.Reg == reg {
++ return false
++ }
++ continue
++
++ default:
++ if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.From.Type == obj.TYPE_ADDR && p2.From.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.Reg != 0 && p2.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.From3 != nil && p2.From3.Type == obj.TYPE_REG && p2.From3.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.From3 != nil && p2.From3.Type == obj.TYPE_ADDR && p2.From3.Reg == reg {
++ //use; can't change
++ return false
++ }
++ if p2.To.Type == obj.TYPE_ADDR && p2.To.Reg == reg {
++ //For store operations
++ //also use; can't change
++ return false
++ }
++ if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
++ if p2.Reg == 0 {
++ //p2.To is also used as source in 2 operands instruction
++ return false
++ } else {
++ //def; can change
++ return true
++ }
++ }
++ continue
++ }
++ }
++ return false
++}
++
++func isMove(p *obj.Prog) bool {
++ switch p.As {
++ case s390x.AMOVD,
++ s390x.AMOVW, s390x.AMOVWZ,
++ s390x.AMOVH, s390x.AMOVHZ,
++ s390x.AMOVB, s390x.AMOVBZ,
++ s390x.AFMOVD, s390x.AFMOVS:
++ return true
++ }
++ return false
++}
++
++func isLoad(p *obj.Prog) bool {
++ if !isMove(p) {
++ return false
++ }
++ if !(isGPR(&p.To) || isFPR(&p.To)) {
++ return false
++ }
++ if p.From.Type != obj.TYPE_MEM {
++ return false
++ }
++ return true
++}
++
++func isStore(p *obj.Prog) bool {
++ if !isMove(p) {
++ return false
++ }
++ if !(isGPR(&p.From) || isFPR(&p.From) || isConst(&p.From)) {
++ return false
++ }
++ if p.To.Type != obj.TYPE_MEM {
++ return false
++ }
++ return true
++}
++
++// fuseMoveChains looks to see if destination register is used
++// again and if not merges the moves.
++//
++// Look for this pattern (sequence of moves):
++// MOVB $17, R1
++// MOVBZ R1, R1
++// Replace with:
++// MOVBZ $17, R1
++func fuseMoveChains(r *gc.Flow) {
++ for ; r != nil; r = r.Link {
++ p := r.Prog
++ if !isMove(p) || !isGPR(&p.To) {
++ continue
++ }
++
++ // r is a move with a destination register
++ var move *gc.Flow
++ for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
++ if rr == r {
++ // loop
++ break
++ }
++ if gc.Uniqp(rr) == nil {
++ // branch target: leave alone
++ break
++ }
++ pp := rr.Prog
++ if isMove(pp) && isGPR(&pp.From) && pp.From.Reg == p.To.Reg {
++ if pp.To.Type == obj.TYPE_MEM {
++ if p.From.Type == obj.TYPE_MEM ||
++ p.From.Type == obj.TYPE_ADDR {
++ break
++ }
++ if p.From.Type == obj.TYPE_CONST &&
++ int64(int16(p.From.Offset)) != p.From.Offset {
++ break
++ }
++ }
++ move = rr
++ break
++ }
++ if pp.As == obj.ANOP {
++ continue
++ }
++ break
++ }
++
++ // we have a move that reads from our destination reg, check if any future
++ // instructions also read from the reg
++ if move != nil && move.Prog.From.Reg != move.Prog.To.Reg {
++ safe := true
++ visited := make(map[*gc.Flow]bool)
++ children := make([]*gc.Flow, 0)
++ if move.S1 != nil {
++ children = append(children, move.S1)
++ }
++ if move.S2 != nil {
++ children = append(children, move.S2)
++ }
++ for len(children) > 0 {
++ rr := children[0]
++ if visited[rr] {
++ children = children[1:]
++ continue
++ } else {
++ visited[rr] = true
++ }
++ pp := rr.Prog
++ t := copyu(pp, &p.To, nil)
++ if t == 0 { // not found
++ if rr.S1 != nil {
++ children = append(children, rr.S1)
++ }
++ if rr.S2 != nil {
++ children = append(children, rr.S2)
++ }
++ children = children[1:]
++ continue
++ }
++ if t == 3 { // set
++ children = children[1:]
++ continue
++ }
++ // t is 1, 2 or 4: use
++ safe = false
++ break
++ }
++ if !safe {
++ move = nil
++ }
++ }
++
++ if move == nil {
++ continue
++ }
++
++ pp := move.Prog
++ execute := false
++
++ // at this point we have something like:
++ // MOV* anything, reg1
++ // MOV* reg1, reg2/mem
++ // now check if this is a cast that cannot be forward propagated
++ if p.As == pp.As || regzer(&p.From) == 1 {
++ // if the operations match or our source is zero then we
++ // can always propagate
++ execute = true
++ }
++ if !execute && isConst(&p.From) {
++ v := p.From.Offset
++ switch p.As {
++ case s390x.AMOVWZ:
++ v = int64(uint32(v))
++ case s390x.AMOVHZ:
++ v = int64(uint16(v))
++ case s390x.AMOVBZ:
++ v = int64(uint8(v))
++ case s390x.AMOVW:
++ v = int64(int32(v))
++ case s390x.AMOVH:
++ v = int64(int16(v))
++ case s390x.AMOVB:
++ v = int64(int8(v))
++ }
++ p.From.Offset = v
++ execute = true
++ }
++ if !execute && isGPR(&p.From) {
++ switch p.As {
++ case s390x.AMOVD:
++ fallthrough
++ case s390x.AMOVWZ:
++ if pp.As == s390x.AMOVWZ {
++ execute = true
++ break
++ }
++ fallthrough
++ case s390x.AMOVHZ:
++ if pp.As == s390x.AMOVHZ {
++ execute = true
++ break
++ }
++ fallthrough
++ case s390x.AMOVBZ:
++ if pp.As == s390x.AMOVBZ {
++ execute = true
++ break
++ }
++ }
++ }
++ if !execute {
++ if (p.As == s390x.AMOVB || p.As == s390x.AMOVBZ) && (pp.As == s390x.AMOVB || pp.As == s390x.AMOVBZ) {
++ execute = true
++ }
++ if (p.As == s390x.AMOVH || p.As == s390x.AMOVHZ) && (pp.As == s390x.AMOVH || pp.As == s390x.AMOVHZ) {
++ execute = true
++ }
++ if (p.As == s390x.AMOVW || p.As == s390x.AMOVWZ) && (pp.As == s390x.AMOVW || pp.As == s390x.AMOVWZ) {
++ execute = true
++ }
++ }
++
++ if execute {
++ pp.From = p.From
++ excise(r)
++ }
++ }
++ return
++}
++
++// fuseClear merges memory clear operations.
++//
++// Looks for this pattern (sequence of clears):
++// MOVD R0, n(R15)
++// MOVD R0, n+8(R15)
++// MOVD R0, n+16(R15)
++// Replaces with:
++// CLEAR $24, n(R15)
++func fuseClear(r *gc.Flow) {
++ var align int64
++ var clear *obj.Prog
++ for ; r != nil; r = r.Link {
++ // If there is a branch into the instruction stream then
++ // we can't fuse into previous instructions.
++ if gc.Uniqp(r) == nil {
++ clear = nil
++ }
++
++ p := r.Prog
++ if p.As == obj.ANOP {
++ continue
++ }
++ if p.As == s390x.AXC {
++ if p.From.Reg == p.To.Reg && p.From.Offset == p.To.Offset {
++ // TODO(mundaym): merge clears?
++ p.As = s390x.ACLEAR
++ p.From.Offset = p.From3.Offset
++ p.From3 = nil
++ p.From.Type = obj.TYPE_CONST
++ p.From.Reg = 0
++ clear = p
++ } else {
++ clear = nil
++ }
++ continue
++ }
++
++ // Is our source a constant zero?
++ if regzer(&p.From) == 0 {
++ clear = nil
++ continue
++ }
++
++ // Are we moving to memory?
++ if p.To.Type != obj.TYPE_MEM ||
++ p.To.Index != 0 ||
++ p.To.Offset >= 4096 ||
++ !(p.To.Name == obj.NAME_NONE || p.To.Name == obj.NAME_AUTO || p.To.Name == obj.NAME_PARAM) {
++ clear = nil
++ continue
++ }
++
++ size := int64(0)
++ switch p.As {
++ default:
++ clear = nil
++ continue
++ case s390x.AMOVB, s390x.AMOVBZ:
++ size = 1
++ case s390x.AMOVH, s390x.AMOVHZ:
++ size = 2
++ case s390x.AMOVW, s390x.AMOVWZ:
++ size = 4
++ case s390x.AMOVD:
++ size = 8
++ }
++
++ // doubleword aligned clears should be kept doubleword
++ // aligned
++ if (size == 8 && align != 8) || (size != 8 && align == 8) {
++ clear = nil
++ }
++
++ if clear != nil &&
++ clear.To.Reg == p.To.Reg &&
++ clear.To.Name == p.To.Name &&
++ clear.To.Node == p.To.Node &&
++ clear.To.Sym == p.To.Sym {
++
++ min := clear.To.Offset
++ max := clear.To.Offset + clear.From.Offset
++
++ // previous clear is already clearing this region
++ if min <= p.To.Offset && max >= p.To.Offset+size {
++ excise(r)
++ continue
++ }
++
++ // merge forwards
++ if max == p.To.Offset {
++ clear.From.Offset += size
++ excise(r)
++ continue
++ }
++
++ // merge backwards
++ if min-size == p.To.Offset {
++ clear.From.Offset += size
++ clear.To.Offset -= size
++ excise(r)
++ continue
++ }
++ }
++
++ // transform into clear
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = size
++ p.From.Reg = 0
++ p.As = s390x.ACLEAR
++ clear = p
++ align = size
++ }
++}
++
++// fuseMultiple merges memory loads and stores into load multiple and
++// store multiple operations.
++//
++// Looks for this pattern (sequence of loads or stores):
++// MOVD R1, 0(R15)
++// MOVD R2, 8(R15)
++// MOVD R3, 16(R15)
++// Replaces with:
++// STMG R1, R3, 0(R15)
++func fuseMultiple(r *gc.Flow) {
++ var fused *obj.Prog
++ for ; r != nil; r = r.Link {
++ // If there is a branch into the instruction stream then
++ // we can't fuse into previous instructions.
++ if gc.Uniqp(r) == nil {
++ fused = nil
++ }
++
++ p := r.Prog
++
++ isStore := isGPR(&p.From) && isIndirectMem(&p.To)
++ isLoad := isGPR(&p.To) && isIndirectMem(&p.From)
++
++ // are we a candidate?
++ size := int64(0)
++ switch p.As {
++ default:
++ fused = nil
++ continue
++ case obj.ANOP:
++ // skip over nops
++ continue
++ case s390x.AMOVW, s390x.AMOVWZ:
++ size = 4
++ // TODO(mundaym): 32-bit load multiple is currently not supported
++ // as it requires sign/zero extension.
++ if !isStore {
++ fused = nil
++ continue
++ }
++ case s390x.AMOVD:
++ size = 8
++ if !isLoad && !isStore {
++ fused = nil
++ continue
++ }
++ }
++
++ // If we merge two loads/stores with different source/destination Nodes
++ // then we will lose a reference the second Node which means that the
++ // compiler might mark the Node as unused and free its slot on the stack.
++ // TODO(mundaym): allow this by adding a dummy reference to the Node.
++ if fused == nil ||
++ fused.From.Node != p.From.Node ||
++ fused.From.Type != p.From.Type ||
++ fused.To.Node != p.To.Node ||
++ fused.To.Type != p.To.Type {
++ fused = p
++ continue
++ }
++
++ // check two addresses
++ ca := func(a, b *obj.Addr, offset int64) bool {
++ return a.Reg == b.Reg && a.Offset+offset == b.Offset &&
++ a.Sym == b.Sym && a.Name == b.Name
++ }
++
++ switch fused.As {
++ default:
++ fused = p
++ case s390x.AMOVW, s390x.AMOVWZ:
++ if size == 4 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 4) {
++ fused.As = s390x.ASTMY
++ fused.Reg = p.From.Reg
++ excise(r)
++ } else {
++ fused = p
++ }
++ case s390x.AMOVD:
++ if size == 8 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 8) {
++ fused.As = s390x.ASTMG
++ fused.Reg = p.From.Reg
++ excise(r)
++ } else if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, 8) {
++ fused.As = s390x.ALMG
++ fused.Reg = fused.To.Reg
++ fused.To.Reg = p.To.Reg
++ excise(r)
++ } else {
++ fused = p
++ }
++ case s390x.ASTMG, s390x.ASTMY:
++ if (fused.As == s390x.ASTMY && size != 4) ||
++ (fused.As == s390x.ASTMG && size != 8) {
++ fused = p
++ continue
++ }
++ offset := size * int64(fused.Reg-fused.From.Reg+1)
++ if fused.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, offset) {
++ fused.Reg = p.From.Reg
++ excise(r)
++ } else {
++ fused = p
++ }
++ case s390x.ALMG:
++ offset := 8 * int64(fused.To.Reg-fused.Reg+1)
++ if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, offset) {
++ fused.To.Reg = p.To.Reg
++ excise(r)
++ } else {
++ fused = p
++ }
++ }
++ }
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/prog.go
+@@ -0,0 +1,185 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package s390x
++
++import (
++ "cmd/compile/internal/gc"
++ "cmd/internal/obj"
++ "cmd/internal/obj/s390x"
++)
++
++const (
++ LeftRdwr uint32 = gc.LeftRead | gc.LeftWrite
++ RightRdwr uint32 = gc.RightRead | gc.RightWrite
++)
++
++// This table gives the basic information about instruction
++// generated by the compiler and processed in the optimizer.
++// See opt.h for bit definitions.
++//
++// Instructions not generated need not be listed.
++// As an exception to that rule, we typically write down all the
++// size variants of an operation even if we just use a subset.
++//
++// The table is formatted for 8-space tabs.
++var progtable = [s390x.ALAST]obj.ProgInfo{
++ obj.ATYPE: {Flags: gc.Pseudo | gc.Skip},
++ obj.ATEXT: {Flags: gc.Pseudo},
++ obj.AFUNCDATA: {Flags: gc.Pseudo},
++ obj.APCDATA: {Flags: gc.Pseudo},
++ obj.AUNDEF: {Flags: gc.Break},
++ obj.AUSEFIELD: {Flags: gc.OK},
++ obj.ACHECKNIL: {Flags: gc.LeftRead},
++ obj.AVARDEF: {Flags: gc.Pseudo | gc.RightWrite},
++ obj.AVARKILL: {Flags: gc.Pseudo | gc.RightWrite},
++ obj.AVARLIVE: {Flags: gc.Pseudo | gc.LeftRead},
++
++ // NOP is an internal no-op that also stands
++ // for USED and SET annotations.
++ obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
++
++ // Integer
++ s390x.AADD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ASUB: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ANEG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AAND: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AXOR: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AMULLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AMULLW: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ADIVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ADIVDU: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ASLD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ASRD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ASRAD: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ARLL: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ARLLG: {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.ACMP: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
++ s390x.ACMPU: {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
++
++ // Floating point.
++ s390x.AFADD: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFSUB: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFMUL: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFDIV: {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
++ s390x.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
++ s390x.ACEBR: {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
++ s390x.ALEDBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ALDEBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.AFSQRT: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
++
++ // Conversions
++ s390x.ACEFBRA: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACDFBRA: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACEGBRA: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACDGBRA: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACFEBRA: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACFDBRA: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACGEBRA: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACGDBRA: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACELFBR: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACDLFBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACELGBR: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACDLGBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACLFEBR: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACLFDBR: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACLGEBR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
++ s390x.ACLGDBR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
++
++ // Moves
++ s390x.AMOVB: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVH: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVW: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AMOVD: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
++ s390x.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
++ s390x.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
++
++ // Storage operations
++ s390x.AMVC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
++ s390x.ACLC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightRead | gc.RightAddr},
++ s390x.AXC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
++ s390x.AOC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
++ s390x.ANC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
++
++ // Jumps
++ s390x.ABR: {Flags: gc.Jump | gc.Break},
++ s390x.ABL: {Flags: gc.Call},
++ s390x.ABEQ: {Flags: gc.Cjmp},
++ s390x.ABNE: {Flags: gc.Cjmp},
++ s390x.ABGE: {Flags: gc.Cjmp},
++ s390x.ABLT: {Flags: gc.Cjmp},
++ s390x.ABGT: {Flags: gc.Cjmp},
++ s390x.ABLE: {Flags: gc.Cjmp},
++ s390x.ACMPBEQ: {Flags: gc.Cjmp},
++ s390x.ACMPBNE: {Flags: gc.Cjmp},
++ s390x.ACMPBGE: {Flags: gc.Cjmp},
++ s390x.ACMPBLT: {Flags: gc.Cjmp},
++ s390x.ACMPBGT: {Flags: gc.Cjmp},
++ s390x.ACMPBLE: {Flags: gc.Cjmp},
++ s390x.ACMPUBEQ: {Flags: gc.Cjmp},
++ s390x.ACMPUBNE: {Flags: gc.Cjmp},
++ s390x.ACMPUBGE: {Flags: gc.Cjmp},
++ s390x.ACMPUBLT: {Flags: gc.Cjmp},
++ s390x.ACMPUBGT: {Flags: gc.Cjmp},
++ s390x.ACMPUBLE: {Flags: gc.Cjmp},
++
++ // Macros
++ s390x.ACLEAR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
++
++ // Load/store multiple
++ s390x.ASTMG: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
++ s390x.ASTMY: {Flags: gc.SizeL | gc.LeftRead | gc.RightAddr | gc.RightWrite},
++ s390x.ALMG: {Flags: gc.SizeQ | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
++ s390x.ALMY: {Flags: gc.SizeL | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
++
++ obj.ARET: {Flags: gc.Break},
++}
++
++func proginfo(p *obj.Prog) {
++ info := &p.Info
++ *info = progtable[p.As]
++ if info.Flags == 0 {
++ gc.Fatalf("proginfo: unknown instruction %v", p)
++ }
++
++ if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
++ info.Flags &^= gc.RegRead
++ info.Flags |= gc.RightRead /*CanRegRead |*/
++ }
++
++ if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
++ info.Regindex |= RtoB(int(p.From.Reg))
++ }
++
++ if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
++ info.Regindex |= RtoB(int(p.To.Reg))
++ }
++
++ if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
++ info.Flags &^= gc.LeftRead
++ info.Flags |= gc.LeftAddr
++ }
++
++ switch p.As {
++ // load multiple sets a range of registers
++ case s390x.ALMG, s390x.ALMY:
++ for r := p.Reg; r <= p.To.Reg; r++ {
++ info.Regset |= RtoB(int(r))
++ }
++ // store multiple reads a range of registers
++ case s390x.ASTMG, s390x.ASTMY:
++ for r := p.From.Reg; r <= p.Reg; r++ {
++ info.Reguse |= RtoB(int(r))
++ }
++ }
++}
+--- /dev/null
++++ b/src/cmd/compile/internal/s390x/reg.go
+@@ -0,0 +1,130 @@
++// Derived from Inferno utils/6c/reg.c
++// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import "cmd/internal/obj/s390x"
++import "cmd/compile/internal/gc"
++
++const (
++ NREGVAR = 32 /* 16 general + 16 floating */
++)
++
++var regname = []string{
++ ".R0",
++ ".R1",
++ ".R2",
++ ".R3",
++ ".R4",
++ ".R5",
++ ".R6",
++ ".R7",
++ ".R8",
++ ".R9",
++ ".R10",
++ ".R11",
++ ".R12",
++ ".R13",
++ ".R14",
++ ".R15",
++ ".F0",
++ ".F1",
++ ".F2",
++ ".F3",
++ ".F4",
++ ".F5",
++ ".F6",
++ ".F7",
++ ".F8",
++ ".F9",
++ ".F10",
++ ".F11",
++ ".F12",
++ ".F13",
++ ".F14",
++ ".F15",
++}
++
++func regnames(n *int) []string {
++ *n = NREGVAR
++ return regname
++}
++
++func excludedregs() uint64 {
++ // Exclude registers with fixed functions
++ return RtoB(s390x.REG_R0) |
++ RtoB(s390x.REGSP) |
++ RtoB(s390x.REGG) |
++ RtoB(s390x.REGTMP) |
++ RtoB(s390x.REGTMP2) |
++ RtoB(s390x.REG_LR)
++}
++
++func doregbits(r int) uint64 {
++ return 0
++}
++
++/*
++ * track register variables including external registers:
++ * bit reg
++ * 0 R0
++ * ... ...
++ * 15 R15
++ * 16+0 F0
++ * 16+1 F1
++ * ... ...
++ * 16+15 F15
++ */
++func RtoB(r int) uint64 {
++ if r >= s390x.REG_R0 && r <= s390x.REG_R15 {
++ return 1 << uint(r-s390x.REG_R0)
++ }
++ if r >= s390x.REG_F0 && r <= s390x.REG_F15 {
++ return 1 << uint(16+r-s390x.REG_F0)
++ }
++ return 0
++}
++
++func BtoR(b uint64) int {
++ b &= 0xffff
++ if b == 0 {
++ return 0
++ }
++ return gc.Bitno(b) + s390x.REG_R0
++}
++
++func BtoF(b uint64) int {
++ b >>= 16
++ b &= 0xffff
++ if b == 0 {
++ return 0
++ }
++ return gc.Bitno(b) + s390x.REG_F0
++}
+--- a/src/cmd/compile/main.go
++++ b/src/cmd/compile/main.go
+@@ -10,6 +10,7 @@
+ "cmd/compile/internal/arm64"
+ "cmd/compile/internal/mips64"
+ "cmd/compile/internal/ppc64"
++ "cmd/compile/internal/s390x"
+ "cmd/compile/internal/x86"
+ "cmd/internal/obj"
+ "fmt"
+@@ -38,5 +39,7 @@
+ mips64.Main()
+ case "ppc64", "ppc64le":
+ ppc64.Main()
++ case "s390x":
++ s390x.Main()
+ }
+ }
+--- a/src/cmd/dist/build.go
++++ b/src/cmd/dist/build.go
+@@ -58,6 +58,7 @@
+ "mips64le",
+ "ppc64",
+ "ppc64le",
++ "s390x",
+ }
+
+ // The known operating systems.
+--- a/src/cmd/dist/buildtool.go
++++ b/src/cmd/dist/buildtool.go
+@@ -37,6 +37,7 @@
+ "compile/internal/mips64",
+ "compile/internal/ppc64",
+ "compile/internal/x86",
++ "compile/internal/s390x",
+ "internal/gcprog",
+ "internal/obj",
+ "internal/obj/arm",
+@@ -44,6 +45,7 @@
+ "internal/obj/mips",
+ "internal/obj/ppc64",
+ "internal/obj/x86",
++ "internal/obj/s390x",
+ "link",
+ "link/internal/amd64",
+ "link/internal/arm",
+@@ -52,6 +54,7 @@
+ "link/internal/mips64",
+ "link/internal/ppc64",
+ "link/internal/x86",
++ "link/internal/s390x",
+ }
+
+ func bootstrapBuildTools() {
+--- a/src/cmd/dist/test.go
++++ b/src/cmd/dist/test.go
+@@ -667,7 +667,7 @@
+ return false
+ case "shared":
+ switch pair {
+- case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le":
++ case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x":
+ return true
+ }
+ return false
+@@ -711,7 +711,7 @@
+ case "android-arm",
+ "dragonfly-386", "dragonfly-amd64",
+ "freebsd-386", "freebsd-amd64", "freebsd-arm",
+- "linux-386", "linux-amd64", "linux-arm",
++ "linux-386", "linux-amd64", "linux-arm", "linux-s390x",
+ "netbsd-386", "netbsd-amd64":
+
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external")
+--- a/src/cmd/dist/util.go
++++ b/src/cmd/dist/util.go
+@@ -452,6 +452,8 @@
+ } else {
+ gohostarch = "mips64le"
+ }
++ case strings.Contains(out, "s390x"):
++ gohostarch = "s390x"
+ case gohostos == "darwin":
+ if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM_") {
+ gohostarch = "arm"
+--- a/src/cmd/go/build.go
++++ b/src/cmd/go/build.go
+@@ -377,7 +377,7 @@
+ fatalf("-buildmode=pie not supported by gccgo")
+ } else {
+ switch platform {
+- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le",
++ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
+ "android/amd64", "android/arm", "android/arm64", "android/386":
+ codegenArg = "-shared"
+ default:
+@@ -391,7 +391,7 @@
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
++ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+ default:
+ fatalf("-buildmode=shared not supported on %s\n", platform)
+ }
+@@ -409,7 +409,7 @@
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
++ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+ buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
+ default:
+ fatalf("-linkshared not supported on %s\n", platform)
+@@ -2976,6 +2976,8 @@
+ return []string{"-m64"}
+ case "arm":
+ return []string{"-marm"} // not thumb
++ case "s390x":
++ return []string{"-m64", "-march=z196"}
+ }
+ return nil
+ }
+--- a/src/cmd/internal/obj/link.go
++++ b/src/cmd/internal/obj/link.go
+@@ -521,6 +521,9 @@
+ // R_ADDRPOWER_DS but inserts the offset from the TOC to the address of the the
+ // relocated symbol rather than the symbol's address.
+ R_ADDRPOWER_TOCREL_DS
++
++ // R_PCRELDBL is for S390x (z) 2-byte aligned addresses (e.g. R_390_PLT32DBL)
++ R_PCRELDBL
+ )
+
+ type Auto struct {
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/a.out.go
+@@ -0,0 +1,888 @@
++// Based on cmd/internal/obj/ppc64/a.out.go.
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import "cmd/internal/obj"
++
++//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p s390x
++
++const (
++ NSNAME = 8
++ NSYM = 50
++ NREG = 16 // number of general purpose registers
++ NFREG = 16 // number of floating point registers
++)
++
++const (
++ REG_R0 = obj.RBaseS390X + iota
++ REG_R1
++ REG_R2
++ REG_R3
++ REG_R4
++ REG_R5
++ REG_R6
++ REG_R7
++ REG_R8
++ REG_R9
++ REG_R10
++ REG_R11
++ REG_R12
++ REG_R13
++ REG_R14
++ REG_R15
++
++ REG_F0
++ REG_F1
++ REG_F2
++ REG_F3
++ REG_F4
++ REG_F5
++ REG_F6
++ REG_F7
++ REG_F8
++ REG_F9
++ REG_F10
++ REG_F11
++ REG_F12
++ REG_F13
++ REG_F14
++ REG_F15
++
++ // V0-V15 are aliases for F0-F15
++ // We keep them in a separate space to make printing etc. easier
++ // If the code generator ever emits vector instructions it will
++ // need to take into account the aliasing.
++ REG_V0
++ REG_V1
++ REG_V2
++ REG_V3
++ REG_V4
++ REG_V5
++ REG_V6
++ REG_V7
++ REG_V8
++ REG_V9
++ REG_V10
++ REG_V11
++ REG_V12
++ REG_V13
++ REG_V14
++ REG_V15
++ REG_V16
++ REG_V17
++ REG_V18
++ REG_V19
++ REG_V20
++ REG_V21
++ REG_V22
++ REG_V23
++ REG_V24
++ REG_V25
++ REG_V26
++ REG_V27
++ REG_V28
++ REG_V29
++ REG_V30
++ REG_V31
++
++ REG_AR0
++ REG_AR1
++ REG_AR2
++ REG_AR3
++ REG_AR4
++ REG_AR5
++ REG_AR6
++ REG_AR7
++ REG_AR8
++ REG_AR9
++ REG_AR10
++ REG_AR11
++ REG_AR12
++ REG_AR13
++ REG_AR14
++ REG_AR15
++
++ REG_RESERVED // end of allocated registers
++
++ REGZERO = REG_R0 // set to zero
++ REGARG = -1 // -1 disables passing the first argument in register
++ REGRT1 = REG_R3 // used during zeroing of the stack - not reserved
++ REGRT2 = REG_R4 // used during zeroing of the stack - not reserved
++ REGTMP = REG_R10 // scratch register used in the assembler and linker
++ REGTMP2 = REG_R11 // scratch register used in the assembler and linker
++ REGCTXT = REG_R12 // context for closures
++ REGG = REG_R13 // G
++ REG_LR = REG_R14 // link register
++ REGSP = REG_R15 // stack pointer
++)
++
++const (
++ BIG = 32768 - 8
++ DISP12 = 4096
++ DISP16 = 65536
++ DISP20 = 1048576
++)
++
++const (
++ // mark flags
++ LABEL = 1 << 0
++ LEAF = 1 << 1
++ FLOAT = 1 << 2
++ BRANCH = 1 << 3
++ LOAD = 1 << 4
++ FCMP = 1 << 5
++ SYNC = 1 << 6
++ LIST = 1 << 7
++ FOLL = 1 << 8
++ NOSCHED = 1 << 9
++)
++
++const ( // comments from func aclass in asmz.go
++ C_NONE = iota
++ C_REG // general-purpose register (64-bit)
++ C_FREG // floating-point register (64-bit)
++ C_VREG // vector register (128-bit)
++ C_AREG // access register (32-bit)
++ C_ZCON // constant == 0
++ C_SCON // 0 <= constant <= 0x7fff (positive int16)
++ C_UCON // constant & 0xffff == 0 (int16 or uint16)
++ C_ADDCON // 0 > constant >= -0x8000 (negative int16)
++ C_ANDCON // constant <= 0xffff
++ C_LCON // constant (int32 or uint32)
++ C_DCON // constant (int64 or uint64)
++ C_SACON // computed address, 16-bit displacement, possibly SP-relative
++ C_LACON // computed address, 32-bit displacement, possibly SP-relative
++	C_DACON    // computed address, 64-bit displacement?
++ C_SBRA // short branch
++ C_LBRA // long branch
++ C_SAUTO // short auto
++ C_LAUTO // long auto
++ C_ZOREG // heap address, register-based, displacement == 0
++ C_SOREG // heap address, register-based, int16 displacement
++ C_LOREG // heap address, register-based, int32 displacement
++ C_TLS_LE // TLS - local exec model (for executables)
++ C_TLS_IE // TLS - initial exec model (for shared libraries loaded at program startup)
++ C_GOK // general address
++ C_ADDR // relocation for extern or static symbols (loads and stores)
++ C_SYMADDR // relocation for extern or static symbols (address taking)
++ C_GOTADDR // GOT slot for a symbol in -dynlink mode
++ C_TEXTSIZE // text size
++ C_ANY
++ C_NCLASS // must be the last
++)
++
++const (
++ // integer arithmetic
++ AADD = obj.ABaseS390X + obj.A_ARCHSPECIFIC + iota
++ AADDC
++ AADDME
++ AADDE
++ AADDZE
++ ADIVW
++ ADIVWU
++ ADIVD
++ ADIVDU
++ AMULLW
++ AMULLD
++ AMULHDU
++ ASUB
++ ASUBC
++ ASUBME
++ ASUBV
++ ASUBE
++ ASUBZE
++ ANEG
++
++ // integer moves
++ AMOVWBR
++ AMOVB
++ AMOVBZ
++ AMOVH
++ AMOVHBR
++ AMOVHZ
++ AMOVW
++ AMOVWZ
++ AMOVD
++ AMOVDBR
++
++ // integer bitwise
++ AAND
++ AANDN
++ ANAND
++ ANOR
++ AOR
++ AORN
++ AXOR
++ ASLW
++ ASLD
++ ASRW
++ ASRAW
++ ASRD
++ ASRAD
++ ARLL
++ ARLLG
++
++ // floating point
++ AFABS
++ AFADD
++ AFADDS
++ AFCMPO
++ AFCMPU
++ ACEBR
++ AFDIV
++ AFDIVS
++ AFMADD
++ AFMADDS
++ AFMOVD
++ AFMOVS
++ AFMSUB
++ AFMSUBS
++ AFMUL
++ AFMULS
++ AFNABS
++ AFNEG
++ AFNMADD
++ AFNMADDS
++ AFNMSUB
++ AFNMSUBS
++ ALEDBR
++ ALDEBR
++ AFSUB
++ AFSUBS
++ AFSQRT
++ AFSQRTS
++
++ // convert from int32/int64 to float/float64
++ ACEFBRA
++ ACDFBRA
++ ACEGBRA
++ ACDGBRA
++
++ // convert from float/float64 to int32/int64
++ ACFEBRA
++ ACFDBRA
++ ACGEBRA
++ ACGDBRA
++
++ // convert from uint32/uint64 to float/float64
++ ACELFBR
++ ACDLFBR
++ ACELGBR
++ ACDLGBR
++
++ // convert from float/float64 to uint32/uint64
++ ACLFEBR
++ ACLFDBR
++ ACLGEBR
++ ACLGDBR
++
++ // compare
++ ACMP
++ ACMPU
++ ACMPW
++ ACMPWU
++
++ // compare and swap
++ ACS
++ ACSG
++
++ // serialize
++ ASYNC
++
++ // branch
++ ABC
++ ABCL
++ ABEQ
++ ABGE
++ ABGT
++ ABLE
++ ABLT
++ ABNE
++ ABVC
++ ABVS
++ ASYSCALL
++
++ // compare and branch
++ ACMPBEQ
++ ACMPBGE
++ ACMPBGT
++ ACMPBLE
++ ACMPBLT
++ ACMPBNE
++ ACMPUBEQ
++ ACMPUBGE
++ ACMPUBGT
++ ACMPUBLE
++ ACMPUBLT
++ ACMPUBNE
++
++ // storage-and-storage
++ AMVC
++ ACLC
++ AXC
++ AOC
++ ANC
++
++ // load
++ AEXRL
++ ALARL
++ ALA
++ ALAY
++
++ // load/store multiple
++ ALMY
++ ALMG
++ ASTMY
++ ASTMG
++
++ // store clock
++ ASTCK
++ ASTCKC
++ ASTCKE
++ ASTCKF
++
++ // macros
++ ACLEAR
++
++ // vector
++ AVA
++ AVAB
++ AVAH
++ AVAF
++ AVAG
++ AVAQ
++ AVACC
++ AVACCB
++ AVACCH
++ AVACCF
++ AVACCG
++ AVACCQ
++ AVAC
++ AVACQ
++ AVACCC
++ AVACCCQ
++ AVN
++ AVNC
++ AVAVG
++ AVAVGB
++ AVAVGH
++ AVAVGF
++ AVAVGG
++ AVAVGL
++ AVAVGLB
++ AVAVGLH
++ AVAVGLF
++ AVAVGLG
++ AVCKSM
++ AVCEQ
++ AVCEQB
++ AVCEQH
++ AVCEQF
++ AVCEQG
++ AVCEQBS
++ AVCEQHS
++ AVCEQFS
++ AVCEQGS
++ AVCH
++ AVCHB
++ AVCHH
++ AVCHF
++ AVCHG
++ AVCHBS
++ AVCHHS
++ AVCHFS
++ AVCHGS
++ AVCHL
++ AVCHLB
++ AVCHLH
++ AVCHLF
++ AVCHLG
++ AVCHLBS
++ AVCHLHS
++ AVCHLFS
++ AVCHLGS
++ AVCLZ
++ AVCLZB
++ AVCLZH
++ AVCLZF
++ AVCLZG
++ AVCTZ
++ AVCTZB
++ AVCTZH
++ AVCTZF
++ AVCTZG
++ AVEC
++ AVECB
++ AVECH
++ AVECF
++ AVECG
++ AVECL
++ AVECLB
++ AVECLH
++ AVECLF
++ AVECLG
++ AVERIM
++ AVERIMB
++ AVERIMH
++ AVERIMF
++ AVERIMG
++ AVERLL
++ AVERLLB
++ AVERLLH
++ AVERLLF
++ AVERLLG
++ AVERLLV
++ AVERLLVB
++ AVERLLVH
++ AVERLLVF
++ AVERLLVG
++ AVESLV
++ AVESLVB
++ AVESLVH
++ AVESLVF
++ AVESLVG
++ AVESL
++ AVESLB
++ AVESLH
++ AVESLF
++ AVESLG
++ AVESRA
++ AVESRAB
++ AVESRAH
++ AVESRAF
++ AVESRAG
++ AVESRAV
++ AVESRAVB
++ AVESRAVH
++ AVESRAVF
++ AVESRAVG
++ AVESRL
++ AVESRLB
++ AVESRLH
++ AVESRLF
++ AVESRLG
++ AVESRLV
++ AVESRLVB
++ AVESRLVH
++ AVESRLVF
++ AVESRLVG
++ AVX
++ AVFAE
++ AVFAEB
++ AVFAEH
++ AVFAEF
++ AVFAEBS
++ AVFAEHS
++ AVFAEFS
++ AVFAEZB
++ AVFAEZH
++ AVFAEZF
++ AVFAEZBS
++ AVFAEZHS
++ AVFAEZFS
++ AVFEE
++ AVFEEB
++ AVFEEH
++ AVFEEF
++ AVFEEBS
++ AVFEEHS
++ AVFEEFS
++ AVFEEZB
++ AVFEEZH
++ AVFEEZF
++ AVFEEZBS
++ AVFEEZHS
++ AVFEEZFS
++ AVFENE
++ AVFENEB
++ AVFENEH
++ AVFENEF
++ AVFENEBS
++ AVFENEHS
++ AVFENEFS
++ AVFENEZB
++ AVFENEZH
++ AVFENEZF
++ AVFENEZBS
++ AVFENEZHS
++ AVFENEZFS
++ AVFA
++ AVFADB
++ AWFADB
++ AWFK
++ AWFKDB
++ AVFCE
++ AVFCEDB
++ AVFCEDBS
++ AWFCEDB
++ AWFCEDBS
++ AVFCH
++ AVFCHDB
++ AVFCHDBS
++ AWFCHDB
++ AWFCHDBS
++ AVFCHE
++ AVFCHEDB
++ AVFCHEDBS
++ AWFCHEDB
++ AWFCHEDBS
++ AWFC
++ AWFCDB
++ AVCDG
++ AVCDGB
++ AWCDGB
++ AVCDLG
++ AVCDLGB
++ AWCDLGB
++ AVCGD
++ AVCGDB
++ AWCGDB
++ AVCLGD
++ AVCLGDB
++ AWCLGDB
++ AVFD
++ AVFDDB
++ AWFDDB
++ AVLDE
++ AVLDEB
++ AWLDEB
++ AVLED
++ AVLEDB
++ AWLEDB
++ AVFM
++ AVFMDB
++ AWFMDB
++ AVFMA
++ AVFMADB
++ AWFMADB
++ AVFMS
++ AVFMSDB
++ AWFMSDB
++ AVFPSO
++ AVFPSODB
++ AWFPSODB
++ AVFLCDB
++ AWFLCDB
++ AVFLNDB
++ AWFLNDB
++ AVFLPDB
++ AWFLPDB
++ AVFSQ
++ AVFSQDB
++ AWFSQDB
++ AVFS
++ AVFSDB
++ AWFSDB
++ AVFTCI
++ AVFTCIDB
++ AWFTCIDB
++ AVGFM
++ AVGFMB
++ AVGFMH
++ AVGFMF
++ AVGFMG
++ AVGFMA
++ AVGFMAB
++ AVGFMAH
++ AVGFMAF
++ AVGFMAG
++ AVGEF
++ AVGEG
++ AVGBM
++ AVZERO
++ AVONE
++ AVGM
++ AVGMB
++ AVGMH
++ AVGMF
++ AVGMG
++ AVISTR
++ AVISTRB
++ AVISTRH
++ AVISTRF
++ AVISTRBS
++ AVISTRHS
++ AVISTRFS
++ AVL
++ AVLR
++ AVLREP
++ AVLREPB
++ AVLREPH
++ AVLREPF
++ AVLREPG
++ AVLC
++ AVLCB
++ AVLCH
++ AVLCF
++ AVLCG
++ AVLEH
++ AVLEF
++ AVLEG
++ AVLEB
++ AVLEIH
++ AVLEIF
++ AVLEIG
++ AVLEIB
++ AVFI
++ AVFIDB
++ AWFIDB
++ AVLGV
++ AVLGVB
++ AVLGVH
++ AVLGVF
++ AVLGVG
++ AVLLEZ
++ AVLLEZB
++ AVLLEZH
++ AVLLEZF
++ AVLLEZG
++ AVLM
++ AVLP
++ AVLPB
++ AVLPH
++ AVLPF
++ AVLPG
++ AVLBB
++ AVLVG
++ AVLVGB
++ AVLVGH
++ AVLVGF
++ AVLVGG
++ AVLVGP
++ AVLL
++ AVMX
++ AVMXB
++ AVMXH
++ AVMXF
++ AVMXG
++ AVMXL
++ AVMXLB
++ AVMXLH
++ AVMXLF
++ AVMXLG
++ AVMRH
++ AVMRHB
++ AVMRHH
++ AVMRHF
++ AVMRHG
++ AVMRL
++ AVMRLB
++ AVMRLH
++ AVMRLF
++ AVMRLG
++ AVMN
++ AVMNB
++ AVMNH
++ AVMNF
++ AVMNG
++ AVMNL
++ AVMNLB
++ AVMNLH
++ AVMNLF
++ AVMNLG
++ AVMAE
++ AVMAEB
++ AVMAEH
++ AVMAEF
++ AVMAH
++ AVMAHB
++ AVMAHH
++ AVMAHF
++ AVMALE
++ AVMALEB
++ AVMALEH
++ AVMALEF
++ AVMALH
++ AVMALHB
++ AVMALHH
++ AVMALHF
++ AVMALO
++ AVMALOB
++ AVMALOH
++ AVMALOF
++ AVMAL
++ AVMALB
++ AVMALHW
++ AVMALF
++ AVMAO
++ AVMAOB
++ AVMAOH
++ AVMAOF
++ AVME
++ AVMEB
++ AVMEH
++ AVMEF
++ AVMH
++ AVMHB
++ AVMHH
++ AVMHF
++ AVMLE
++ AVMLEB
++ AVMLEH
++ AVMLEF
++ AVMLH
++ AVMLHB
++ AVMLHH
++ AVMLHF
++ AVMLO
++ AVMLOB
++ AVMLOH
++ AVMLOF
++ AVML
++ AVMLB
++ AVMLHW
++ AVMLF
++ AVMO
++ AVMOB
++ AVMOH
++ AVMOF
++ AVNO
++ AVNOT
++ AVO
++ AVPK
++ AVPKH
++ AVPKF
++ AVPKG
++ AVPKLS
++ AVPKLSH
++ AVPKLSF
++ AVPKLSG
++ AVPKLSHS
++ AVPKLSFS
++ AVPKLSGS
++ AVPKS
++ AVPKSH
++ AVPKSF
++ AVPKSG
++ AVPKSHS
++ AVPKSFS
++ AVPKSGS
++ AVPERM
++ AVPDI
++ AVPOPCT
++ AVREP
++ AVREPB
++ AVREPH
++ AVREPF
++ AVREPG
++ AVREPI
++ AVREPIB
++ AVREPIH
++ AVREPIF
++ AVREPIG
++ AVSCEF
++ AVSCEG
++ AVSEL
++ AVSL
++ AVSLB
++ AVSLDB
++ AVSRA
++ AVSRAB
++ AVSRL
++ AVSRLB
++ AVSEG
++ AVSEGB
++ AVSEGH
++ AVSEGF
++ AVST
++ AVSTEH
++ AVSTEF
++ AVSTEG
++ AVSTEB
++ AVSTM
++ AVSTL
++ AVSTRC
++ AVSTRCB
++ AVSTRCH
++ AVSTRCF
++ AVSTRCBS
++ AVSTRCHS
++ AVSTRCFS
++ AVSTRCZB
++ AVSTRCZH
++ AVSTRCZF
++ AVSTRCZBS
++ AVSTRCZHS
++ AVSTRCZFS
++ AVS
++ AVSB
++ AVSH
++ AVSF
++ AVSG
++ AVSQ
++ AVSCBI
++ AVSCBIB
++ AVSCBIH
++ AVSCBIF
++ AVSCBIG
++ AVSCBIQ
++ AVSBCBI
++ AVSBCBIQ
++ AVSBI
++ AVSBIQ
++ AVSUMG
++ AVSUMGH
++ AVSUMGF
++ AVSUMQ
++ AVSUMQF
++ AVSUMQG
++ AVSUM
++ AVSUMB
++ AVSUMH
++ AVTM
++ AVUPH
++ AVUPHB
++ AVUPHH
++ AVUPHF
++ AVUPLH
++ AVUPLHB
++ AVUPLHH
++ AVUPLHF
++ AVUPLL
++ AVUPLLB
++ AVUPLLH
++ AVUPLLF
++ AVUPL
++ AVUPLB
++ AVUPLHW
++ AVUPLF
++
++ // binary
++ ABYTE
++ AWORD
++ ADWORD
++
++ // end marker
++ ALAST
++
++ // aliases
++ ABR = obj.AJMP
++ ABL = obj.ACALL
++)
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/anames.go
+@@ -0,0 +1,650 @@
++// Generated by stringer -i a.out.go -o anames.go -p s390x
++// Do not edit.
++
++package s390x
++
++import "cmd/internal/obj"
++
++var Anames = []string{
++ obj.A_ARCHSPECIFIC: "ADD",
++ "ADDC",
++ "ADDME",
++ "ADDE",
++ "ADDZE",
++ "DIVW",
++ "DIVWU",
++ "DIVD",
++ "DIVDU",
++ "MULLW",
++ "MULLD",
++ "MULHDU",
++ "SUB",
++ "SUBC",
++ "SUBME",
++ "SUBV",
++ "SUBE",
++ "SUBZE",
++ "NEG",
++ "MOVWBR",
++ "MOVB",
++ "MOVBZ",
++ "MOVH",
++ "MOVHBR",
++ "MOVHZ",
++ "MOVW",
++ "MOVWZ",
++ "MOVD",
++ "MOVDBR",
++ "AND",
++ "ANDN",
++ "NAND",
++ "NOR",
++ "OR",
++ "ORN",
++ "XOR",
++ "SLW",
++ "SLD",
++ "SRW",
++ "SRAW",
++ "SRD",
++ "SRAD",
++ "RLL",
++ "RLLG",
++ "FABS",
++ "FADD",
++ "FADDS",
++ "FCMPO",
++ "FCMPU",
++ "CEBR",
++ "FDIV",
++ "FDIVS",
++ "FMADD",
++ "FMADDS",
++ "FMOVD",
++ "FMOVS",
++ "FMSUB",
++ "FMSUBS",
++ "FMUL",
++ "FMULS",
++ "FNABS",
++ "FNEG",
++ "FNMADD",
++ "FNMADDS",
++ "FNMSUB",
++ "FNMSUBS",
++ "LEDBR",
++ "LDEBR",
++ "FSUB",
++ "FSUBS",
++ "FSQRT",
++ "FSQRTS",
++ "CEFBRA",
++ "CDFBRA",
++ "CEGBRA",
++ "CDGBRA",
++ "CFEBRA",
++ "CFDBRA",
++ "CGEBRA",
++ "CGDBRA",
++ "CELFBR",
++ "CDLFBR",
++ "CELGBR",
++ "CDLGBR",
++ "CLFEBR",
++ "CLFDBR",
++ "CLGEBR",
++ "CLGDBR",
++ "CMP",
++ "CMPU",
++ "CMPW",
++ "CMPWU",
++ "CS",
++ "CSG",
++ "SYNC",
++ "BC",
++ "BCL",
++ "BEQ",
++ "BGE",
++ "BGT",
++ "BLE",
++ "BLT",
++ "BNE",
++ "BVC",
++ "BVS",
++ "SYSCALL",
++ "CMPBEQ",
++ "CMPBGE",
++ "CMPBGT",
++ "CMPBLE",
++ "CMPBLT",
++ "CMPBNE",
++ "CMPUBEQ",
++ "CMPUBGE",
++ "CMPUBGT",
++ "CMPUBLE",
++ "CMPUBLT",
++ "CMPUBNE",
++ "MVC",
++ "CLC",
++ "XC",
++ "OC",
++ "NC",
++ "EXRL",
++ "LARL",
++ "LA",
++ "LAY",
++ "LMY",
++ "LMG",
++ "STMY",
++ "STMG",
++ "STCK",
++ "STCKC",
++ "STCKE",
++ "STCKF",
++ "CLEAR",
++ "VA",
++ "VAB",
++ "VAH",
++ "VAF",
++ "VAG",
++ "VAQ",
++ "VACC",
++ "VACCB",
++ "VACCH",
++ "VACCF",
++ "VACCG",
++ "VACCQ",
++ "VAC",
++ "VACQ",
++ "VACCC",
++ "VACCCQ",
++ "VN",
++ "VNC",
++ "VAVG",
++ "VAVGB",
++ "VAVGH",
++ "VAVGF",
++ "VAVGG",
++ "VAVGL",
++ "VAVGLB",
++ "VAVGLH",
++ "VAVGLF",
++ "VAVGLG",
++ "VCKSM",
++ "VCEQ",
++ "VCEQB",
++ "VCEQH",
++ "VCEQF",
++ "VCEQG",
++ "VCEQBS",
++ "VCEQHS",
++ "VCEQFS",
++ "VCEQGS",
++ "VCH",
++ "VCHB",
++ "VCHH",
++ "VCHF",
++ "VCHG",
++ "VCHBS",
++ "VCHHS",
++ "VCHFS",
++ "VCHGS",
++ "VCHL",
++ "VCHLB",
++ "VCHLH",
++ "VCHLF",
++ "VCHLG",
++ "VCHLBS",
++ "VCHLHS",
++ "VCHLFS",
++ "VCHLGS",
++ "VCLZ",
++ "VCLZB",
++ "VCLZH",
++ "VCLZF",
++ "VCLZG",
++ "VCTZ",
++ "VCTZB",
++ "VCTZH",
++ "VCTZF",
++ "VCTZG",
++ "VEC",
++ "VECB",
++ "VECH",
++ "VECF",
++ "VECG",
++ "VECL",
++ "VECLB",
++ "VECLH",
++ "VECLF",
++ "VECLG",
++ "VERIM",
++ "VERIMB",
++ "VERIMH",
++ "VERIMF",
++ "VERIMG",
++ "VERLL",
++ "VERLLB",
++ "VERLLH",
++ "VERLLF",
++ "VERLLG",
++ "VERLLV",
++ "VERLLVB",
++ "VERLLVH",
++ "VERLLVF",
++ "VERLLVG",
++ "VESLV",
++ "VESLVB",
++ "VESLVH",
++ "VESLVF",
++ "VESLVG",
++ "VESL",
++ "VESLB",
++ "VESLH",
++ "VESLF",
++ "VESLG",
++ "VESRA",
++ "VESRAB",
++ "VESRAH",
++ "VESRAF",
++ "VESRAG",
++ "VESRAV",
++ "VESRAVB",
++ "VESRAVH",
++ "VESRAVF",
++ "VESRAVG",
++ "VESRL",
++ "VESRLB",
++ "VESRLH",
++ "VESRLF",
++ "VESRLG",
++ "VESRLV",
++ "VESRLVB",
++ "VESRLVH",
++ "VESRLVF",
++ "VESRLVG",
++ "VX",
++ "VFAE",
++ "VFAEB",
++ "VFAEH",
++ "VFAEF",
++ "VFAEBS",
++ "VFAEHS",
++ "VFAEFS",
++ "VFAEZB",
++ "VFAEZH",
++ "VFAEZF",
++ "VFAEZBS",
++ "VFAEZHS",
++ "VFAEZFS",
++ "VFEE",
++ "VFEEB",
++ "VFEEH",
++ "VFEEF",
++ "VFEEBS",
++ "VFEEHS",
++ "VFEEFS",
++ "VFEEZB",
++ "VFEEZH",
++ "VFEEZF",
++ "VFEEZBS",
++ "VFEEZHS",
++ "VFEEZFS",
++ "VFENE",
++ "VFENEB",
++ "VFENEH",
++ "VFENEF",
++ "VFENEBS",
++ "VFENEHS",
++ "VFENEFS",
++ "VFENEZB",
++ "VFENEZH",
++ "VFENEZF",
++ "VFENEZBS",
++ "VFENEZHS",
++ "VFENEZFS",
++ "VFA",
++ "VFADB",
++ "WFADB",
++ "WFK",
++ "WFKDB",
++ "VFCE",
++ "VFCEDB",
++ "VFCEDBS",
++ "WFCEDB",
++ "WFCEDBS",
++ "VFCH",
++ "VFCHDB",
++ "VFCHDBS",
++ "WFCHDB",
++ "WFCHDBS",
++ "VFCHE",
++ "VFCHEDB",
++ "VFCHEDBS",
++ "WFCHEDB",
++ "WFCHEDBS",
++ "WFC",
++ "WFCDB",
++ "VCDG",
++ "VCDGB",
++ "WCDGB",
++ "VCDLG",
++ "VCDLGB",
++ "WCDLGB",
++ "VCGD",
++ "VCGDB",
++ "WCGDB",
++ "VCLGD",
++ "VCLGDB",
++ "WCLGDB",
++ "VFD",
++ "VFDDB",
++ "WFDDB",
++ "VLDE",
++ "VLDEB",
++ "WLDEB",
++ "VLED",
++ "VLEDB",
++ "WLEDB",
++ "VFM",
++ "VFMDB",
++ "WFMDB",
++ "VFMA",
++ "VFMADB",
++ "WFMADB",
++ "VFMS",
++ "VFMSDB",
++ "WFMSDB",
++ "VFPSO",
++ "VFPSODB",
++ "WFPSODB",
++ "VFLCDB",
++ "WFLCDB",
++ "VFLNDB",
++ "WFLNDB",
++ "VFLPDB",
++ "WFLPDB",
++ "VFSQ",
++ "VFSQDB",
++ "WFSQDB",
++ "VFS",
++ "VFSDB",
++ "WFSDB",
++ "VFTCI",
++ "VFTCIDB",
++ "WFTCIDB",
++ "VGFM",
++ "VGFMB",
++ "VGFMH",
++ "VGFMF",
++ "VGFMG",
++ "VGFMA",
++ "VGFMAB",
++ "VGFMAH",
++ "VGFMAF",
++ "VGFMAG",
++ "VGEF",
++ "VGEG",
++ "VGBM",
++ "VZERO",
++ "VONE",
++ "VGM",
++ "VGMB",
++ "VGMH",
++ "VGMF",
++ "VGMG",
++ "VISTR",
++ "VISTRB",
++ "VISTRH",
++ "VISTRF",
++ "VISTRBS",
++ "VISTRHS",
++ "VISTRFS",
++ "VL",
++ "VLR",
++ "VLREP",
++ "VLREPB",
++ "VLREPH",
++ "VLREPF",
++ "VLREPG",
++ "VLC",
++ "VLCB",
++ "VLCH",
++ "VLCF",
++ "VLCG",
++ "VLEH",
++ "VLEF",
++ "VLEG",
++ "VLEB",
++ "VLEIH",
++ "VLEIF",
++ "VLEIG",
++ "VLEIB",
++ "VFI",
++ "VFIDB",
++ "WFIDB",
++ "VLGV",
++ "VLGVB",
++ "VLGVH",
++ "VLGVF",
++ "VLGVG",
++ "VLLEZ",
++ "VLLEZB",
++ "VLLEZH",
++ "VLLEZF",
++ "VLLEZG",
++ "VLM",
++ "VLP",
++ "VLPB",
++ "VLPH",
++ "VLPF",
++ "VLPG",
++ "VLBB",
++ "VLVG",
++ "VLVGB",
++ "VLVGH",
++ "VLVGF",
++ "VLVGG",
++ "VLVGP",
++ "VLL",
++ "VMX",
++ "VMXB",
++ "VMXH",
++ "VMXF",
++ "VMXG",
++ "VMXL",
++ "VMXLB",
++ "VMXLH",
++ "VMXLF",
++ "VMXLG",
++ "VMRH",
++ "VMRHB",
++ "VMRHH",
++ "VMRHF",
++ "VMRHG",
++ "VMRL",
++ "VMRLB",
++ "VMRLH",
++ "VMRLF",
++ "VMRLG",
++ "VMN",
++ "VMNB",
++ "VMNH",
++ "VMNF",
++ "VMNG",
++ "VMNL",
++ "VMNLB",
++ "VMNLH",
++ "VMNLF",
++ "VMNLG",
++ "VMAE",
++ "VMAEB",
++ "VMAEH",
++ "VMAEF",
++ "VMAH",
++ "VMAHB",
++ "VMAHH",
++ "VMAHF",
++ "VMALE",
++ "VMALEB",
++ "VMALEH",
++ "VMALEF",
++ "VMALH",
++ "VMALHB",
++ "VMALHH",
++ "VMALHF",
++ "VMALO",
++ "VMALOB",
++ "VMALOH",
++ "VMALOF",
++ "VMAL",
++ "VMALB",
++ "VMALHW",
++ "VMALF",
++ "VMAO",
++ "VMAOB",
++ "VMAOH",
++ "VMAOF",
++ "VME",
++ "VMEB",
++ "VMEH",
++ "VMEF",
++ "VMH",
++ "VMHB",
++ "VMHH",
++ "VMHF",
++ "VMLE",
++ "VMLEB",
++ "VMLEH",
++ "VMLEF",
++ "VMLH",
++ "VMLHB",
++ "VMLHH",
++ "VMLHF",
++ "VMLO",
++ "VMLOB",
++ "VMLOH",
++ "VMLOF",
++ "VML",
++ "VMLB",
++ "VMLHW",
++ "VMLF",
++ "VMO",
++ "VMOB",
++ "VMOH",
++ "VMOF",
++ "VNO",
++ "VNOT",
++ "VO",
++ "VPK",
++ "VPKH",
++ "VPKF",
++ "VPKG",
++ "VPKLS",
++ "VPKLSH",
++ "VPKLSF",
++ "VPKLSG",
++ "VPKLSHS",
++ "VPKLSFS",
++ "VPKLSGS",
++ "VPKS",
++ "VPKSH",
++ "VPKSF",
++ "VPKSG",
++ "VPKSHS",
++ "VPKSFS",
++ "VPKSGS",
++ "VPERM",
++ "VPDI",
++ "VPOPCT",
++ "VREP",
++ "VREPB",
++ "VREPH",
++ "VREPF",
++ "VREPG",
++ "VREPI",
++ "VREPIB",
++ "VREPIH",
++ "VREPIF",
++ "VREPIG",
++ "VSCEF",
++ "VSCEG",
++ "VSEL",
++ "VSL",
++ "VSLB",
++ "VSLDB",
++ "VSRA",
++ "VSRAB",
++ "VSRL",
++ "VSRLB",
++ "VSEG",
++ "VSEGB",
++ "VSEGH",
++ "VSEGF",
++ "VST",
++ "VSTEH",
++ "VSTEF",
++ "VSTEG",
++ "VSTEB",
++ "VSTM",
++ "VSTL",
++ "VSTRC",
++ "VSTRCB",
++ "VSTRCH",
++ "VSTRCF",
++ "VSTRCBS",
++ "VSTRCHS",
++ "VSTRCFS",
++ "VSTRCZB",
++ "VSTRCZH",
++ "VSTRCZF",
++ "VSTRCZBS",
++ "VSTRCZHS",
++ "VSTRCZFS",
++ "VS",
++ "VSB",
++ "VSH",
++ "VSF",
++ "VSG",
++ "VSQ",
++ "VSCBI",
++ "VSCBIB",
++ "VSCBIH",
++ "VSCBIF",
++ "VSCBIG",
++ "VSCBIQ",
++ "VSBCBI",
++ "VSBCBIQ",
++ "VSBI",
++ "VSBIQ",
++ "VSUMG",
++ "VSUMGH",
++ "VSUMGF",
++ "VSUMQ",
++ "VSUMQF",
++ "VSUMQG",
++ "VSUM",
++ "VSUMB",
++ "VSUMH",
++ "VTM",
++ "VUPH",
++ "VUPHB",
++ "VUPHH",
++ "VUPHF",
++ "VUPLH",
++ "VUPLHB",
++ "VUPLHH",
++ "VUPLHF",
++ "VUPLL",
++ "VUPLLB",
++ "VUPLLH",
++ "VUPLLF",
++ "VUPL",
++ "VUPLB",
++ "VUPLHW",
++ "VUPLF",
++ "BYTE",
++ "WORD",
++ "DWORD",
++ "LAST",
++}
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/anamesz.go
+@@ -0,0 +1,35 @@
++package s390x
++
++var cnamesz = []string{
++ "NONE",
++ "REG",
++ "FREG",
++ "VREG",
++ "AREG",
++ "ZCON",
++ "SCON",
++ "UCON",
++ "ADDCON",
++ "ANDCON",
++ "LCON",
++ "DCON",
++ "SACON",
++ "LACON",
++ "DACON",
++ "SBRA",
++ "LBRA",
++ "SAUTO",
++ "LAUTO",
++ "ZOREG",
++ "SOREG",
++ "LOREG",
++ "TLS_LE",
++ "TLS_IE",
++ "GOK",
++ "ADDR",
++ "SYMADDR",
++ "GOTADDR",
++ "TEXTSIZE",
++ "ANY",
++ "NCLASS",
++}
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/asmz.go
+@@ -0,0 +1,4774 @@
++// Based on cmd/internal/obj/ppc64/asm9.go.
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/internal/obj"
++ "log"
++ "math"
++ "sort"
++)
++
++// instruction layout.
++const (
++ FuncAlign = 16
++)
++
++type Optab struct {
++ as int16 // opcode
++ a1 uint8 // From
++ a2 uint8 // Reg
++ a3 uint8 // From3
++ a4 uint8 // To
++ type_ int8
++ param int16 // REGSP for auto variables
++}
++
++var optab = []Optab{
++ // instruction, From, Reg, From3, To, type, param
++ Optab{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0},
++ Optab{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0},
++
++ // move register
++ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 1, 0},
++ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
++
++ // load constant
++ Optab{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
++ Optab{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
++ Optab{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
++ Optab{AMOVD, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
++ Optab{AMOVW, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
++ Optab{AMOVWZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
++ Optab{AMOVB, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
++ Optab{AMOVBZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
++
++ // store constant
++ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
++ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
++ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_LOREG, 72, 0},
++ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
++ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
++ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
++ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
++ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
++
++ // store
++ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
++ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
++ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
++ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
++ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
++
++ // load
++ Optab{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVDBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVHBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
++ Optab{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVDBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVHBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
++ Optab{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
++ Optab{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
++ Optab{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
++ Optab{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
++ Optab{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
++
++ // integer arithmetic
++ Optab{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 0},
++ Optab{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
++ Optab{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
++ Optab{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
++ Optab{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 0},
++ Optab{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
++ Optab{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
++ Optab{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
++ Optab{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 0},
++ Optab{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
++ Optab{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
++ Optab{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
++ Optab{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 0},
++ Optab{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 0},
++ Optab{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 0},
++ Optab{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
++ Optab{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 0},
++ Optab{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 0},
++ Optab{AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 0},
++ Optab{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 0},
++ Optab{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 0},
++
++ // integer logical
++ Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 0},
++ Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
++ Optab{AAND, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
++ Optab{AAND, C_LCON, C_REG, C_NONE, C_REG, 23, 0},
++ Optab{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 0},
++ Optab{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
++ Optab{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
++ Optab{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 0},
++ Optab{ASLD, C_REG, C_NONE, C_NONE, C_REG, 7, 0},
++ Optab{ASLD, C_REG, C_REG, C_NONE, C_REG, 7, 0},
++ Optab{ASLD, C_SCON, C_REG, C_NONE, C_REG, 7, 0},
++ Optab{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 7, 0},
++
++ // compare and swap
++ Optab{ACSG, C_REG, C_REG, C_NONE, C_SOREG, 79, 0},
++
++ // floating point
++ Optab{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 0},
++ Optab{AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 0},
++ Optab{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 0},
++ Optab{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 0},
++ Optab{AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 0},
++ Optab{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 0},
++ Optab{AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 0},
++ Optab{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, REGSP},
++ Optab{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 0},
++ Optab{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 0},
++ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
++ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 0},
++ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 0},
++ Optab{AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 67, 0},
++ Optab{ACEFBRA, C_REG, C_NONE, C_NONE, C_FREG, 82, 0},
++ Optab{ACFEBRA, C_FREG, C_NONE, C_NONE, C_REG, 83, 0},
++
++ // load symbol address (plus offset)
++ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_REG, 19, 0},
++ Optab{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 93, 0},
++ Optab{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 94, 0},
++ Optab{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 95, 0},
++
++ // system call
++ Optab{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 0},
++ Optab{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 0},
++
++ // branch
++ Optab{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 0},
++ Optab{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 0},
++ Optab{ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 0},
++ Optab{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 0},
++ Optab{ABR, C_NONE, C_NONE, C_NONE, C_REG, 18, 0},
++ Optab{ABR, C_REG, C_NONE, C_NONE, C_REG, 18, 0},
++ Optab{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
++ Optab{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
++ Optab{ACMPBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
++ Optab{ACMPBEQ, C_REG, C_NONE, C_ADDCON, C_SBRA, 90, 0},
++ Optab{ACMPBEQ, C_REG, C_NONE, C_SCON, C_SBRA, 90, 0},
++ Optab{ACMPUBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
++ Optab{ACMPUBEQ, C_REG, C_NONE, C_ANDCON, C_SBRA, 90, 0},
++
++ // compare
++ Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
++ Optab{ACMP, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
++ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
++ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
++ Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 0},
++ Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 0},
++
++ // 32-bit access registers
++ Optab{AMOVW, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
++ Optab{AMOVWZ, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
++ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
++ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
++
++ // macros
++ Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LOREG, 96, 0},
++ Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LAUTO, 96, REGSP},
++
++ // load/store multiple
++ Optab{ASTMG, C_REG, C_REG, C_NONE, C_LOREG, 97, 0},
++ Optab{ASTMG, C_REG, C_REG, C_NONE, C_LAUTO, 97, REGSP},
++ Optab{ALMG, C_LOREG, C_REG, C_NONE, C_REG, 98, 0},
++ Optab{ALMG, C_LAUTO, C_REG, C_NONE, C_REG, 98, REGSP},
++
++ // bytes
++ Optab{ABYTE, C_SCON, C_NONE, C_NONE, C_NONE, 40, 0},
++ Optab{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 0},
++ Optab{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 0},
++ Optab{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 0},
++
++ // fast synchronization
++ Optab{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 81, 0},
++
++ // store clock
++ Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SAUTO, 88, REGSP},
++ Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SOREG, 88, 0},
++
++ // storage and storage
++ Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LOREG, 84, 0},
++ Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
++ Optab{AMVC, C_LAUTO, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
++
++ // address
++ Optab{ALARL, C_LCON, C_NONE, C_NONE, C_REG, 85, 0},
++ Optab{ALARL, C_SYMADDR, C_NONE, C_NONE, C_REG, 85, 0},
++ Optab{ALA, C_SOREG, C_NONE, C_NONE, C_REG, 86, 0},
++ Optab{ALA, C_SAUTO, C_NONE, C_NONE, C_REG, 86, REGSP},
++ Optab{AEXRL, C_SYMADDR, C_NONE, C_NONE, C_REG, 87, 0},
++
++ // misc
++ Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 0},
++ Optab{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0},
++ Optab{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0},
++ Optab{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0},
++ Optab{obj.ANOP, C_SAUTO, C_NONE, C_NONE, C_NONE, 0, 0},
++
++ // vector instructions
++
++ // VRX store
++ Optab{AVST, C_VREG, C_NONE, C_NONE, C_SOREG, 100, 0},
++ Optab{AVST, C_VREG, C_NONE, C_NONE, C_SAUTO, 100, REGSP},
++ Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SOREG, 100, 0},
++ Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 100, REGSP},
++
++ // VRX load
++ Optab{AVL, C_SOREG, C_NONE, C_NONE, C_VREG, 101, 0},
++ Optab{AVL, C_SAUTO, C_NONE, C_NONE, C_VREG, 101, REGSP},
++ Optab{AVLEG, C_SOREG, C_NONE, C_SCON, C_VREG, 101, 0},
++ Optab{AVLEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 101, REGSP},
++
++ // VRV scatter
++ Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SOREG, 102, 0},
++ Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 102, REGSP},
++
++ // VRV gather
++ Optab{AVGEG, C_SOREG, C_NONE, C_SCON, C_VREG, 103, 0},
++ Optab{AVGEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 103, REGSP},
++
++ // VRS element shift/rotate and load gr to/from vr element
++ Optab{AVESLG, C_SCON, C_VREG, C_NONE, C_VREG, 104, 0},
++ Optab{AVESLG, C_REG, C_VREG, C_NONE, C_VREG, 104, 0},
++ Optab{AVESLG, C_SCON, C_NONE, C_NONE, C_VREG, 104, 0},
++ Optab{AVESLG, C_REG, C_NONE, C_NONE, C_VREG, 104, 0},
++ Optab{AVLGVG, C_SCON, C_VREG, C_NONE, C_REG, 104, 0},
++ Optab{AVLGVG, C_REG, C_VREG, C_NONE, C_REG, 104, 0},
++ Optab{AVLVGG, C_SCON, C_REG, C_NONE, C_VREG, 104, 0},
++ Optab{AVLVGG, C_REG, C_REG, C_NONE, C_VREG, 104, 0},
++
++ // VRS store multiple
++ Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SOREG, 105, 0},
++ Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SAUTO, 105, REGSP},
++
++ // VRS load multiple
++ Optab{AVLM, C_SOREG, C_VREG, C_NONE, C_VREG, 106, 0},
++ Optab{AVLM, C_SAUTO, C_VREG, C_NONE, C_VREG, 106, REGSP},
++
++ // VRS store with length
++ Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SOREG, 107, 0},
++ Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SAUTO, 107, REGSP},
++
++ // VRS load with length
++ Optab{AVLL, C_SOREG, C_NONE, C_REG, C_VREG, 108, 0},
++ Optab{AVLL, C_SAUTO, C_NONE, C_REG, C_VREG, 108, REGSP},
++
++ // VRI-a
++ Optab{AVGBM, C_ANDCON, C_NONE, C_NONE, C_VREG, 109, 0},
++ Optab{AVZERO, C_NONE, C_NONE, C_NONE, C_VREG, 109, 0},
++ Optab{AVREPIG, C_ADDCON, C_NONE, C_NONE, C_VREG, 109, 0},
++ Optab{AVREPIG, C_SCON, C_NONE, C_NONE, C_VREG, 109, 0},
++ Optab{AVLEIG, C_ADDCON, C_NONE, C_SCON, C_VREG, 109, 0},
++ Optab{AVLEIG, C_SCON, C_NONE, C_SCON, C_VREG, 109, 0},
++
++ // VRI-b generate mask
++ Optab{AVGMG, C_SCON, C_NONE, C_SCON, C_VREG, 110, 0},
++
++ // VRI-c replicate
++ Optab{AVREPG, C_UCON, C_VREG, C_NONE, C_VREG, 111, 0},
++
++ // VRI-d element rotate and insert under mask and
++ // shift left double by byte
++ Optab{AVERIMG, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
++ Optab{AVSLDB, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
++
++ // VRI-d fp test data class immediate
++ Optab{AVFTCIDB, C_SCON, C_VREG, C_NONE, C_VREG, 113, 0},
++
++ // VRR-a load reg
++ Optab{AVLR, C_VREG, C_NONE, C_NONE, C_VREG, 114, 0},
++
++ // VRR-a compare
++ Optab{AVECG, C_VREG, C_NONE, C_NONE, C_VREG, 115, 0},
++
++ // VRR-b
++ Optab{AVCEQG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
++ Optab{AVFAEF, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
++ Optab{AVPKSG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
++
++ // VRR-c
++ Optab{AVAQ, C_VREG, C_VREG, C_NONE, C_VREG, 118, 0},
++ Optab{AVAQ, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
++ Optab{AVNOT, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
++ Optab{AVPDI, C_VREG, C_VREG, C_SCON, C_VREG, 123, 0},
++
++ // VRR-c shifts
++ Optab{AVERLLVG, C_VREG, C_VREG, C_NONE, C_VREG, 119, 0},
++ Optab{AVERLLVG, C_VREG, C_NONE, C_NONE, C_VREG, 119, 0},
++
++ // VRR-d
++ // 2 3 1 4
++ Optab{AVACQ, C_VREG, C_VREG, C_VREG, C_VREG, 120, 0},
++
++ // VRR-e
++ Optab{AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 121, 0},
++
++ // VRR-f
++ Optab{AVLVGP, C_REG, C_REG, C_NONE, C_VREG, 122, 0},
++
++ Optab{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0},
++}
++
++type Oprang struct {
++ start []Optab
++ stop []Optab
++}
++
++var oprange [ALAST & obj.AMask]Oprang
++
++var xcmp [C_NCLASS][C_NCLASS]uint8
++
++func spanz(ctxt *obj.Link, cursym *obj.LSym) {
++ p := cursym.Text
++ if p == nil || p.Link == nil { // handle external functions and ELF section symbols
++ return
++ }
++ ctxt.Cursym = cursym
++ ctxt.Autosize = int32(p.To.Offset)
++
++ if oprange[AANDN&obj.AMask].start == nil {
++ buildop(ctxt)
++ }
++
++ buffer := make([]byte, 0)
++ changed := true
++ loop := 0
++ for changed {
++ if loop > 10 {
++ ctxt.Diag("stuck in spanz loop")
++ break
++ }
++ changed = false
++ buffer = buffer[:0]
++ ctxt.Cursym.R = make([]obj.Reloc, 0)
++ for p := cursym.Text; p != nil; p = p.Link {
++ pc := int64(len(buffer))
++ if pc != p.Pc {
++ changed = true
++ }
++ p.Pc = pc
++ ctxt.Pc = p.Pc
++ ctxt.Curp = p
++ asmout(ctxt, &buffer)
++ if pc == int64(len(buffer)) {
++ switch p.As {
++ case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT:
++ // ok
++ default:
++ ctxt.Diag("zero-width instruction\n%v", p)
++ }
++ }
++ }
++ loop++
++ }
++
++ cursym.Size = int64(len(buffer))
++ if cursym.Size%FuncAlign != 0 {
++ cursym.Size += FuncAlign - (cursym.Size % FuncAlign)
++ }
++ obj.Symgrow(ctxt, cursym, cursym.Size)
++ copy(cursym.P, buffer)
++}
++
++func isint32(v int64) bool {
++ return int64(int32(v)) == v
++}
++
++func isuint32(v uint64) bool {
++ return uint64(uint32(v)) == v
++}
++
++func aclass(ctxt *obj.Link, a *obj.Addr) int {
++ switch a.Type {
++ case obj.TYPE_NONE:
++ return C_NONE
++
++ case obj.TYPE_REG:
++ if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
++ return C_REG
++ }
++ if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
++ return C_FREG
++ }
++ if REG_AR0 <= a.Reg && a.Reg <= REG_AR15 {
++ return C_AREG
++ }
++ if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
++ return C_VREG
++ }
++ return C_GOK
++
++ case obj.TYPE_MEM:
++ switch a.Name {
++ case obj.NAME_EXTERN,
++ obj.NAME_STATIC:
++ if a.Sym == nil {
++ // must have a symbol
++ break
++ }
++ ctxt.Instoffset = a.Offset
++ if a.Sym.Type == obj.STLSBSS {
++ if ctxt.Flag_shared != 0 {
++ return C_TLS_IE // initial exec model
++ }
++ return C_TLS_LE // local exec model
++ }
++ return C_ADDR
++
++ case obj.NAME_GOTREF:
++ return C_GOTADDR
++
++ case obj.NAME_AUTO:
++ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
++ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
++ return C_SAUTO
++ }
++ return C_LAUTO
++
++ case obj.NAME_PARAM:
++ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
++ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
++ return C_SAUTO
++ }
++ return C_LAUTO
++
++ case obj.NAME_NONE:
++ ctxt.Instoffset = a.Offset
++ if ctxt.Instoffset == 0 {
++ return C_ZOREG
++ }
++ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
++ return C_SOREG
++ }
++ return C_LOREG
++ }
++
++ return C_GOK
++
++ case obj.TYPE_TEXTSIZE:
++ return C_TEXTSIZE
++
++ case obj.TYPE_FCONST:
++ if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 {
++ return C_ZCON
++ }
++ ctxt.Diag("cannot handle the floating point constant %v", a.Val)
++
++ case obj.TYPE_CONST,
++ obj.TYPE_ADDR:
++ switch a.Name {
++ case obj.TYPE_NONE:
++ ctxt.Instoffset = a.Offset
++ if a.Reg != 0 {
++ if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
++ return C_SACON
++ }
++ if isint32(ctxt.Instoffset) {
++ return C_LACON
++ }
++ return C_DACON
++ }
++ goto consize
++
++ case obj.NAME_EXTERN,
++ obj.NAME_STATIC:
++ s := a.Sym
++ if s == nil {
++ break
++ }
++ ctxt.Instoffset = s.Value + a.Offset
++ if s.Type == obj.SCONST {
++ goto consize
++ }
++
++ return C_SYMADDR
++
++ case obj.NAME_AUTO:
++ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
++ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
++ return C_SACON
++ }
++ return C_LACON
++
++ case obj.NAME_PARAM:
++ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
++ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
++ return C_SACON
++ }
++ return C_LACON
++ }
++
++ return C_GOK
++
++ consize:
++ if ctxt.Instoffset == 0 {
++ return C_ZCON
++ }
++ if ctxt.Instoffset >= 0 {
++ if ctxt.Instoffset <= 0x7fff {
++ return C_SCON
++ }
++ if ctxt.Instoffset <= 0xffff {
++ return C_ANDCON
++ }
++ if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
++ return C_UCON
++ }
++ if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
++ return C_LCON
++ }
++ return C_DCON
++ }
++
++ if ctxt.Instoffset >= -0x8000 {
++ return C_ADDCON
++ }
++ if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
++ return C_UCON
++ }
++ if isint32(ctxt.Instoffset) {
++ return C_LCON
++ }
++ return C_DCON
++
++ case obj.TYPE_BRANCH:
++ return C_SBRA
++ }
++
++ return C_GOK
++}
++
++func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
++ a1 := int(p.Optab)
++ if a1 != 0 {
++ return &optab[a1-1:][0]
++ }
++ a1 = int(p.From.Class)
++ if a1 == 0 {
++ a1 = aclass(ctxt, &p.From) + 1
++ p.From.Class = int8(a1)
++ }
++
++ a1--
++ a3 := C_NONE + 1
++ if p.From3 != nil {
++ a3 = int(p.From3.Class)
++ if a3 == 0 {
++ a3 = aclass(ctxt, p.From3) + 1
++ p.From3.Class = int8(a3)
++ }
++ }
++
++ a3--
++ a4 := int(p.To.Class)
++ if a4 == 0 {
++ a4 = aclass(ctxt, &p.To) + 1
++ p.To.Class = int8(a4)
++ }
++
++ a4--
++ a2 := C_NONE
++ if p.Reg != 0 {
++ if REG_R0 <= p.Reg && p.Reg <= REG_R15 {
++ a2 = C_REG
++ } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
++ a2 = C_VREG
++ } else if REG_F0 <= p.Reg && p.Reg <= REG_F15 {
++ a2 = C_FREG
++ } else if REG_AR0 <= p.Reg && p.Reg <= REG_AR15 {
++ a2 = C_AREG
++ }
++ }
++
++ r0 := p.As & obj.AMask
++
++ o := oprange[r0].start
++ if o == nil {
++ o = oprange[r0].stop /* just generate an error */
++ }
++
++ e := oprange[r0].stop
++ c1 := xcmp[a1][:]
++ c3 := xcmp[a3][:]
++ c4 := xcmp[a4][:]
++ for ; -cap(o) < -cap(e); o = o[1:] {
++ if int(o[0].a2) == a2 {
++ if c1[o[0].a1] != 0 {
++ if c3[o[0].a3] != 0 {
++ if c4[o[0].a4] != 0 {
++ p.Optab = uint16((-cap(o) + cap(optab)) + 1)
++ return &o[0]
++ }
++ }
++ }
++ }
++ }
++
++ // cannot find a case; abort
++ ctxt.Diag("illegal combination %v %v %v %v %v\n", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
++ ctxt.Diag("prog: %v\n", p)
++ return nil
++}
++
++func cmp(a int, b int) bool {
++ if a == b {
++ return true
++ }
++ switch a {
++ case C_DCON:
++ if b == C_LCON {
++ return true
++ }
++ fallthrough
++ case C_LCON:
++ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
++ return true
++ }
++
++ case C_ADDCON:
++ if b == C_ZCON || b == C_SCON {
++ return true
++ }
++
++ case C_ANDCON:
++ if b == C_ZCON || b == C_SCON {
++ return true
++ }
++
++ case C_UCON:
++ if b == C_ZCON || b == C_SCON {
++ return true
++ }
++
++ case C_SCON:
++ if b == C_ZCON {
++ return true
++ }
++
++ case C_LACON:
++ if b == C_SACON {
++ return true
++ }
++
++ case C_LBRA:
++ if b == C_SBRA {
++ return true
++ }
++
++ case C_LAUTO:
++ if b == C_SAUTO {
++ return true
++ }
++
++ case C_LOREG:
++ if b == C_ZOREG || b == C_SOREG {
++ return true
++ }
++
++ case C_SOREG:
++ if b == C_ZOREG {
++ return true
++ }
++
++ case C_ANY:
++ return true
++ }
++
++ return false
++}
++
++type ocmp []Optab
++
++func (x ocmp) Len() int {
++ return len(x)
++}
++
++func (x ocmp) Swap(i, j int) {
++ x[i], x[j] = x[j], x[i]
++}
++
++func (x ocmp) Less(i, j int) bool {
++ p1 := &x[i]
++ p2 := &x[j]
++ n := int(p1.as) - int(p2.as)
++ if n != 0 {
++ return n < 0
++ }
++ n = int(p1.a1) - int(p2.a1)
++ if n != 0 {
++ return n < 0
++ }
++ n = int(p1.a2) - int(p2.a2)
++ if n != 0 {
++ return n < 0
++ }
++ n = int(p1.a3) - int(p2.a3)
++ if n != 0 {
++ return n < 0
++ }
++ n = int(p1.a4) - int(p2.a4)
++ if n != 0 {
++ return n < 0
++ }
++ return false
++}
++func opset(a, b0 int16) {
++ oprange[a&obj.AMask] = oprange[b0]
++}
++
++func buildop(ctxt *obj.Link) {
++ var n int
++
++ for i := 0; i < C_NCLASS; i++ {
++ for n = 0; n < C_NCLASS; n++ {
++ if cmp(n, i) {
++ xcmp[i][n] = 1
++ }
++ }
++ }
++ for n = 0; optab[n].as != obj.AXXX; n++ {
++ }
++ sort.Sort(ocmp(optab[:n]))
++ for i := 0; i < n; i++ {
++ r := optab[i].as
++ r0 := r & obj.AMask
++ oprange[r0].start = optab[i:]
++ for optab[i].as == r {
++ i++
++ }
++ oprange[r0].stop = optab[i:]
++ i--
++
++ // opset() aliases optab ranges for similar instructions, to reduce the number of optabs in the array.
++ // oprange[] is used by oplook() to find the Optab entry that applies to a given Prog.
++ switch r {
++ default:
++ ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
++ log.Fatalf("bad code")
++
++ case ADIVW: /* op Rb[,Ra],Rd */
++ opset(AADDE, r0)
++ opset(AMULLD, r0)
++ opset(AMULHDU, r0)
++ opset(ADIVD, r0)
++ opset(ADIVDU, r0)
++ opset(ADIVWU, r0)
++
++ case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
++ opset(AMOVH, r0)
++ opset(AMOVHZ, r0)
++
++ case ALA:
++ opset(ALAY, r0)
++
++ case ALARL:
++
++ case AMVC:
++ opset(ACLC, r0)
++ opset(AXC, r0)
++ opset(AOC, r0)
++ opset(ANC, r0)
++
++ case AEXRL:
++
++ case ASTCK:
++ opset(ASTCKC, r0)
++ opset(ASTCKE, r0)
++ opset(ASTCKF, r0)
++
++ case ACLEAR:
++
++ case ASTMG:
++ opset(ASTMY, r0)
++
++ case ALMG:
++ opset(ALMY, r0)
++
++ case AAND: /* logical op Rb,Rs,Ra; no literal */
++ opset(AANDN, r0)
++ opset(ANAND, r0)
++ opset(ANOR, r0)
++ opset(AORN, r0)
++
++ case AADDME: /* op Ra, Rd */
++ opset(AADDZE, r0)
++ opset(ASUBME, r0)
++ opset(ASUBZE, r0)
++
++ case AADDC:
++
++ case ABEQ:
++ opset(ABGE, r0)
++ opset(ABGT, r0)
++ opset(ABLE, r0)
++ opset(ABLT, r0)
++ opset(ABNE, r0)
++ opset(ABVC, r0)
++ opset(ABVS, r0)
++
++ case ABR:
++ opset(ABL, r0)
++
++ case ABC:
++ opset(ABCL, r0)
++
++ case AFABS: /* fop [s,]d */
++ opset(AFNABS, r0)
++ opset(AFNEG, r0)
++ opset(ALEDBR, r0)
++ opset(ALDEBR, r0)
++ opset(AFSQRT, r0)
++ opset(AFSQRTS, r0)
++
++ case AFADD:
++ opset(AFADDS, r0)
++ opset(AFDIV, r0)
++ opset(AFDIVS, r0)
++ opset(AFSUB, r0)
++ opset(AFSUBS, r0)
++
++ case AFMADD:
++ opset(AFMADDS, r0)
++ opset(AFMSUB, r0)
++ opset(AFMSUBS, r0)
++ opset(AFNMADD, r0)
++ opset(AFNMADDS, r0)
++ opset(AFNMSUB, r0)
++ opset(AFNMSUBS, r0)
++
++ case AFMUL:
++ opset(AFMULS, r0)
++
++ case AFCMPO:
++ opset(AFCMPU, r0)
++ opset(ACEBR, r0)
++
++ case ANEG: /* op [Ra,] Rd */
++
++ case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */
++ opset(AXOR, r0)
++
++ case ASLD:
++ opset(ASRD, r0)
++ opset(ASLW, r0)
++ opset(ASRW, r0)
++ opset(ASRAD, r0)
++ opset(ASRAW, r0)
++ opset(ARLL, r0)
++ opset(ARLLG, r0)
++
++ case ACSG:
++ opset(ACS, r0)
++
++ case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
++ opset(ASUBC, r0)
++ opset(ASUBE, r0)
++
++ case ASYNC:
++
++ case AFMOVD:
++ opset(AFMOVS, r0)
++
++ case ASYSCALL: /* just the op; flow of control */
++
++ case AMOVDBR:
++ opset(AMOVWBR, r0)
++
++ case AMOVHBR: // no reg-reg moves
++
++ case ACMP:
++ opset(ACMPW, r0)
++
++ case ACMPU:
++ opset(ACMPWU, r0)
++
++ case ACEFBRA:
++ opset(ACDFBRA, r0)
++ opset(ACEGBRA, r0)
++ opset(ACDGBRA, r0)
++ opset(ACELFBR, r0)
++ opset(ACDLFBR, r0)
++ opset(ACELGBR, r0)
++ opset(ACDLGBR, r0)
++
++ case ACFEBRA:
++ opset(ACFDBRA, r0)
++ opset(ACGEBRA, r0)
++ opset(ACGDBRA, r0)
++ opset(ACLFEBR, r0)
++ opset(ACLFDBR, r0)
++ opset(ACLGEBR, r0)
++ opset(ACLGDBR, r0)
++
++ case ACMPBEQ:
++ opset(ACMPBGE, r0)
++ opset(ACMPBGT, r0)
++ opset(ACMPBLE, r0)
++ opset(ACMPBLT, r0)
++ opset(ACMPBNE, r0)
++
++ case ACMPUBEQ:
++ opset(ACMPUBGE, r0)
++ opset(ACMPUBGT, r0)
++ opset(ACMPUBLE, r0)
++ opset(ACMPUBLT, r0)
++ opset(ACMPUBNE, r0)
++
++ case AVL:
++ opset(AVLLEZB, r0)
++ opset(AVLLEZH, r0)
++ opset(AVLLEZF, r0)
++ opset(AVLLEZG, r0)
++ opset(AVLREPB, r0)
++ opset(AVLREPH, r0)
++ opset(AVLREPF, r0)
++ opset(AVLREPG, r0)
++
++ case AVST:
++
++ case AVLEG:
++ opset(AVLBB, r0)
++ opset(AVLEB, r0)
++ opset(AVLEH, r0)
++ opset(AVLEF, r0)
++ opset(AVLEG, r0)
++ opset(AVLREP, r0)
++
++ case AVSTEG:
++ opset(AVSTEB, r0)
++ opset(AVSTEH, r0)
++ opset(AVSTEF, r0)
++
++ case AVSCEG:
++ opset(AVSCEF, r0)
++
++ case AVGEG:
++ opset(AVGEF, r0)
++
++ case AVESLG:
++ opset(AVESLB, r0)
++ opset(AVESLH, r0)
++ opset(AVESLF, r0)
++ opset(AVERLLB, r0)
++ opset(AVERLLH, r0)
++ opset(AVERLLF, r0)
++ opset(AVERLLG, r0)
++ opset(AVESRAB, r0)
++ opset(AVESRAH, r0)
++ opset(AVESRAF, r0)
++ opset(AVESRAG, r0)
++ opset(AVESRLB, r0)
++ opset(AVESRLH, r0)
++ opset(AVESRLF, r0)
++ opset(AVESRLG, r0)
++
++ case AVLGVG:
++ opset(AVLGVB, r0)
++ opset(AVLGVH, r0)
++ opset(AVLGVF, r0)
++
++ case AVLVGG:
++ opset(AVLVGB, r0)
++ opset(AVLVGH, r0)
++ opset(AVLVGF, r0)
++
++ case AVLL:
++
++ case AVSTL:
++
++ case AVLM:
++
++ case AVSTM:
++
++ case AVGBM:
++
++ case AVZERO:
++ opset(AVONE, r0)
++
++ case AVREPIG:
++ opset(AVREPIB, r0)
++ opset(AVREPIH, r0)
++ opset(AVREPIF, r0)
++
++ case AVLEIG:
++ opset(AVLEIB, r0)
++ opset(AVLEIH, r0)
++ opset(AVLEIF, r0)
++
++ case AVGMG:
++ opset(AVGMB, r0)
++ opset(AVGMH, r0)
++ opset(AVGMF, r0)
++
++ case AVREPG:
++ opset(AVREPB, r0)
++ opset(AVREPH, r0)
++ opset(AVREPF, r0)
++
++ case AVERIMG:
++ opset(AVERIMB, r0)
++ opset(AVERIMH, r0)
++ opset(AVERIMF, r0)
++
++ case AVSLDB:
++
++ case AVFTCIDB:
++ opset(AWFTCIDB, r0)
++
++ case AVLR:
++ opset(AVUPHB, r0)
++ opset(AVUPHH, r0)
++ opset(AVUPHF, r0)
++ opset(AVUPLHB, r0)
++ opset(AVUPLHH, r0)
++ opset(AVUPLHF, r0)
++ opset(AVUPLB, r0)
++ opset(AVUPLHW, r0)
++ opset(AVUPLF, r0)
++ opset(AVUPLLB, r0)
++ opset(AVUPLLH, r0)
++ opset(AVUPLLF, r0)
++ opset(AVCLZB, r0)
++ opset(AVCLZH, r0)
++ opset(AVCLZF, r0)
++ opset(AVCLZG, r0)
++ opset(AVCTZB, r0)
++ opset(AVCTZH, r0)
++ opset(AVCTZF, r0)
++ opset(AVCTZG, r0)
++ opset(AVLDEB, r0)
++ opset(AWLDEB, r0)
++ opset(AVFLCDB, r0)
++ opset(AWFLCDB, r0)
++ opset(AVFLNDB, r0)
++ opset(AWFLNDB, r0)
++ opset(AVFLPDB, r0)
++ opset(AWFLPDB, r0)
++ opset(AVFSQDB, r0)
++ opset(AWFSQDB, r0)
++ opset(AVISTRB, r0)
++ opset(AVISTRH, r0)
++ opset(AVISTRF, r0)
++ opset(AVISTRBS, r0)
++ opset(AVISTRHS, r0)
++ opset(AVISTRFS, r0)
++ opset(AVLCB, r0)
++ opset(AVLCH, r0)
++ opset(AVLCF, r0)
++ opset(AVLCG, r0)
++ opset(AVLPB, r0)
++ opset(AVLPH, r0)
++ opset(AVLPF, r0)
++ opset(AVLPG, r0)
++ opset(AVPOPCT, r0)
++ opset(AVSEGB, r0)
++ opset(AVSEGH, r0)
++ opset(AVSEGF, r0)
++
++ case AVECG:
++ opset(AVECB, r0)
++ opset(AVECH, r0)
++ opset(AVECF, r0)
++ opset(AVECLB, r0)
++ opset(AVECLH, r0)
++ opset(AVECLF, r0)
++ opset(AVECLG, r0)
++ opset(AWFCDB, r0)
++ opset(AWFKDB, r0)
++
++ case AVCEQG:
++ opset(AVCEQB, r0)
++ opset(AVCEQH, r0)
++ opset(AVCEQF, r0)
++ opset(AVCEQBS, r0)
++ opset(AVCEQHS, r0)
++ opset(AVCEQFS, r0)
++ opset(AVCEQGS, r0)
++ opset(AVCHB, r0)
++ opset(AVCHH, r0)
++ opset(AVCHF, r0)
++ opset(AVCHG, r0)
++ opset(AVCHBS, r0)
++ opset(AVCHHS, r0)
++ opset(AVCHFS, r0)
++ opset(AVCHGS, r0)
++ opset(AVCHLB, r0)
++ opset(AVCHLH, r0)
++ opset(AVCHLF, r0)
++ opset(AVCHLG, r0)
++ opset(AVCHLBS, r0)
++ opset(AVCHLHS, r0)
++ opset(AVCHLFS, r0)
++ opset(AVCHLGS, r0)
++
++ case AVFAEF:
++ opset(AVFAEB, r0)
++ opset(AVFAEH, r0)
++ opset(AVFAEBS, r0)
++ opset(AVFAEHS, r0)
++ opset(AVFAEFS, r0)
++ opset(AVFAEZB, r0)
++ opset(AVFAEZH, r0)
++ opset(AVFAEZF, r0)
++ opset(AVFAEZBS, r0)
++ opset(AVFAEZHS, r0)
++ opset(AVFAEZFS, r0)
++ opset(AVFEEB, r0)
++ opset(AVFEEH, r0)
++ opset(AVFEEF, r0)
++ opset(AVFEEBS, r0)
++ opset(AVFEEHS, r0)
++ opset(AVFEEFS, r0)
++ opset(AVFEEZB, r0)
++ opset(AVFEEZH, r0)
++ opset(AVFEEZF, r0)
++ opset(AVFEEZBS, r0)
++ opset(AVFEEZHS, r0)
++ opset(AVFEEZFS, r0)
++ opset(AVFENEB, r0)
++ opset(AVFENEH, r0)
++ opset(AVFENEF, r0)
++ opset(AVFENEBS, r0)
++ opset(AVFENEHS, r0)
++ opset(AVFENEFS, r0)
++ opset(AVFENEZB, r0)
++ opset(AVFENEZH, r0)
++ opset(AVFENEZF, r0)
++ opset(AVFENEZBS, r0)
++ opset(AVFENEZHS, r0)
++ opset(AVFENEZFS, r0)
++
++ case AVPKSG:
++ opset(AVPKSH, r0)
++ opset(AVPKSF, r0)
++ opset(AVPKSHS, r0)
++ opset(AVPKSFS, r0)
++ opset(AVPKSGS, r0)
++ opset(AVPKLSH, r0)
++ opset(AVPKLSF, r0)
++ opset(AVPKLSG, r0)
++ opset(AVPKLSHS, r0)
++ opset(AVPKLSFS, r0)
++ opset(AVPKLSGS, r0)
++
++ case AVAQ:
++ opset(AVAB, r0)
++ opset(AVAH, r0)
++ opset(AVAF, r0)
++ opset(AVAG, r0)
++ opset(AVACCB, r0)
++ opset(AVACCH, r0)
++ opset(AVACCF, r0)
++ opset(AVACCG, r0)
++ opset(AVACCQ, r0)
++ opset(AVN, r0)
++ opset(AVNC, r0)
++ opset(AVAVGB, r0)
++ opset(AVAVGH, r0)
++ opset(AVAVGF, r0)
++ opset(AVAVGG, r0)
++ opset(AVAVGLB, r0)
++ opset(AVAVGLH, r0)
++ opset(AVAVGLF, r0)
++ opset(AVAVGLG, r0)
++ opset(AVCKSM, r0)
++ opset(AVX, r0)
++ opset(AVFADB, r0)
++ opset(AWFADB, r0)
++ opset(AVFCEDB, r0)
++ opset(AVFCEDBS, r0)
++ opset(AWFCEDB, r0)
++ opset(AWFCEDBS, r0)
++ opset(AVFCHDB, r0)
++ opset(AVFCHDBS, r0)
++ opset(AWFCHDB, r0)
++ opset(AWFCHDBS, r0)
++ opset(AVFCHEDB, r0)
++ opset(AVFCHEDBS, r0)
++ opset(AWFCHEDB, r0)
++ opset(AWFCHEDBS, r0)
++ opset(AVFMDB, r0)
++ opset(AWFMDB, r0)
++ opset(AVGFMB, r0)
++ opset(AVGFMH, r0)
++ opset(AVGFMF, r0)
++ opset(AVGFMG, r0)
++ opset(AVMXB, r0)
++ opset(AVMXH, r0)
++ opset(AVMXF, r0)
++ opset(AVMXG, r0)
++ opset(AVMXLB, r0)
++ opset(AVMXLH, r0)
++ opset(AVMXLF, r0)
++ opset(AVMXLG, r0)
++ opset(AVMNB, r0)
++ opset(AVMNH, r0)
++ opset(AVMNF, r0)
++ opset(AVMNG, r0)
++ opset(AVMNLB, r0)
++ opset(AVMNLH, r0)
++ opset(AVMNLF, r0)
++ opset(AVMNLG, r0)
++ opset(AVMRHB, r0)
++ opset(AVMRHH, r0)
++ opset(AVMRHF, r0)
++ opset(AVMRHG, r0)
++ opset(AVMRLB, r0)
++ opset(AVMRLH, r0)
++ opset(AVMRLF, r0)
++ opset(AVMRLG, r0)
++ opset(AVMEB, r0)
++ opset(AVMEH, r0)
++ opset(AVMEF, r0)
++ opset(AVMLEB, r0)
++ opset(AVMLEH, r0)
++ opset(AVMLEF, r0)
++ opset(AVMOB, r0)
++ opset(AVMOH, r0)
++ opset(AVMOF, r0)
++ opset(AVMLOB, r0)
++ opset(AVMLOH, r0)
++ opset(AVMLOF, r0)
++ opset(AVMHB, r0)
++ opset(AVMHH, r0)
++ opset(AVMHF, r0)
++ opset(AVMLHB, r0)
++ opset(AVMLHH, r0)
++ opset(AVMLHF, r0)
++ opset(AVMLH, r0)
++ opset(AVMLHW, r0)
++ opset(AVMLF, r0)
++ opset(AVNO, r0)
++ opset(AVO, r0)
++ opset(AVPKH, r0)
++ opset(AVPKF, r0)
++ opset(AVPKG, r0)
++ opset(AVSUMGH, r0)
++ opset(AVSUMGF, r0)
++ opset(AVSUMQF, r0)
++ opset(AVSUMQG, r0)
++ opset(AVSUMB, r0)
++ opset(AVSUMH, r0)
++
++ case AVNOT:
++
++ case AVERLLVG:
++ opset(AVERLLVB, r0)
++ opset(AVERLLVH, r0)
++ opset(AVERLLVF, r0)
++ opset(AVESLVB, r0)
++ opset(AVESLVH, r0)
++ opset(AVESLVF, r0)
++ opset(AVESLVG, r0)
++ opset(AVESRAVB, r0)
++ opset(AVESRAVH, r0)
++ opset(AVESRAVF, r0)
++ opset(AVESRAVG, r0)
++ opset(AVESRLVB, r0)
++ opset(AVESRLVH, r0)
++ opset(AVESRLVF, r0)
++ opset(AVESRLVG, r0)
++ opset(AVFDDB, r0)
++ opset(AWFDDB, r0)
++ opset(AVFSDB, r0)
++ opset(AWFSDB, r0)
++ opset(AVSL, r0)
++ opset(AVSLB, r0)
++ opset(AVSRA, r0)
++ opset(AVSRAB, r0)
++ opset(AVSRL, r0)
++ opset(AVSRLB, r0)
++ opset(AVSF, r0)
++ opset(AVSG, r0)
++ opset(AVSQ, r0)
++ opset(AVSCBIB, r0)
++ opset(AVSCBIH, r0)
++ opset(AVSCBIF, r0)
++ opset(AVSCBIG, r0)
++ opset(AVSCBIQ, r0)
++
++ case AVACQ:
++ opset(AVACCCQ, r0)
++ opset(AVGFMAB, r0)
++ opset(AVGFMAH, r0)
++ opset(AVGFMAF, r0)
++ opset(AVGFMAG, r0)
++ opset(AVMALB, r0)
++ opset(AVMALHW, r0)
++ opset(AVMALF, r0)
++ opset(AVMAHB, r0)
++ opset(AVMAHH, r0)
++ opset(AVMAHF, r0)
++ opset(AVMALHB, r0)
++ opset(AVMALHH, r0)
++ opset(AVMALHF, r0)
++ opset(AVMAEB, r0)
++ opset(AVMAEH, r0)
++ opset(AVMAEF, r0)
++ opset(AVMALEB, r0)
++ opset(AVMALEH, r0)
++ opset(AVMALEF, r0)
++ opset(AVMAOB, r0)
++ opset(AVMAOH, r0)
++ opset(AVMAOF, r0)
++ opset(AVMALOB, r0)
++ opset(AVMALOH, r0)
++ opset(AVMALOF, r0)
++ opset(AVSTRCB, r0)
++ opset(AVSTRCH, r0)
++ opset(AVSTRCF, r0)
++ opset(AVSTRCBS, r0)
++ opset(AVSTRCHS, r0)
++ opset(AVSTRCFS, r0)
++ opset(AVSTRCZB, r0)
++ opset(AVSTRCZH, r0)
++ opset(AVSTRCZF, r0)
++ opset(AVSTRCZBS, r0)
++ opset(AVSTRCZHS, r0)
++ opset(AVSTRCZFS, r0)
++ opset(AVSBCBIQ, r0)
++ opset(AVSBIQ, r0)
++
++ case AVSEL:
++ opset(AVFMADB, r0)
++ opset(AWFMADB, r0)
++ opset(AVFMSDB, r0)
++ opset(AWFMSDB, r0)
++ opset(AVPERM, r0)
++
++ case AVLVGP:
++
++ case AVPDI:
++
++ case AADD,
++ AMOVW,
++ /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
++ AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
++ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
++ AMOVB, /* macro: move byte with sign extension */
++ AMULLW,
++ /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
++ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
++ ABYTE,
++ AWORD,
++ ADWORD,
++ obj.ANOP,
++ obj.ATEXT,
++ obj.AUNDEF,
++ obj.AFUNCDATA,
++ obj.APCDATA:
++ break
++ }
++ }
++}
++
++const (
++ op_A uint32 = 0x5A00 // FORMAT_RX1 ADD (32)
++ op_AD uint32 = 0x6A00 // FORMAT_RX1 ADD NORMALIZED (long HFP)
++ op_ADB uint32 = 0xED1A // FORMAT_RXE ADD (long BFP)
++ op_ADBR uint32 = 0xB31A // FORMAT_RRE ADD (long BFP)
++ op_ADR uint32 = 0x2A00 // FORMAT_RR ADD NORMALIZED (long HFP)
++ op_ADTR uint32 = 0xB3D2 // FORMAT_RRF1 ADD (long DFP)
++ op_ADTRA uint32 = 0xB3D2 // FORMAT_RRF1 ADD (long DFP)
++ op_AE uint32 = 0x7A00 // FORMAT_RX1 ADD NORMALIZED (short HFP)
++ op_AEB uint32 = 0xED0A // FORMAT_RXE ADD (short BFP)
++ op_AEBR uint32 = 0xB30A // FORMAT_RRE ADD (short BFP)
++ op_AER uint32 = 0x3A00 // FORMAT_RR ADD NORMALIZED (short HFP)
++ op_AFI uint32 = 0xC209 // FORMAT_RIL1 ADD IMMEDIATE (32)
++ op_AG uint32 = 0xE308 // FORMAT_RXY1 ADD (64)
++ op_AGF uint32 = 0xE318 // FORMAT_RXY1 ADD (64<-32)
++ op_AGFI uint32 = 0xC208 // FORMAT_RIL1 ADD IMMEDIATE (64<-32)
++ op_AGFR uint32 = 0xB918 // FORMAT_RRE ADD (64<-32)
++ op_AGHI uint32 = 0xA70B // FORMAT_RI1 ADD HALFWORD IMMEDIATE (64)
++ op_AGHIK uint32 = 0xECD9 // FORMAT_RIE4 ADD IMMEDIATE (64<-16)
++ op_AGR uint32 = 0xB908 // FORMAT_RRE ADD (64)
++ op_AGRK uint32 = 0xB9E8 // FORMAT_RRF1 ADD (64)
++ op_AGSI uint32 = 0xEB7A // FORMAT_SIY ADD IMMEDIATE (64<-8)
++ op_AH uint32 = 0x4A00 // FORMAT_RX1 ADD HALFWORD
++ op_AHHHR uint32 = 0xB9C8 // FORMAT_RRF1 ADD HIGH (32)
++ op_AHHLR uint32 = 0xB9D8 // FORMAT_RRF1 ADD HIGH (32)
++ op_AHI uint32 = 0xA70A // FORMAT_RI1 ADD HALFWORD IMMEDIATE (32)
++ op_AHIK uint32 = 0xECD8 // FORMAT_RIE4 ADD IMMEDIATE (32<-16)
++ op_AHY uint32 = 0xE37A // FORMAT_RXY1 ADD HALFWORD
++ op_AIH uint32 = 0xCC08 // FORMAT_RIL1 ADD IMMEDIATE HIGH (32)
++ op_AL uint32 = 0x5E00 // FORMAT_RX1 ADD LOGICAL (32)
++ op_ALC uint32 = 0xE398 // FORMAT_RXY1 ADD LOGICAL WITH CARRY (32)
++ op_ALCG uint32 = 0xE388 // FORMAT_RXY1 ADD LOGICAL WITH CARRY (64)
++ op_ALCGR uint32 = 0xB988 // FORMAT_RRE ADD LOGICAL WITH CARRY (64)
++ op_ALCR uint32 = 0xB998 // FORMAT_RRE ADD LOGICAL WITH CARRY (32)
++ op_ALFI uint32 = 0xC20B // FORMAT_RIL1 ADD LOGICAL IMMEDIATE (32)
++ op_ALG uint32 = 0xE30A // FORMAT_RXY1 ADD LOGICAL (64)
++ op_ALGF uint32 = 0xE31A // FORMAT_RXY1 ADD LOGICAL (64<-32)
++ op_ALGFI uint32 = 0xC20A // FORMAT_RIL1 ADD LOGICAL IMMEDIATE (64<-32)
++ op_ALGFR uint32 = 0xB91A // FORMAT_RRE ADD LOGICAL (64<-32)
++ op_ALGHSIK uint32 = 0xECDB // FORMAT_RIE4 ADD LOGICAL WITH SIGNED IMMEDIATE (64<-16)
++ op_ALGR uint32 = 0xB90A // FORMAT_RRE ADD LOGICAL (64)
++ op_ALGRK uint32 = 0xB9EA // FORMAT_RRF1 ADD LOGICAL (64)
++ op_ALGSI uint32 = 0xEB7E // FORMAT_SIY ADD LOGICAL WITH SIGNED IMMEDIATE (64<-8)
++ op_ALHHHR uint32 = 0xB9CA // FORMAT_RRF1 ADD LOGICAL HIGH (32)
++ op_ALHHLR uint32 = 0xB9DA // FORMAT_RRF1 ADD LOGICAL HIGH (32)
++ op_ALHSIK uint32 = 0xECDA // FORMAT_RIE4 ADD LOGICAL WITH SIGNED IMMEDIATE (32<-16)
++ op_ALR uint32 = 0x1E00 // FORMAT_RR ADD LOGICAL (32)
++ op_ALRK uint32 = 0xB9FA // FORMAT_RRF1 ADD LOGICAL (32)
++ op_ALSI uint32 = 0xEB6E // FORMAT_SIY ADD LOGICAL WITH SIGNED IMMEDIATE (32<-8)
++ op_ALSIH uint32 = 0xCC0A // FORMAT_RIL1 ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
++ op_ALSIHN uint32 = 0xCC0B // FORMAT_RIL1 ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
++ op_ALY uint32 = 0xE35E // FORMAT_RXY1 ADD LOGICAL (32)
++ op_AP uint32 = 0xFA00 // FORMAT_SS2 ADD DECIMAL
++ op_AR uint32 = 0x1A00 // FORMAT_RR ADD (32)
++ op_ARK uint32 = 0xB9F8 // FORMAT_RRF1 ADD (32)
++ op_ASI uint32 = 0xEB6A // FORMAT_SIY ADD IMMEDIATE (32<-8)
++ op_AU uint32 = 0x7E00 // FORMAT_RX1 ADD UNNORMALIZED (short HFP)
++ op_AUR uint32 = 0x3E00 // FORMAT_RR ADD UNNORMALIZED (short HFP)
++ op_AW uint32 = 0x6E00 // FORMAT_RX1 ADD UNNORMALIZED (long HFP)
++ op_AWR uint32 = 0x2E00 // FORMAT_RR ADD UNNORMALIZED (long HFP)
++ op_AXBR uint32 = 0xB34A // FORMAT_RRE ADD (extended BFP)
++ op_AXR uint32 = 0x3600 // FORMAT_RR ADD NORMALIZED (extended HFP)
++ op_AXTR uint32 = 0xB3DA // FORMAT_RRF1 ADD (extended DFP)
++ op_AXTRA uint32 = 0xB3DA // FORMAT_RRF1 ADD (extended DFP)
++ op_AY uint32 = 0xE35A // FORMAT_RXY1 ADD (32)
++ op_BAKR uint32 = 0xB240 // FORMAT_RRE BRANCH AND STACK
++ op_BAL uint32 = 0x4500 // FORMAT_RX1 BRANCH AND LINK
++ op_BALR uint32 = 0x0500 // FORMAT_RR BRANCH AND LINK
++ op_BAS uint32 = 0x4D00 // FORMAT_RX1 BRANCH AND SAVE
++ op_BASR uint32 = 0x0D00 // FORMAT_RR BRANCH AND SAVE
++ op_BASSM uint32 = 0x0C00 // FORMAT_RR BRANCH AND SAVE AND SET MODE
++ op_BC uint32 = 0x4700 // FORMAT_RX2 BRANCH ON CONDITION
++ op_BCR uint32 = 0x0700 // FORMAT_RR BRANCH ON CONDITION
++ op_BCT uint32 = 0x4600 // FORMAT_RX1 BRANCH ON COUNT (32)
++ op_BCTG uint32 = 0xE346 // FORMAT_RXY1 BRANCH ON COUNT (64)
++ op_BCTGR uint32 = 0xB946 // FORMAT_RRE BRANCH ON COUNT (64)
++ op_BCTR uint32 = 0x0600 // FORMAT_RR BRANCH ON COUNT (32)
++ op_BPP uint32 = 0xC700 // FORMAT_SMI BRANCH PREDICTION PRELOAD
++ op_BPRP uint32 = 0xC500 // FORMAT_MII BRANCH PREDICTION RELATIVE PRELOAD
++ op_BRAS uint32 = 0xA705 // FORMAT_RI2 BRANCH RELATIVE AND SAVE
++ op_BRASL uint32 = 0xC005 // FORMAT_RIL2 BRANCH RELATIVE AND SAVE LONG
++ op_BRC uint32 = 0xA704 // FORMAT_RI3 BRANCH RELATIVE ON CONDITION
++ op_BRCL uint32 = 0xC004 // FORMAT_RIL3 BRANCH RELATIVE ON CONDITION LONG
++ op_BRCT uint32 = 0xA706 // FORMAT_RI2 BRANCH RELATIVE ON COUNT (32)
++ op_BRCTG uint32 = 0xA707 // FORMAT_RI2 BRANCH RELATIVE ON COUNT (64)
++ op_BRCTH uint32 = 0xCC06 // FORMAT_RIL2 BRANCH RELATIVE ON COUNT HIGH (32)
++ op_BRXH uint32 = 0x8400 // FORMAT_RSI BRANCH RELATIVE ON INDEX HIGH (32)
++ op_BRXHG uint32 = 0xEC44 // FORMAT_RIE5 BRANCH RELATIVE ON INDEX HIGH (64)
++ op_BRXLE uint32 = 0x8500 // FORMAT_RSI BRANCH RELATIVE ON INDEX LOW OR EQ. (32)
++ op_BRXLG uint32 = 0xEC45 // FORMAT_RIE5 BRANCH RELATIVE ON INDEX LOW OR EQ. (64)
++ op_BSA uint32 = 0xB25A // FORMAT_RRE BRANCH AND SET AUTHORITY
++ op_BSG uint32 = 0xB258 // FORMAT_RRE BRANCH IN SUBSPACE GROUP
++ op_BSM uint32 = 0x0B00 // FORMAT_RR BRANCH AND SET MODE
++ op_BXH uint32 = 0x8600 // FORMAT_RS1 BRANCH ON INDEX HIGH (32)
++ op_BXHG uint32 = 0xEB44 // FORMAT_RSY1 BRANCH ON INDEX HIGH (64)
++ op_BXLE uint32 = 0x8700 // FORMAT_RS1 BRANCH ON INDEX LOW OR EQUAL (32)
++ op_BXLEG uint32 = 0xEB45 // FORMAT_RSY1 BRANCH ON INDEX LOW OR EQUAL (64)
++ op_C uint32 = 0x5900 // FORMAT_RX1 COMPARE (32)
++ op_CD uint32 = 0x6900 // FORMAT_RX1 COMPARE (long HFP)
++ op_CDB uint32 = 0xED19 // FORMAT_RXE COMPARE (long BFP)
++ op_CDBR uint32 = 0xB319 // FORMAT_RRE COMPARE (long BFP)
++ op_CDFBR uint32 = 0xB395 // FORMAT_RRE CONVERT FROM FIXED (32 to long BFP)
++ op_CDFBRA uint32 = 0xB395 // FORMAT_RRF5 CONVERT FROM FIXED (32 to long BFP)
++ op_CDFR uint32 = 0xB3B5 // FORMAT_RRE CONVERT FROM FIXED (32 to long HFP)
++ op_CDFTR uint32 = 0xB951 // FORMAT_RRE CONVERT FROM FIXED (32 to long DFP)
++ op_CDGBR uint32 = 0xB3A5 // FORMAT_RRE CONVERT FROM FIXED (64 to long BFP)
++ op_CDGBRA uint32 = 0xB3A5 // FORMAT_RRF5 CONVERT FROM FIXED (64 to long BFP)
++ op_CDGR uint32 = 0xB3C5 // FORMAT_RRE CONVERT FROM FIXED (64 to long HFP)
++ op_CDGTR uint32 = 0xB3F1 // FORMAT_RRE CONVERT FROM FIXED (64 to long DFP)
++ op_CDGTRA uint32 = 0xB3F1 // FORMAT_RRF5 CONVERT FROM FIXED (64 to long DFP)
++ op_CDLFBR uint32 = 0xB391 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to long BFP)
++ op_CDLFTR uint32 = 0xB953 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to long DFP)
++ op_CDLGBR uint32 = 0xB3A1 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to long BFP)
++ op_CDLGTR uint32 = 0xB952 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to long DFP)
++ op_CDR uint32 = 0x2900 // FORMAT_RR COMPARE (long HFP)
++ op_CDS uint32 = 0xBB00 // FORMAT_RS1 COMPARE DOUBLE AND SWAP (32)
++ op_CDSG uint32 = 0xEB3E // FORMAT_RSY1 COMPARE DOUBLE AND SWAP (64)
++ op_CDSTR uint32 = 0xB3F3 // FORMAT_RRE CONVERT FROM SIGNED PACKED (64 to long DFP)
++ op_CDSY uint32 = 0xEB31 // FORMAT_RSY1 COMPARE DOUBLE AND SWAP (32)
++ op_CDTR uint32 = 0xB3E4 // FORMAT_RRE COMPARE (long DFP)
++ op_CDUTR uint32 = 0xB3F2 // FORMAT_RRE CONVERT FROM UNSIGNED PACKED (64 to long DFP)
++ op_CDZT uint32 = 0xEDAA // FORMAT_RSL CONVERT FROM ZONED (to long DFP)
++ op_CE uint32 = 0x7900 // FORMAT_RX1 COMPARE (short HFP)
++ op_CEB uint32 = 0xED09 // FORMAT_RXE COMPARE (short BFP)
++ op_CEBR uint32 = 0xB309 // FORMAT_RRE COMPARE (short BFP)
++ op_CEDTR uint32 = 0xB3F4 // FORMAT_RRE COMPARE BIASED EXPONENT (long DFP)
++ op_CEFBR uint32 = 0xB394 // FORMAT_RRE CONVERT FROM FIXED (32 to short BFP)
++ op_CEFBRA uint32 = 0xB394 // FORMAT_RRF5 CONVERT FROM FIXED (32 to short BFP)
++ op_CEFR uint32 = 0xB3B4 // FORMAT_RRE CONVERT FROM FIXED (32 to short HFP)
++ op_CEGBR uint32 = 0xB3A4 // FORMAT_RRE CONVERT FROM FIXED (64 to short BFP)
++ op_CEGBRA uint32 = 0xB3A4 // FORMAT_RRF5 CONVERT FROM FIXED (64 to short BFP)
++ op_CEGR uint32 = 0xB3C4 // FORMAT_RRE CONVERT FROM FIXED (64 to short HFP)
++ op_CELFBR uint32 = 0xB390 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to short BFP)
++ op_CELGBR uint32 = 0xB3A0 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to short BFP)
++ op_CER uint32 = 0x3900 // FORMAT_RR COMPARE (short HFP)
++ op_CEXTR uint32 = 0xB3FC // FORMAT_RRE COMPARE BIASED EXPONENT (extended DFP)
++ op_CFC uint32 = 0xB21A // FORMAT_S COMPARE AND FORM CODEWORD
++ op_CFDBR uint32 = 0xB399 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 32)
++ op_CFDBRA uint32 = 0xB399 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 32)
++ op_CFDR uint32 = 0xB3B9 // FORMAT_RRF5 CONVERT TO FIXED (long HFP to 32)
++ op_CFDTR uint32 = 0xB941 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 32)
++ op_CFEBR uint32 = 0xB398 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 32)
++ op_CFEBRA uint32 = 0xB398 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 32)
++ op_CFER uint32 = 0xB3B8 // FORMAT_RRF5 CONVERT TO FIXED (short HFP to 32)
++ op_CFI uint32 = 0xC20D // FORMAT_RIL1 COMPARE IMMEDIATE (32)
++ op_CFXBR uint32 = 0xB39A // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 32)
++ op_CFXBRA uint32 = 0xB39A // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 32)
++ op_CFXR uint32 = 0xB3BA // FORMAT_RRF5 CONVERT TO FIXED (extended HFP to 32)
++ op_CFXTR uint32 = 0xB949 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 32)
++ op_CG uint32 = 0xE320 // FORMAT_RXY1 COMPARE (64)
++ op_CGDBR uint32 = 0xB3A9 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 64)
++ op_CGDBRA uint32 = 0xB3A9 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 64)
++ op_CGDR uint32 = 0xB3C9 // FORMAT_RRF5 CONVERT TO FIXED (long HFP to 64)
++ op_CGDTR uint32 = 0xB3E1 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 64)
++ op_CGDTRA uint32 = 0xB3E1 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 64)
++ op_CGEBR uint32 = 0xB3A8 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 64)
++ op_CGEBRA uint32 = 0xB3A8 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 64)
++ op_CGER uint32 = 0xB3C8 // FORMAT_RRF5 CONVERT TO FIXED (short HFP to 64)
++ op_CGF uint32 = 0xE330 // FORMAT_RXY1 COMPARE (64<-32)
++ op_CGFI uint32 = 0xC20C // FORMAT_RIL1 COMPARE IMMEDIATE (64<-32)
++ op_CGFR uint32 = 0xB930 // FORMAT_RRE COMPARE (64<-32)
++ op_CGFRL uint32 = 0xC60C // FORMAT_RIL2 COMPARE RELATIVE LONG (64<-32)
++ op_CGH uint32 = 0xE334 // FORMAT_RXY1 COMPARE HALFWORD (64<-16)
++ op_CGHI uint32 = 0xA70F // FORMAT_RI1 COMPARE HALFWORD IMMEDIATE (64<-16)
++ op_CGHRL uint32 = 0xC604 // FORMAT_RIL2 COMPARE HALFWORD RELATIVE LONG (64<-16)
++ op_CGHSI uint32 = 0xE558 // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (64<-16)
++ op_CGIB uint32 = 0xECFC // FORMAT_RIS COMPARE IMMEDIATE AND BRANCH (64<-8)
++ op_CGIJ uint32 = 0xEC7C // FORMAT_RIE3 COMPARE IMMEDIATE AND BRANCH RELATIVE (64<-8)
++ op_CGIT uint32 = 0xEC70 // FORMAT_RIE1 COMPARE IMMEDIATE AND TRAP (64<-16)
++ op_CGR uint32 = 0xB920 // FORMAT_RRE COMPARE (64)
++ op_CGRB uint32 = 0xECE4 // FORMAT_RRS COMPARE AND BRANCH (64)
++ op_CGRJ uint32 = 0xEC64 // FORMAT_RIE2 COMPARE AND BRANCH RELATIVE (64)
++ op_CGRL uint32 = 0xC608 // FORMAT_RIL2 COMPARE RELATIVE LONG (64)
++ op_CGRT uint32 = 0xB960 // FORMAT_RRF3 COMPARE AND TRAP (64)
++ op_CGXBR uint32 = 0xB3AA // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 64)
++ op_CGXBRA uint32 = 0xB3AA // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 64)
++ op_CGXR uint32 = 0xB3CA // FORMAT_RRF5 CONVERT TO FIXED (extended HFP to 64)
++ op_CGXTR uint32 = 0xB3E9 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 64)
++ op_CGXTRA uint32 = 0xB3E9 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 64)
++ op_CH uint32 = 0x4900 // FORMAT_RX1 COMPARE HALFWORD (32<-16)
++ op_CHF uint32 = 0xE3CD // FORMAT_RXY1 COMPARE HIGH (32)
++ op_CHHR uint32 = 0xB9CD // FORMAT_RRE COMPARE HIGH (32)
++ op_CHHSI uint32 = 0xE554 // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (16)
++ op_CHI uint32 = 0xA70E // FORMAT_RI1 COMPARE HALFWORD IMMEDIATE (32<-16)
++ op_CHLR uint32 = 0xB9DD // FORMAT_RRE COMPARE HIGH (32)
++ op_CHRL uint32 = 0xC605 // FORMAT_RIL2 COMPARE HALFWORD RELATIVE LONG (32<-16)
++ op_CHSI uint32 = 0xE55C // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (32<-16)
++ op_CHY uint32 = 0xE379 // FORMAT_RXY1 COMPARE HALFWORD (32<-16)
++ op_CIB uint32 = 0xECFE // FORMAT_RIS COMPARE IMMEDIATE AND BRANCH (32<-8)
++ op_CIH uint32 = 0xCC0D // FORMAT_RIL1 COMPARE IMMEDIATE HIGH (32)
++ op_CIJ uint32 = 0xEC7E // FORMAT_RIE3 COMPARE IMMEDIATE AND BRANCH RELATIVE (32<-8)
++ op_CIT uint32 = 0xEC72 // FORMAT_RIE1 COMPARE IMMEDIATE AND TRAP (32<-16)
++ op_CKSM uint32 = 0xB241 // FORMAT_RRE CHECKSUM
++ op_CL uint32 = 0x5500 // FORMAT_RX1 COMPARE LOGICAL (32)
++ op_CLC uint32 = 0xD500 // FORMAT_SS1 COMPARE LOGICAL (character)
++ op_CLCL uint32 = 0x0F00 // FORMAT_RR COMPARE LOGICAL LONG
++ op_CLCLE uint32 = 0xA900 // FORMAT_RS1 COMPARE LOGICAL LONG EXTENDED
++ op_CLCLU uint32 = 0xEB8F // FORMAT_RSY1 COMPARE LOGICAL LONG UNICODE
++ op_CLFDBR uint32 = 0xB39D // FORMAT_RRF5 CONVERT TO LOGICAL (long BFP to 32)
++ op_CLFDTR uint32 = 0xB943 // FORMAT_RRF5 CONVERT TO LOGICAL (long DFP to 32)
++ op_CLFEBR uint32 = 0xB39C // FORMAT_RRF5 CONVERT TO LOGICAL (short BFP to 32)
++ op_CLFHSI uint32 = 0xE55D // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (32<-16)
++ op_CLFI uint32 = 0xC20F // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE (32)
++ op_CLFIT uint32 = 0xEC73 // FORMAT_RIE1 COMPARE LOGICAL IMMEDIATE AND TRAP (32<-16)
++ op_CLFXBR uint32 = 0xB39E // FORMAT_RRF5 CONVERT TO LOGICAL (extended BFP to 32)
++ op_CLFXTR uint32 = 0xB94B // FORMAT_RRF5 CONVERT TO LOGICAL (extended DFP to 32)
++ op_CLG uint32 = 0xE321 // FORMAT_RXY1 COMPARE LOGICAL (64)
++ op_CLGDBR uint32 = 0xB3AD // FORMAT_RRF5 CONVERT TO LOGICAL (long BFP to 64)
++ op_CLGDTR uint32 = 0xB942 // FORMAT_RRF5 CONVERT TO LOGICAL (long DFP to 64)
++ op_CLGEBR uint32 = 0xB3AC // FORMAT_RRF5 CONVERT TO LOGICAL (short BFP to 64)
++ op_CLGF uint32 = 0xE331 // FORMAT_RXY1 COMPARE LOGICAL (64<-32)
++ op_CLGFI uint32 = 0xC20E // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE (64<-32)
++ op_CLGFR uint32 = 0xB931 // FORMAT_RRE COMPARE LOGICAL (64<-32)
++ op_CLGFRL uint32 = 0xC60E // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64<-32)
++ op_CLGHRL uint32 = 0xC606 // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64<-16)
++ op_CLGHSI uint32 = 0xE559 // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (64<-16)
++ op_CLGIB uint32 = 0xECFD // FORMAT_RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (64<-8)
++ op_CLGIJ uint32 = 0xEC7D // FORMAT_RIE3 COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (64<-8)
++ op_CLGIT uint32 = 0xEC71 // FORMAT_RIE1 COMPARE LOGICAL IMMEDIATE AND TRAP (64<-16)
++ op_CLGR uint32 = 0xB921 // FORMAT_RRE COMPARE LOGICAL (64)
++ op_CLGRB uint32 = 0xECE5 // FORMAT_RRS COMPARE LOGICAL AND BRANCH (64)
++ op_CLGRJ uint32 = 0xEC65 // FORMAT_RIE2 COMPARE LOGICAL AND BRANCH RELATIVE (64)
++ op_CLGRL uint32 = 0xC60A // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64)
++ op_CLGRT uint32 = 0xB961 // FORMAT_RRF3 COMPARE LOGICAL AND TRAP (64)
++ op_CLGT uint32 = 0xEB2B // FORMAT_RSY2 COMPARE LOGICAL AND TRAP (64)
++ op_CLGXBR uint32 = 0xB3AE // FORMAT_RRF5 CONVERT TO LOGICAL (extended BFP to 64)
++ op_CLGXTR uint32 = 0xB94A // FORMAT_RRF5 CONVERT TO LOGICAL (extended DFP to 64)
++ op_CLHF uint32 = 0xE3CF // FORMAT_RXY1 COMPARE LOGICAL HIGH (32)
++ op_CLHHR uint32 = 0xB9CF // FORMAT_RRE COMPARE LOGICAL HIGH (32)
++ op_CLHHSI uint32 = 0xE555 // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (16)
++ op_CLHLR uint32 = 0xB9DF // FORMAT_RRE COMPARE LOGICAL HIGH (32)
++ op_CLHRL uint32 = 0xC607 // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (32<-16)
++ op_CLI uint32 = 0x9500 // FORMAT_SI COMPARE LOGICAL (immediate)
++ op_CLIB uint32 = 0xECFF // FORMAT_RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (32<-8)
++ op_CLIH uint32 = 0xCC0F // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE HIGH (32)
++ op_CLIJ uint32 = 0xEC7F // FORMAT_RIE3 COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (32<-8)
++ op_CLIY uint32 = 0xEB55 // FORMAT_SIY COMPARE LOGICAL (immediate)
++ op_CLM uint32 = 0xBD00 // FORMAT_RS2 COMPARE LOGICAL CHAR. UNDER MASK (low)
++ op_CLMH uint32 = 0xEB20 // FORMAT_RSY2 COMPARE LOGICAL CHAR. UNDER MASK (high)
++ op_CLMY uint32 = 0xEB21 // FORMAT_RSY2 COMPARE LOGICAL CHAR. UNDER MASK (low)
++ op_CLR uint32 = 0x1500 // FORMAT_RR COMPARE LOGICAL (32)
++ op_CLRB uint32 = 0xECF7 // FORMAT_RRS COMPARE LOGICAL AND BRANCH (32)
++ op_CLRJ uint32 = 0xEC77 // FORMAT_RIE2 COMPARE LOGICAL AND BRANCH RELATIVE (32)
++ op_CLRL uint32 = 0xC60F // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (32)
++ op_CLRT uint32 = 0xB973 // FORMAT_RRF3 COMPARE LOGICAL AND TRAP (32)
++ op_CLST uint32 = 0xB25D // FORMAT_RRE COMPARE LOGICAL STRING
++ op_CLT uint32 = 0xEB23 // FORMAT_RSY2 COMPARE LOGICAL AND TRAP (32)
++ op_CLY uint32 = 0xE355 // FORMAT_RXY1 COMPARE LOGICAL (32)
++ op_CMPSC uint32 = 0xB263 // FORMAT_RRE COMPRESSION CALL
++ op_CP uint32 = 0xF900 // FORMAT_SS2 COMPARE DECIMAL
++ op_CPSDR uint32 = 0xB372 // FORMAT_RRF2 COPY SIGN (long)
++ op_CPYA uint32 = 0xB24D // FORMAT_RRE COPY ACCESS
++ op_CR uint32 = 0x1900 // FORMAT_RR COMPARE (32)
++ op_CRB uint32 = 0xECF6 // FORMAT_RRS COMPARE AND BRANCH (32)
++ op_CRDTE uint32 = 0xB98F // FORMAT_RRF2 COMPARE AND REPLACE DAT TABLE ENTRY
++ op_CRJ uint32 = 0xEC76 // FORMAT_RIE2 COMPARE AND BRANCH RELATIVE (32)
++ op_CRL uint32 = 0xC60D // FORMAT_RIL2 COMPARE RELATIVE LONG (32)
++ op_CRT uint32 = 0xB972 // FORMAT_RRF3 COMPARE AND TRAP (32)
++ op_CS uint32 = 0xBA00 // FORMAT_RS1 COMPARE AND SWAP (32)
++ op_CSCH uint32 = 0xB230 // FORMAT_S CLEAR SUBCHANNEL
++ op_CSDTR uint32 = 0xB3E3 // FORMAT_RRF4 CONVERT TO SIGNED PACKED (long DFP to 64)
++ op_CSG uint32 = 0xEB30 // FORMAT_RSY1 COMPARE AND SWAP (64)
++ op_CSP uint32 = 0xB250 // FORMAT_RRE COMPARE AND SWAP AND PURGE
++ op_CSPG uint32 = 0xB98A // FORMAT_RRE COMPARE AND SWAP AND PURGE
++ op_CSST uint32 = 0xC802 // FORMAT_SSF COMPARE AND SWAP AND STORE
++ op_CSXTR uint32 = 0xB3EB // FORMAT_RRF4 CONVERT TO SIGNED PACKED (extended DFP to 128)
++ op_CSY uint32 = 0xEB14 // FORMAT_RSY1 COMPARE AND SWAP (32)
++ op_CU12 uint32 = 0xB2A7 // FORMAT_RRF3 CONVERT UTF-8 TO UTF-16
++ op_CU14 uint32 = 0xB9B0 // FORMAT_RRF3 CONVERT UTF-8 TO UTF-32
++ op_CU21 uint32 = 0xB2A6 // FORMAT_RRF3 CONVERT UTF-16 TO UTF-8
++ op_CU24 uint32 = 0xB9B1 // FORMAT_RRF3 CONVERT UTF-16 TO UTF-32
++ op_CU41 uint32 = 0xB9B2 // FORMAT_RRE CONVERT UTF-32 TO UTF-8
++ op_CU42 uint32 = 0xB9B3 // FORMAT_RRE CONVERT UTF-32 TO UTF-16
++ op_CUDTR uint32 = 0xB3E2 // FORMAT_RRE CONVERT TO UNSIGNED PACKED (long DFP to 64)
++ op_CUSE uint32 = 0xB257 // FORMAT_RRE COMPARE UNTIL SUBSTRING EQUAL
++ op_CUTFU uint32 = 0xB2A7 // FORMAT_RRF3 CONVERT UTF-8 TO UNICODE
++ op_CUUTF uint32 = 0xB2A6 // FORMAT_RRF3 CONVERT UNICODE TO UTF-8
++ op_CUXTR uint32 = 0xB3EA // FORMAT_RRE CONVERT TO UNSIGNED PACKED (extended DFP to 128)
++ op_CVB uint32 = 0x4F00 // FORMAT_RX1 CONVERT TO BINARY (32)
++ op_CVBG uint32 = 0xE30E // FORMAT_RXY1 CONVERT TO BINARY (64)
++ op_CVBY uint32 = 0xE306 // FORMAT_RXY1 CONVERT TO BINARY (32)
++ op_CVD uint32 = 0x4E00 // FORMAT_RX1 CONVERT TO DECIMAL (32)
++ op_CVDG uint32 = 0xE32E // FORMAT_RXY1 CONVERT TO DECIMAL (64)
++ op_CVDY uint32 = 0xE326 // FORMAT_RXY1 CONVERT TO DECIMAL (32)
++ op_CXBR uint32 = 0xB349 // FORMAT_RRE COMPARE (extended BFP)
++ op_CXFBR uint32 = 0xB396 // FORMAT_RRE CONVERT FROM FIXED (32 to extended BFP)
++ op_CXFBRA uint32 = 0xB396 // FORMAT_RRF5 CONVERT FROM FIXED (32 to extended BFP)
++ op_CXFR uint32 = 0xB3B6 // FORMAT_RRE CONVERT FROM FIXED (32 to extended HFP)
++ op_CXFTR uint32 = 0xB959 // FORMAT_RRE CONVERT FROM FIXED (32 to extended DFP)
++ op_CXGBR uint32 = 0xB3A6 // FORMAT_RRE CONVERT FROM FIXED (64 to extended BFP)
++ op_CXGBRA uint32 = 0xB3A6 // FORMAT_RRF5 CONVERT FROM FIXED (64 to extended BFP)
++ op_CXGR uint32 = 0xB3C6 // FORMAT_RRE CONVERT FROM FIXED (64 to extended HFP)
++ op_CXGTR uint32 = 0xB3F9 // FORMAT_RRE CONVERT FROM FIXED (64 to extended DFP)
++ op_CXGTRA uint32 = 0xB3F9 // FORMAT_RRF5 CONVERT FROM FIXED (64 to extended DFP)
++ op_CXLFBR uint32 = 0xB392 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to extended BFP)
++ op_CXLFTR uint32 = 0xB95B // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to extended DFP)
++ op_CXLGBR uint32 = 0xB3A2 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to extended BFP)
++ op_CXLGTR uint32 = 0xB95A // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to extended DFP)
++ op_CXR uint32 = 0xB369 // FORMAT_RRE COMPARE (extended HFP)
++ op_CXSTR uint32 = 0xB3FB // FORMAT_RRE CONVERT FROM SIGNED PACKED (128 to extended DFP)
++ op_CXTR uint32 = 0xB3EC // FORMAT_RRE COMPARE (extended DFP)
++ op_CXUTR uint32 = 0xB3FA // FORMAT_RRE CONVERT FROM UNSIGNED PACKED (128 to ext. DFP)
++ op_CXZT uint32 = 0xEDAB // FORMAT_RSL CONVERT FROM ZONED (to extended DFP)
++ op_CY uint32 = 0xE359 // FORMAT_RXY1 COMPARE (32)
++ op_CZDT uint32 = 0xEDA8 // FORMAT_RSL CONVERT TO ZONED (from long DFP)
++ op_CZXT uint32 = 0xEDA9 // FORMAT_RSL CONVERT TO ZONED (from extended DFP)
++ op_D uint32 = 0x5D00 // FORMAT_RX1 DIVIDE (32<-64)
++ op_DD uint32 = 0x6D00 // FORMAT_RX1 DIVIDE (long HFP)
++ op_DDB uint32 = 0xED1D // FORMAT_RXE DIVIDE (long BFP)
++ op_DDBR uint32 = 0xB31D // FORMAT_RRE DIVIDE (long BFP)
++ op_DDR uint32 = 0x2D00 // FORMAT_RR DIVIDE (long HFP)
++ op_DDTR uint32 = 0xB3D1 // FORMAT_RRF1 DIVIDE (long DFP)
++ op_DDTRA uint32 = 0xB3D1 // FORMAT_RRF1 DIVIDE (long DFP)
++ op_DE uint32 = 0x7D00 // FORMAT_RX1 DIVIDE (short HFP)
++ op_DEB uint32 = 0xED0D // FORMAT_RXE DIVIDE (short BFP)
++ op_DEBR uint32 = 0xB30D // FORMAT_RRE DIVIDE (short BFP)
++ op_DER uint32 = 0x3D00 // FORMAT_RR DIVIDE (short HFP)
++ op_DIDBR uint32 = 0xB35B // FORMAT_RRF2 DIVIDE TO INTEGER (long BFP)
++ op_DIEBR uint32 = 0xB353 // FORMAT_RRF2 DIVIDE TO INTEGER (short BFP)
++ op_DL uint32 = 0xE397 // FORMAT_RXY1 DIVIDE LOGICAL (32<-64)
++ op_DLG uint32 = 0xE387 // FORMAT_RXY1 DIVIDE LOGICAL (64<-128)
++ op_DLGR uint32 = 0xB987 // FORMAT_RRE DIVIDE LOGICAL (64<-128)
++ op_DLR uint32 = 0xB997 // FORMAT_RRE DIVIDE LOGICAL (32<-64)
++ op_DP uint32 = 0xFD00 // FORMAT_SS2 DIVIDE DECIMAL
++ op_DR uint32 = 0x1D00 // FORMAT_RR DIVIDE (32<-64)
++ op_DSG uint32 = 0xE30D // FORMAT_RXY1 DIVIDE SINGLE (64)
++ op_DSGF uint32 = 0xE31D // FORMAT_RXY1 DIVIDE SINGLE (64<-32)
++ op_DSGFR uint32 = 0xB91D // FORMAT_RRE DIVIDE SINGLE (64<-32)
++ op_DSGR uint32 = 0xB90D // FORMAT_RRE DIVIDE SINGLE (64)
++ op_DXBR uint32 = 0xB34D // FORMAT_RRE DIVIDE (extended BFP)
++ op_DXR uint32 = 0xB22D // FORMAT_RRE DIVIDE (extended HFP)
++ op_DXTR uint32 = 0xB3D9 // FORMAT_RRF1 DIVIDE (extended DFP)
++ op_DXTRA uint32 = 0xB3D9 // FORMAT_RRF1 DIVIDE (extended DFP)
++ op_EAR uint32 = 0xB24F // FORMAT_RRE EXTRACT ACCESS
++ op_ECAG uint32 = 0xEB4C // FORMAT_RSY1 EXTRACT CACHE ATTRIBUTE
++ op_ECTG uint32 = 0xC801 // FORMAT_SSF EXTRACT CPU TIME
++ op_ED uint32 = 0xDE00 // FORMAT_SS1 EDIT
++ op_EDMK uint32 = 0xDF00 // FORMAT_SS1 EDIT AND MARK
++ op_EEDTR uint32 = 0xB3E5 // FORMAT_RRE EXTRACT BIASED EXPONENT (long DFP to 64)
++ op_EEXTR uint32 = 0xB3ED // FORMAT_RRE EXTRACT BIASED EXPONENT (extended DFP to 64)
++ op_EFPC uint32 = 0xB38C // FORMAT_RRE EXTRACT FPC
++ op_EPAIR uint32 = 0xB99A // FORMAT_RRE EXTRACT PRIMARY ASN AND INSTANCE
++ op_EPAR uint32 = 0xB226 // FORMAT_RRE EXTRACT PRIMARY ASN
++ op_EPSW uint32 = 0xB98D // FORMAT_RRE EXTRACT PSW
++ op_EREG uint32 = 0xB249 // FORMAT_RRE EXTRACT STACKED REGISTERS (32)
++ op_EREGG uint32 = 0xB90E // FORMAT_RRE EXTRACT STACKED REGISTERS (64)
++ op_ESAIR uint32 = 0xB99B // FORMAT_RRE EXTRACT SECONDARY ASN AND INSTANCE
++ op_ESAR uint32 = 0xB227 // FORMAT_RRE EXTRACT SECONDARY ASN
++ op_ESDTR uint32 = 0xB3E7 // FORMAT_RRE EXTRACT SIGNIFICANCE (long DFP)
++ op_ESEA uint32 = 0xB99D // FORMAT_RRE EXTRACT AND SET EXTENDED AUTHORITY
++ op_ESTA uint32 = 0xB24A // FORMAT_RRE EXTRACT STACKED STATE
++ op_ESXTR uint32 = 0xB3EF // FORMAT_RRE EXTRACT SIGNIFICANCE (extended DFP)
++ op_ETND uint32 = 0xB2EC // FORMAT_RRE EXTRACT TRANSACTION NESTING DEPTH
++ op_EX uint32 = 0x4400 // FORMAT_RX1 EXECUTE
++ op_EXRL uint32 = 0xC600 // FORMAT_RIL2 EXECUTE RELATIVE LONG
++ op_FIDBR uint32 = 0xB35F // FORMAT_RRF5 LOAD FP INTEGER (long BFP)
++ op_FIDBRA uint32 = 0xB35F // FORMAT_RRF5 LOAD FP INTEGER (long BFP)
++ op_FIDR uint32 = 0xB37F // FORMAT_RRE LOAD FP INTEGER (long HFP)
++ op_FIDTR uint32 = 0xB3D7 // FORMAT_RRF5 LOAD FP INTEGER (long DFP)
++ op_FIEBR uint32 = 0xB357 // FORMAT_RRF5 LOAD FP INTEGER (short BFP)
++ op_FIEBRA uint32 = 0xB357 // FORMAT_RRF5 LOAD FP INTEGER (short BFP)
++ op_FIER uint32 = 0xB377 // FORMAT_RRE LOAD FP INTEGER (short HFP)
++ op_FIXBR uint32 = 0xB347 // FORMAT_RRF5 LOAD FP INTEGER (extended BFP)
++ op_FIXBRA uint32 = 0xB347 // FORMAT_RRF5 LOAD FP INTEGER (extended BFP)
++ op_FIXR uint32 = 0xB367 // FORMAT_RRE LOAD FP INTEGER (extended HFP)
++ op_FIXTR uint32 = 0xB3DF // FORMAT_RRF5 LOAD FP INTEGER (extended DFP)
++ op_FLOGR uint32 = 0xB983 // FORMAT_RRE FIND LEFTMOST ONE
++ op_HDR uint32 = 0x2400 // FORMAT_RR HALVE (long HFP)
++ op_HER uint32 = 0x3400 // FORMAT_RR HALVE (short HFP)
++ op_HSCH uint32 = 0xB231 // FORMAT_S HALT SUBCHANNEL
++ op_IAC uint32 = 0xB224 // FORMAT_RRE INSERT ADDRESS SPACE CONTROL
++ op_IC uint32 = 0x4300 // FORMAT_RX1 INSERT CHARACTER
++ op_ICM uint32 = 0xBF00 // FORMAT_RS2 INSERT CHARACTERS UNDER MASK (low)
++ op_ICMH uint32 = 0xEB80 // FORMAT_RSY2 INSERT CHARACTERS UNDER MASK (high)
++ op_ICMY uint32 = 0xEB81 // FORMAT_RSY2 INSERT CHARACTERS UNDER MASK (low)
++ op_ICY uint32 = 0xE373 // FORMAT_RXY1 INSERT CHARACTER
++ op_IDTE uint32 = 0xB98E // FORMAT_RRF2 INVALIDATE DAT TABLE ENTRY
++ op_IEDTR uint32 = 0xB3F6 // FORMAT_RRF2 INSERT BIASED EXPONENT (64 to long DFP)
++ op_IEXTR uint32 = 0xB3FE // FORMAT_RRF2 INSERT BIASED EXPONENT (64 to extended DFP)
++ op_IIHF uint32 = 0xC008 // FORMAT_RIL1 INSERT IMMEDIATE (high)
++ op_IIHH uint32 = 0xA500 // FORMAT_RI1 INSERT IMMEDIATE (high high)
++ op_IIHL uint32 = 0xA501 // FORMAT_RI1 INSERT IMMEDIATE (high low)
++ op_IILF uint32 = 0xC009 // FORMAT_RIL1 INSERT IMMEDIATE (low)
++ op_IILH uint32 = 0xA502 // FORMAT_RI1 INSERT IMMEDIATE (low high)
++ op_IILL uint32 = 0xA503 // FORMAT_RI1 INSERT IMMEDIATE (low low)
++ op_IPK uint32 = 0xB20B // FORMAT_S INSERT PSW KEY
++ op_IPM uint32 = 0xB222 // FORMAT_RRE INSERT PROGRAM MASK
++ op_IPTE uint32 = 0xB221 // FORMAT_RRF1 INVALIDATE PAGE TABLE ENTRY
++ op_ISKE uint32 = 0xB229 // FORMAT_RRE INSERT STORAGE KEY EXTENDED
++ op_IVSK uint32 = 0xB223 // FORMAT_RRE INSERT VIRTUAL STORAGE KEY
++ op_KDB uint32 = 0xED18 // FORMAT_RXE COMPARE AND SIGNAL (long BFP)
++ op_KDBR uint32 = 0xB318 // FORMAT_RRE COMPARE AND SIGNAL (long BFP)
++ op_KDTR uint32 = 0xB3E0 // FORMAT_RRE COMPARE AND SIGNAL (long DFP)
++ op_KEB uint32 = 0xED08 // FORMAT_RXE COMPARE AND SIGNAL (short BFP)
++ op_KEBR uint32 = 0xB308 // FORMAT_RRE COMPARE AND SIGNAL (short BFP)
++ op_KIMD uint32 = 0xB93E // FORMAT_RRE COMPUTE INTERMEDIATE MESSAGE DIGEST
++ op_KLMD uint32 = 0xB93F // FORMAT_RRE COMPUTE LAST MESSAGE DIGEST
++ op_KM uint32 = 0xB92E // FORMAT_RRE CIPHER MESSAGE
++ op_KMAC uint32 = 0xB91E // FORMAT_RRE COMPUTE MESSAGE AUTHENTICATION CODE
++ op_KMC uint32 = 0xB92F // FORMAT_RRE CIPHER MESSAGE WITH CHAINING
++ op_KMCTR uint32 = 0xB92D // FORMAT_RRF2 CIPHER MESSAGE WITH COUNTER
++ op_KMF uint32 = 0xB92A // FORMAT_RRE CIPHER MESSAGE WITH CFB
++ op_KMO uint32 = 0xB92B // FORMAT_RRE CIPHER MESSAGE WITH OFB
++ op_KXBR uint32 = 0xB348 // FORMAT_RRE COMPARE AND SIGNAL (extended BFP)
++ op_KXTR uint32 = 0xB3E8 // FORMAT_RRE COMPARE AND SIGNAL (extended DFP)
++ op_L uint32 = 0x5800 // FORMAT_RX1 LOAD (32)
++ op_LA uint32 = 0x4100 // FORMAT_RX1 LOAD ADDRESS
++ op_LAA uint32 = 0xEBF8 // FORMAT_RSY1 LOAD AND ADD (32)
++ op_LAAG uint32 = 0xEBE8 // FORMAT_RSY1 LOAD AND ADD (64)
++ op_LAAL uint32 = 0xEBFA // FORMAT_RSY1 LOAD AND ADD LOGICAL (32)
++ op_LAALG uint32 = 0xEBEA // FORMAT_RSY1 LOAD AND ADD LOGICAL (64)
++ op_LAE uint32 = 0x5100 // FORMAT_RX1 LOAD ADDRESS EXTENDED
++ op_LAEY uint32 = 0xE375 // FORMAT_RXY1 LOAD ADDRESS EXTENDED
++ op_LAM uint32 = 0x9A00 // FORMAT_RS1 LOAD ACCESS MULTIPLE
++ op_LAMY uint32 = 0xEB9A // FORMAT_RSY1 LOAD ACCESS MULTIPLE
++ op_LAN uint32 = 0xEBF4 // FORMAT_RSY1 LOAD AND AND (32)
++ op_LANG uint32 = 0xEBE4 // FORMAT_RSY1 LOAD AND AND (64)
++ op_LAO uint32 = 0xEBF6 // FORMAT_RSY1 LOAD AND OR (32)
++ op_LAOG uint32 = 0xEBE6 // FORMAT_RSY1 LOAD AND OR (64)
++ op_LARL uint32 = 0xC000 // FORMAT_RIL2 LOAD ADDRESS RELATIVE LONG
++ op_LASP uint32 = 0xE500 // FORMAT_SSE LOAD ADDRESS SPACE PARAMETERS
++ op_LAT uint32 = 0xE39F // FORMAT_RXY1 LOAD AND TRAP (32L<-32)
++ op_LAX uint32 = 0xEBF7 // FORMAT_RSY1 LOAD AND EXCLUSIVE OR (32)
++ op_LAXG uint32 = 0xEBE7 // FORMAT_RSY1 LOAD AND EXCLUSIVE OR (64)
++ op_LAY uint32 = 0xE371 // FORMAT_RXY1 LOAD ADDRESS
++ op_LB uint32 = 0xE376 // FORMAT_RXY1 LOAD BYTE (32)
++ op_LBH uint32 = 0xE3C0 // FORMAT_RXY1 LOAD BYTE HIGH (32<-8)
++ op_LBR uint32 = 0xB926 // FORMAT_RRE LOAD BYTE (32)
++ op_LCDBR uint32 = 0xB313 // FORMAT_RRE LOAD COMPLEMENT (long BFP)
++ op_LCDFR uint32 = 0xB373 // FORMAT_RRE LOAD COMPLEMENT (long)
++ op_LCDR uint32 = 0x2300 // FORMAT_RR LOAD COMPLEMENT (long HFP)
++ op_LCEBR uint32 = 0xB303 // FORMAT_RRE LOAD COMPLEMENT (short BFP)
++ op_LCER uint32 = 0x3300 // FORMAT_RR LOAD COMPLEMENT (short HFP)
++ op_LCGFR uint32 = 0xB913 // FORMAT_RRE LOAD COMPLEMENT (64<-32)
++ op_LCGR uint32 = 0xB903 // FORMAT_RRE LOAD COMPLEMENT (64)
++ op_LCR uint32 = 0x1300 // FORMAT_RR LOAD COMPLEMENT (32)
++ op_LCTL uint32 = 0xB700 // FORMAT_RS1 LOAD CONTROL (32)
++ op_LCTLG uint32 = 0xEB2F // FORMAT_RSY1 LOAD CONTROL (64)
++ op_LCXBR uint32 = 0xB343 // FORMAT_RRE LOAD COMPLEMENT (extended BFP)
++ op_LCXR uint32 = 0xB363 // FORMAT_RRE LOAD COMPLEMENT (extended HFP)
++ op_LD uint32 = 0x6800 // FORMAT_RX1 LOAD (long)
++ op_LDE uint32 = 0xED24 // FORMAT_RXE LOAD LENGTHENED (short to long HFP)
++ op_LDEB uint32 = 0xED04 // FORMAT_RXE LOAD LENGTHENED (short to long BFP)
++ op_LDEBR uint32 = 0xB304 // FORMAT_RRE LOAD LENGTHENED (short to long BFP)
++ op_LDER uint32 = 0xB324 // FORMAT_RRE LOAD LENGTHENED (short to long HFP)
++ op_LDETR uint32 = 0xB3D4 // FORMAT_RRF4 LOAD LENGTHENED (short to long DFP)
++ op_LDGR uint32 = 0xB3C1 // FORMAT_RRE LOAD FPR FROM GR (64 to long)
++ op_LDR uint32 = 0x2800 // FORMAT_RR LOAD (long)
++ op_LDXBR uint32 = 0xB345 // FORMAT_RRE LOAD ROUNDED (extended to long BFP)
++ op_LDXBRA uint32 = 0xB345 // FORMAT_RRF5 LOAD ROUNDED (extended to long BFP)
++ op_LDXR uint32 = 0x2500 // FORMAT_RR LOAD ROUNDED (extended to long HFP)
++ op_LDXTR uint32 = 0xB3DD // FORMAT_RRF5 LOAD ROUNDED (extended to long DFP)
++ op_LDY uint32 = 0xED65 // FORMAT_RXY1 LOAD (long)
++ op_LE uint32 = 0x7800 // FORMAT_RX1 LOAD (short)
++ op_LEDBR uint32 = 0xB344 // FORMAT_RRE LOAD ROUNDED (long to short BFP)
++ op_LEDBRA uint32 = 0xB344 // FORMAT_RRF5 LOAD ROUNDED (long to short BFP)
++ op_LEDR uint32 = 0x3500 // FORMAT_RR LOAD ROUNDED (long to short HFP)
++ op_LEDTR uint32 = 0xB3D5 // FORMAT_RRF5 LOAD ROUNDED (long to short DFP)
++ op_LER uint32 = 0x3800 // FORMAT_RR LOAD (short)
++ op_LEXBR uint32 = 0xB346 // FORMAT_RRE LOAD ROUNDED (extended to short BFP)
++ op_LEXBRA uint32 = 0xB346 // FORMAT_RRF5 LOAD ROUNDED (extended to short BFP)
++ op_LEXR uint32 = 0xB366 // FORMAT_RRE LOAD ROUNDED (extended to short HFP)
++ op_LEY uint32 = 0xED64 // FORMAT_RXY1 LOAD (short)
++ op_LFAS uint32 = 0xB2BD // FORMAT_S LOAD FPC AND SIGNAL
++ op_LFH uint32 = 0xE3CA // FORMAT_RXY1 LOAD HIGH (32)
++ op_LFHAT uint32 = 0xE3C8 // FORMAT_RXY1 LOAD HIGH AND TRAP (32H<-32)
++ op_LFPC uint32 = 0xB29D // FORMAT_S LOAD FPC
++ op_LG uint32 = 0xE304 // FORMAT_RXY1 LOAD (64)
++ op_LGAT uint32 = 0xE385 // FORMAT_RXY1 LOAD AND TRAP (64)
++ op_LGB uint32 = 0xE377 // FORMAT_RXY1 LOAD BYTE (64)
++ op_LGBR uint32 = 0xB906 // FORMAT_RRE LOAD BYTE (64)
++ op_LGDR uint32 = 0xB3CD // FORMAT_RRE LOAD GR FROM FPR (long to 64)
++ op_LGF uint32 = 0xE314 // FORMAT_RXY1 LOAD (64<-32)
++ op_LGFI uint32 = 0xC001 // FORMAT_RIL1 LOAD IMMEDIATE (64<-32)
++ op_LGFR uint32 = 0xB914 // FORMAT_RRE LOAD (64<-32)
++ op_LGFRL uint32 = 0xC40C // FORMAT_RIL2 LOAD RELATIVE LONG (64<-32)
++ op_LGH uint32 = 0xE315 // FORMAT_RXY1 LOAD HALFWORD (64)
++ op_LGHI uint32 = 0xA709 // FORMAT_RI1 LOAD HALFWORD IMMEDIATE (64)
++ op_LGHR uint32 = 0xB907 // FORMAT_RRE LOAD HALFWORD (64)
++ op_LGHRL uint32 = 0xC404 // FORMAT_RIL2 LOAD HALFWORD RELATIVE LONG (64<-16)
++ op_LGR uint32 = 0xB904 // FORMAT_RRE LOAD (64)
++ op_LGRL uint32 = 0xC408 // FORMAT_RIL2 LOAD RELATIVE LONG (64)
++ op_LH uint32 = 0x4800 // FORMAT_RX1 LOAD HALFWORD (32)
++ op_LHH uint32 = 0xE3C4 // FORMAT_RXY1 LOAD HALFWORD HIGH (32<-16)
++ op_LHI uint32 = 0xA708 // FORMAT_RI1 LOAD HALFWORD IMMEDIATE (32)
++ op_LHR uint32 = 0xB927 // FORMAT_RRE LOAD HALFWORD (32)
++ op_LHRL uint32 = 0xC405 // FORMAT_RIL2 LOAD HALFWORD RELATIVE LONG (32<-16)
++ op_LHY uint32 = 0xE378 // FORMAT_RXY1 LOAD HALFWORD (32)
++ op_LLC uint32 = 0xE394 // FORMAT_RXY1 LOAD LOGICAL CHARACTER (32)
++ op_LLCH uint32 = 0xE3C2 // FORMAT_RXY1 LOAD LOGICAL CHARACTER HIGH (32<-8)
++ op_LLCR uint32 = 0xB994 // FORMAT_RRE LOAD LOGICAL CHARACTER (32)
++ op_LLGC uint32 = 0xE390 // FORMAT_RXY1 LOAD LOGICAL CHARACTER (64)
++ op_LLGCR uint32 = 0xB984 // FORMAT_RRE LOAD LOGICAL CHARACTER (64)
++ op_LLGF uint32 = 0xE316 // FORMAT_RXY1 LOAD LOGICAL (64<-32)
++ op_LLGFAT uint32 = 0xE39D // FORMAT_RXY1 LOAD LOGICAL AND TRAP (64<-32)
++ op_LLGFR uint32 = 0xB916 // FORMAT_RRE LOAD LOGICAL (64<-32)
++ op_LLGFRL uint32 = 0xC40E // FORMAT_RIL2 LOAD LOGICAL RELATIVE LONG (64<-32)
++ op_LLGH uint32 = 0xE391 // FORMAT_RXY1 LOAD LOGICAL HALFWORD (64)
++ op_LLGHR uint32 = 0xB985 // FORMAT_RRE LOAD LOGICAL HALFWORD (64)
++ op_LLGHRL uint32 = 0xC406 // FORMAT_RIL2 LOAD LOGICAL HALFWORD RELATIVE LONG (64<-16)
++ op_LLGT uint32 = 0xE317 // FORMAT_RXY1 LOAD LOGICAL THIRTY ONE BITS
++ op_LLGTAT uint32 = 0xE39C // FORMAT_RXY1 LOAD LOGICAL THIRTY ONE BITS AND TRAP (64<-31)
++ op_LLGTR uint32 = 0xB917 // FORMAT_RRE LOAD LOGICAL THIRTY ONE BITS
++ op_LLH uint32 = 0xE395 // FORMAT_RXY1 LOAD LOGICAL HALFWORD (32)
++ op_LLHH uint32 = 0xE3C6 // FORMAT_RXY1 LOAD LOGICAL HALFWORD HIGH (32<-16)
++ op_LLHR uint32 = 0xB995 // FORMAT_RRE LOAD LOGICAL HALFWORD (32)
++ op_LLHRL uint32 = 0xC402 // FORMAT_RIL2 LOAD LOGICAL HALFWORD RELATIVE LONG (32<-16)
++ op_LLIHF uint32 = 0xC00E // FORMAT_RIL1 LOAD LOGICAL IMMEDIATE (high)
++ op_LLIHH uint32 = 0xA50C // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (high high)
++ op_LLIHL uint32 = 0xA50D // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (high low)
++ op_LLILF uint32 = 0xC00F // FORMAT_RIL1 LOAD LOGICAL IMMEDIATE (low)
++ op_LLILH uint32 = 0xA50E // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (low high)
++ op_LLILL uint32 = 0xA50F // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (low low)
++ op_LM uint32 = 0x9800 // FORMAT_RS1 LOAD MULTIPLE (32)
++ op_LMD uint32 = 0xEF00 // FORMAT_SS5 LOAD MULTIPLE DISJOINT
++ op_LMG uint32 = 0xEB04 // FORMAT_RSY1 LOAD MULTIPLE (64)
++ op_LMH uint32 = 0xEB96 // FORMAT_RSY1 LOAD MULTIPLE HIGH
++ op_LMY uint32 = 0xEB98 // FORMAT_RSY1 LOAD MULTIPLE (32)
++ op_LNDBR uint32 = 0xB311 // FORMAT_RRE LOAD NEGATIVE (long BFP)
++ op_LNDFR uint32 = 0xB371 // FORMAT_RRE LOAD NEGATIVE (long)
++ op_LNDR uint32 = 0x2100 // FORMAT_RR LOAD NEGATIVE (long HFP)
++ op_LNEBR uint32 = 0xB301 // FORMAT_RRE LOAD NEGATIVE (short BFP)
++ op_LNER uint32 = 0x3100 // FORMAT_RR LOAD NEGATIVE (short HFP)
++ op_LNGFR uint32 = 0xB911 // FORMAT_RRE LOAD NEGATIVE (64<-32)
++ op_LNGR uint32 = 0xB901 // FORMAT_RRE LOAD NEGATIVE (64)
++ op_LNR uint32 = 0x1100 // FORMAT_RR LOAD NEGATIVE (32)
++ op_LNXBR uint32 = 0xB341 // FORMAT_RRE LOAD NEGATIVE (extended BFP)
++ op_LNXR uint32 = 0xB361 // FORMAT_RRE LOAD NEGATIVE (extended HFP)
++ op_LOC uint32 = 0xEBF2 // FORMAT_RSY2 LOAD ON CONDITION (32)
++ op_LOCG uint32 = 0xEBE2 // FORMAT_RSY2 LOAD ON CONDITION (64)
++ op_LOCGR uint32 = 0xB9E2 // FORMAT_RRF3 LOAD ON CONDITION (64)
++ op_LOCR uint32 = 0xB9F2 // FORMAT_RRF3 LOAD ON CONDITION (32)
++ op_LPD uint32 = 0xC804 // FORMAT_SSF LOAD PAIR DISJOINT (32)
++ op_LPDBR uint32 = 0xB310 // FORMAT_RRE LOAD POSITIVE (long BFP)
++ op_LPDFR uint32 = 0xB370 // FORMAT_RRE LOAD POSITIVE (long)
++ op_LPDG uint32 = 0xC805 // FORMAT_SSF LOAD PAIR DISJOINT (64)
++ op_LPDR uint32 = 0x2000 // FORMAT_RR LOAD POSITIVE (long HFP)
++ op_LPEBR uint32 = 0xB300 // FORMAT_RRE LOAD POSITIVE (short BFP)
++ op_LPER uint32 = 0x3000 // FORMAT_RR LOAD POSITIVE (short HFP)
++ op_LPGFR uint32 = 0xB910 // FORMAT_RRE LOAD POSITIVE (64<-32)
++ op_LPGR uint32 = 0xB900 // FORMAT_RRE LOAD POSITIVE (64)
++ op_LPQ uint32 = 0xE38F // FORMAT_RXY1 LOAD PAIR FROM QUADWORD
++ op_LPR uint32 = 0x1000 // FORMAT_RR LOAD POSITIVE (32)
++ op_LPSW uint32 = 0x8200 // FORMAT_S LOAD PSW
++ op_LPSWE uint32 = 0xB2B2 // FORMAT_S LOAD PSW EXTENDED
++ op_LPTEA uint32 = 0xB9AA // FORMAT_RRF2 LOAD PAGE TABLE ENTRY ADDRESS
++ op_LPXBR uint32 = 0xB340 // FORMAT_RRE LOAD POSITIVE (extended BFP)
++ op_LPXR uint32 = 0xB360 // FORMAT_RRE LOAD POSITIVE (extended HFP)
++ op_LR uint32 = 0x1800 // FORMAT_RR LOAD (32)
++ op_LRA uint32 = 0xB100 // FORMAT_RX1 LOAD REAL ADDRESS (32)
++ op_LRAG uint32 = 0xE303 // FORMAT_RXY1 LOAD REAL ADDRESS (64)
++ op_LRAY uint32 = 0xE313 // FORMAT_RXY1 LOAD REAL ADDRESS (32)
++ op_LRDR uint32 = 0x2500 // FORMAT_RR LOAD ROUNDED (extended to long HFP)
++ op_LRER uint32 = 0x3500 // FORMAT_RR LOAD ROUNDED (long to short HFP)
++ op_LRL uint32 = 0xC40D // FORMAT_RIL2 LOAD RELATIVE LONG (32)
++ op_LRV uint32 = 0xE31E // FORMAT_RXY1 LOAD REVERSED (32)
++ op_LRVG uint32 = 0xE30F // FORMAT_RXY1 LOAD REVERSED (64)
++ op_LRVGR uint32 = 0xB90F // FORMAT_RRE LOAD REVERSED (64)
++ op_LRVH uint32 = 0xE31F // FORMAT_RXY1 LOAD REVERSED (16)
++ op_LRVR uint32 = 0xB91F // FORMAT_RRE LOAD REVERSED (32)
++ op_LT uint32 = 0xE312 // FORMAT_RXY1 LOAD AND TEST (32)
++ op_LTDBR uint32 = 0xB312 // FORMAT_RRE LOAD AND TEST (long BFP)
++ op_LTDR uint32 = 0x2200 // FORMAT_RR LOAD AND TEST (long HFP)
++ op_LTDTR uint32 = 0xB3D6 // FORMAT_RRE LOAD AND TEST (long DFP)
++ op_LTEBR uint32 = 0xB302 // FORMAT_RRE LOAD AND TEST (short BFP)
++ op_LTER uint32 = 0x3200 // FORMAT_RR LOAD AND TEST (short HFP)
++ op_LTG uint32 = 0xE302 // FORMAT_RXY1 LOAD AND TEST (64)
++ op_LTGF uint32 = 0xE332 // FORMAT_RXY1 LOAD AND TEST (64<-32)
++ op_LTGFR uint32 = 0xB912 // FORMAT_RRE LOAD AND TEST (64<-32)
++ op_LTGR uint32 = 0xB902 // FORMAT_RRE LOAD AND TEST (64)
++ op_LTR uint32 = 0x1200 // FORMAT_RR LOAD AND TEST (32)
++ op_LTXBR uint32 = 0xB342 // FORMAT_RRE LOAD AND TEST (extended BFP)
++ op_LTXR uint32 = 0xB362 // FORMAT_RRE LOAD AND TEST (extended HFP)
++ op_LTXTR uint32 = 0xB3DE // FORMAT_RRE LOAD AND TEST (extended DFP)
++ op_LURA uint32 = 0xB24B // FORMAT_RRE LOAD USING REAL ADDRESS (32)
++ op_LURAG uint32 = 0xB905 // FORMAT_RRE LOAD USING REAL ADDRESS (64)
++ op_LXD uint32 = 0xED25 // FORMAT_RXE LOAD LENGTHENED (long to extended HFP)
++ op_LXDB uint32 = 0xED05 // FORMAT_RXE LOAD LENGTHENED (long to extended BFP)
++ op_LXDBR uint32 = 0xB305 // FORMAT_RRE LOAD LENGTHENED (long to extended BFP)
++ op_LXDR uint32 = 0xB325 // FORMAT_RRE LOAD LENGTHENED (long to extended HFP)
++ op_LXDTR uint32 = 0xB3DC // FORMAT_RRF4 LOAD LENGTHENED (long to extended DFP)
++ op_LXE uint32 = 0xED26 // FORMAT_RXE LOAD LENGTHENED (short to extended HFP)
++ op_LXEB uint32 = 0xED06 // FORMAT_RXE LOAD LENGTHENED (short to extended BFP)
++ op_LXEBR uint32 = 0xB306 // FORMAT_RRE LOAD LENGTHENED (short to extended BFP)
++ op_LXER uint32 = 0xB326 // FORMAT_RRE LOAD LENGTHENED (short to extended HFP)
++ op_LXR uint32 = 0xB365 // FORMAT_RRE LOAD (extended)
++ op_LY uint32 = 0xE358 // FORMAT_RXY1 LOAD (32)
++ op_LZDR uint32 = 0xB375 // FORMAT_RRE LOAD ZERO (long)
++ op_LZER uint32 = 0xB374 // FORMAT_RRE LOAD ZERO (short)
++ op_LZXR uint32 = 0xB376 // FORMAT_RRE LOAD ZERO (extended)
++ op_M uint32 = 0x5C00 // FORMAT_RX1 MULTIPLY (64<-32)
++ op_MAD uint32 = 0xED3E // FORMAT_RXF MULTIPLY AND ADD (long HFP)
++ op_MADB uint32 = 0xED1E // FORMAT_RXF MULTIPLY AND ADD (long BFP)
++ op_MADBR uint32 = 0xB31E // FORMAT_RRD MULTIPLY AND ADD (long BFP)
++ op_MADR uint32 = 0xB33E // FORMAT_RRD MULTIPLY AND ADD (long HFP)
++ op_MAE uint32 = 0xED2E // FORMAT_RXF MULTIPLY AND ADD (short HFP)
++ op_MAEB uint32 = 0xED0E // FORMAT_RXF MULTIPLY AND ADD (short BFP)
++ op_MAEBR uint32 = 0xB30E // FORMAT_RRD MULTIPLY AND ADD (short BFP)
++ op_MAER uint32 = 0xB32E // FORMAT_RRD MULTIPLY AND ADD (short HFP)
++ op_MAY uint32 = 0xED3A // FORMAT_RXF MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
++ op_MAYH uint32 = 0xED3C // FORMAT_RXF MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
++ op_MAYHR uint32 = 0xB33C // FORMAT_RRD MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
++ op_MAYL uint32 = 0xED38 // FORMAT_RXF MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
++ op_MAYLR uint32 = 0xB338 // FORMAT_RRD MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
++ op_MAYR uint32 = 0xB33A // FORMAT_RRD MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
++ op_MC uint32 = 0xAF00 // FORMAT_SI MONITOR CALL
++ op_MD uint32 = 0x6C00 // FORMAT_RX1 MULTIPLY (long HFP)
++ op_MDB uint32 = 0xED1C // FORMAT_RXE MULTIPLY (long BFP)
++ op_MDBR uint32 = 0xB31C // FORMAT_RRE MULTIPLY (long BFP)
++ op_MDE uint32 = 0x7C00 // FORMAT_RX1 MULTIPLY (short to long HFP)
++ op_MDEB uint32 = 0xED0C // FORMAT_RXE MULTIPLY (short to long BFP)
++ op_MDEBR uint32 = 0xB30C // FORMAT_RRE MULTIPLY (short to long BFP)
++ op_MDER uint32 = 0x3C00 // FORMAT_RR MULTIPLY (short to long HFP)
++ op_MDR uint32 = 0x2C00 // FORMAT_RR MULTIPLY (long HFP)
++ op_MDTR uint32 = 0xB3D0 // FORMAT_RRF1 MULTIPLY (long DFP)
++ op_MDTRA uint32 = 0xB3D0 // FORMAT_RRF1 MULTIPLY (long DFP)
++ op_ME uint32 = 0x7C00 // FORMAT_RX1 MULTIPLY (short to long HFP)
++ op_MEE uint32 = 0xED37 // FORMAT_RXE MULTIPLY (short HFP)
++ op_MEEB uint32 = 0xED17 // FORMAT_RXE MULTIPLY (short BFP)
++ op_MEEBR uint32 = 0xB317 // FORMAT_RRE MULTIPLY (short BFP)
++ op_MEER uint32 = 0xB337 // FORMAT_RRE MULTIPLY (short HFP)
++ op_MER uint32 = 0x3C00 // FORMAT_RR MULTIPLY (short to long HFP)
++ op_MFY uint32 = 0xE35C // FORMAT_RXY1 MULTIPLY (64<-32)
++ op_MGHI uint32 = 0xA70D // FORMAT_RI1 MULTIPLY HALFWORD IMMEDIATE (64)
++ op_MH uint32 = 0x4C00 // FORMAT_RX1 MULTIPLY HALFWORD (32)
++ op_MHI uint32 = 0xA70C // FORMAT_RI1 MULTIPLY HALFWORD IMMEDIATE (32)
++ op_MHY uint32 = 0xE37C // FORMAT_RXY1 MULTIPLY HALFWORD (32)
++ op_ML uint32 = 0xE396 // FORMAT_RXY1 MULTIPLY LOGICAL (64<-32)
++ op_MLG uint32 = 0xE386 // FORMAT_RXY1 MULTIPLY LOGICAL (128<-64)
++ op_MLGR uint32 = 0xB986 // FORMAT_RRE MULTIPLY LOGICAL (128<-64)
++ op_MLR uint32 = 0xB996 // FORMAT_RRE MULTIPLY LOGICAL (64<-32)
++ op_MP uint32 = 0xFC00 // FORMAT_SS2 MULTIPLY DECIMAL
++ op_MR uint32 = 0x1C00 // FORMAT_RR MULTIPLY (64<-32)
++ op_MS uint32 = 0x7100 // FORMAT_RX1 MULTIPLY SINGLE (32)
++ op_MSCH uint32 = 0xB232 // FORMAT_S MODIFY SUBCHANNEL
++ op_MSD uint32 = 0xED3F // FORMAT_RXF MULTIPLY AND SUBTRACT (long HFP)
++ op_MSDB uint32 = 0xED1F // FORMAT_RXF MULTIPLY AND SUBTRACT (long BFP)
++ op_MSDBR uint32 = 0xB31F // FORMAT_RRD MULTIPLY AND SUBTRACT (long BFP)
++ op_MSDR uint32 = 0xB33F // FORMAT_RRD MULTIPLY AND SUBTRACT (long HFP)
++ op_MSE uint32 = 0xED2F // FORMAT_RXF MULTIPLY AND SUBTRACT (short HFP)
++ op_MSEB uint32 = 0xED0F // FORMAT_RXF MULTIPLY AND SUBTRACT (short BFP)
++ op_MSEBR uint32 = 0xB30F // FORMAT_RRD MULTIPLY AND SUBTRACT (short BFP)
++ op_MSER uint32 = 0xB32F // FORMAT_RRD MULTIPLY AND SUBTRACT (short HFP)
++ op_MSFI uint32 = 0xC201 // FORMAT_RIL1 MULTIPLY SINGLE IMMEDIATE (32)
++ op_MSG uint32 = 0xE30C // FORMAT_RXY1 MULTIPLY SINGLE (64)
++ op_MSGF uint32 = 0xE31C // FORMAT_RXY1 MULTIPLY SINGLE (64<-32)
++ op_MSGFI uint32 = 0xC200 // FORMAT_RIL1 MULTIPLY SINGLE IMMEDIATE (64<-32)
++ op_MSGFR uint32 = 0xB91C // FORMAT_RRE MULTIPLY SINGLE (64<-32)
++ op_MSGR uint32 = 0xB90C // FORMAT_RRE MULTIPLY SINGLE (64)
++ op_MSR uint32 = 0xB252 // FORMAT_RRE MULTIPLY SINGLE (32)
++ op_MSTA uint32 = 0xB247 // FORMAT_RRE MODIFY STACKED STATE
++ op_MSY uint32 = 0xE351 // FORMAT_RXY1 MULTIPLY SINGLE (32)
++ op_MVC uint32 = 0xD200 // FORMAT_SS1 MOVE (character)
++ op_MVCDK uint32 = 0xE50F // FORMAT_SSE MOVE WITH DESTINATION KEY
++ op_MVCIN uint32 = 0xE800 // FORMAT_SS1 MOVE INVERSE
++ op_MVCK uint32 = 0xD900 // FORMAT_SS4 MOVE WITH KEY
++ op_MVCL uint32 = 0x0E00 // FORMAT_RR MOVE LONG
++ op_MVCLE uint32 = 0xA800 // FORMAT_RS1 MOVE LONG EXTENDED
++ op_MVCLU uint32 = 0xEB8E // FORMAT_RSY1 MOVE LONG UNICODE
++ op_MVCOS uint32 = 0xC800 // FORMAT_SSF MOVE WITH OPTIONAL SPECIFICATIONS
++ op_MVCP uint32 = 0xDA00 // FORMAT_SS4 MOVE TO PRIMARY
++ op_MVCS uint32 = 0xDB00 // FORMAT_SS4 MOVE TO SECONDARY
++ op_MVCSK uint32 = 0xE50E // FORMAT_SSE MOVE WITH SOURCE KEY
++ op_MVGHI uint32 = 0xE548 // FORMAT_SIL MOVE (64<-16)
++ op_MVHHI uint32 = 0xE544 // FORMAT_SIL MOVE (16<-16)
++ op_MVHI uint32 = 0xE54C // FORMAT_SIL MOVE (32<-16)
++ op_MVI uint32 = 0x9200 // FORMAT_SI MOVE (immediate)
++ op_MVIY uint32 = 0xEB52 // FORMAT_SIY MOVE (immediate)
++ op_MVN uint32 = 0xD100 // FORMAT_SS1 MOVE NUMERICS
++ op_MVO uint32 = 0xF100 // FORMAT_SS2 MOVE WITH OFFSET
++ op_MVPG uint32 = 0xB254 // FORMAT_RRE MOVE PAGE
++ op_MVST uint32 = 0xB255 // FORMAT_RRE MOVE STRING
++ op_MVZ uint32 = 0xD300 // FORMAT_SS1 MOVE ZONES
++ op_MXBR uint32 = 0xB34C // FORMAT_RRE MULTIPLY (extended BFP)
++ op_MXD uint32 = 0x6700 // FORMAT_RX1 MULTIPLY (long to extended HFP)
++ op_MXDB uint32 = 0xED07 // FORMAT_RXE MULTIPLY (long to extended BFP)
++ op_MXDBR uint32 = 0xB307 // FORMAT_RRE MULTIPLY (long to extended BFP)
++ op_MXDR uint32 = 0x2700 // FORMAT_RR MULTIPLY (long to extended HFP)
++ op_MXR uint32 = 0x2600 // FORMAT_RR MULTIPLY (extended HFP)
++ op_MXTR uint32 = 0xB3D8 // FORMAT_RRF1 MULTIPLY (extended DFP)
++ op_MXTRA uint32 = 0xB3D8 // FORMAT_RRF1 MULTIPLY (extended DFP)
++ op_MY uint32 = 0xED3B // FORMAT_RXF MULTIPLY UNNORMALIZED (long to ext. HFP)
++ op_MYH uint32 = 0xED3D // FORMAT_RXF MULTIPLY UNNORM. (long to ext. high HFP)
++ op_MYHR uint32 = 0xB33D // FORMAT_RRD MULTIPLY UNNORM. (long to ext. high HFP)
++ op_MYL uint32 = 0xED39 // FORMAT_RXF MULTIPLY UNNORM. (long to ext. low HFP)
++ op_MYLR uint32 = 0xB339 // FORMAT_RRD MULTIPLY UNNORM. (long to ext. low HFP)
++ op_MYR uint32 = 0xB33B // FORMAT_RRD MULTIPLY UNNORMALIZED (long to ext. HFP)
++ op_N uint32 = 0x5400 // FORMAT_RX1 AND (32)
++ op_NC uint32 = 0xD400 // FORMAT_SS1 AND (character)
++ op_NG uint32 = 0xE380 // FORMAT_RXY1 AND (64)
++ op_NGR uint32 = 0xB980 // FORMAT_RRE AND (64)
++ op_NGRK uint32 = 0xB9E4 // FORMAT_RRF1 AND (64)
++ op_NI uint32 = 0x9400 // FORMAT_SI AND (immediate)
++ op_NIAI uint32 = 0xB2FA // FORMAT_IE NEXT INSTRUCTION ACCESS INTENT
++ op_NIHF uint32 = 0xC00A // FORMAT_RIL1 AND IMMEDIATE (high)
++ op_NIHH uint32 = 0xA504 // FORMAT_RI1 AND IMMEDIATE (high high)
++ op_NIHL uint32 = 0xA505 // FORMAT_RI1 AND IMMEDIATE (high low)
++ op_NILF uint32 = 0xC00B // FORMAT_RIL1 AND IMMEDIATE (low)
++ op_NILH uint32 = 0xA506 // FORMAT_RI1 AND IMMEDIATE (low high)
++ op_NILL uint32 = 0xA507 // FORMAT_RI1 AND IMMEDIATE (low low)
++ op_NIY uint32 = 0xEB54 // FORMAT_SIY AND (immediate)
++ op_NR uint32 = 0x1400 // FORMAT_RR AND (32)
++ op_NRK uint32 = 0xB9F4 // FORMAT_RRF1 AND (32)
++ op_NTSTG uint32 = 0xE325 // FORMAT_RXY1 NONTRANSACTIONAL STORE
++ op_NY uint32 = 0xE354 // FORMAT_RXY1 AND (32)
++ op_O uint32 = 0x5600 // FORMAT_RX1 OR (32)
++ op_OC uint32 = 0xD600 // FORMAT_SS1 OR (character)
++ op_OG uint32 = 0xE381 // FORMAT_RXY1 OR (64)
++ op_OGR uint32 = 0xB981 // FORMAT_RRE OR (64)
++ op_OGRK uint32 = 0xB9E6 // FORMAT_RRF1 OR (64)
++ op_OI uint32 = 0x9600 // FORMAT_SI OR (immediate)
++ op_OIHF uint32 = 0xC00C // FORMAT_RIL1 OR IMMEDIATE (high)
++ op_OIHH uint32 = 0xA508 // FORMAT_RI1 OR IMMEDIATE (high high)
++ op_OIHL uint32 = 0xA509 // FORMAT_RI1 OR IMMEDIATE (high low)
++ op_OILF uint32 = 0xC00D // FORMAT_RIL1 OR IMMEDIATE (low)
++ op_OILH uint32 = 0xA50A // FORMAT_RI1 OR IMMEDIATE (low high)
++ op_OILL uint32 = 0xA50B // FORMAT_RI1 OR IMMEDIATE (low low)
++ op_OIY uint32 = 0xEB56 // FORMAT_SIY OR (immediate)
++ op_OR uint32 = 0x1600 // FORMAT_RR OR (32)
++ op_ORK uint32 = 0xB9F6 // FORMAT_RRF1 OR (32)
++ op_OY uint32 = 0xE356 // FORMAT_RXY1 OR (32)
++ op_PACK uint32 = 0xF200 // FORMAT_SS2 PACK
++ op_PALB uint32 = 0xB248 // FORMAT_RRE PURGE ALB
++ op_PC uint32 = 0xB218 // FORMAT_S PROGRAM CALL
++ op_PCC uint32 = 0xB92C // FORMAT_RRE PERFORM CRYPTOGRAPHIC COMPUTATION
++ op_PCKMO uint32 = 0xB928 // FORMAT_RRE PERFORM CRYPTOGRAPHIC KEY MGMT. OPERATIONS
++ op_PFD uint32 = 0xE336 // FORMAT_RXY2 PREFETCH DATA
++ op_PFDRL uint32 = 0xC602 // FORMAT_RIL3 PREFETCH DATA RELATIVE LONG
++ op_PFMF uint32 = 0xB9AF // FORMAT_RRE PERFORM FRAME MANAGEMENT FUNCTION
++ op_PFPO uint32 = 0x010A // FORMAT_E PERFORM FLOATING-POINT OPERATION
++ op_PGIN uint32 = 0xB22E // FORMAT_RRE PAGE IN
++ op_PGOUT uint32 = 0xB22F // FORMAT_RRE PAGE OUT
++ op_PKA uint32 = 0xE900 // FORMAT_SS6 PACK ASCII
++ op_PKU uint32 = 0xE100 // FORMAT_SS6 PACK UNICODE
++ op_PLO uint32 = 0xEE00 // FORMAT_SS5 PERFORM LOCKED OPERATION
++ op_POPCNT uint32 = 0xB9E1 // FORMAT_RRE POPULATION COUNT
++ op_PPA uint32 = 0xB2E8 // FORMAT_RRF3 PERFORM PROCESSOR ASSIST
++ op_PR uint32 = 0x0101 // FORMAT_E PROGRAM RETURN
++ op_PT uint32 = 0xB228 // FORMAT_RRE PROGRAM TRANSFER
++ op_PTF uint32 = 0xB9A2 // FORMAT_RRE PERFORM TOPOLOGY FUNCTION
++ op_PTFF uint32 = 0x0104 // FORMAT_E PERFORM TIMING FACILITY FUNCTION
++ op_PTI uint32 = 0xB99E // FORMAT_RRE PROGRAM TRANSFER WITH INSTANCE
++ op_PTLB uint32 = 0xB20D // FORMAT_S PURGE TLB
++ op_QADTR uint32 = 0xB3F5 // FORMAT_RRF2 QUANTIZE (long DFP)
++ op_QAXTR uint32 = 0xB3FD // FORMAT_RRF2 QUANTIZE (extended DFP)
++ op_RCHP uint32 = 0xB23B // FORMAT_S RESET CHANNEL PATH
++ op_RISBG uint32 = 0xEC55 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS
++ op_RISBGN uint32 = 0xEC59 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS
++ op_RISBHG uint32 = 0xEC5D // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS HIGH
++ op_RISBLG uint32 = 0xEC51 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS LOW
++ op_RLL uint32 = 0xEB1D // FORMAT_RSY1 ROTATE LEFT SINGLE LOGICAL (32)
++ op_RLLG uint32 = 0xEB1C // FORMAT_RSY1 ROTATE LEFT SINGLE LOGICAL (64)
++ op_RNSBG uint32 = 0xEC54 // FORMAT_RIE6 ROTATE THEN AND SELECTED BITS
++ op_ROSBG uint32 = 0xEC56 // FORMAT_RIE6 ROTATE THEN OR SELECTED BITS
++ op_RP uint32 = 0xB277 // FORMAT_S RESUME PROGRAM
++ op_RRBE uint32 = 0xB22A // FORMAT_RRE RESET REFERENCE BIT EXTENDED
++ op_RRBM uint32 = 0xB9AE // FORMAT_RRE RESET REFERENCE BITS MULTIPLE
++ op_RRDTR uint32 = 0xB3F7 // FORMAT_RRF2 REROUND (long DFP)
++ op_RRXTR uint32 = 0xB3FF // FORMAT_RRF2 REROUND (extended DFP)
++ op_RSCH uint32 = 0xB238 // FORMAT_S RESUME SUBCHANNEL
++ op_RXSBG uint32 = 0xEC57 // FORMAT_RIE6 ROTATE THEN EXCLUSIVE OR SELECTED BITS
++ op_S uint32 = 0x5B00 // FORMAT_RX1 SUBTRACT (32)
++ op_SAC uint32 = 0xB219 // FORMAT_S SET ADDRESS SPACE CONTROL
++ op_SACF uint32 = 0xB279 // FORMAT_S SET ADDRESS SPACE CONTROL FAST
++ op_SAL uint32 = 0xB237 // FORMAT_S SET ADDRESS LIMIT
++ op_SAM24 uint32 = 0x010C // FORMAT_E SET ADDRESSING MODE (24)
++ op_SAM31 uint32 = 0x010D // FORMAT_E SET ADDRESSING MODE (31)
++ op_SAM64 uint32 = 0x010E // FORMAT_E SET ADDRESSING MODE (64)
++ op_SAR uint32 = 0xB24E // FORMAT_RRE SET ACCESS
++ op_SCHM uint32 = 0xB23C // FORMAT_S SET CHANNEL MONITOR
++ op_SCK uint32 = 0xB204 // FORMAT_S SET CLOCK
++ op_SCKC uint32 = 0xB206 // FORMAT_S SET CLOCK COMPARATOR
++ op_SCKPF uint32 = 0x0107 // FORMAT_E SET CLOCK PROGRAMMABLE FIELD
++ op_SD uint32 = 0x6B00 // FORMAT_RX1 SUBTRACT NORMALIZED (long HFP)
++ op_SDB uint32 = 0xED1B // FORMAT_RXE SUBTRACT (long BFP)
++ op_SDBR uint32 = 0xB31B // FORMAT_RRE SUBTRACT (long BFP)
++ op_SDR uint32 = 0x2B00 // FORMAT_RR SUBTRACT NORMALIZED (long HFP)
++ op_SDTR uint32 = 0xB3D3 // FORMAT_RRF1 SUBTRACT (long DFP)
++ op_SDTRA uint32 = 0xB3D3 // FORMAT_RRF1 SUBTRACT (long DFP)
++ op_SE uint32 = 0x7B00 // FORMAT_RX1 SUBTRACT NORMALIZED (short HFP)
++ op_SEB uint32 = 0xED0B // FORMAT_RXE SUBTRACT (short BFP)
++ op_SEBR uint32 = 0xB30B // FORMAT_RRE SUBTRACT (short BFP)
++ op_SER uint32 = 0x3B00 // FORMAT_RR SUBTRACT NORMALIZED (short HFP)
++ op_SFASR uint32 = 0xB385 // FORMAT_RRE SET FPC AND SIGNAL
++ op_SFPC uint32 = 0xB384 // FORMAT_RRE SET FPC
++ op_SG uint32 = 0xE309 // FORMAT_RXY1 SUBTRACT (64)
++ op_SGF uint32 = 0xE319 // FORMAT_RXY1 SUBTRACT (64<-32)
++ op_SGFR uint32 = 0xB919 // FORMAT_RRE SUBTRACT (64<-32)
++ op_SGR uint32 = 0xB909 // FORMAT_RRE SUBTRACT (64)
++ op_SGRK uint32 = 0xB9E9 // FORMAT_RRF1 SUBTRACT (64)
++ op_SH uint32 = 0x4B00 // FORMAT_RX1 SUBTRACT HALFWORD
++ op_SHHHR uint32 = 0xB9C9 // FORMAT_RRF1 SUBTRACT HIGH (32)
++ op_SHHLR uint32 = 0xB9D9 // FORMAT_RRF1 SUBTRACT HIGH (32)
++ op_SHY uint32 = 0xE37B // FORMAT_RXY1 SUBTRACT HALFWORD
++ op_SIGP uint32 = 0xAE00 // FORMAT_RS1 SIGNAL PROCESSOR
++ op_SL uint32 = 0x5F00 // FORMAT_RX1 SUBTRACT LOGICAL (32)
++ op_SLA uint32 = 0x8B00 // FORMAT_RS1 SHIFT LEFT SINGLE (32)
++ op_SLAG uint32 = 0xEB0B // FORMAT_RSY1 SHIFT LEFT SINGLE (64)
++ op_SLAK uint32 = 0xEBDD // FORMAT_RSY1 SHIFT LEFT SINGLE (32)
++ op_SLB uint32 = 0xE399 // FORMAT_RXY1 SUBTRACT LOGICAL WITH BORROW (32)
++ op_SLBG uint32 = 0xE389 // FORMAT_RXY1 SUBTRACT LOGICAL WITH BORROW (64)
++ op_SLBGR uint32 = 0xB989 // FORMAT_RRE SUBTRACT LOGICAL WITH BORROW (64)
++ op_SLBR uint32 = 0xB999 // FORMAT_RRE SUBTRACT LOGICAL WITH BORROW (32)
++ op_SLDA uint32 = 0x8F00 // FORMAT_RS1 SHIFT LEFT DOUBLE
++ op_SLDL uint32 = 0x8D00 // FORMAT_RS1 SHIFT LEFT DOUBLE LOGICAL
++ op_SLDT uint32 = 0xED40 // FORMAT_RXF SHIFT SIGNIFICAND LEFT (long DFP)
++ op_SLFI uint32 = 0xC205 // FORMAT_RIL1 SUBTRACT LOGICAL IMMEDIATE (32)
++ op_SLG uint32 = 0xE30B // FORMAT_RXY1 SUBTRACT LOGICAL (64)
++ op_SLGF uint32 = 0xE31B // FORMAT_RXY1 SUBTRACT LOGICAL (64<-32)
++ op_SLGFI uint32 = 0xC204 // FORMAT_RIL1 SUBTRACT LOGICAL IMMEDIATE (64<-32)
++ op_SLGFR uint32 = 0xB91B // FORMAT_RRE SUBTRACT LOGICAL (64<-32)
++ op_SLGR uint32 = 0xB90B // FORMAT_RRE SUBTRACT LOGICAL (64)
++ op_SLGRK uint32 = 0xB9EB // FORMAT_RRF1 SUBTRACT LOGICAL (64)
++ op_SLHHHR uint32 = 0xB9CB // FORMAT_RRF1 SUBTRACT LOGICAL HIGH (32)
++ op_SLHHLR uint32 = 0xB9DB // FORMAT_RRF1 SUBTRACT LOGICAL HIGH (32)
++ op_SLL uint32 = 0x8900 // FORMAT_RS1 SHIFT LEFT SINGLE LOGICAL (32)
++ op_SLLG uint32 = 0xEB0D // FORMAT_RSY1 SHIFT LEFT SINGLE LOGICAL (64)
++ op_SLLK uint32 = 0xEBDF // FORMAT_RSY1 SHIFT LEFT SINGLE LOGICAL (32)
++ op_SLR uint32 = 0x1F00 // FORMAT_RR SUBTRACT LOGICAL (32)
++ op_SLRK uint32 = 0xB9FB // FORMAT_RRF1 SUBTRACT LOGICAL (32)
++ op_SLXT uint32 = 0xED48 // FORMAT_RXF SHIFT SIGNIFICAND LEFT (extended DFP)
++ op_SLY uint32 = 0xE35F // FORMAT_RXY1 SUBTRACT LOGICAL (32)
++ op_SP uint32 = 0xFB00 // FORMAT_SS2 SUBTRACT DECIMAL
++ op_SPKA uint32 = 0xB20A // FORMAT_S SET PSW KEY FROM ADDRESS
++ op_SPM uint32 = 0x0400 // FORMAT_RR SET PROGRAM MASK
++ op_SPT uint32 = 0xB208 // FORMAT_S SET CPU TIMER
++ op_SPX uint32 = 0xB210 // FORMAT_S SET PREFIX
++ op_SQD uint32 = 0xED35 // FORMAT_RXE SQUARE ROOT (long HFP)
++ op_SQDB uint32 = 0xED15 // FORMAT_RXE SQUARE ROOT (long BFP)
++ op_SQDBR uint32 = 0xB315 // FORMAT_RRE SQUARE ROOT (long BFP)
++ op_SQDR uint32 = 0xB244 // FORMAT_RRE SQUARE ROOT (long HFP)
++ op_SQE uint32 = 0xED34 // FORMAT_RXE SQUARE ROOT (short HFP)
++ op_SQEB uint32 = 0xED14 // FORMAT_RXE SQUARE ROOT (short BFP)
++ op_SQEBR uint32 = 0xB314 // FORMAT_RRE SQUARE ROOT (short BFP)
++ op_SQER uint32 = 0xB245 // FORMAT_RRE SQUARE ROOT (short HFP)
++ op_SQXBR uint32 = 0xB316 // FORMAT_RRE SQUARE ROOT (extended BFP)
++ op_SQXR uint32 = 0xB336 // FORMAT_RRE SQUARE ROOT (extended HFP)
++ op_SR uint32 = 0x1B00 // FORMAT_RR SUBTRACT (32)
++ op_SRA uint32 = 0x8A00 // FORMAT_RS1 SHIFT RIGHT SINGLE (32)
++ op_SRAG uint32 = 0xEB0A // FORMAT_RSY1 SHIFT RIGHT SINGLE (64)
++ op_SRAK uint32 = 0xEBDC // FORMAT_RSY1 SHIFT RIGHT SINGLE (32)
++ op_SRDA uint32 = 0x8E00 // FORMAT_RS1 SHIFT RIGHT DOUBLE
++ op_SRDL uint32 = 0x8C00 // FORMAT_RS1 SHIFT RIGHT DOUBLE LOGICAL
++ op_SRDT uint32 = 0xED41 // FORMAT_RXF SHIFT SIGNIFICAND RIGHT (long DFP)
++ op_SRK uint32 = 0xB9F9 // FORMAT_RRF1 SUBTRACT (32)
++ op_SRL uint32 = 0x8800 // FORMAT_RS1 SHIFT RIGHT SINGLE LOGICAL (32)
++ op_SRLG uint32 = 0xEB0C // FORMAT_RSY1 SHIFT RIGHT SINGLE LOGICAL (64)
++ op_SRLK uint32 = 0xEBDE // FORMAT_RSY1 SHIFT RIGHT SINGLE LOGICAL (32)
++ op_SRNM uint32 = 0xB299 // FORMAT_S SET BFP ROUNDING MODE (2 bit)
++ op_SRNMB uint32 = 0xB2B8 // FORMAT_S SET BFP ROUNDING MODE (3 bit)
++ op_SRNMT uint32 = 0xB2B9 // FORMAT_S SET DFP ROUNDING MODE
++ op_SRP uint32 = 0xF000 // FORMAT_SS3 SHIFT AND ROUND DECIMAL
++ op_SRST uint32 = 0xB25E // FORMAT_RRE SEARCH STRING
++ op_SRSTU uint32 = 0xB9BE // FORMAT_RRE SEARCH STRING UNICODE
++ op_SRXT uint32 = 0xED49 // FORMAT_RXF SHIFT SIGNIFICAND RIGHT (extended DFP)
++ op_SSAIR uint32 = 0xB99F // FORMAT_RRE SET SECONDARY ASN WITH INSTANCE
++ op_SSAR uint32 = 0xB225 // FORMAT_RRE SET SECONDARY ASN
++ op_SSCH uint32 = 0xB233 // FORMAT_S START SUBCHANNEL
++ op_SSKE uint32 = 0xB22B // FORMAT_RRF3 SET STORAGE KEY EXTENDED
++ op_SSM uint32 = 0x8000 // FORMAT_S SET SYSTEM MASK
++ op_ST uint32 = 0x5000 // FORMAT_RX1 STORE (32)
++ op_STAM uint32 = 0x9B00 // FORMAT_RS1 STORE ACCESS MULTIPLE
++ op_STAMY uint32 = 0xEB9B // FORMAT_RSY1 STORE ACCESS MULTIPLE
++ op_STAP uint32 = 0xB212 // FORMAT_S STORE CPU ADDRESS
++ op_STC uint32 = 0x4200 // FORMAT_RX1 STORE CHARACTER
++ op_STCH uint32 = 0xE3C3 // FORMAT_RXY1 STORE CHARACTER HIGH (8)
++ op_STCK uint32 = 0xB205 // FORMAT_S STORE CLOCK
++ op_STCKC uint32 = 0xB207 // FORMAT_S STORE CLOCK COMPARATOR
++ op_STCKE uint32 = 0xB278 // FORMAT_S STORE CLOCK EXTENDED
++ op_STCKF uint32 = 0xB27C // FORMAT_S STORE CLOCK FAST
++ op_STCM uint32 = 0xBE00 // FORMAT_RS2 STORE CHARACTERS UNDER MASK (low)
++ op_STCMH uint32 = 0xEB2C // FORMAT_RSY2 STORE CHARACTERS UNDER MASK (high)
++ op_STCMY uint32 = 0xEB2D // FORMAT_RSY2 STORE CHARACTERS UNDER MASK (low)
++ op_STCPS uint32 = 0xB23A // FORMAT_S STORE CHANNEL PATH STATUS
++ op_STCRW uint32 = 0xB239 // FORMAT_S STORE CHANNEL REPORT WORD
++ op_STCTG uint32 = 0xEB25 // FORMAT_RSY1 STORE CONTROL (64)
++ op_STCTL uint32 = 0xB600 // FORMAT_RS1 STORE CONTROL (32)
++ op_STCY uint32 = 0xE372 // FORMAT_RXY1 STORE CHARACTER
++ op_STD uint32 = 0x6000 // FORMAT_RX1 STORE (long)
++ op_STDY uint32 = 0xED67 // FORMAT_RXY1 STORE (long)
++ op_STE uint32 = 0x7000 // FORMAT_RX1 STORE (short)
++ op_STEY uint32 = 0xED66 // FORMAT_RXY1 STORE (short)
++ op_STFH uint32 = 0xE3CB // FORMAT_RXY1 STORE HIGH (32)
++ op_STFL uint32 = 0xB2B1 // FORMAT_S STORE FACILITY LIST
++ op_STFLE uint32 = 0xB2B0 // FORMAT_S STORE FACILITY LIST EXTENDED
++ op_STFPC uint32 = 0xB29C // FORMAT_S STORE FPC
++ op_STG uint32 = 0xE324 // FORMAT_RXY1 STORE (64)
++ op_STGRL uint32 = 0xC40B // FORMAT_RIL2 STORE RELATIVE LONG (64)
++ op_STH uint32 = 0x4000 // FORMAT_RX1 STORE HALFWORD
++ op_STHH uint32 = 0xE3C7 // FORMAT_RXY1 STORE HALFWORD HIGH (16)
++ op_STHRL uint32 = 0xC407 // FORMAT_RIL2 STORE HALFWORD RELATIVE LONG
++ op_STHY uint32 = 0xE370 // FORMAT_RXY1 STORE HALFWORD
++ op_STIDP uint32 = 0xB202 // FORMAT_S STORE CPU ID
++ op_STM uint32 = 0x9000 // FORMAT_RS1 STORE MULTIPLE (32)
++ op_STMG uint32 = 0xEB24 // FORMAT_RSY1 STORE MULTIPLE (64)
++ op_STMH uint32 = 0xEB26 // FORMAT_RSY1 STORE MULTIPLE HIGH
++ op_STMY uint32 = 0xEB90 // FORMAT_RSY1 STORE MULTIPLE (32)
++ op_STNSM uint32 = 0xAC00 // FORMAT_SI STORE THEN AND SYSTEM MASK
++ op_STOC uint32 = 0xEBF3 // FORMAT_RSY2 STORE ON CONDITION (32)
++ op_STOCG uint32 = 0xEBE3 // FORMAT_RSY2 STORE ON CONDITION (64)
++ op_STOSM uint32 = 0xAD00 // FORMAT_SI STORE THEN OR SYSTEM MASK
++ op_STPQ uint32 = 0xE38E // FORMAT_RXY1 STORE PAIR TO QUADWORD
++ op_STPT uint32 = 0xB209 // FORMAT_S STORE CPU TIMER
++ op_STPX uint32 = 0xB211 // FORMAT_S STORE PREFIX
++ op_STRAG uint32 = 0xE502 // FORMAT_SSE STORE REAL ADDRESS
++ op_STRL uint32 = 0xC40F // FORMAT_RIL2 STORE RELATIVE LONG (32)
++ op_STRV uint32 = 0xE33E // FORMAT_RXY1 STORE REVERSED (32)
++ op_STRVG uint32 = 0xE32F // FORMAT_RXY1 STORE REVERSED (64)
++ op_STRVH uint32 = 0xE33F // FORMAT_RXY1 STORE REVERSED (16)
++ op_STSCH uint32 = 0xB234 // FORMAT_S STORE SUBCHANNEL
++ op_STSI uint32 = 0xB27D // FORMAT_S STORE SYSTEM INFORMATION
++ op_STURA uint32 = 0xB246 // FORMAT_RRE STORE USING REAL ADDRESS (32)
++ op_STURG uint32 = 0xB925 // FORMAT_RRE STORE USING REAL ADDRESS (64)
++ op_STY uint32 = 0xE350 // FORMAT_RXY1 STORE (32)
++ op_SU uint32 = 0x7F00 // FORMAT_RX1 SUBTRACT UNNORMALIZED (short HFP)
++ op_SUR uint32 = 0x3F00 // FORMAT_RR SUBTRACT UNNORMALIZED (short HFP)
++ op_SVC uint32 = 0x0A00 // FORMAT_I SUPERVISOR CALL
++ op_SW uint32 = 0x6F00 // FORMAT_RX1 SUBTRACT UNNORMALIZED (long HFP)
++ op_SWR uint32 = 0x2F00 // FORMAT_RR SUBTRACT UNNORMALIZED (long HFP)
++ op_SXBR uint32 = 0xB34B // FORMAT_RRE SUBTRACT (extended BFP)
++ op_SXR uint32 = 0x3700 // FORMAT_RR SUBTRACT NORMALIZED (extended HFP)
++ op_SXTR uint32 = 0xB3DB // FORMAT_RRF1 SUBTRACT (extended DFP)
++ op_SXTRA uint32 = 0xB3DB // FORMAT_RRF1 SUBTRACT (extended DFP)
++ op_SY uint32 = 0xE35B // FORMAT_RXY1 SUBTRACT (32)
++ op_TABORT uint32 = 0xB2FC // FORMAT_S TRANSACTION ABORT
++ op_TAM uint32 = 0x010B // FORMAT_E TEST ADDRESSING MODE
++ op_TAR uint32 = 0xB24C // FORMAT_RRE TEST ACCESS
++ op_TB uint32 = 0xB22C // FORMAT_RRE TEST BLOCK
++ op_TBDR uint32 = 0xB351 // FORMAT_RRF5 CONVERT HFP TO BFP (long)
++ op_TBEDR uint32 = 0xB350 // FORMAT_RRF5 CONVERT HFP TO BFP (long to short)
++ op_TBEGIN uint32 = 0xE560 // FORMAT_SIL TRANSACTION BEGIN
++ op_TBEGINC uint32 = 0xE561 // FORMAT_SIL TRANSACTION BEGIN
++ op_TCDB uint32 = 0xED11 // FORMAT_RXE TEST DATA CLASS (long BFP)
++ op_TCEB uint32 = 0xED10 // FORMAT_RXE TEST DATA CLASS (short BFP)
++ op_TCXB uint32 = 0xED12 // FORMAT_RXE TEST DATA CLASS (extended BFP)
++ op_TDCDT uint32 = 0xED54 // FORMAT_RXE TEST DATA CLASS (long DFP)
++ op_TDCET uint32 = 0xED50 // FORMAT_RXE TEST DATA CLASS (short DFP)
++ op_TDCXT uint32 = 0xED58 // FORMAT_RXE TEST DATA CLASS (extended DFP)
++ op_TDGDT uint32 = 0xED55 // FORMAT_RXE TEST DATA GROUP (long DFP)
++ op_TDGET uint32 = 0xED51 // FORMAT_RXE TEST DATA GROUP (short DFP)
++ op_TDGXT uint32 = 0xED59 // FORMAT_RXE TEST DATA GROUP (extended DFP)
++ op_TEND uint32 = 0xB2F8 // FORMAT_S TRANSACTION END
++ op_THDER uint32 = 0xB358 // FORMAT_RRE CONVERT BFP TO HFP (short to long)
++ op_THDR uint32 = 0xB359 // FORMAT_RRE CONVERT BFP TO HFP (long)
++ op_TM uint32 = 0x9100 // FORMAT_SI TEST UNDER MASK
++ op_TMH uint32 = 0xA700 // FORMAT_RI1 TEST UNDER MASK HIGH
++ op_TMHH uint32 = 0xA702 // FORMAT_RI1 TEST UNDER MASK (high high)
++ op_TMHL uint32 = 0xA703 // FORMAT_RI1 TEST UNDER MASK (high low)
++ op_TML uint32 = 0xA701 // FORMAT_RI1 TEST UNDER MASK LOW
++ op_TMLH uint32 = 0xA700 // FORMAT_RI1 TEST UNDER MASK (low high)
++ op_TMLL uint32 = 0xA701 // FORMAT_RI1 TEST UNDER MASK (low low)
++ op_TMY uint32 = 0xEB51 // FORMAT_SIY TEST UNDER MASK
++ op_TP uint32 = 0xEBC0 // FORMAT_RSL TEST DECIMAL
++ op_TPI uint32 = 0xB236 // FORMAT_S TEST PENDING INTERRUPTION
++ op_TPROT uint32 = 0xE501 // FORMAT_SSE TEST PROTECTION
++ op_TR uint32 = 0xDC00 // FORMAT_SS1 TRANSLATE
++ op_TRACE uint32 = 0x9900 // FORMAT_RS1 TRACE (32)
++ op_TRACG uint32 = 0xEB0F // FORMAT_RSY1 TRACE (64)
++ op_TRAP2 uint32 = 0x01FF // FORMAT_E TRAP
++ op_TRAP4 uint32 = 0xB2FF // FORMAT_S TRAP
++ op_TRE uint32 = 0xB2A5 // FORMAT_RRE TRANSLATE EXTENDED
++ op_TROO uint32 = 0xB993 // FORMAT_RRF3 TRANSLATE ONE TO ONE
++ op_TROT uint32 = 0xB992 // FORMAT_RRF3 TRANSLATE ONE TO TWO
++ op_TRT uint32 = 0xDD00 // FORMAT_SS1 TRANSLATE AND TEST
++ op_TRTE uint32 = 0xB9BF // FORMAT_RRF3 TRANSLATE AND TEST EXTENDED
++ op_TRTO uint32 = 0xB991 // FORMAT_RRF3 TRANSLATE TWO TO ONE
++ op_TRTR uint32 = 0xD000 // FORMAT_SS1 TRANSLATE AND TEST REVERSE
++ op_TRTRE uint32 = 0xB9BD // FORMAT_RRF3 TRANSLATE AND TEST REVERSE EXTENDED
++ op_TRTT uint32 = 0xB990 // FORMAT_RRF3 TRANSLATE TWO TO TWO
++ op_TS uint32 = 0x9300 // FORMAT_S TEST AND SET
++ op_TSCH uint32 = 0xB235 // FORMAT_S TEST SUBCHANNEL
++ op_UNPK uint32 = 0xF300 // FORMAT_SS2 UNPACK
++ op_UNPKA uint32 = 0xEA00 // FORMAT_SS1 UNPACK ASCII
++ op_UNPKU uint32 = 0xE200 // FORMAT_SS1 UNPACK UNICODE
++ op_UPT uint32 = 0x0102 // FORMAT_E UPDATE TREE
++ op_X uint32 = 0x5700 // FORMAT_RX1 EXCLUSIVE OR (32)
++ op_XC uint32 = 0xD700 // FORMAT_SS1 EXCLUSIVE OR (character)
++ op_XG uint32 = 0xE382 // FORMAT_RXY1 EXCLUSIVE OR (64)
++ op_XGR uint32 = 0xB982 // FORMAT_RRE EXCLUSIVE OR (64)
++ op_XGRK uint32 = 0xB9E7 // FORMAT_RRF1 EXCLUSIVE OR (64)
++ op_XI uint32 = 0x9700 // FORMAT_SI EXCLUSIVE OR (immediate)
++ op_XIHF uint32 = 0xC006 // FORMAT_RIL1 EXCLUSIVE OR IMMEDIATE (high)
++ op_XILF uint32 = 0xC007 // FORMAT_RIL1 EXCLUSIVE OR IMMEDIATE (low)
++ op_XIY uint32 = 0xEB57 // FORMAT_SIY EXCLUSIVE OR (immediate)
++ op_XR uint32 = 0x1700 // FORMAT_RR EXCLUSIVE OR (32)
++ op_XRK uint32 = 0xB9F7 // FORMAT_RRF1 EXCLUSIVE OR (32)
++ op_XSCH uint32 = 0xB276 // FORMAT_S CANCEL SUBCHANNEL
++ op_XY uint32 = 0xE357 // FORMAT_RXY1 EXCLUSIVE OR (32)
++ op_ZAP uint32 = 0xF800 // FORMAT_SS2 ZERO AND ADD
++
++ // added in z13
++ op_CXPT uint32 = 0xEDAF // RSL-b CONVERT FROM PACKED (to extended DFP)
++ op_CDPT uint32 = 0xEDAE // RSL-b CONVERT FROM PACKED (to long DFP)
++ op_CPXT uint32 = 0xEDAD // RSL-b CONVERT TO PACKED (from extended DFP)
++ op_CPDT uint32 = 0xEDAC // RSL-b CONVERT TO PACKED (from long DFP)
++ op_LZRF uint32 = 0xE33B // RXY-a LOAD AND ZERO RIGHTMOST BYTE (32)
++ op_LZRG uint32 = 0xE32A // RXY-a LOAD AND ZERO RIGHTMOST BYTE (64)
++ op_LCCB uint32 = 0xE727 // RXE LOAD COUNT TO BLOCK BOUNDARY
++ op_LOCHHI uint32 = 0xEC4E // RIE-g LOAD HALFWORD HIGH IMMEDIATE ON CONDITION (32←16)
++ op_LOCHI uint32 = 0xEC42 // RIE-g LOAD HALFWORD IMMEDIATE ON CONDITION (32←16)
++ op_LOCGHI uint32 = 0xEC46 // RIE-g LOAD HALFWORD IMMEDIATE ON CONDITION (64←16)
++ op_LOCFH uint32 = 0xEBE0 // RSY-b LOAD HIGH ON CONDITION (32)
++ op_LOCFHR uint32 = 0xB9E0 // RRF-c LOAD HIGH ON CONDITION (32)
++ op_LLZRGF uint32 = 0xE33A // RXY-a LOAD LOGICAL AND ZERO RIGHTMOST BYTE (64←32)
++ op_STOCFH uint32 = 0xEBE1 // RSY-b STORE HIGH ON CONDITION
++ op_VA uint32 = 0xE7F3 // VRR-c VECTOR ADD
++ op_VACC uint32 = 0xE7F1 // VRR-c VECTOR ADD COMPUTE CARRY
++ op_VAC uint32 = 0xE7BB // VRR-d VECTOR ADD WITH CARRY
++ op_VACCC uint32 = 0xE7B9 // VRR-d VECTOR ADD WITH CARRY COMPUTE CARRY
++ op_VN uint32 = 0xE768 // VRR-c VECTOR AND
++ op_VNC uint32 = 0xE769 // VRR-c VECTOR AND WITH COMPLEMENT
++ op_VAVG uint32 = 0xE7F2 // VRR-c VECTOR AVERAGE
++ op_VAVGL uint32 = 0xE7F0 // VRR-c VECTOR AVERAGE LOGICAL
++ op_VCKSM uint32 = 0xE766 // VRR-c VECTOR CHECKSUM
++ op_VCEQ uint32 = 0xE7F8 // VRR-b VECTOR COMPARE EQUAL
++ op_VCH uint32 = 0xE7FB // VRR-b VECTOR COMPARE HIGH
++ op_VCHL uint32 = 0xE7F9 // VRR-b VECTOR COMPARE HIGH LOGICAL
++ op_VCLZ uint32 = 0xE753 // VRR-a VECTOR COUNT LEADING ZEROS
++ op_VCTZ uint32 = 0xE752 // VRR-a VECTOR COUNT TRAILING ZEROS
++ op_VEC uint32 = 0xE7DB // VRR-a VECTOR ELEMENT COMPARE
++ op_VECL uint32 = 0xE7D9 // VRR-a VECTOR ELEMENT COMPARE LOGICAL
++ op_VERIM uint32 = 0xE772 // VRI-d VECTOR ELEMENT ROTATE AND INSERT UNDER MASK
++ op_VERLL uint32 = 0xE733 // VRS-a VECTOR ELEMENT ROTATE LEFT LOGICAL
++ op_VERLLV uint32 = 0xE773 // VRR-c VECTOR ELEMENT ROTATE LEFT LOGICAL
++ op_VESLV uint32 = 0xE770 // VRR-c VECTOR ELEMENT SHIFT LEFT
++ op_VESL uint32 = 0xE730 // VRS-a VECTOR ELEMENT SHIFT LEFT
++ op_VESRA uint32 = 0xE73A // VRS-a VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
++ op_VESRAV uint32 = 0xE77A // VRR-c VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
++ op_VESRL uint32 = 0xE738 // VRS-a VECTOR ELEMENT SHIFT RIGHT LOGICAL
++ op_VESRLV uint32 = 0xE778 // VRR-c VECTOR ELEMENT SHIFT RIGHT LOGICAL
++ op_VX uint32 = 0xE76D // VRR-c VECTOR EXCLUSIVE OR
++ op_VFAE uint32 = 0xE782 // VRR-b VECTOR FIND ANY ELEMENT EQUAL
++ op_VFEE uint32 = 0xE780 // VRR-b VECTOR FIND ELEMENT EQUAL
++ op_VFENE uint32 = 0xE781 // VRR-b VECTOR FIND ELEMENT NOT EQUAL
++ op_VFA uint32 = 0xE7E3 // VRR-c VECTOR FP ADD
++ op_WFK uint32 = 0xE7CA // VRR-a VECTOR FP COMPARE AND SIGNAL SCALAR
++ op_VFCE uint32 = 0xE7E8 // VRR-c VECTOR FP COMPARE EQUAL
++ op_VFCH uint32 = 0xE7EB // VRR-c VECTOR FP COMPARE HIGH
++ op_VFCHE uint32 = 0xE7EA // VRR-c VECTOR FP COMPARE HIGH OR EQUAL
++ op_WFC uint32 = 0xE7CB // VRR-a VECTOR FP COMPARE SCALAR
++ op_VCDG uint32 = 0xE7C3 // VRR-a VECTOR FP CONVERT FROM FIXED 64-BIT
++ op_VCDLG uint32 = 0xE7C1 // VRR-a VECTOR FP CONVERT FROM LOGICAL 64-BIT
++ op_VCGD uint32 = 0xE7C2 // VRR-a VECTOR FP CONVERT TO FIXED 64-BIT
++ op_VCLGD uint32 = 0xE7C0 // VRR-a VECTOR FP CONVERT TO LOGICAL 64-BIT
++ op_VFD uint32 = 0xE7E5 // VRR-c VECTOR FP DIVIDE
++ op_VLDE uint32 = 0xE7C4 // VRR-a VECTOR FP LOAD LENGTHENED
++ op_VLED uint32 = 0xE7C5 // VRR-a VECTOR FP LOAD ROUNDED
++ op_VFM uint32 = 0xE7E7 // VRR-c VECTOR FP MULTIPLY
++ op_VFMA uint32 = 0xE78F // VRR-e VECTOR FP MULTIPLY AND ADD
++ op_VFMS uint32 = 0xE78E // VRR-e VECTOR FP MULTIPLY AND SUBTRACT
++ op_VFPSO uint32 = 0xE7CC // VRR-a VECTOR FP PERFORM SIGN OPERATION
++ op_VFSQ uint32 = 0xE7CE // VRR-a VECTOR FP SQUARE ROOT
++ op_VFS uint32 = 0xE7E2 // VRR-c VECTOR FP SUBTRACT
++ op_VFTCI uint32 = 0xE74A // VRI-e VECTOR FP TEST DATA CLASS IMMEDIATE
++ op_VGFM uint32 = 0xE7B4 // VRR-c VECTOR GALOIS FIELD MULTIPLY SUM
++ op_VGFMA uint32 = 0xE7BC // VRR-d VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
++ op_VGEF uint32 = 0xE713 // VRV VECTOR GATHER ELEMENT (32)
++ op_VGEG uint32 = 0xE712 // VRV VECTOR GATHER ELEMENT (64)
++ op_VGBM uint32 = 0xE744 // VRI-a VECTOR GENERATE BYTE MASK
++ op_VGM uint32 = 0xE746 // VRI-b VECTOR GENERATE MASK
++ op_VISTR uint32 = 0xE75C // VRR-a VECTOR ISOLATE STRING
++ op_VL uint32 = 0xE706 // VRX VECTOR LOAD
++ op_VLR uint32 = 0xE756 // VRR-a VECTOR LOAD
++ op_VLREP uint32 = 0xE705 // VRX VECTOR LOAD AND REPLICATE
++ op_VLC uint32 = 0xE7DE // VRR-a VECTOR LOAD COMPLEMENT
++ op_VLEH uint32 = 0xE701 // VRX VECTOR LOAD ELEMENT (16)
++ op_VLEF uint32 = 0xE703 // VRX VECTOR LOAD ELEMENT (32)
++ op_VLEG uint32 = 0xE702 // VRX VECTOR LOAD ELEMENT (64)
++ op_VLEB uint32 = 0xE700 // VRX VECTOR LOAD ELEMENT (8)
++ op_VLEIH uint32 = 0xE741 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (16)
++ op_VLEIF uint32 = 0xE743 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (32)
++ op_VLEIG uint32 = 0xE742 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (64)
++ op_VLEIB uint32 = 0xE740 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (8)
++ op_VFI uint32 = 0xE7C7 // VRR-a VECTOR LOAD FP INTEGER
++ op_VLGV uint32 = 0xE721 // VRS-c VECTOR LOAD GR FROM VR ELEMENT
++ op_VLLEZ uint32 = 0xE704 // VRX VECTOR LOAD LOGICAL ELEMENT AND ZERO
++ op_VLM uint32 = 0xE736 // VRS-a VECTOR LOAD MULTIPLE
++ op_VLP uint32 = 0xE7DF // VRR-a VECTOR LOAD POSITIVE
++ op_VLBB uint32 = 0xE707 // VRX VECTOR LOAD TO BLOCK BOUNDARY
++ op_VLVG uint32 = 0xE722 // VRS-b VECTOR LOAD VR ELEMENT FROM GR
++ op_VLVGP uint32 = 0xE762 // VRR-f VECTOR LOAD VR FROM GRS DISJOINT
++ op_VLL uint32 = 0xE737 // VRS-b VECTOR LOAD WITH LENGTH
++ op_VMX uint32 = 0xE7FF // VRR-c VECTOR MAXIMUM
++ op_VMXL uint32 = 0xE7FD // VRR-c VECTOR MAXIMUM LOGICAL
++ op_VMRH uint32 = 0xE761 // VRR-c VECTOR MERGE HIGH
++ op_VMRL uint32 = 0xE760 // VRR-c VECTOR MERGE LOW
++ op_VMN uint32 = 0xE7FE // VRR-c VECTOR MINIMUM
++ op_VMNL uint32 = 0xE7FC // VRR-c VECTOR MINIMUM LOGICAL
++ op_VMAE uint32 = 0xE7AE // VRR-d VECTOR MULTIPLY AND ADD EVEN
++ op_VMAH uint32 = 0xE7AB // VRR-d VECTOR MULTIPLY AND ADD HIGH
++ op_VMALE uint32 = 0xE7AC // VRR-d VECTOR MULTIPLY AND ADD LOGICAL EVEN
++ op_VMALH uint32 = 0xE7A9 // VRR-d VECTOR MULTIPLY AND ADD LOGICAL HIGH
++ op_VMALO uint32 = 0xE7AD // VRR-d VECTOR MULTIPLY AND ADD LOGICAL ODD
++ op_VMAL uint32 = 0xE7AA // VRR-d VECTOR MULTIPLY AND ADD LOW
++ op_VMAO uint32 = 0xE7AF // VRR-d VECTOR MULTIPLY AND ADD ODD
++ op_VME uint32 = 0xE7A6 // VRR-c VECTOR MULTIPLY EVEN
++ op_VMH uint32 = 0xE7A3 // VRR-c VECTOR MULTIPLY HIGH
++ op_VMLE uint32 = 0xE7A4 // VRR-c VECTOR MULTIPLY EVEN LOGICAL
++ op_VMLH uint32 = 0xE7A1 // VRR-c VECTOR MULTIPLY HIGH LOGICAL
++ op_VMLO uint32 = 0xE7A5 // VRR-c VECTOR MULTIPLY ODD LOGICAL
++ op_VML uint32 = 0xE7A2 // VRR-c VECTOR MULTIPLY LOW
++ op_VMO uint32 = 0xE7A7 // VRR-c VECTOR MULTIPLY ODD
++ op_VNO uint32 = 0xE76B // VRR-c VECTOR NOR
++ op_VO uint32 = 0xE76A // VRR-c VECTOR OR
++ op_VPK uint32 = 0xE794 // VRR-c VECTOR PACK
++ op_VPKLS uint32 = 0xE795 // VRR-b VECTOR PACK LOGICAL SATURATE
++ op_VPKS uint32 = 0xE797 // VRR-b VECTOR PACK SATURATE
++ op_VPERM uint32 = 0xE78C // VRR-e VECTOR PERMUTE
++ op_VPDI uint32 = 0xE784 // VRR-c VECTOR PERMUTE DOUBLEWORD IMMEDIATE
++ op_VPOPCT uint32 = 0xE750 // VRR-a VECTOR POPULATION COUNT
++ op_VREP uint32 = 0xE74D // VRI-c VECTOR REPLICATE
++ op_VREPI uint32 = 0xE745 // VRI-a VECTOR REPLICATE IMMEDIATE
++ op_VSCEF uint32 = 0xE71B // VRV VECTOR SCATTER ELEMENT (32)
++ op_VSCEG uint32 = 0xE71A // VRV VECTOR SCATTER ELEMENT (64)
++ op_VSEL uint32 = 0xE78D // VRR-e VECTOR SELECT
++ op_VSL uint32 = 0xE774 // VRR-c VECTOR SHIFT LEFT
++ op_VSLB uint32 = 0xE775 // VRR-c VECTOR SHIFT LEFT BY BYTE
++ op_VSLDB uint32 = 0xE777 // VRI-d VECTOR SHIFT LEFT DOUBLE BY BYTE
++ op_VSRA uint32 = 0xE77E // VRR-c VECTOR SHIFT RIGHT ARITHMETIC
++ op_VSRAB uint32 = 0xE77F // VRR-c VECTOR SHIFT RIGHT ARITHMETIC BY BYTE
++ op_VSRL uint32 = 0xE77C // VRR-c VECTOR SHIFT RIGHT LOGICAL
++ op_VSRLB uint32 = 0xE77D // VRR-c VECTOR SHIFT RIGHT LOGICAL BY BYTE
++ op_VSEG uint32 = 0xE75F // VRR-a VECTOR SIGN EXTEND TO DOUBLEWORD
++ op_VST uint32 = 0xE70E // VRX VECTOR STORE
++ op_VSTEH uint32 = 0xE709 // VRX VECTOR STORE ELEMENT (16)
++ op_VSTEF uint32 = 0xE70B // VRX VECTOR STORE ELEMENT (32)
++ op_VSTEG uint32 = 0xE70A // VRX VECTOR STORE ELEMENT (64)
++ op_VSTEB uint32 = 0xE708 // VRX VECTOR STORE ELEMENT (8)
++ op_VSTM uint32 = 0xE73E // VRS-a VECTOR STORE MULTIPLE
++ op_VSTL uint32 = 0xE73F // VRS-b VECTOR STORE WITH LENGTH
++ op_VSTRC uint32 = 0xE78A // VRR-d VECTOR STRING RANGE COMPARE
++ op_VS uint32 = 0xE7F7 // VRR-c VECTOR SUBTRACT
++ op_VSCBI uint32 = 0xE7F5 // VRR-c VECTOR SUBTRACT COMPUTE BORROW INDICATION
++ op_VSBCBI uint32 = 0xE7BD // VRR-d VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION
++ op_VSBI uint32 = 0xE7BF // VRR-d VECTOR SUBTRACT WITH BORROW INDICATION
++ op_VSUMG uint32 = 0xE765 // VRR-c VECTOR SUM ACROSS DOUBLEWORD
++ op_VSUMQ uint32 = 0xE767 // VRR-c VECTOR SUM ACROSS QUADWORD
++ op_VSUM uint32 = 0xE764 // VRR-c VECTOR SUM ACROSS WORD
++ op_VTM uint32 = 0xE7D8 // VRR-a VECTOR TEST UNDER MASK
++ op_VUPH uint32 = 0xE7D7 // VRR-a VECTOR UNPACK HIGH
++ op_VUPLH uint32 = 0xE7D5 // VRR-a VECTOR UNPACK LOGICAL HIGH
++ op_VUPLL uint32 = 0xE7D4 // VRR-a VECTOR UNPACK LOGICAL LOW
++ op_VUPL uint32 = 0xE7D6 // VRR-a VECTOR UNPACK LOW
++)
++
++func oclass(a *obj.Addr) int {
++ return int(a.Class) - 1
++}
++
++// Add a relocation for the immediate in a RIL style instruction.
++// The addend will be adjusted as required.
++func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
++ if sym == nil {
++ ctxt.Diag("require symbol to apply relocation")
++ }
++ offset := int64(2) // relocation offset from start of instruction
++ rel := obj.Addrel(ctxt.Cursym)
++ rel.Off = int32(ctxt.Pc + offset)
++ rel.Siz = 4
++ rel.Sym = sym
++ rel.Add = add + offset + int64(rel.Siz)
++ rel.Type = obj.R_PCRELDBL
++ return rel
++}
++
++func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Reloc {
++ if sym == nil {
++ ctxt.Diag("require symbol to apply relocation")
++ }
++ offset += int64(2) // relocation offset from start of instruction
++ rel := obj.Addrel(ctxt.Cursym)
++ rel.Off = int32(ctxt.Pc + offset)
++ rel.Siz = 4
++ rel.Sym = sym
++ rel.Add = add + offset + int64(rel.Siz)
++ rel.Type = obj.R_PCRELDBL
++ return rel
++}
++
++// Add a CALL relocation for the immediate in a RIL style instruction.
++// The addend will be adjusted as required.
++func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
++ if sym == nil {
++ ctxt.Diag("require symbol to apply relocation")
++ }
++ offset := int64(2) // relocation offset from start of instruction
++ rel := obj.Addrel(ctxt.Cursym)
++ rel.Off = int32(ctxt.Pc + offset)
++ rel.Siz = 4
++ rel.Sym = sym
++ rel.Add = add + offset + int64(rel.Siz)
++ rel.Type = obj.R_CALL
++ return rel
++}
++
++func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 {
++ switch p.As {
++ case ABEQ, ACMPBEQ, ACMPUBEQ:
++ return 0x8
++ case ABGE, ACMPBGE, ACMPUBGE:
++ return 0xA
++ case ABGT, ACMPBGT, ACMPUBGT:
++ return 0x2
++ case ABLE, ACMPBLE, ACMPUBLE:
++ return 0xC
++ case ABLT, ACMPBLT, ACMPUBLT:
++ return 0x4
++ case ABNE, ACMPBNE, ACMPUBNE:
++ return 0x7
++ case ABVC:
++ return 0x0 //needs extra instruction
++ case ABVS:
++ return 0x1
++ }
++ ctxt.Diag("unknown conditional branch %v", p.As)
++ return 0xF
++}
++
++func asmout(ctxt *obj.Link, asm *[]byte) {
++ p := ctxt.Curp
++ o := oplook(ctxt, p)
++ ctxt.Printp = p
++
++ switch o.type_ {
++ default:
++ ctxt.Diag("unknown type %d", o.type_)
++
++ case 0: // PSEUDO OPS
++ break
++
++ case 1: // MOV REG TO REG
++ switch p.As {
++ default:
++ ctxt.Diag("unhandled operation: %v", p.As)
++ case AMOVD:
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ // sign extend
++ case AMOVW:
++ zRRE(op_LGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ case AMOVH:
++ zRRE(op_LGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ case AMOVB:
++ zRRE(op_LGBR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ // zero extend
++ case AMOVWZ:
++ zRRE(op_LLGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ case AMOVHZ:
++ zRRE(op_LLGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ case AMOVBZ:
++ zRRE(op_LLGCR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ // reverse bytes
++ case AMOVDBR:
++ zRRE(op_LRVGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ case AMOVWBR:
++ zRRE(op_LRVR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ // floating point
++ case AFMOVD, AFMOVS:
++ zRR(op_LDR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case 2: /* int/cr/fp op Rb,[Ra],Rd */
++ r := int(p.Reg)
++ if r == 0 {
++ r = int(p.To.Reg)
++ }
++
++ var opcode uint32
++
++ switch p.As {
++ default:
++ ctxt.Diag("invalid opcode")
++ case AADD:
++ opcode = op_AGRK
++ case AADDC:
++ opcode = op_ALGRK
++ case AADDE:
++ opcode = op_ALCGR
++ case AMULLW:
++ opcode = op_MSGFR
++ case AMULLD:
++ opcode = op_MSGR
++ case AMULHDU:
++ opcode = op_MLGR
++ case ADIVW:
++ opcode = op_DSGFR
++ case ADIVWU:
++ opcode = op_DLR
++ case ADIVD:
++ opcode = op_DSGR
++ case ADIVDU:
++ opcode = op_DLGR
++ case AFADD:
++ opcode = op_ADBR
++ case AFADDS:
++ opcode = op_AEBR
++ case AFSUB:
++ opcode = op_SDBR
++ case AFSUBS:
++ opcode = op_SEBR
++ case AFDIV:
++ opcode = op_DDBR
++ case AFDIVS:
++ opcode = op_DEBR
++ }
++
++ switch p.As {
++ default:
++
++ case AADD, AADDC:
++ zRRF(opcode, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
++
++ case AADDE, AMULLW, AMULLD:
++ if r == int(p.To.Reg) {
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else if p.From.Reg == p.To.Reg {
++ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
++ } else {
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case ADIVW, ADIVWU, ADIVD, ADIVDU:
++ if p.As == ADIVWU || p.As == ADIVDU {
++ zRRE(op_LGR, REGTMP, REGZERO, asm)
++ }
++ zRRE(op_LGR, REGTMP2, uint32(r), asm)
++ zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
++ zRRE(op_LGR, uint32(p.To.Reg), REGTMP2, asm)
++
++ case AMULHDU:
++ zRRE(op_LGR, REGTMP2, uint32(r), asm)
++ zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
++ zRRE(op_LGR, uint32(p.To.Reg), REGTMP, asm)
++
++ case AFADD, AFADDS:
++ if r == int(p.To.Reg) {
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else if p.From.Reg == p.To.Reg {
++ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
++ } else {
++ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case AFSUB, AFSUBS, AFDIV, AFDIVS:
++ if r == int(p.To.Reg) {
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else if p.From.Reg == p.To.Reg {
++ zRRE(op_LGDR, REGTMP, uint32(r), asm)
++ zRRE(opcode, uint32(r), uint32(p.From.Reg), asm)
++ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(op_LDGR, uint32(r), REGTMP, asm)
++ } else {
++ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ }
++
++ case 3: // MOV CONSTANT TO REG
++ v := vregoff(ctxt, &p.From)
++ switch p.As {
++ case AMOVBZ:
++ v = int64(uint8(v))
++ case AMOVHZ:
++ v = int64(uint16(v))
++ case AMOVWZ:
++ v = int64(uint32(v))
++ case AMOVB:
++ v = int64(int8(v))
++ case AMOVH:
++ v = int64(int16(v))
++ case AMOVW:
++ v = int64(int32(v))
++ }
++ if v&0xffff == v {
++ zRI(op_LLILL, uint32(p.To.Reg), uint32(v), asm)
++ } else if v&0xffff0000 == v {
++ zRI(op_LLILH, uint32(p.To.Reg), uint32(v>>16), asm)
++ } else if v&0xffff00000000 == v {
++ zRI(op_LLIHL, uint32(p.To.Reg), uint32(v>>32), asm)
++ } else if uint64(v)&0xffff000000000000 == uint64(v) {
++ zRI(op_LLIHH, uint32(p.To.Reg), uint32(v>>48), asm)
++ } else if int64(int16(v)) == v {
++ zRI(op_LGHI, uint32(p.To.Reg), uint32(v), asm)
++ } else if int64(int32(v)) == v {
++ zRIL(a, op_LGFI, uint32(p.To.Reg), uint32(v), asm)
++ } else if int64(uint32(v)) == v {
++ zRIL(a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
++ } else if uint64(v)&0xffffffff00000000 == uint64(v) {
++ zRIL(a, op_LLIHF, uint32(p.To.Reg), uint32(v>>32), asm)
++ } else {
++ zRIL(a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
++ zRIL(a, op_IIHF, uint32(p.To.Reg), uint32(v>>32), asm)
++ }
++
++ case 5: /* syscall */ // This might be right, assuming SVC is the same as Power's SC
++ zI(op_SVC, 0, asm)
++
++ case 6: /* logical op Rb,[Rs,]Ra; no literal */
++ if p.To.Reg == 0 {
++ ctxt.Diag("literal operation on R0\n%v", p)
++ }
++
++ switch p.As {
++ case AAND, AOR, AXOR:
++ var opcode1, opcode2 uint32
++ switch p.As {
++ default:
++ case AAND:
++ opcode1 = op_NGR
++ opcode2 = op_NGRK
++ case AOR:
++ opcode1 = op_OGR
++ opcode2 = op_OGRK
++ case AXOR:
++ opcode1 = op_XGR
++ opcode2 = op_XGRK
++ }
++
++ r := int(p.Reg)
++ if r == 0 {
++ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else {
++ zRRF(opcode2, uint32(r), 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case AANDN, AORN:
++ var opcode1, opcode2 uint32
++ switch p.As {
++ default:
++ case AANDN:
++ opcode1 = op_NGR
++ opcode2 = op_NGRK
++ case AORN:
++ opcode1 = op_OGR
++ opcode2 = op_OGRK
++ }
++
++ r := int(p.Reg)
++ if r == 0 {
++ zRRE(op_LCGR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
++ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else {
++ zRRE(op_LCGR, REGTMP, uint32(r), asm)
++ zRRF(opcode2, REGTMP, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case ANAND, ANOR:
++ var opcode1, opcode2 uint32
++ switch p.As {
++ default:
++ case ANAND:
++ opcode1 = op_NGR
++ opcode2 = op_NGRK
++ case ANOR:
++ opcode1 = op_OGR
++ opcode2 = op_OGRK
++ }
++
++ r := int(p.Reg)
++ if r == 0 {
++ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else {
++ zRRF(opcode2, uint32(r), 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ zRRE(op_LCGR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
++ }
++
++ case 7: // shift left/right and rotate left
++ d2 := vregoff(ctxt, &p.From)
++ b2 := p.From.Reg
++ r3 := p.Reg
++ if r3 == 0 {
++ r3 = p.To.Reg
++ }
++ r1 := p.To.Reg
++ var opcode uint32
++ switch p.As {
++ default:
++ case ASLD:
++ opcode = op_SLLG
++ case ASRD:
++ opcode = op_SRLG
++ case ASLW:
++ opcode = op_SLLK
++ case ASRW:
++ opcode = op_SRLK
++ case ARLL:
++ opcode = op_RLL
++ case ARLLG:
++ opcode = op_RLLG
++ case ASRAW:
++ opcode = op_SRAK
++ case ASRAD:
++ opcode = op_SRAG
++ }
++ zRSY(opcode, uint32(r1), uint32(r3), uint32(b2), uint32(d2), asm)
++
++ case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
++ r := int(p.Reg)
++
++ switch p.As {
++ default:
++ case ASUB:
++ if r == 0 {
++ zRRE(op_SGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else {
++ zRRF(op_SGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
++ }
++ case ASUBC:
++ if r == 0 {
++ zRRE(op_SLGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else {
++ zRRF(op_SLGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
++ }
++
++ case ASUBE:
++ if r == 0 {
++ r = int(p.To.Reg)
++ }
++ if r == int(p.To.Reg) {
++ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else if p.From.Reg == p.To.Reg {
++ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(op_SLBGR, uint32(p.To.Reg), REGTMP, asm)
++ } else {
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++ }
++
++ case 11: /* br/bl lbra */
++ v := int32(0)
++
++ if p.Pcond != nil {
++ v = int32((p.Pcond.Pc - p.Pc) >> 1)
++ }
++
++ if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v {
++ zRI(op_BRC, 0xF, uint32(v), asm)
++ } else {
++ if p.As == ABL {
++ zRIL(b, op_BRASL, uint32(REG_LR), uint32(v), asm)
++ } else {
++ zRIL(c, op_BRCL, 0xF, uint32(v), asm)
++ }
++ if p.To.Sym != nil {
++ addcallreloc(ctxt, p.To.Sym, p.To.Offset)
++ }
++ }
++
++ case 15: /* br/bl (r) */
++ r := p.To.Reg
++ if p.As == ABCL || p.As == ABL {
++ zRR(op_BASR, uint32(REG_LR), uint32(r), asm)
++ } else {
++ zRR(op_BCR, 0xF, uint32(r), asm)
++ }
++
++ case 17, /* bc bo,bi,lbra (same for now) */
++ 16: /* bc bo,bi,sbra */
++ v := int32(0)
++ if p.Pcond != nil {
++ v = int32((p.Pcond.Pc - p.Pc) >> 1)
++ }
++ mask := branchMask(ctxt, p)
++ if p.To.Sym == nil && int32(int16(v)) == v {
++ zRI(op_BRC, mask, uint32(v), asm)
++ } else {
++ zRIL(c, op_BRCL, mask, uint32(v), asm)
++ }
++ if p.To.Sym != nil {
++ addrilreloc(ctxt, p.To.Sym, p.To.Offset)
++ }
++
++ case 18: // br/bl r
++ switch oclass(&p.To) {
++ case C_REG:
++ if p.As == ABL {
++ zRR(op_BASR, uint32(REG_LR), uint32(p.To.Reg), asm)
++ } else {
++ zRR(op_BCR, 0xF, uint32(p.To.Reg), asm)
++ }
++ default:
++ ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
++ }
++
++ case 19: // MOV $sym+n(SB) TO REG
++ d := vregoff(ctxt, &p.From)
++ zRIL(b, op_LARL, uint32(p.To.Reg), 0, asm)
++ if d&1 != 0 {
++ zRX(op_LA, uint32(p.To.Reg), uint32(p.To.Reg), 0, 1, asm)
++ d -= 1
++ }
++ addrilreloc(ctxt, p.From.Sym, d)
++
++ case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
++
++ if p.From.Sym != nil {
++ ctxt.Diag("%v is not supported", p)
++ }
++
++ v := vregoff(ctxt, &p.From)
++ r := p.Reg
++ if r == 0 {
++ r = p.To.Reg
++ }
++ switch p.As {
++ default:
++ case AADD:
++ if r == p.To.Reg {
++ zRIL(a, op_AGFI, uint32(p.To.Reg), uint32(v), asm)
++ } else if int64(int16(v)) == v {
++ zRIE(d, op_AGHIK, uint32(p.To.Reg), uint32(r), uint32(v), 0, 0, 0, 0, asm)
++ } else {
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ zRIL(a, op_AGFI, uint32(p.To.Reg), uint32(v), asm)
++ }
++ case AADDC:
++ if r != p.To.Reg {
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ }
++ zRIL(a, op_ALGFI, uint32(p.To.Reg), uint32(v), asm)
++ case AMULLW:
++ if r != p.To.Reg {
++ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
++ }
++ zRIL(a, op_MSGFI, uint32(p.To.Reg), uint32(v), asm)
++ }
++
++ case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */
++
++ v := vregoff(ctxt, &p.From)
++ var opcode uint32
++ r := p.Reg
++ if r == 0 {
++ r = p.To.Reg
++ }
++ if r == p.To.Reg {
++ switch p.As {
++ default:
++ ctxt.Diag("%v is not supported", p)
++ case AAND:
++ if v >= 0 { // needs zero extend
++ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
++ zRRE(op_NGR, uint32(p.To.Reg), REGTMP, asm)
++ } else if int64(int16(v)) == v {
++ zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
++ } else { // r.To.Reg & 0xffffffff00000000 & uint32(v)
++ zRIL(a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
++ }
++ case AOR:
++ if int64(uint32(v)) != v { // needs sign extend
++ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
++ zRRE(op_OGR, uint32(p.To.Reg), REGTMP, asm)
++ } else if int64(uint16(v)) == v {
++ zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
++ } else {
++ zRIL(a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
++ }
++ case AXOR:
++ if int64(uint32(v)) != v { // needs sign extend
++ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
++ zRRE(op_XGR, uint32(p.To.Reg), REGTMP, asm)
++ } else {
++ zRIL(a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
++ }
++ }
++ } else {
++ switch p.As {
++ default:
++ ctxt.Diag("%v is not supported", p)
++ case AAND:
++ opcode = op_NGRK
++ case AOR:
++ opcode = op_OGRK
++ case AXOR:
++ opcode = op_XGRK
++ }
++ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
++ zRRF(opcode, uint32(r), 0, uint32(p.To.Reg), REGTMP, asm)
++ }
++
++ case 26: // MOV LACON
++ v := regoff(ctxt, &p.From)
++ r := p.From.Reg
++ if r == 0 {
++ r = o.param
++ }
++ if v >= 0 && v < DISP12 {
++ zRX(op_LA, uint32(p.To.Reg), uint32(r), 0, uint32(v), asm)
++ } else if v >= -DISP20/2 && v < DISP20/2 {
++ zRXY(a, op_LAY, uint32(p.To.Reg), uint32(r), 0, uint32(v), asm)
++ } else {
++ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
++ zRX(op_LA, uint32(p.To.Reg), uint32(r), REGTMP, 0, asm)
++ }
++
++ case 31: /* dword */
++ wd := uint64(vregoff(ctxt, &p.From))
++ *asm = append(*asm,
++ uint8(wd>>56),
++ uint8(wd>>48),
++ uint8(wd>>40),
++ uint8(wd>>32),
++ uint8(wd>>24),
++ uint8(wd>>16),
++ uint8(wd>>8),
++ uint8(wd))
++
++ case 32: /* fmul frc,fra,frd */
++ r := int(p.Reg)
++ if r == 0 {
++ r = int(p.To.Reg)
++ }
++
++ var opcode uint32
++
++ switch p.As {
++ default:
++ ctxt.Diag("invalid opcode")
++ case AFMUL:
++ opcode = op_MDBR
++ case AFMULS:
++ opcode = op_MEEBR
++ }
++
++ if r == int(p.To.Reg) {
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ } else if p.From.Reg == p.To.Reg {
++ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
++ } else {
++ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
++ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++ }
++
++ case 33: /* fabs [frb,]frd; fmr. frb,frd */
++ r := p.From.Reg
++ if oclass(&p.From) == C_NONE {
++ r = p.To.Reg
++ }
++ var opcode uint32
++ switch p.As {
++ default:
++ case AFABS:
++ opcode = op_LPDBR
++ case AFNABS:
++ opcode = op_LNDBR
++ case AFNEG:
++ opcode = op_LCDFR
++ case ALEDBR:
++ opcode = op_LEDBR
++ case ALDEBR:
++ opcode = op_LDEBR
++ case AFSQRT:
++ opcode = op_SQDBR
++ case AFSQRTS:
++ opcode = op_SQEBR
++ }
++ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
++
++ case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
++
++ var opcode uint32
++
++ switch p.As {
++ default:
++ ctxt.Diag("invalid opcode")
++ case AFMADD:
++ opcode = op_MADBR
++ case AFMADDS:
++ opcode = op_MAEBR
++ case AFMSUB:
++ opcode = op_MSDBR
++ case AFMSUBS:
++ opcode = op_MSEBR
++ case AFNMADD:
++ opcode = op_MADBR
++ case AFNMADDS:
++ opcode = op_MAEBR
++ case AFNMSUB:
++ opcode = op_MSDBR
++ case AFNMSUBS:
++ opcode = op_MSEBR
++ }
++
++ zRR(op_LDR, uint32(p.To.Reg), uint32(p.Reg), asm)
++ zRRD(opcode, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From3.Reg), asm)
++
++ if p.As == AFNMADD || p.As == AFNMADDS || p.As == AFNMSUB || p.As == AFNMSUBS {
++ zRRE(op_LCDFR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
++ }
++
++ case 35: // MOVE REG TO LAUTO/LOREG
++ d2 := regoff(ctxt, &p.To)
++ b2 := p.To.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ x2 := p.To.Index
++ if d2 < -DISP20/2 || d2 >= DISP20/2 {
++ zRIL(a, op_LGFI, REGTMP, uint32(d2), asm)
++ if x2 != 0 {
++ zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
++ }
++ x2 = REGTMP
++ d2 = 0
++ }
++ zRXY(0, zopstore(ctxt, p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
++
++ case 36: // MOV LAUTO/LOREG TO REG
++ d2 := regoff(ctxt, &p.From)
++ b2 := p.From.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ x2 := p.From.Index
++ if d2 < -DISP20/2 || d2 >= DISP20/2 {
++ zRIL(a, op_LGFI, REGTMP, uint32(d2), asm)
++ if x2 != 0 {
++ zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
++ }
++ x2 = REGTMP
++ d2 = 0
++ }
++ zRXY(0, zopload(ctxt, p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
++
++ case 40: /* word and byte*/
++ wd := uint32(regoff(ctxt, &p.From))
++ if p.As == AWORD { //WORD
++ *asm = append(*asm, uint8(wd>>24), uint8(wd>>16), uint8(wd>>8), uint8(wd))
++ } else { //BYTE
++ *asm = append(*asm, uint8(wd))
++ }
++
++ case 47: /* op Ra, Rd; also op [Ra,] Rd */
++ switch p.As {
++ default:
++
++ case AADDME:
++ r := int(p.From.Reg)
++ if p.To.Reg == p.From.Reg {
++ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
++ r = REGTMP
++ }
++ zRIL(a, op_LGFI, uint32(p.To.Reg), 0xffffffff, asm) // p.To.Reg <- -1
++ zRRE(op_ALCGR, uint32(p.To.Reg), uint32(r), asm)
++
++ case AADDZE:
++ r := int(p.From.Reg)
++ if p.To.Reg == p.From.Reg {
++ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
++ r = REGTMP
++ }
++ zRRE(op_LGR, uint32(p.To.Reg), REGZERO, asm) // p.To.Reg <- 0
++ zRRE(op_ALCGR, uint32(p.To.Reg), uint32(r), asm)
++
++ case ASUBME:
++ r := int(p.From.Reg)
++ if p.To.Reg == p.From.Reg {
++ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
++ r = REGTMP
++ }
++ zRIL(a, op_LGFI, uint32(p.To.Reg), 0xffffffff, asm) // p.To.Reg <- -1
++ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(r), asm)
++
++ case ASUBZE:
++ r := int(p.From.Reg)
++ if p.To.Reg == p.From.Reg {
++ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
++ r = REGTMP
++ }
++ zRRE(op_LGR, uint32(p.To.Reg), REGZERO, asm) // p.To.Reg <- 0
++ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(r), asm)
++
++ case ANEG:
++ r := int(p.From.Reg)
++ if r == 0 {
++ r = int(p.To.Reg)
++ }
++ zRRE(op_LCGR, uint32(p.To.Reg), uint32(r), asm)
++ }
++
++ case 67: // AFMOVx $0, Fy -- move +0 into reg
++ var opcode uint32
++ switch p.As {
++ case AFMOVS:
++ opcode = op_LZER
++ case AFMOVD:
++ opcode = op_LZDR
++ }
++ zRRE(opcode, uint32(p.To.Reg), 0, asm)
++
++ case 68: /* ear arS,rD */
++ zRRE(op_EAR, uint32(p.To.Reg), uint32(p.From.Reg-REG_AR0), asm)
++
++ case 69: /* sar rS,arD */
++ zRRE(op_SAR, uint32(p.To.Reg-REG_AR0), uint32(p.From.Reg), asm)
++
++ case 70: /* [f]cmp r,r,cr*/
++ if p.Reg != 0 {
++ ctxt.Diag("unsupported nozero CC in Z")
++ }
++ if p.As == ACMPW || p.As == ACMPWU {
++ zRR(zoprr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
++ } else {
++ zRRE(zoprre(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
++ }
++
++ case 71: // cmp reg $constant
++ v := vregoff(ctxt, &p.To)
++ switch p.As {
++ case ACMP, ACMPW:
++ if int64(int32(v)) != v {
++ ctxt.Diag("%v overflows an int32", v)
++ }
++ case ACMPU, ACMPWU:
++ if int64(uint32(v)) != v {
++ ctxt.Diag("%v overflows a uint32", v)
++ }
++ }
++ zRIL(0, zopril(ctxt, p.As), uint32(p.From.Reg), uint32(regoff(ctxt, &p.To)), asm)
++
++ case 72: // MOV int32 -> s+o(r)(i*1)
++ v := regoff(ctxt, &p.From)
++ d := regoff(ctxt, &p.To)
++ r := p.To.Reg
++ x := p.To.Index
++ if r == 0 {
++ r = o.param
++ }
++ if p.From.Sym != nil {
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ if v&0x1 != 0 {
++ v -= 1
++ zRX(op_LA, REGTMP, REGTMP, 0, 1, asm)
++ }
++ addrilreloc(ctxt, p.From.Sym, int64(v))
++ if d < -DISP20/2 || d >= DISP20/2 {
++ zRIL(a, op_LGFI, REGTMP2, uint32(d), asm)
++ if x != 0 {
++ zRRE(op_AGR, REGTMP2, uint32(x), asm)
++ }
++ d = 0
++ x = REGTMP2
++ }
++ zRXY(0, zopstore(ctxt, p.As), REGTMP, uint32(x), uint32(r), uint32(d), asm)
++ } else if int32(int16(v)) == v && x == 0 {
++ if d < 0 || d >= DISP12 {
++ if r == REGTMP || r == REGTMP2 {
++ zRIL(a, op_AGFI, uint32(r), uint32(d), asm)
++ } else {
++ zRIL(a, op_LGFI, REGTMP, uint32(d), asm)
++ zRRE(op_AGR, REGTMP, uint32(r), asm)
++ r = REGTMP
++ }
++ d = 0
++ }
++ var opcode uint32
++ switch p.As {
++ case AMOVD:
++ opcode = op_MVGHI
++ case AMOVW, AMOVWZ:
++ opcode = op_MVHI
++ case AMOVH, AMOVHZ:
++ opcode = op_MVHHI
++ case AMOVB, AMOVBZ:
++ opcode = op_MVI
++ }
++ if opcode == op_MVI {
++ zSI(opcode, uint32(v), uint32(r), uint32(d), asm)
++ } else {
++ zSIL(opcode, uint32(r), uint32(d), uint32(v), asm)
++ }
++ } else {
++ zRIL(a, op_LGFI, REGTMP2, uint32(v), asm)
++ if d < -DISP20/2 || d >= DISP20/2 {
++ if r == REGTMP {
++ zRIL(a, op_AGFI, REGTMP, uint32(d), asm)
++ } else {
++ zRIL(a, op_LGFI, REGTMP, uint32(d), asm)
++ if x != 0 {
++ zRRE(op_AGR, REGTMP, uint32(x), asm)
++ }
++ x = REGTMP
++ }
++ d = 0
++ }
++ zRXY(0, zopstore(ctxt, p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm)
++ }
++
++ case 73: // MOV int32 -> addr
++ v := regoff(ctxt, &p.From)
++ d := regoff(ctxt, &p.To)
++ a := uint32(0)
++ if d&1 != 0 {
++ d -= 1
++ a = 1
++ }
++ zRIL(b, op_LARL, REGTMP, uint32(d), asm)
++ addrilreloc(ctxt, p.To.Sym, int64(d))
++ if p.From.Sym != nil {
++ zRIL(b, op_LARL, REGTMP2, 0, asm)
++ a := uint32(0)
++ if v&0x1 != 0 {
++ v -= 1
++ zRX(op_LA, REGTMP2, REGTMP2, 0, 1, asm)
++ }
++ addrilrelocoffset(ctxt, p.From.Sym, int64(v), sizeRIL)
++ zRXY(0, zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
++ } else if int32(int16(v)) == v {
++ var opcode uint32
++ switch p.As {
++ case AMOVD:
++ opcode = op_MVGHI
++ case AMOVW, AMOVWZ:
++ opcode = op_MVHI
++ case AMOVH, AMOVHZ:
++ opcode = op_MVHHI
++ case AMOVB, AMOVBZ:
++ opcode = op_MVI
++ }
++ if opcode == op_MVI {
++ zSI(opcode, uint32(v), REGTMP, a, asm)
++ } else {
++ zSIL(opcode, REGTMP, a, uint32(v), asm)
++ }
++ } else {
++ zRIL(a, op_LGFI, REGTMP2, uint32(v), asm)
++ zRXY(0, zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
++ }
++
++ case 74: // MOV sym+n(SB) TO REG (requires relocation)
++ i2 := regoff(ctxt, &p.To)
++ switch p.As {
++ case AMOVD:
++ zRIL(b, op_STGRL, uint32(p.From.Reg), 0, asm)
++ case AMOVW, AMOVWZ: // The zero extension doesn't affect store instructions
++ zRIL(b, op_STRL, uint32(p.From.Reg), 0, asm)
++ case AMOVH, AMOVHZ: // The zero extension doesn't affect store instructions
++ zRIL(b, op_STHRL, uint32(p.From.Reg), 0, asm)
++ case AMOVB, AMOVBZ: // The zero extension doesn't affect store instructions
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ adj := uint32(0) // adjustment needed for odd addresses
++ if i2&1 != 0 {
++ i2 -= 1
++ adj = 1
++ }
++ zRX(op_STC, uint32(p.From.Reg), 0, REGTMP, adj, asm)
++ case AFMOVD:
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ zRX(op_STD, uint32(p.From.Reg), 0, REGTMP, 0, asm)
++ case AFMOVS:
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ zRX(op_STE, uint32(p.From.Reg), 0, REGTMP, 0, asm)
++ }
++ addrilreloc(ctxt, p.To.Sym, int64(i2))
++
++ case 75: // MOV REG TO sym+n(SB) (requires relocation)
++ i2 := regoff(ctxt, &p.From)
++ switch p.As {
++ case AMOVD:
++ if i2&1 != 0 {
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ zRXY(0, op_LG, uint32(p.To.Reg), REGTMP, 0, 1, asm)
++ i2 -= 1
++ } else {
++ zRIL(b, op_LGRL, uint32(p.To.Reg), uint32(d), asm)
++ }
++ case AMOVW:
++ zRIL(b, op_LGFRL, uint32(p.To.Reg), 0, asm)
++ case AMOVWZ:
++ zRIL(b, op_LLGFRL, uint32(p.To.Reg), 0, asm)
++ case AMOVH:
++ zRIL(b, op_LGHRL, uint32(p.To.Reg), 0, asm)
++ case AMOVHZ:
++ zRIL(b, op_LLGHRL, uint32(p.To.Reg), 0, asm)
++ case AMOVB, AMOVBZ:
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ adj := uint32(0) // adjustment needed for odd addresses
++ if i2&1 != 0 {
++ i2 -= 1
++ adj = 1
++ }
++ switch p.As {
++ case AMOVB:
++ zRXY(0, op_LGB, uint32(p.To.Reg), 0, REGTMP, adj, asm)
++ case AMOVBZ:
++ zRXY(0, op_LLGC, uint32(p.To.Reg), 0, REGTMP, adj, asm)
++ }
++ case AFMOVD:
++ zRIL(a, op_LARL, REGTMP, 0, asm)
++ zRX(op_LD, uint32(p.To.Reg), 0, REGTMP, 0, asm)
++ case AFMOVS:
++ zRIL(a, op_LARL, REGTMP, 0, asm)
++ zRX(op_LE, uint32(p.To.Reg), 0, REGTMP, 0, asm)
++ }
++ addrilreloc(ctxt, p.From.Sym, int64(i2))
++
++ case 77: /* syscall $scon */
++ if p.From.Offset > 255 || p.From.Offset < 1 {
++ ctxt.Diag("illegal system call; system call number out of range: %v", p)
++ zE(op_TRAP2, asm) // trap always
++ } else {
++ zI(op_SVC, uint32(p.From.Offset), asm)
++ }
++
++ case 78: /* undef */
++ /* "An instruction consisting entirely of binary 0s is guaranteed
++ always to be an illegal instruction." */
++ *asm = append(*asm, 0, 0, 0, 0)
++
++ case 79: /* cs,csg r1,r3,off(r2) -> compare & swap; if (r1 ==off(r2)) then off(r2)= r3 */
++ v := regoff(ctxt, &p.To)
++ if v < 0 {
++ v = 0
++ }
++ if p.As == ACS {
++ zRS(op_CS, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
++ } else if p.As == ACSG {
++ zRSY(op_CSG, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
++ }
++
++ case 81: /* SYNC-> BCR 14,0 */
++ zRR(op_BCR, 0xE, 0, asm)
++
++ case 82: /* conversion from GPR to FPR */
++ var opcode uint32
++ switch p.As {
++ default:
++ log.Fatalf("unexpected opcode %v", p.As)
++ case ACEFBRA:
++ opcode = op_CEFBRA
++ case ACDFBRA:
++ opcode = op_CDFBRA
++ case ACEGBRA:
++ opcode = op_CEGBRA
++ case ACDGBRA:
++ opcode = op_CDGBRA
++ case ACELFBR:
++ opcode = op_CELFBR
++ case ACDLFBR:
++ opcode = op_CDLFBR
++ case ACELGBR:
++ opcode = op_CELGBR
++ case ACDLGBR:
++ opcode = op_CDLGBR
++ }
++ /* set immediate operand M3 to 0 to use the default BFP rounding mode
++ (usually round to nearest, ties to even); M4 is reserved and must be 0 */
++ zRRF(opcode, 0, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++
++ case 83: /* conversion from FPR to GPR */
++ var opcode uint32
++ switch p.As {
++ default:
++ log.Fatalf("unexpected opcode %v", p.As)
++ case ACFEBRA:
++ opcode = op_CFEBRA
++ case ACFDBRA:
++ opcode = op_CFDBRA
++ case ACGEBRA:
++ opcode = op_CGEBRA
++ case ACGDBRA:
++ opcode = op_CGDBRA
++ case ACLFEBR:
++ opcode = op_CLFEBR
++ case ACLFDBR:
++ opcode = op_CLFDBR
++ case ACLGEBR:
++ opcode = op_CLGEBR
++ case ACLGDBR:
++ opcode = op_CLGDBR
++ }
++ /* set immediate operand M3 to 5 for rounding toward zero (required by Go spec); M4 is reserved and must be 0 */
++ zRRF(opcode, 5, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
++
++ case 84: /* storage-and-storage operations (mvc, clc, xc, oc, nc) */
++ l := regoff(ctxt, p.From3)
++ if l < 1 || l > 256 {
++ ctxt.Diag("number of bytes (%v) not in range [1,256]", l)
++ }
++ if p.From.Index != 0 || p.To.Index != 0 {
++ ctxt.Diag("cannot use index reg")
++ }
++ b1 := p.To.Reg
++ b2 := p.From.Reg
++ if b1 == 0 {
++ b1 = o.param
++ }
++ if b2 == 0 {
++ b2 = o.param
++ }
++ d1 := regoff(ctxt, &p.To)
++ d2 := regoff(ctxt, &p.From)
++ if d1 < 0 || d1 >= DISP12 {
++ if b2 == REGTMP {
++ ctxt.Diag("REGTMP conflict")
++ }
++ if b1 != REGTMP {
++ zRRE(op_LGR, REGTMP, uint32(b1), asm)
++ }
++ zRIL(a, op_AGFI, REGTMP, uint32(d1), asm)
++ if d1 == d2 && b1 == b2 {
++ d2 = 0
++ b2 = REGTMP
++ }
++ d1 = 0
++ b1 = REGTMP
++ }
++ if d2 < 0 || d2 >= DISP12 {
++ if b1 == REGTMP2 {
++ ctxt.Diag("REGTMP2 conflict")
++ }
++ if b2 != REGTMP2 {
++ zRRE(op_LGR, REGTMP2, uint32(b2), asm)
++ }
++ zRIL(a, op_AGFI, REGTMP2, uint32(d2), asm)
++ d2 = 0
++ b2 = REGTMP2
++ }
++ var opcode uint32
++ switch p.As {
++ default:
++ ctxt.Diag("unexpected opcode %v", p.As)
++ case AMVC:
++ opcode = op_MVC
++ case ACLC:
++ opcode = op_CLC
++ // swap operand order for CLC so that it matches CMP
++ b1, b2 = b2, b1
++ d1, d2 = d2, d1
++ case AXC:
++ opcode = op_XC
++ case AOC:
++ opcode = op_OC
++ case ANC:
++ opcode = op_NC
++ }
++ zSS(a, opcode, uint32(l-1), 0, uint32(b1), uint32(d1), uint32(b2), uint32(d2), asm)
++
++ case 85: /* larl: load address relative long */
++ // When using larl directly, don't add a nop
++ v := regoff(ctxt, &p.From)
++ if p.From.Sym == nil {
++ if (v & 1) != 0 {
++ ctxt.Diag("cannot use LARL with odd offset: %v", v)
++ }
++ } else {
++ addrilreloc(ctxt, p.From.Sym, int64(v))
++ v = 0
++ }
++ zRIL(b, op_LARL, uint32(p.To.Reg), uint32(v>>1), asm)
++
++ case 86: /* lay?: load address */
++ d := vregoff(ctxt, &p.From)
++ x := p.From.Index
++ b := p.From.Reg
++ if b == 0 {
++ b = o.param
++ }
++ switch p.As {
++ case ALA:
++ zRX(op_LA, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
++ case ALAY:
++ zRXY(0, op_LAY, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
++ }
++
++ case 87: /* exrl: execute relative long */
++ v := vregoff(ctxt, &p.From)
++ if p.From.Sym == nil {
++ if v&1 != 0 {
++ ctxt.Diag("cannot use EXRL with odd offset: %v", v)
++ }
++ } else {
++ addrilreloc(ctxt, p.From.Sym, v)
++ v = 0
++ }
++ zRIL(b, op_EXRL, uint32(p.To.Reg), uint32(v>>1), asm)
++
++ case 88: /* stck[cef]?: store clock (comparator/extended/fast) */
++ var opcode uint32
++ switch p.As {
++ case ASTCK:
++ opcode = op_STCK
++ case ASTCKC:
++ opcode = op_STCKC
++ case ASTCKE:
++ opcode = op_STCKE
++ case ASTCKF:
++ opcode = op_STCKF
++ }
++ v := vregoff(ctxt, &p.To)
++ r := int(p.To.Reg)
++ if r == 0 {
++ r = int(o.param)
++ }
++ zS(opcode, uint32(r), uint32(v), asm)
++
++ case 89:
++ var v int32
++ if p.Pcond != nil {
++ v = int32((p.Pcond.Pc - p.Pc) >> 1)
++ }
++ var opcode, opcode2 uint32
++ switch p.As {
++ case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
++ opcode = op_CGRJ
++ opcode2 = op_CGR
++ case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
++ opcode = op_CLGRJ
++ opcode2 = op_CLGR
++ }
++ mask := branchMask(ctxt, p)
++ if int32(int16(v)) != v {
++ zRRE(opcode2, uint32(p.From.Reg), uint32(p.Reg), asm)
++ zRIL(c, op_BRCL, mask, uint32(v-sizeRRE/2), asm)
++ } else {
++ zRIE(b, opcode, uint32(p.From.Reg), uint32(p.Reg), uint32(v), 0, 0, mask, 0, asm)
++ }
++
++ case 90:
++ var v int32
++ if p.Pcond != nil {
++ v = int32((p.Pcond.Pc - p.Pc) >> 1)
++ }
++ var opcode, opcode2 uint32
++ switch p.As {
++ case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
++ opcode = op_CGIJ
++ opcode2 = op_CGFI
++ case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
++ opcode = op_CLGIJ
++ opcode2 = op_CLGFI
++ }
++ mask := branchMask(ctxt, p)
++ if int32(int16(v)) != v {
++ zRIL(0, opcode2, uint32(p.From.Reg), uint32(regoff(ctxt, p.From3)), asm)
++ zRIL(c, op_BRCL, mask, uint32(v-sizeRIL/2), asm)
++ } else {
++ zRIE(c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(regoff(ctxt, p.From3)), asm)
++ }
++
++ case 93: // GOT lookup
++ v := vregoff(ctxt, &p.To)
++ if v != 0 {
++ ctxt.Diag("invalid offset against GOT slot %v", p)
++ }
++ zRIL(b, op_LGRL, uint32(p.To.Reg), 0, asm)
++ rel := obj.Addrel(ctxt.Cursym)
++ rel.Off = int32(ctxt.Pc + 2)
++ rel.Siz = 4
++ rel.Sym = p.From.Sym
++ rel.Type = obj.R_GOTPCREL
++ rel.Add = 2 + int64(rel.Siz)
++
++ case 94: // TLS local exec model
++ zRIL(b, op_LARL, REGTMP, (sizeRIL+sizeRXY+sizeRI)>>1, asm)
++ zRXY(0, op_LG, uint32(p.To.Reg), REGTMP, 0, 0, asm)
++ zRI(op_BRC, 0xF, (sizeRI+8)>>1, asm)
++ *asm = append(*asm, 0, 0, 0, 0, 0, 0, 0, 0)
++ rel := obj.Addrel(ctxt.Cursym)
++ rel.Off = int32(ctxt.Pc + sizeRIL + sizeRXY + sizeRI)
++ rel.Siz = 8
++ rel.Sym = p.From.Sym
++ rel.Type = obj.R_TLS_LE
++ rel.Add = 0
++
++ case 95: // TLS initial exec model
++ // Assembly | Relocation symbol | Done Here?
++ // --------------------------------------------------------------
++ // ear %r11, %a0 | |
++ // sllg %r11, %r11, 32 | |
++ // ear %r11, %a1 | |
++ // larl %r10, @indntpoff | R_390_TLS_IEENT | Y
++ // lg %r10, 0(%r10) | R_390_TLS_LOAD (tag) | Y
++ // la %r10, 0(%r10, %r11) | |
++ // --------------------------------------------------------------
++
++ // R_390_TLS_IEENT
++ zRIL(b, op_LARL, REGTMP, 0, asm)
++ ieent := obj.Addrel(ctxt.Cursym)
++ ieent.Off = int32(ctxt.Pc + 2)
++ ieent.Siz = 4
++ ieent.Sym = p.From.Sym
++ ieent.Type = obj.R_TLS_IE
++ ieent.Add = 2 + int64(ieent.Siz)
++
++ // R_390_TLS_LOAD
++ zRXY(0, op_LGF, uint32(p.To.Reg), REGTMP, 0, 0, asm)
++ // TODO(mundaym): add R_390_TLS_LOAD relocation here
++ // not strictly required but might allow the linker to optimize
++
++ case 96: // CLEAR macro
++ length := vregoff(ctxt, &p.From)
++ offset := vregoff(ctxt, &p.To)
++ reg := p.To.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ if length <= 0 {
++ ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length)
++ }
++ for length > 0 {
++ if offset < 0 || offset >= DISP12 {
++ if offset >= -DISP20/2 && offset < DISP20/2 {
++ zRXY(0, op_LAY, REGTMP, uint32(reg), 0, uint32(offset), asm)
++ } else {
++ if reg != REGTMP {
++ zRRE(op_LGR, REGTMP, uint32(reg), asm)
++ }
++ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
++ }
++ reg = REGTMP
++ offset = 0
++ }
++ size := length
++ if size > 256 {
++ size = 256
++ }
++
++ switch size {
++ case 1:
++ zSI(op_MVI, 0, uint32(reg), uint32(offset), asm)
++ case 2:
++ zSIL(op_MVHHI, uint32(reg), uint32(offset), 0, asm)
++ case 4:
++ zSIL(op_MVHI, uint32(reg), uint32(offset), 0, asm)
++ case 8:
++ zSIL(op_MVGHI, uint32(reg), uint32(offset), 0, asm)
++ default:
++ zSS(a, op_XC, uint32(size-1), 0, uint32(reg), uint32(offset), uint32(reg), uint32(offset), asm)
++ }
++
++ length -= size
++ offset += size
++ }
++
++ case 97: // STORE MULTIPLE (STMG/STMY)
++ rstart := p.From.Reg
++ rend := p.Reg
++ offset := regoff(ctxt, &p.To)
++ reg := p.To.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ if offset < -DISP20/2 || offset >= DISP20/2 {
++ if reg != REGTMP {
++ zRRE(op_LGR, REGTMP, uint32(reg), asm)
++ }
++ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
++ reg = REGTMP
++ offset = 0
++ }
++ switch p.As {
++ case ASTMY:
++ if offset >= 0 && offset < DISP12 {
++ zRS(op_STM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ } else {
++ zRSY(op_STMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ }
++ case ASTMG:
++ zRSY(op_STMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ }
++
++ case 98: // LOAD MULTIPLE (LMG/LMY)
++ rstart := p.Reg
++ rend := p.To.Reg
++ offset := regoff(ctxt, &p.From)
++ reg := p.From.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ if offset < -DISP20/2 || offset >= DISP20/2 {
++ if reg != REGTMP {
++ zRRE(op_LGR, REGTMP, uint32(reg), asm)
++ }
++ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
++ reg = REGTMP
++ offset = 0
++ }
++ switch p.As {
++ case ALMY:
++ if offset >= 0 && offset < DISP12 {
++ zRS(op_LM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ } else {
++ zRSY(op_LMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ }
++ case ALMG:
++ zRSY(op_LMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
++ }
++
++ case 100: // VRX STORE
++ op, m3, _ := vop(p.As)
++ if p.From3 != nil {
++ m3 = uint32(vregoff(ctxt, p.From3))
++ }
++ b2 := p.To.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ d2 := uint32(vregoff(ctxt, &p.To))
++ zVRX(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
++
++ case 101: // VRX LOAD
++ op, m3, _ := vop(p.As)
++ if p.From3 != nil {
++ m3 = uint32(vregoff(ctxt, p.From3))
++ }
++ b2 := p.From.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ d2 := uint32(vregoff(ctxt, &p.From))
++ zVRX(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
++
++ case 102: // VRV SCATTER
++ op, m3, _ := vop(p.As)
++ if p.From3 != nil {
++ m3 = uint32(vregoff(ctxt, p.From3))
++ }
++ b2 := p.To.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ d2 := uint32(vregoff(ctxt, &p.To))
++ zVRV(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
++
++ case 103: // VRV GATHER
++ op, m3, _ := vop(p.As)
++ if p.From3 != nil {
++ m3 = uint32(vregoff(ctxt, p.From3))
++ }
++ b2 := p.From.Reg
++ if b2 == 0 {
++ b2 = o.param
++ }
++ d2 := uint32(vregoff(ctxt, &p.From))
++ zVRV(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
++
++ case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT
++ op, m4, _ := vop(p.As)
++ fr := p.Reg
++ if fr == 0 {
++ fr = p.To.Reg
++ }
++ bits := uint32(vregoff(ctxt, &p.From))
++ zVRS(op, uint32(p.To.Reg), uint32(fr), uint32(p.From.Reg), bits, m4, asm)
++
++ case 105: // VRS STORE MULTIPLE
++ op, _, _ := vop(p.As)
++ offset := uint32(vregoff(ctxt, &p.To))
++ reg := p.To.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ zVRS(op, uint32(p.From.Reg), uint32(p.Reg), uint32(reg), offset, 0, asm)
++
++ case 106: // VRS LOAD MULTIPLE
++ op, _, _ := vop(p.As)
++ offset := uint32(vregoff(ctxt, &p.From))
++ reg := p.From.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ zVRS(op, uint32(p.Reg), uint32(p.To.Reg), uint32(reg), offset, 0, asm)
++
++ case 107: // VRS STORE WITH LENGTH
++ op, _, _ := vop(p.As)
++ offset := uint32(vregoff(ctxt, &p.To))
++ reg := p.To.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ zVRS(op, uint32(p.From.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
++
++ case 108: // VRS LOAD WITH LENGTH
++ op, _, _ := vop(p.As)
++ offset := uint32(vregoff(ctxt, &p.From))
++ reg := p.From.Reg
++ if reg == 0 {
++ reg = o.param
++ }
++ zVRS(op, uint32(p.To.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
++
++ case 109: // VRI-a instructions
++ op, _, _ := vop(p.As)
++ i2 := uint32(vregoff(ctxt, &p.From))
++ switch p.As {
++ case AVZERO:
++ i2 = 0
++ case AVONE:
++ i2 = 0xffff
++ }
++ m3 := uint32(0)
++ if p.From3 != nil {
++ m3 = uint32(vregoff(ctxt, p.From3))
++ }
++ zVRIa(op, uint32(p.To.Reg), i2, m3, asm)
++
++ case 110:
++ op, m4, _ := vop(p.As)
++ i2 := uint32(vregoff(ctxt, p.From3))
++ i3 := uint32(vregoff(ctxt, &p.From))
++ zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm)
++
++ case 111:
++ op, m4, _ := vop(p.As)
++ i2 := uint32(vregoff(ctxt, &p.From))
++ zVRIc(op, uint32(p.To.Reg), uint32(p.Reg), i2, m4, asm)
++
++ case 112:
++ op, m5, _ := vop(p.As)
++ i4 := uint32(vregoff(ctxt, p.From3))
++ zVRId(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), i4, m5, asm)
++
++ case 113:
++ op, m4, _ := vop(p.As)
++ m5 := singleElementMask(p.As)
++ i3 := uint32(vregoff(ctxt, &p.From))
++ zVRIe(op, uint32(p.To.Reg), uint32(p.Reg), i3, m5, m4, asm)
++
++ case 114: // VRR-a
++ op, m3, m5 := vop(p.As)
++ m4 := singleElementMask(p.As)
++ zVRRa(op, uint32(p.To.Reg), uint32(p.From.Reg), m5, m4, m3, asm)
++
++ case 115: // VRR-a COMPARE
++ op, m3, m5 := vop(p.As)
++ m4 := singleElementMask(p.As)
++ zVRRa(op, uint32(p.From.Reg), uint32(p.To.Reg), m5, m4, m3, asm)
++
++ case 116: // VRR-a
++
++ case 117: // VRR-b
++ op, m4, m5 := vop(p.As)
++ zVRRb(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), m5, m4, asm)
++
++ case 118: // VRR-c
++ op, m4, m6 := vop(p.As)
++ m5 := singleElementMask(p.As)
++ v3 := p.Reg
++ if v3 == 0 {
++ v3 = p.To.Reg
++ }
++ zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(v3), m6, m5, m4, asm)
++
++ case 119: // VRR-c SHIFT/ROTATE/DIVIDE/SUB (rhs value on the left, like SLD, DIV etc.)
++ op, m4, m6 := vop(p.As)
++ m5 := singleElementMask(p.As)
++ v2 := p.Reg
++ if v2 == 0 {
++ v2 = p.To.Reg
++ }
++ zVRRc(op, uint32(p.To.Reg), uint32(v2), uint32(p.From.Reg), m6, m5, m4, asm)
++
++ case 120: // VRR-d
++ op, m6, _ := vop(p.As)
++ m5 := singleElementMask(p.As)
++ v1 := uint32(p.To.Reg)
++ v2 := uint32(p.From3.Reg)
++ v3 := uint32(p.From.Reg)
++ v4 := uint32(p.Reg)
++ zVRRd(op, v1, v2, v3, m6, m5, v4, asm)
++
++ case 121: // VRR-e
++ op, m6, _ := vop(p.As)
++ m5 := singleElementMask(p.As)
++ v1 := uint32(p.To.Reg)
++ v2 := uint32(p.From3.Reg)
++ v3 := uint32(p.From.Reg)
++ v4 := uint32(p.Reg)
++ zVRRe(op, v1, v2, v3, m5, m6, v4, asm)
++
++ case 122: // VRR-f LOAD VRS FROM GRS DISJOINT
++ op, _, _ := vop(p.As)
++ zVRRf(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), asm)
++
++ case 123: // VPDI $m4, V2, V3, V1
++ op, _, _ := vop(p.As)
++ m4 := regoff(ctxt, p.From3)
++ zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), 0, 0, uint32(m4), asm)
++ }
++}
++
++func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
++ ctxt.Instoffset = 0
++ if a != nil {
++ aclass(ctxt, a)
++ }
++ return ctxt.Instoffset
++}
++
++func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
++ return int32(vregoff(ctxt, a))
++}
++
++/*
++ * load o(a), d
++ */
++func zopload(ctxt *obj.Link, a int16) uint32 {
++ switch a {
++ /* fixed point load */
++ case AMOVD:
++ return op_LG
++ case AMOVW:
++ return op_LGF
++ case AMOVWZ:
++ return op_LLGF
++ case AMOVH:
++ return op_LGH
++ case AMOVHZ:
++ return op_LLGH
++ case AMOVB:
++ return op_LGB
++ case AMOVBZ:
++ return op_LLGC
++
++ /* floating point load */
++ case AFMOVD:
++ return op_LDY
++ case AFMOVS:
++ return op_LEY
++
++ /* byte reversed load*/
++ case AMOVDBR:
++ return op_LRVG
++ case AMOVWBR:
++ return op_LRV
++ case AMOVHBR:
++ return op_LRVH
++ }
++
++ ctxt.Diag("unknown store opcode %v", obj.Aconv(int(a)))
++ return 0
++}
++
++/*
++ * store s,o(d)
++ */
++func zopstore(ctxt *obj.Link, a int16) uint32 {
++ switch a {
++ /* fixed point store */
++ case AMOVD:
++ return op_STG
++ case AMOVW, AMOVWZ:
++ return op_STY
++ case AMOVH, AMOVHZ:
++ return op_STHY
++ case AMOVB, AMOVBZ:
++ return op_STCY
++
++ /* floating point store */
++ case AFMOVD:
++ return op_STDY
++ case AFMOVS:
++ return op_STEY
++
++ /* byte reversed store */
++ case AMOVDBR:
++ return op_STRVG
++ case AMOVWBR:
++ return op_STRV
++ case AMOVHBR:
++ return op_STRVH
++ }
++
++ ctxt.Diag("unknown store opcode %v", obj.Aconv(int(a)))
++ return 0
++}
++
++func zoprre(ctxt *obj.Link, a int16) uint32 {
++ switch a {
++ case ACMP:
++ return op_CGR
++ case ACMPU:
++ return op_CLGR
++ case AFCMPO: //ordered
++ return op_KDBR
++ case AFCMPU: //unordered
++ return op_CDBR
++ case ACEBR:
++ return op_CEBR
++ }
++ ctxt.Diag("unknown rre opcode %v", obj.Aconv(int(a)))
++ return 0
++}
++
++func zoprr(ctxt *obj.Link, a int16) uint32 {
++ switch a {
++ case ACMPW:
++ return op_CR
++ case ACMPWU:
++ return op_CLR
++ }
++ ctxt.Diag("unknown rr opcode %v", obj.Aconv(int(a)))
++ return 0
++}
++
++func zopril(ctxt *obj.Link, a int16) uint32 {
++ switch a {
++ case ACMP:
++ return op_CGFI
++ case ACMPU:
++ return op_CLGFI
++ case ACMPW:
++ return op_CFI
++ case ACMPWU:
++ return op_CLFI
++ }
++ ctxt.Diag("unknown ril opcode %v", obj.Aconv(int(a)))
++ return 0
++}
++
++// z instructions sizes.
++const (
++ sizeE = 2
++ sizeI = 2
++ sizeIE = 4
++ sizeMII = 6
++ sizeRI = 4
++ sizeRI1 = 4
++ sizeRI2 = 4
++ sizeRI3 = 4
++ sizeRIE = 6
++ sizeRIE1 = 6
++ sizeRIE2 = 6
++ sizeRIE3 = 6
++ sizeRIE4 = 6
++ sizeRIE5 = 6
++ sizeRIE6 = 6
++ sizeRIL = 6
++ sizeRIL1 = 6
++ sizeRIL2 = 6
++ sizeRIL3 = 6
++ sizeRIS = 6
++ sizeRR = 2
++ sizeRRD = 4
++ sizeRRE = 4
++ sizeRRF = 4
++ sizeRRF1 = 4
++ sizeRRF2 = 4
++ sizeRRF3 = 4
++ sizeRRF4 = 4
++ sizeRRF5 = 4
++ sizeRRR = 2
++ sizeRRS = 6
++ sizeRS = 4
++ sizeRS1 = 4
++ sizeRS2 = 4
++ sizeRSI = 4
++ sizeRSL = 6
++ sizeRSY = 6
++ sizeRSY1 = 6
++ sizeRSY2 = 6
++ sizeRX = 4
++ sizeRX1 = 4
++ sizeRX2 = 4
++ sizeRXE = 6
++ sizeRXF = 6
++ sizeRXY = 6
++ sizeRXY1 = 6
++ sizeRXY2 = 6
++ sizeS = 4
++ sizeSI = 4
++ sizeSIL = 6
++ sizeSIY = 6
++ sizeSMI = 6
++ sizeSS = 6
++ sizeSS1 = 6
++ sizeSS2 = 6
++ sizeSS3 = 6
++ sizeSS4 = 6
++ sizeSS5 = 6
++ sizeSS6 = 6
++ sizeSSE = 6
++ sizeSSF = 6
++)
++
++// instruction format variations.
++const (
++ a = iota
++ b
++ c
++ d
++ e
++ f
++ g
++)
++
++func zE(op uint32, asm *[]byte) {
++ *asm = append(*asm, uint8(op>>8), uint8(op))
++}
++
++func zI(op, i1 uint32, asm *[]byte) {
++ *asm = append(*asm, uint8(op>>8), uint8(i1))
++}
++
++func zMII(op, m1, ri2, ri3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(m1)<<4)|uint8((ri2>>8)&0x0F),
++ uint8(ri2),
++ uint8(ri3>>16),
++ uint8(ri3>>8),
++ uint8(ri3))
++}
++
++func zRI(op, r1_m1, i2_ri2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1_m1)<<4)|(uint8(op)&0x0F),
++ uint8(i2_ri2>>8),
++ uint8(i2_ri2))
++}
++
++// Expected argument values for the instruction formats.
++//
++// Format a1 a2 a3 a4 a5 a6 a7
++// ------------------------------------
++// a r1, 0, i2, 0, 0, m3, 0
++// b r1, r2, ri4, 0, 0, m3, 0
++// c r1, m3, ri4, 0, 0, 0, i2
++// d r1, r3, i2, 0, 0, 0, 0
++// e r1, r3, ri2, 0, 0, 0, 0
++// f r1, r2, 0, i3, i4, 0, i5
++// g r1, m3, i2, 0, 0, 0, 0
++func zRIE(type_, op, r1, r2_m3_r3, i2_ri4_ri2, i3, i4, m3, i2_i5 uint32, asm *[]byte) {
++ *asm = append(*asm, uint8(op>>8), uint8(r1)<<4|uint8(r2_m3_r3&0x0F))
++
++ switch type_ {
++ default:
++ *asm = append(*asm, uint8(i2_ri4_ri2>>8), uint8(i2_ri4_ri2))
++ case f:
++ *asm = append(*asm, uint8(i3), uint8(i4))
++ }
++
++ switch type_ {
++ case a, b:
++ *asm = append(*asm, uint8(m3)<<4)
++ default:
++ *asm = append(*asm, uint8(i2_i5))
++ }
++
++ *asm = append(*asm, uint8(op))
++}
++
++func zRIL(type_, op, r1_m1, i2_ri2 uint32, asm *[]byte) {
++ if type_ == a || type_ == b {
++ r1_m1 = r1_m1 - obj.RBaseS390X // this is a register base
++ }
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1_m1)<<4)|(uint8(op)&0x0F),
++ uint8(i2_ri2>>24),
++ uint8(i2_ri2>>16),
++ uint8(i2_ri2>>8),
++ uint8(i2_ri2))
++}
++
++func zRIS(op, r1, m3, b4, d4, i2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(m3&0x0F),
++ (uint8(b4)<<4)|(uint8(d4>>8)&0x0F),
++ uint8(d4),
++ uint8(i2),
++ uint8(op))
++}
++
++func zRR(op, r1, r2 uint32, asm *[]byte) {
++ *asm = append(*asm, uint8(op>>8), (uint8(r1)<<4)|uint8(r2&0x0F))
++}
++
++func zRRD(op, r1, r3, r2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ uint8(r1)<<4,
++ (uint8(r3)<<4)|uint8(r2&0x0F))
++}
++
++func zRRE(op, r1, r2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ 0,
++ (uint8(r1)<<4)|uint8(r2&0x0F))
++}
++
++func zRRF(op, r3_m3, m4, r1, r2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ (uint8(r3_m3)<<4)|uint8(m4&0x0F),
++ (uint8(r1)<<4)|uint8(r2&0x0F))
++}
++
++func zRRS(op, r1, r2, b4, d4, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(r2&0x0F),
++ (uint8(b4)<<4)|uint8((d4>>8)&0x0F),
++ uint8(d4),
++ uint8(m3)<<4,
++ uint8(op))
++}
++
++func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(r3_m3&0x0F),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2))
++}
++
++func zRSI(op, r1, r3, ri2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(r3&0x0F),
++ uint8(ri2>>8),
++ uint8(ri2))
++}
++
++func zRSL(type_, op, l1, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(l1),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2),
++ uint8(op))
++}
++
++// (20b) d2 with (12b) dl2 and (8b) dh2.
++func zRSY(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
++ dl2 := uint16(d2) & 0x0FFF
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(r3_m3&0x0F),
++ (uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
++ uint8(dl2),
++ uint8(d2>>12),
++ uint8(op))
++}
++
++func zRX(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1_m1)<<4)|uint8(x2&0x0F),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2))
++}
++
++func zRXE(op, r1, x2, b2, d2, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1)<<4)|uint8(x2&0x0F),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2),
++ uint8(m3)<<4,
++ uint8(op))
++}
++
++func zRXF(op, r3, x2, b2, d2, m1 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r3)<<4)|uint8(x2&0x0F),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2),
++ uint8(m1)<<4,
++ uint8(op))
++}
++
++func zRXY(type_, op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
++ dl2 := uint16(d2) & 0x0FFF
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r1_m1)<<4)|uint8(x2&0x0F),
++ (uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
++ uint8(dl2),
++ uint8(d2>>12),
++ uint8(op))
++}
++
++func zS(op, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2))
++}
++
++func zSI(op, i2, b1, d1 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(i2),
++ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
++ uint8(d1))
++}
++
++func zSIL(op, b1, d1, i2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
++ uint8(d1),
++ uint8(i2>>8),
++ uint8(i2))
++}
++
++func zSIY(op, i2, b1, d1 uint32, asm *[]byte) {
++ dl1 := uint16(d1) & 0x0FFF
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(i2),
++ (uint8(b1)<<4)|(uint8(dl1>>8)&0x0F),
++ uint8(dl1),
++ uint8(d1>>12),
++ uint8(op))
++}
++
++func zSMI(op, m1, b3, d3, ri2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(m1)<<4,
++ (uint8(b3)<<4)|uint8((d3>>8)&0x0F),
++ uint8(d3),
++ uint8(ri2>>8),
++ uint8(ri2))
++}
++
++// Expected argument values for the instruction formats.
++//
++// Format a1 a2 a3 a4 a5 a6
++// -------------------------------
++// a l1, 0, b1, d1, b2, d2
++// b l1, l2, b1, d1, b2, d2
++// c l1, i3, b1, d1, b2, d2
++// d r1, r3, b1, d1, b2, d2
++// e r1, r3, b2, d2, b4, d4
++// f 0, l2, b1, d1, b2, d2
++func zSS(type_, op, l1_r1, l2_i3_r3, b1_b2, d1_d2, b2_b4, d2_d4 uint32, asm *[]byte) {
++ *asm = append(*asm, uint8(op>>8))
++
++ switch type_ {
++ case a:
++ *asm = append(*asm, uint8(l1_r1))
++ case b, c, d, e:
++ *asm = append(*asm, (uint8(l1_r1)<<4)|uint8(l2_i3_r3&0x0F))
++ case f:
++ *asm = append(*asm, uint8(l2_i3_r3))
++ }
++
++ *asm = append(*asm,
++ (uint8(b1_b2)<<4)|uint8((d1_d2>>8)&0x0F),
++ uint8(d1_d2),
++ (uint8(b2_b4)<<4)|uint8((d2_d4>>8)&0x0F),
++ uint8(d2_d4))
++}
++
++func zSSE(op, b1, d1, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(op),
++ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
++ uint8(d1),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2))
++}
++
++func zSSF(op, r3, b1, d1, b2, d2 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(r3)<<4)|(uint8(op)&0x0F),
++ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
++ uint8(d1),
++ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
++ uint8(d2))
++}
++
++func rxb(va, vb, vc, vd uint32) uint8 {
++ mask := uint8(0)
++ if va >= REG_V16 && va <= REG_V31 {
++ mask |= 0x8
++ }
++ if vb >= REG_V16 && vb <= REG_V31 {
++ mask |= 0x4
++ }
++ if vc >= REG_V16 && vc <= REG_V31 {
++ mask |= 0x2
++ }
++ if vd >= REG_V16 && vd <= REG_V31 {
++ mask |= 0x1
++ }
++ return mask
++}
++
++func zVRX(op, v1, x2, b2, d2, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(x2)&0xf),
++ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
++ uint8(d2),
++ (uint8(m3)<<4)|rxb(v1, 0, 0, 0),
++ uint8(op))
++}
++
++func zVRV(op, v1, v2, b2, d2, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
++ uint8(d2),
++ (uint8(m3)<<4)|rxb(v1, v2, 0, 0),
++ uint8(op))
++}
++
++func zVRS(op, v1, v3_r3, b2, d2, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v3_r3)&0xf),
++ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
++ uint8(d2),
++ (uint8(m4)<<4)|rxb(v1, v3_r3, 0, 0),
++ uint8(op))
++}
++
++func zVRRa(op, v1, v2, m5, m4, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ 0,
++ (uint8(m5)<<4)|(uint8(m4)&0xf),
++ (uint8(m3)<<4)|rxb(v1, v2, 0, 0),
++ uint8(op))
++}
++
++func zVRRb(op, v1, v2, v3, m5, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ uint8(v3)<<4,
++ uint8(m5)<<4,
++ (uint8(m4)<<4)|rxb(v1, v2, v3, 0),
++ uint8(op))
++}
++
++func zVRRc(op, v1, v2, v3, m6, m5, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ uint8(v3)<<4,
++ (uint8(m6)<<4)|(uint8(m5)&0xf),
++ (uint8(m4)<<4)|rxb(v1, v2, v3, 0),
++ uint8(op))
++}
++
++func zVRRd(op, v1, v2, v3, m5, m6, v4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ (uint8(v3)<<4)|(uint8(m5)&0xf),
++ uint8(m6)<<4,
++ (uint8(v4)<<4)|rxb(v1, v2, v3, v4),
++ uint8(op))
++}
++
++func zVRRe(op, v1, v2, v3, m6, m5, v4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ (uint8(v3)<<4)|(uint8(m6)&0xf),
++ uint8(m5),
++ (uint8(v4)<<4)|rxb(v1, v2, v3, v4),
++ uint8(op))
++}
++
++func zVRRf(op, v1, r2, r3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(r2)&0xf),
++ uint8(r3)<<4,
++ 0,
++ rxb(v1, 0, 0, 0),
++ uint8(op))
++}
++
++func zVRIa(op, v1, i2, m3 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(v1)<<4,
++ uint8(i2>>8),
++ uint8(i2),
++ (uint8(m3)<<4)|rxb(v1, 0, 0, 0),
++ uint8(op))
++}
++
++func zVRIb(op, v1, i2, i3, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ uint8(v1)<<4,
++ uint8(i2),
++ uint8(i3),
++ (uint8(m4)<<4)|rxb(v1, 0, 0, 0),
++ uint8(op))
++}
++
++func zVRIc(op, v1, v3, i2, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v3)&0xf),
++ uint8(i2>>8),
++ uint8(i2),
++ (uint8(m4)<<4)|rxb(v1, v3, 0, 0),
++ uint8(op))
++}
++
++func zVRId(op, v1, v2, v3, i4, m5 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ uint8(v3)<<4,
++ uint8(i4),
++ (uint8(m5)<<4)|rxb(v1, v2, v3, 0),
++ uint8(op))
++}
++
++func zVRIe(op, v1, v2, i3, m5, m4 uint32, asm *[]byte) {
++ *asm = append(*asm,
++ uint8(op>>8),
++ (uint8(v1)<<4)|(uint8(v2)&0xf),
++ uint8(i3>>4),
++ (uint8(i3)<<4)|(uint8(m5)&0xf),
++ (uint8(m4)<<4)|rxb(v1, v2, 0, 0),
++ uint8(op))
++}
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/listz.go
+@@ -0,0 +1,73 @@
++// Based on cmd/internal/obj/ppc64/list9.go.
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/internal/obj"
++ "fmt"
++)
++
++func init() {
++ obj.RegisterRegister(obj.RBaseS390X, REG_R0+1024, Rconv)
++ obj.RegisterOpcode(obj.ABaseS390X, Anames)
++}
++
++func Rconv(r int) string {
++ if r == 0 {
++ return "NONE"
++ }
++ if r == REGG {
++ // Special case.
++ return "g"
++ }
++ if REG_R0 <= r && r <= REG_R15 {
++ return fmt.Sprintf("R%d", r-REG_R0)
++ }
++ if REG_F0 <= r && r <= REG_F15 {
++ return fmt.Sprintf("F%d", r-REG_F0)
++ }
++ if REG_AR0 <= r && r <= REG_AR15 {
++ return fmt.Sprintf("AR%d", r-REG_AR0)
++ }
++ if REG_V0 <= r && r <= REG_V31 {
++ return fmt.Sprintf("V%d", r-REG_V0)
++ }
++ return fmt.Sprintf("Rgok(%d)", r-obj.RBaseS390X)
++}
++
++func DRconv(a int) string {
++ s := "C_??"
++ if a >= C_NONE && a <= C_NCLASS {
++ s = cnamesz[a]
++ }
++ var fp string
++ fp += s
++ return fp
++}
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/objz.go
+@@ -0,0 +1,1024 @@
++// Based on cmd/internal/obj/ppc64/obj9.go.
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/internal/obj"
++ "encoding/binary"
++ "fmt"
++ "math"
++)
++
++func progedit(ctxt *obj.Link, p *obj.Prog) {
++ p.From.Class = 0
++ p.To.Class = 0
++
++ // Rewrite BR/BL to symbol as TYPE_BRANCH.
++ switch p.As {
++ case ABR,
++ ABL,
++ obj.ARET,
++ obj.ADUFFZERO,
++ obj.ADUFFCOPY:
++ if p.To.Sym != nil {
++ p.To.Type = obj.TYPE_BRANCH
++ }
++ }
++
++ // Rewrite float constants to values stored in memory unless they are +0.
++ switch p.As {
++ case AFMOVS:
++ if p.From.Type == obj.TYPE_FCONST {
++ f32 := float32(p.From.Val.(float64))
++ i32 := math.Float32bits(f32)
++ if i32 == 0 { // +0
++ break
++ }
++ literal := fmt.Sprintf("$f32.%08x", i32)
++ s := obj.Linklookup(ctxt, literal, 0)
++ s.Size = 4
++ p.From.Type = obj.TYPE_MEM
++ p.From.Sym = s
++ p.From.Sym.Local = true
++ p.From.Name = obj.NAME_EXTERN
++ p.From.Offset = 0
++ }
++
++ case AFMOVD:
++ if p.From.Type == obj.TYPE_FCONST {
++ i64 := math.Float64bits(p.From.Val.(float64))
++ if i64 == 0 { // +0
++ break
++ }
++ literal := fmt.Sprintf("$f64.%016x", i64)
++ s := obj.Linklookup(ctxt, literal, 0)
++ s.Size = 8
++ p.From.Type = obj.TYPE_MEM
++ p.From.Sym = s
++ p.From.Sym.Local = true
++ p.From.Name = obj.NAME_EXTERN
++ p.From.Offset = 0
++ }
++
++ // put constants not loadable by LOAD IMMEDIATE into memory
++ case AMOVD:
++ if p.From.Type == obj.TYPE_CONST {
++ val := p.From.Offset
++ if int64(int32(val)) != val &&
++ int64(uint32(val)) != val &&
++ int64(uint64(val)&(0xffffffff<<32)) != val {
++ literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
++ s := obj.Linklookup(ctxt, literal, 0)
++ s.Size = 8
++ p.From.Type = obj.TYPE_MEM
++ p.From.Sym = s
++ p.From.Sym.Local = true
++ p.From.Name = obj.NAME_EXTERN
++ p.From.Offset = 0
++ }
++ }
++ }
++
++ // Rewrite SUB constants into ADD.
++ switch p.As {
++ case ASUBC:
++ if p.From.Type == obj.TYPE_CONST {
++ p.From.Offset = -p.From.Offset
++ p.As = AADDC
++ }
++
++ case ASUB:
++ if p.From.Type == obj.TYPE_CONST {
++ p.From.Offset = -p.From.Offset
++ p.As = AADD
++ }
++ }
++
++ if ctxt.Flag_dynlink {
++ rewriteToUseGot(ctxt, p)
++ }
++}
++
++// Rewrite p, if necessary, to access global data via the global offset table.
++func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
++ // At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in
++ // assembly code.
++ if p.As == AEXRL {
++ return
++ }
++
++ // We only care about global data: NAME_EXTERN means a global
++ // symbol in the Go sense, and p.Sym.Local is true for a few
++ // internally defined symbols.
++ if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
++ // MOVD $sym, Rx becomes MOVD sym@GOT, Rx
++ // MOVD $sym+, Rx becomes MOVD sym@GOT, Rx; ADD , Rx
++ if p.To.Type != obj.TYPE_REG || p.As != AMOVD {
++ ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
++ }
++ p.From.Type = obj.TYPE_MEM
++ p.From.Name = obj.NAME_GOTREF
++ q := p
++ if p.From.Offset != 0 {
++ q = obj.Appendp(ctxt, p)
++ q.As = AADD
++ q.From.Type = obj.TYPE_CONST
++ q.From.Offset = p.From.Offset
++ q.To = p.To
++ p.From.Offset = 0
++ }
++ }
++ if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
++ ctxt.Diag("don't know how to handle %v with -dynlink", p)
++ }
++ var source *obj.Addr
++ // MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry
++ // MOVD Ry, sym becomes MOVD sym@GOT, REGTMP; MOVD Ry, (REGTMP)
++ // An addition may be inserted between the two MOVs if there is an offset.
++ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
++ if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
++ ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
++ }
++ source = &p.From
++ } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
++ source = &p.To
++ } else {
++ return
++ }
++ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
++ return
++ }
++ if source.Sym.Type == obj.STLSBSS {
++ return
++ }
++ if source.Type != obj.TYPE_MEM {
++ ctxt.Diag("don't know how to handle %v with -dynlink", p)
++ }
++ p1 := obj.Appendp(ctxt, p)
++ p2 := obj.Appendp(ctxt, p1)
++
++ p1.As = AMOVD
++ p1.From.Type = obj.TYPE_MEM
++ p1.From.Sym = source.Sym
++ p1.From.Name = obj.NAME_GOTREF
++ p1.To.Type = obj.TYPE_REG
++ p1.To.Reg = REGTMP
++
++ p2.As = p.As
++ p2.From = p.From
++ p2.To = p.To
++ if p.From.Name == obj.NAME_EXTERN {
++ p2.From.Reg = REGTMP
++ p2.From.Name = obj.NAME_NONE
++ p2.From.Sym = nil
++ } else if p.To.Name == obj.NAME_EXTERN {
++ p2.To.Reg = REGTMP
++ p2.To.Name = obj.NAME_NONE
++ p2.To.Sym = nil
++ } else {
++ return
++ }
++ obj.Nopout(p)
++}
++
++func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
++ // TODO(minux): add morestack short-cuts with small fixed frame-size.
++ ctxt.Cursym = cursym
++
++ if cursym.Text == nil || cursym.Text.Link == nil {
++ return
++ }
++
++ p := cursym.Text
++ textstksiz := p.To.Offset
++ if textstksiz == -8 {
++ // Compatibility hack.
++ p.From3.Offset |= obj.NOFRAME
++ textstksiz = 0
++ }
++ if textstksiz%8 != 0 {
++ ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
++ }
++ if p.From3.Offset&obj.NOFRAME != 0 {
++ if textstksiz != 0 {
++ ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
++ }
++ }
++
++ cursym.Args = p.To.Val.(int32)
++ cursym.Locals = int32(textstksiz)
++
++ /*
++ * find leaf subroutines
++ * strip NOPs
++ * expand RET
++ * expand BECOME pseudo
++ */
++ if ctxt.Debugvlog != 0 {
++ fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
++ }
++ ctxt.Bso.Flush()
++
++ var q *obj.Prog
++ var q1 *obj.Prog
++ for p := cursym.Text; p != nil; p = p.Link {
++ switch p.As {
++ /* too hard, just leave alone */
++ case obj.ATEXT:
++ q = p
++
++ p.Mark |= LABEL | LEAF | SYNC
++ if p.Link != nil {
++ p.Link.Mark |= LABEL
++ }
++
++ case ANOR:
++ q = p
++ if p.To.Type == obj.TYPE_REG {
++ if p.To.Reg == REGZERO {
++ p.Mark |= LABEL | SYNC
++ }
++ }
++
++ case ASYNC,
++ AWORD:
++ q = p
++ p.Mark |= LABEL | SYNC
++ continue
++
++ case AMOVW, AMOVWZ, AMOVD:
++ q = p
++ if p.From.Reg >= REG_RESERVED || p.To.Reg >= REG_RESERVED {
++ p.Mark |= LABEL | SYNC
++ }
++ continue
++
++ case AFABS,
++ AFADD,
++ AFDIV,
++ AFMADD,
++ AFMOVD,
++ AFMOVS,
++ AFMSUB,
++ AFMUL,
++ AFNABS,
++ AFNEG,
++ AFNMADD,
++ AFNMSUB,
++ ALEDBR,
++ ALDEBR,
++ AFSUB:
++ q = p
++
++ p.Mark |= FLOAT
++ continue
++
++ case ABL,
++ ABCL,
++ obj.ADUFFZERO,
++ obj.ADUFFCOPY:
++ cursym.Text.Mark &^= LEAF
++ fallthrough
++
++ case ABC,
++ ABEQ,
++ ABGE,
++ ABGT,
++ ABLE,
++ ABLT,
++ ABNE,
++ ABR,
++ ABVC,
++ ABVS,
++ ACMPBEQ,
++ ACMPBGE,
++ ACMPBGT,
++ ACMPBLE,
++ ACMPBLT,
++ ACMPBNE,
++ ACMPUBEQ,
++ ACMPUBGE,
++ ACMPUBGT,
++ ACMPUBLE,
++ ACMPUBLT,
++ ACMPUBNE:
++ p.Mark |= BRANCH
++ q = p
++ q1 = p.Pcond
++ if q1 != nil {
++ for q1.As == obj.ANOP {
++ q1 = q1.Link
++ p.Pcond = q1
++ }
++
++ if q1.Mark&LEAF == 0 {
++ q1.Mark |= LABEL
++ }
++ } else {
++ p.Mark |= LABEL
++ }
++ q1 = p.Link
++ if q1 != nil {
++ q1.Mark |= LABEL
++ }
++ continue
++
++ case AFCMPO, AFCMPU:
++ q = p
++ p.Mark |= FCMP | FLOAT
++ continue
++
++ case obj.ARET:
++ q = p
++ if p.Link != nil {
++ p.Link.Mark |= LABEL
++ }
++ continue
++
++ case obj.ANOP:
++ q1 = p.Link
++ q.Link = q1 /* q is non-nop */
++ q1.Mark |= p.Mark
++ continue
++
++ default:
++ q = p
++ continue
++ }
++ }
++
++ autosize := int32(0)
++ var o int
++ var p1 *obj.Prog
++ var p2 *obj.Prog
++ var pLast *obj.Prog
++ var pPre *obj.Prog
++ var pPreempt *obj.Prog
++ wasSplit := false
++ for p := cursym.Text; p != nil; p = p.Link {
++ pLast = p
++ o = int(p.As)
++ switch o {
++ case obj.ATEXT:
++ autosize = int32(textstksiz)
++
++ if p.Mark&LEAF != 0 && autosize == 0 && p.From3.Offset&obj.NOFRAME == 0 {
++ // A leaf function with no locals has no frame.
++ p.From3.Offset |= obj.NOFRAME
++ }
++
++ if p.From3.Offset&obj.NOFRAME == 0 {
++ // If there is a stack frame at all, it includes
++ // space to save the LR.
++ autosize += int32(ctxt.FixedFrameSize())
++ }
++
++ p.To.Offset = int64(autosize)
++
++ q = p
++
++ if p.From3.Offset&obj.NOSPLIT == 0 {
++ p, pPreempt = stacksplitPre(ctxt, p, autosize) // emit pre part of split check
++ pPre = p
++ wasSplit = true //need post part of split
++ }
++
++ if autosize != 0 {
++ q = obj.Appendp(ctxt, p)
++ q.As = AMOVD
++ q.From.Type = obj.TYPE_ADDR
++ q.From.Offset = int64(-autosize)
++ q.From.Reg = REGSP // not actually needed - REGSP is assumed if no reg is provided
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REGSP
++ q.Spadj = autosize
++ } else if cursym.Text.Mark&LEAF == 0 {
++ // A very few functions that do not return to their caller
++ // (e.g. gogo) are not identified as leaves but still have
++ // no frame.
++ cursym.Text.Mark |= LEAF
++ }
++
++ if cursym.Text.Mark&LEAF != 0 {
++ cursym.Leaf = 1
++ break
++ }
++
++ q = obj.Appendp(ctxt, q)
++ q.As = AMOVD
++ q.From.Type = obj.TYPE_REG
++ q.From.Reg = REG_LR
++ q.To.Type = obj.TYPE_MEM
++ q.To.Reg = REGSP
++ q.To.Offset = 0
++
++ if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
++ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
++ //
++ // MOVD g_panic(g), R3
++ // CMP R0, R3
++ // BEQ end
++ // MOVD panic_argp(R3), R4
++ // ADD $(autosize+8), R1, R5
++ // CMP R4, R5
++ // BNE end
++ // ADD $8, R1, R6
++ // MOVD R6, panic_argp(R3)
++ // end:
++ // NOP
++ //
++ // The NOP is needed to give the jumps somewhere to land.
++ // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes.
++
++ q = obj.Appendp(ctxt, q)
++
++ q.As = AMOVD
++ q.From.Type = obj.TYPE_MEM
++ q.From.Reg = REGG
++ q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R3
++
++ q = obj.Appendp(ctxt, q)
++ q.As = ACMP
++ q.From.Type = obj.TYPE_REG
++ q.From.Reg = REG_R0
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R3
++
++ q = obj.Appendp(ctxt, q)
++ q.As = ABEQ
++ q.To.Type = obj.TYPE_BRANCH
++ p1 = q
++
++ q = obj.Appendp(ctxt, q)
++ q.As = AMOVD
++ q.From.Type = obj.TYPE_MEM
++ q.From.Reg = REG_R3
++ q.From.Offset = 0 // Panic.argp
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R4
++
++ q = obj.Appendp(ctxt, q)
++ q.As = AADD
++ q.From.Type = obj.TYPE_CONST
++ q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
++ q.Reg = REGSP
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R5
++
++ q = obj.Appendp(ctxt, q)
++ q.As = ACMP
++ q.From.Type = obj.TYPE_REG
++ q.From.Reg = REG_R4
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R5
++
++ q = obj.Appendp(ctxt, q)
++ q.As = ABNE
++ q.To.Type = obj.TYPE_BRANCH
++ p2 = q
++
++ q = obj.Appendp(ctxt, q)
++ q.As = AADD
++ q.From.Type = obj.TYPE_CONST
++ q.From.Offset = ctxt.FixedFrameSize()
++ q.Reg = REGSP
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_R6
++
++ q = obj.Appendp(ctxt, q)
++ q.As = AMOVD
++ q.From.Type = obj.TYPE_REG
++ q.From.Reg = REG_R6
++ q.To.Type = obj.TYPE_MEM
++ q.To.Reg = REG_R3
++ q.To.Offset = 0 // Panic.argp
++
++ q = obj.Appendp(ctxt, q)
++
++ q.As = obj.ANOP
++ p1.Pcond = q
++ p2.Pcond = q
++ }
++
++ case obj.ARET:
++ if p.From.Type == obj.TYPE_CONST {
++ ctxt.Diag("using BECOME (%v) is not supported!", p)
++ break
++ }
++
++ retTarget := p.To.Sym
++
++ if cursym.Text.Mark&LEAF != 0 {
++ if autosize == 0 {
++ p.As = ABR
++ p.From = obj.Addr{}
++ if retTarget == nil {
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_LR
++ } else {
++ p.To.Type = obj.TYPE_BRANCH
++ p.To.Sym = retTarget
++ }
++ p.Mark |= BRANCH
++ break
++ }
++
++ p.As = AADD
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = int64(autosize)
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REGSP
++ p.Spadj = -autosize
++
++ q = obj.Appendp(ctxt, p)
++ q.As = ABR
++ q.From = obj.Addr{}
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_LR
++ q.Mark |= BRANCH
++ q.Spadj = autosize
++ break
++ }
++
++ p.As = AMOVD
++ p.From.Type = obj.TYPE_MEM
++ p.From.Reg = REGSP
++ p.From.Offset = 0
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_LR
++
++ q = p
++
++ if autosize != 0 {
++ q = obj.Appendp(ctxt, q)
++ q.As = AADD
++ q.From.Type = obj.TYPE_CONST
++ q.From.Offset = int64(autosize)
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REGSP
++ q.Spadj = -autosize
++ }
++
++ q = obj.Appendp(ctxt, q)
++ q.As = ABR
++ q.From = obj.Addr{}
++ if retTarget == nil {
++ q.To.Type = obj.TYPE_REG
++ q.To.Reg = REG_LR
++ } else {
++ q.To.Type = obj.TYPE_BRANCH
++ q.To.Sym = retTarget
++ }
++ q.Mark |= BRANCH
++ q.Spadj = autosize
++
++ case AADD:
++ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
++ p.Spadj = int32(-p.From.Offset)
++ }
++ }
++ }
++ if wasSplit {
++ pLast = stacksplitPost(ctxt, pLast, pPre, pPreempt) // emit post part of split check
++ }
++}
++
++/*
++// instruction scheduling
++ if(debug['Q'] == 0)
++ return;
++
++ curtext = nil;
++ q = nil; // p - 1
++ q1 = firstp; // top of block
++ o = 0; // count of instructions
++ for(p = firstp; p != nil; p = p1) {
++ p1 = p->link;
++ o++;
++ if(p->mark & NOSCHED){
++ if(q1 != p){
++ sched(q1, q);
++ }
++ for(; p != nil; p = p->link){
++ if(!(p->mark & NOSCHED))
++ break;
++ q = p;
++ }
++ p1 = p;
++ q1 = p;
++ o = 0;
++ continue;
++ }
++ if(p->mark & (LABEL|SYNC)) {
++ if(q1 != p)
++ sched(q1, q);
++ q1 = p;
++ o = 1;
++ }
++ if(p->mark & (BRANCH|SYNC)) {
++ sched(q1, p);
++ q1 = p1;
++ o = 0;
++ }
++ if(o >= NSCHED) {
++ sched(q1, p);
++ q1 = p1;
++ o = 0;
++ }
++ q = p;
++ }
++*/
++func stacksplitPre(ctxt *obj.Link, p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) {
++ var q *obj.Prog
++
++ // MOVD g_stackguard(g), R3
++ p = obj.Appendp(ctxt, p)
++
++ p.As = AMOVD
++ p.From.Type = obj.TYPE_MEM
++ p.From.Reg = REGG
++ p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
++ if ctxt.Cursym.Cfunc != 0 {
++ p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
++ }
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_R3
++
++ q = nil
++ if framesize <= obj.StackSmall {
++ // small stack: SP < stackguard
++ // CMP stackguard, SP
++
++ //p.To.Type = obj.TYPE_REG
++ //p.To.Reg = REGSP
++
++ // q1: BLT done
++
++ p = obj.Appendp(ctxt, p)
++ //q1 = p
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REG_R3
++ p.Reg = REGSP
++ p.As = ACMPUBGE
++ p.To.Type = obj.TYPE_BRANCH
++ //p = obj.Appendp(ctxt, p)
++
++ //p.As = ACMPU
++ //p.From.Type = obj.TYPE_REG
++ //p.From.Reg = REG_R3
++ //p.To.Type = obj.TYPE_REG
++ //p.To.Reg = REGSP
++
++ //p = obj.Appendp(ctxt, p)
++ //p.As = ABGE
++ //p.To.Type = obj.TYPE_BRANCH
++
++ } else if framesize <= obj.StackBig {
++ // large stack: SP-framesize < stackguard-StackSmall
++ // ADD $-framesize, SP, R4
++ // CMP stackguard, R4
++ p = obj.Appendp(ctxt, p)
++
++ p.As = AADD
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = int64(-framesize)
++ p.Reg = REGSP
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_R4
++
++ p = obj.Appendp(ctxt, p)
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REG_R3
++ p.Reg = REG_R4
++ p.As = ACMPUBGE
++ p.To.Type = obj.TYPE_BRANCH
++
++ } else {
++ // Such a large stack we need to protect against wraparound.
++ // If SP is close to zero:
++ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
++ // The +StackGuard on both sides is required to keep the left side positive:
++ // SP is allowed to be slightly below stackguard. See stack.h.
++ //
++ // Preemption sets stackguard to StackPreempt, a very large value.
++ // That breaks the math above, so we have to check for that explicitly.
++ // // stackguard is R3
++ // CMP R3, $StackPreempt
++ // BEQ label-of-call-to-morestack
++ // ADD $StackGuard, SP, R4
++ // SUB R3, R4
++ // MOVD $(framesize+(StackGuard-StackSmall)), TEMP
++ // CMPUBGE TEMP, R4
++ p = obj.Appendp(ctxt, p)
++
++ p.As = ACMP
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REG_R3
++ p.To.Type = obj.TYPE_CONST
++ p.To.Offset = obj.StackPreempt
++
++ p = obj.Appendp(ctxt, p)
++ q = p
++ p.As = ABEQ
++ p.To.Type = obj.TYPE_BRANCH
++
++ p = obj.Appendp(ctxt, p)
++ p.As = AADD
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = obj.StackGuard
++ p.Reg = REGSP
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_R4
++
++ p = obj.Appendp(ctxt, p)
++ p.As = ASUB
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REG_R3
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_R4
++
++ p = obj.Appendp(ctxt, p)
++ p.As = AMOVD
++ p.From.Type = obj.TYPE_CONST
++ p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REGTMP
++
++ p = obj.Appendp(ctxt, p)
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REGTMP
++ p.Reg = REG_R4
++ p.As = ACMPUBGE
++ p.To.Type = obj.TYPE_BRANCH
++ }
++
++ return p, q
++}
++
++func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog) *obj.Prog {
++
++ // MOVD LR, R5
++ p = obj.Appendp(ctxt, p)
++ pPre.Pcond = p
++ p.As = AMOVD
++ p.From.Type = obj.TYPE_REG
++ p.From.Reg = REG_LR
++ p.To.Type = obj.TYPE_REG
++ p.To.Reg = REG_R5
++ if pPreempt != nil {
++ pPreempt.Pcond = p
++ }
++
++ // BL runtime.morestack(SB)
++ p = obj.Appendp(ctxt, p)
++
++ p.As = ABL
++ p.To.Type = obj.TYPE_BRANCH
++ if ctxt.Cursym.Cfunc != 0 {
++ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
++ } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
++ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
++ } else {
++ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
++ }
++
++ // BR start
++ p = obj.Appendp(ctxt, p)
++
++ p.As = ABR
++ p.To.Type = obj.TYPE_BRANCH
++ p.Pcond = ctxt.Cursym.Text.Link
++ return p
++}
++
++var pc_cnt int64
++
++func follow(ctxt *obj.Link, s *obj.LSym) {
++ ctxt.Cursym = s
++
++ pc_cnt = 0
++ firstp := ctxt.NewProg()
++ lastp := firstp
++ xfol(ctxt, s.Text, &lastp)
++ lastp.Link = nil
++ s.Text = firstp.Link
++}
++
++func relinv(a int) int {
++ switch a {
++ case ABEQ:
++ return ABNE
++ case ABNE:
++ return ABEQ
++
++ case ABGE:
++ return ABLT
++ case ABLT:
++ return ABGE
++
++ case ABGT:
++ return ABLE
++ case ABLE:
++ return ABGT
++
++ case ABVC:
++ return ABVS
++ case ABVS:
++ return ABVC
++ }
++
++ return 0
++}
++
++func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
++ var q *obj.Prog
++ var r *obj.Prog
++ var a int
++ var b int
++ var i int
++
++loop:
++ if p == nil {
++ return
++ }
++ a = int(p.As)
++ if a == ABR {
++ q = p.Pcond
++ if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
++ p.Mark |= FOLL
++ (*last).Link = p
++ *last = p
++ (*last).Pc = pc_cnt
++ pc_cnt += 1
++ p = p.Link
++ xfol(ctxt, p, last)
++ p = q
++ if p != nil && p.Mark&FOLL == 0 {
++ goto loop
++ }
++ return
++ }
++
++ if q != nil {
++ p.Mark |= FOLL
++ p = q
++ if p.Mark&FOLL == 0 {
++ goto loop
++ }
++ }
++ }
++
++ if p.Mark&FOLL != 0 {
++ i = 0
++ q = p
++ for ; i < 4; i, q = i+1, q.Link {
++ if q == *last || (q.Mark&NOSCHED != 0) {
++ break
++ }
++ b = 0 /* set */
++ a = int(q.As)
++ if a == obj.ANOP {
++ i--
++ continue
++ }
++
++ if a == ABR || a == obj.ARET {
++ goto copy
++ }
++ if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
++ continue
++ }
++ b = relinv(a)
++ if b == 0 {
++ continue
++ }
++
++ copy:
++ for {
++ r = ctxt.NewProg()
++ *r = *p
++ if r.Mark&FOLL == 0 {
++ fmt.Printf("cant happen 1\n")
++ }
++ r.Mark |= FOLL
++ if p != q {
++ p = p.Link
++ (*last).Link = r
++ *last = r
++ (*last).Pc = pc_cnt
++ pc_cnt += 1
++ continue
++ }
++
++ (*last).Link = r
++ *last = r
++ (*last).Pc = pc_cnt
++ pc_cnt += 1
++ if a == ABR || a == obj.ARET {
++ return
++ }
++ r.As = int16(b)
++ r.Pcond = p.Link
++ r.Link = p.Pcond
++ if r.Link.Mark&FOLL == 0 {
++ xfol(ctxt, r.Link, last)
++ }
++ if r.Pcond.Mark&FOLL == 0 {
++ fmt.Printf("cant happen 2\n")
++ }
++ return
++ }
++ }
++
++ a = ABR
++ q = ctxt.NewProg()
++ q.As = int16(a)
++ q.Lineno = p.Lineno
++ q.To.Type = obj.TYPE_BRANCH
++ q.To.Offset = p.Pc
++ q.Pcond = p
++ p = q
++ }
++
++ p.Mark |= FOLL
++ (*last).Link = p
++ *last = p
++ (*last).Pc = pc_cnt
++ pc_cnt += 1
++
++ if a == ABR || a == obj.ARET {
++ if p.Mark&NOSCHED != 0 {
++ p = p.Link
++ goto loop
++ }
++
++ return
++ }
++
++ if p.Pcond != nil {
++ if a != ABL && p.Link != nil {
++ xfol(ctxt, p.Link, last)
++ p = p.Pcond
++ if p == nil || (p.Mark&FOLL != 0) {
++ return
++ }
++ goto loop
++ }
++ }
++
++ p = p.Link
++ goto loop
++}
++
++var unaryDst = map[int]bool{
++ ASTCK: true,
++ ASTCKC: true,
++ ASTCKE: true,
++ ASTCKF: true,
++ ANEG: true,
++ AVONE: true,
++ AVZERO: true,
++}
++
++var Links390x = obj.LinkArch{
++ ByteOrder: binary.BigEndian,
++ Name: "s390x",
++ Thechar: 'z',
++ Preprocess: preprocess,
++ Assemble: spanz,
++ Follow: follow,
++ Progedit: progedit,
++ UnaryDst: unaryDst,
++ Minlc: 2,
++ Ptrsize: 8,
++ Regsize: 8,
++}
+--- /dev/null
++++ b/src/cmd/internal/obj/s390x/vector.go
+@@ -0,0 +1,1057 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package s390x
++
++// This file contains utility functions for use when
++// assembling vector instructions.
++
++// vop returns the opcode, element size and condition
++// setting for the given (possibly extended) mnemonic.
++func vop(as int16) (opcode, es, cs uint32) {
++ switch as {
++ default:
++ return 0, 0, 0
++ case AVA:
++ return op_VA, 0, 0
++ case AVAB:
++ return op_VA, 0, 0
++ case AVAH:
++ return op_VA, 1, 0
++ case AVAF:
++ return op_VA, 2, 0
++ case AVAG:
++ return op_VA, 3, 0
++ case AVAQ:
++ return op_VA, 4, 0
++ case AVACC:
++ return op_VACC, 0, 0
++ case AVACCB:
++ return op_VACC, 0, 0
++ case AVACCH:
++ return op_VACC, 1, 0
++ case AVACCF:
++ return op_VACC, 2, 0
++ case AVACCG:
++ return op_VACC, 3, 0
++ case AVACCQ:
++ return op_VACC, 4, 0
++ case AVAC:
++ return op_VAC, 0, 0
++ case AVACQ:
++ return op_VAC, 4, 0
++ case AVACCC:
++ return op_VACCC, 0, 0
++ case AVACCCQ:
++ return op_VACCC, 4, 0
++ case AVN:
++ return op_VN, 0, 0
++ case AVNC:
++ return op_VNC, 0, 0
++ case AVAVG:
++ return op_VAVG, 0, 0
++ case AVAVGB:
++ return op_VAVG, 0, 0
++ case AVAVGH:
++ return op_VAVG, 1, 0
++ case AVAVGF:
++ return op_VAVG, 2, 0
++ case AVAVGG:
++ return op_VAVG, 3, 0
++ case AVAVGL:
++ return op_VAVGL, 0, 0
++ case AVAVGLB:
++ return op_VAVGL, 0, 0
++ case AVAVGLH:
++ return op_VAVGL, 1, 0
++ case AVAVGLF:
++ return op_VAVGL, 2, 0
++ case AVAVGLG:
++ return op_VAVGL, 3, 0
++ case AVCKSM:
++ return op_VCKSM, 0, 0
++ case AVCEQ:
++ return op_VCEQ, 0, 0
++ case AVCEQB:
++ return op_VCEQ, 0, 0
++ case AVCEQH:
++ return op_VCEQ, 1, 0
++ case AVCEQF:
++ return op_VCEQ, 2, 0
++ case AVCEQG:
++ return op_VCEQ, 3, 0
++ case AVCEQBS:
++ return op_VCEQ, 0, 1
++ case AVCEQHS:
++ return op_VCEQ, 1, 1
++ case AVCEQFS:
++ return op_VCEQ, 2, 1
++ case AVCEQGS:
++ return op_VCEQ, 3, 1
++ case AVCH:
++ return op_VCH, 0, 0
++ case AVCHB:
++ return op_VCH, 0, 0
++ case AVCHH:
++ return op_VCH, 1, 0
++ case AVCHF:
++ return op_VCH, 2, 0
++ case AVCHG:
++ return op_VCH, 3, 0
++ case AVCHBS:
++ return op_VCH, 0, 1
++ case AVCHHS:
++ return op_VCH, 1, 1
++ case AVCHFS:
++ return op_VCH, 2, 1
++ case AVCHGS:
++ return op_VCH, 3, 1
++ case AVCHL:
++ return op_VCHL, 0, 0
++ case AVCHLB:
++ return op_VCHL, 0, 0
++ case AVCHLH:
++ return op_VCHL, 1, 0
++ case AVCHLF:
++ return op_VCHL, 2, 0
++ case AVCHLG:
++ return op_VCHL, 3, 0
++ case AVCHLBS:
++ return op_VCHL, 0, 1
++ case AVCHLHS:
++ return op_VCHL, 1, 1
++ case AVCHLFS:
++ return op_VCHL, 2, 1
++ case AVCHLGS:
++ return op_VCHL, 3, 1
++ case AVCLZ:
++ return op_VCLZ, 0, 0
++ case AVCLZB:
++ return op_VCLZ, 0, 0
++ case AVCLZH:
++ return op_VCLZ, 1, 0
++ case AVCLZF:
++ return op_VCLZ, 2, 0
++ case AVCLZG:
++ return op_VCLZ, 3, 0
++ case AVCTZ:
++ return op_VCTZ, 0, 0
++ case AVCTZB:
++ return op_VCTZ, 0, 0
++ case AVCTZH:
++ return op_VCTZ, 1, 0
++ case AVCTZF:
++ return op_VCTZ, 2, 0
++ case AVCTZG:
++ return op_VCTZ, 3, 0
++ case AVEC:
++ return op_VEC, 0, 0
++ case AVECB:
++ return op_VEC, 0, 0
++ case AVECH:
++ return op_VEC, 1, 0
++ case AVECF:
++ return op_VEC, 2, 0
++ case AVECG:
++ return op_VEC, 3, 0
++ case AVECL:
++ return op_VECL, 0, 0
++ case AVECLB:
++ return op_VECL, 0, 0
++ case AVECLH:
++ return op_VECL, 1, 0
++ case AVECLF:
++ return op_VECL, 2, 0
++ case AVECLG:
++ return op_VECL, 3, 0
++ case AVERIM:
++ return op_VERIM, 0, 0
++ case AVERIMB:
++ return op_VERIM, 0, 0
++ case AVERIMH:
++ return op_VERIM, 1, 0
++ case AVERIMF:
++ return op_VERIM, 2, 0
++ case AVERIMG:
++ return op_VERIM, 3, 0
++ case AVERLL:
++ return op_VERLL, 0, 0
++ case AVERLLB:
++ return op_VERLL, 0, 0
++ case AVERLLH:
++ return op_VERLL, 1, 0
++ case AVERLLF:
++ return op_VERLL, 2, 0
++ case AVERLLG:
++ return op_VERLL, 3, 0
++ case AVERLLV:
++ return op_VERLLV, 0, 0
++ case AVERLLVB:
++ return op_VERLLV, 0, 0
++ case AVERLLVH:
++ return op_VERLLV, 1, 0
++ case AVERLLVF:
++ return op_VERLLV, 2, 0
++ case AVERLLVG:
++ return op_VERLLV, 3, 0
++ case AVESLV:
++ return op_VESLV, 0, 0
++ case AVESLVB:
++ return op_VESLV, 0, 0
++ case AVESLVH:
++ return op_VESLV, 1, 0
++ case AVESLVF:
++ return op_VESLV, 2, 0
++ case AVESLVG:
++ return op_VESLV, 3, 0
++ case AVESL:
++ return op_VESL, 0, 0
++ case AVESLB:
++ return op_VESL, 0, 0
++ case AVESLH:
++ return op_VESL, 1, 0
++ case AVESLF:
++ return op_VESL, 2, 0
++ case AVESLG:
++ return op_VESL, 3, 0
++ case AVESRA:
++ return op_VESRA, 0, 0
++ case AVESRAB:
++ return op_VESRA, 0, 0
++ case AVESRAH:
++ return op_VESRA, 1, 0
++ case AVESRAF:
++ return op_VESRA, 2, 0
++ case AVESRAG:
++ return op_VESRA, 3, 0
++ case AVESRAV:
++ return op_VESRAV, 0, 0
++ case AVESRAVB:
++ return op_VESRAV, 0, 0
++ case AVESRAVH:
++ return op_VESRAV, 1, 0
++ case AVESRAVF:
++ return op_VESRAV, 2, 0
++ case AVESRAVG:
++ return op_VESRAV, 3, 0
++ case AVESRL:
++ return op_VESRL, 0, 0
++ case AVESRLB:
++ return op_VESRL, 0, 0
++ case AVESRLH:
++ return op_VESRL, 1, 0
++ case AVESRLF:
++ return op_VESRL, 2, 0
++ case AVESRLG:
++ return op_VESRL, 3, 0
++ case AVESRLV:
++ return op_VESRLV, 0, 0
++ case AVESRLVB:
++ return op_VESRLV, 0, 0
++ case AVESRLVH:
++ return op_VESRLV, 1, 0
++ case AVESRLVF:
++ return op_VESRLV, 2, 0
++ case AVESRLVG:
++ return op_VESRLV, 3, 0
++ case AVX:
++ return op_VX, 0, 0
++ case AVFAE:
++ return op_VFAE, 0, 0
++ case AVFAEB:
++ return op_VFAE, 0, 0
++ case AVFAEH:
++ return op_VFAE, 1, 0
++ case AVFAEF:
++ return op_VFAE, 2, 0
++ case AVFAEBS:
++ return op_VFAE, 0, 1
++ case AVFAEHS:
++ return op_VFAE, 1, 1
++ case AVFAEFS:
++ return op_VFAE, 2, 1
++ case AVFAEZB:
++ return op_VFAE, 0, 2
++ case AVFAEZH:
++ return op_VFAE, 1, 2
++ case AVFAEZF:
++ return op_VFAE, 2, 2
++ case AVFAEZBS:
++ return op_VFAE, 0, 3
++ case AVFAEZHS:
++ return op_VFAE, 1, 3
++ case AVFAEZFS:
++ return op_VFAE, 2, 3
++ case AVFEE:
++ return op_VFEE, 0, 0
++ case AVFEEB:
++ return op_VFEE, 0, 0
++ case AVFEEH:
++ return op_VFEE, 1, 0
++ case AVFEEF:
++ return op_VFEE, 2, 0
++ case AVFEEBS:
++ return op_VFEE, 0, 1
++ case AVFEEHS:
++ return op_VFEE, 1, 1
++ case AVFEEFS:
++ return op_VFEE, 2, 1
++ case AVFEEZB:
++ return op_VFEE, 0, 2
++ case AVFEEZH:
++ return op_VFEE, 1, 2
++ case AVFEEZF:
++ return op_VFEE, 2, 2
++ case AVFEEZBS:
++ return op_VFEE, 0, 3
++ case AVFEEZHS:
++ return op_VFEE, 1, 3
++ case AVFEEZFS:
++ return op_VFEE, 2, 3
++ case AVFENE:
++ return op_VFENE, 0, 0
++ case AVFENEB:
++ return op_VFENE, 0, 0
++ case AVFENEH:
++ return op_VFENE, 1, 0
++ case AVFENEF:
++ return op_VFENE, 2, 0
++ case AVFENEBS:
++ return op_VFENE, 0, 1
++ case AVFENEHS:
++ return op_VFENE, 1, 1
++ case AVFENEFS:
++ return op_VFENE, 2, 1
++ case AVFENEZB:
++ return op_VFENE, 0, 2
++ case AVFENEZH:
++ return op_VFENE, 1, 2
++ case AVFENEZF:
++ return op_VFENE, 2, 2
++ case AVFENEZBS:
++ return op_VFENE, 0, 3
++ case AVFENEZHS:
++ return op_VFENE, 1, 3
++ case AVFENEZFS:
++ return op_VFENE, 2, 3
++ case AVFA:
++ return op_VFA, 0, 0
++ case AVFADB:
++ return op_VFA, 3, 0
++ case AWFADB:
++ return op_VFA, 3, 0
++ case AWFK:
++ return op_WFK, 0, 0
++ case AWFKDB:
++ return op_WFK, 3, 0
++ case AVFCE:
++ return op_VFCE, 0, 0
++ case AVFCEDB:
++ return op_VFCE, 3, 0
++ case AVFCEDBS:
++ return op_VFCE, 3, 1
++ case AWFCEDB:
++ return op_VFCE, 3, 0
++ case AWFCEDBS:
++ return op_VFCE, 3, 1
++ case AVFCH:
++ return op_VFCH, 0, 0
++ case AVFCHDB:
++ return op_VFCH, 3, 0
++ case AVFCHDBS:
++ return op_VFCH, 3, 1
++ case AWFCHDB:
++ return op_VFCH, 3, 0
++ case AWFCHDBS:
++ return op_VFCH, 3, 1
++ case AVFCHE:
++ return op_VFCHE, 0, 0
++ case AVFCHEDB:
++ return op_VFCHE, 3, 0
++ case AVFCHEDBS:
++ return op_VFCHE, 3, 1
++ case AWFCHEDB:
++ return op_VFCHE, 3, 0
++ case AWFCHEDBS:
++ return op_VFCHE, 3, 1
++ case AWFC:
++ return op_WFC, 0, 0
++ case AWFCDB:
++ return op_WFC, 3, 0
++ case AVCDG:
++ return op_VCDG, 0, 0
++ case AVCDGB:
++ return op_VCDG, 3, 0
++ case AWCDGB:
++ return op_VCDG, 3, 0
++ case AVCDLG:
++ return op_VCDLG, 0, 0
++ case AVCDLGB:
++ return op_VCDLG, 3, 0
++ case AWCDLGB:
++ return op_VCDLG, 3, 0
++ case AVCGD:
++ return op_VCGD, 0, 0
++ case AVCGDB:
++ return op_VCGD, 3, 0
++ case AWCGDB:
++ return op_VCGD, 3, 0
++ case AVCLGD:
++ return op_VCLGD, 0, 0
++ case AVCLGDB:
++ return op_VCLGD, 3, 0
++ case AWCLGDB:
++ return op_VCLGD, 3, 0
++ case AVFD:
++ return op_VFD, 0, 0
++ case AVFDDB:
++ return op_VFD, 3, 0
++ case AWFDDB:
++ return op_VFD, 3, 0
++ case AVLDE:
++ return op_VLDE, 0, 0
++ case AVLDEB:
++ return op_VLDE, 2, 0
++ case AWLDEB:
++ return op_VLDE, 2, 0
++ case AVLED:
++ return op_VLED, 0, 0
++ case AVLEDB:
++ return op_VLED, 3, 0
++ case AWLEDB:
++ return op_VLED, 3, 0
++ case AVFM:
++ return op_VFM, 0, 0
++ case AVFMDB:
++ return op_VFM, 3, 0
++ case AWFMDB:
++ return op_VFM, 3, 0
++ case AVFMA:
++ return op_VFMA, 0, 0
++ case AVFMADB:
++ return op_VFMA, 3, 0
++ case AWFMADB:
++ return op_VFMA, 3, 0
++ case AVFMS:
++ return op_VFMS, 0, 0
++ case AVFMSDB:
++ return op_VFMS, 3, 0
++ case AWFMSDB:
++ return op_VFMS, 3, 0
++ case AVFPSO:
++ return op_VFPSO, 0, 0
++ case AVFPSODB:
++ return op_VFPSO, 3, 0
++ case AWFPSODB:
++ return op_VFPSO, 3, 0
++ case AVFLCDB:
++ return op_VFPSO, 3, 0
++ case AWFLCDB:
++ return op_VFPSO, 3, 0
++ case AVFLNDB:
++ return op_VFPSO, 3, 1
++ case AWFLNDB:
++ return op_VFPSO, 3, 1
++ case AVFLPDB:
++ return op_VFPSO, 3, 2
++ case AWFLPDB:
++ return op_VFPSO, 3, 2
++ case AVFSQ:
++ return op_VFSQ, 0, 0
++ case AVFSQDB:
++ return op_VFSQ, 3, 0
++ case AWFSQDB:
++ return op_VFSQ, 3, 0
++ case AVFS:
++ return op_VFS, 0, 0
++ case AVFSDB:
++ return op_VFS, 3, 0
++ case AWFSDB:
++ return op_VFS, 3, 0
++ case AVFTCI:
++ return op_VFTCI, 0, 0
++ case AVFTCIDB:
++ return op_VFTCI, 3, 0
++ case AWFTCIDB:
++ return op_VFTCI, 3, 0
++ case AVGFM:
++ return op_VGFM, 0, 0
++ case AVGFMB:
++ return op_VGFM, 0, 0
++ case AVGFMH:
++ return op_VGFM, 1, 0
++ case AVGFMF:
++ return op_VGFM, 2, 0
++ case AVGFMG:
++ return op_VGFM, 3, 0
++ case AVGFMA:
++ return op_VGFMA, 0, 0
++ case AVGFMAB:
++ return op_VGFMA, 0, 0
++ case AVGFMAH:
++ return op_VGFMA, 1, 0
++ case AVGFMAF:
++ return op_VGFMA, 2, 0
++ case AVGFMAG:
++ return op_VGFMA, 3, 0
++ case AVGEF:
++ return op_VGEF, 0, 0
++ case AVGEG:
++ return op_VGEG, 0, 0
++ case AVGBM:
++ return op_VGBM, 0, 0
++ case AVZERO:
++ return op_VGBM, 0, 0
++ case AVONE:
++ return op_VGBM, 0, 0
++ case AVGM:
++ return op_VGM, 0, 0
++ case AVGMB:
++ return op_VGM, 0, 0
++ case AVGMH:
++ return op_VGM, 1, 0
++ case AVGMF:
++ return op_VGM, 2, 0
++ case AVGMG:
++ return op_VGM, 3, 0
++ case AVISTR:
++ return op_VISTR, 0, 0
++ case AVISTRB:
++ return op_VISTR, 0, 0
++ case AVISTRH:
++ return op_VISTR, 1, 0
++ case AVISTRF:
++ return op_VISTR, 2, 0
++ case AVISTRBS:
++ return op_VISTR, 0, 1
++ case AVISTRHS:
++ return op_VISTR, 1, 1
++ case AVISTRFS:
++ return op_VISTR, 2, 1
++ case AVL:
++ return op_VL, 0, 0
++ case AVLR:
++ return op_VLR, 0, 0
++ case AVLREP:
++ return op_VLREP, 0, 0
++ case AVLREPB:
++ return op_VLREP, 0, 0
++ case AVLREPH:
++ return op_VLREP, 1, 0
++ case AVLREPF:
++ return op_VLREP, 2, 0
++ case AVLREPG:
++ return op_VLREP, 3, 0
++ case AVLC:
++ return op_VLC, 0, 0
++ case AVLCB:
++ return op_VLC, 0, 0
++ case AVLCH:
++ return op_VLC, 1, 0
++ case AVLCF:
++ return op_VLC, 2, 0
++ case AVLCG:
++ return op_VLC, 3, 0
++ case AVLEH:
++ return op_VLEH, 0, 0
++ case AVLEF:
++ return op_VLEF, 0, 0
++ case AVLEG:
++ return op_VLEG, 0, 0
++ case AVLEB:
++ return op_VLEB, 0, 0
++ case AVLEIH:
++ return op_VLEIH, 0, 0
++ case AVLEIF:
++ return op_VLEIF, 0, 0
++ case AVLEIG:
++ return op_VLEIG, 0, 0
++ case AVLEIB:
++ return op_VLEIB, 0, 0
++ case AVFI:
++ return op_VFI, 0, 0
++ case AVFIDB:
++ return op_VFI, 3, 0
++ case AWFIDB:
++ return op_VFI, 3, 0
++ case AVLGV:
++ return op_VLGV, 0, 0
++ case AVLGVB:
++ return op_VLGV, 0, 0
++ case AVLGVH:
++ return op_VLGV, 1, 0
++ case AVLGVF:
++ return op_VLGV, 2, 0
++ case AVLGVG:
++ return op_VLGV, 3, 0
++ case AVLLEZ:
++ return op_VLLEZ, 0, 0
++ case AVLLEZB:
++ return op_VLLEZ, 0, 0
++ case AVLLEZH:
++ return op_VLLEZ, 1, 0
++ case AVLLEZF:
++ return op_VLLEZ, 2, 0
++ case AVLLEZG:
++ return op_VLLEZ, 3, 0
++ case AVLM:
++ return op_VLM, 0, 0
++ case AVLP:
++ return op_VLP, 0, 0
++ case AVLPB:
++ return op_VLP, 0, 0
++ case AVLPH:
++ return op_VLP, 1, 0
++ case AVLPF:
++ return op_VLP, 2, 0
++ case AVLPG:
++ return op_VLP, 3, 0
++ case AVLBB:
++ return op_VLBB, 0, 0
++ case AVLVG:
++ return op_VLVG, 0, 0
++ case AVLVGB:
++ return op_VLVG, 0, 0
++ case AVLVGH:
++ return op_VLVG, 1, 0
++ case AVLVGF:
++ return op_VLVG, 2, 0
++ case AVLVGG:
++ return op_VLVG, 3, 0
++ case AVLVGP:
++ return op_VLVGP, 0, 0
++ case AVLL:
++ return op_VLL, 0, 0
++ case AVMX:
++ return op_VMX, 0, 0
++ case AVMXB:
++ return op_VMX, 0, 0
++ case AVMXH:
++ return op_VMX, 1, 0
++ case AVMXF:
++ return op_VMX, 2, 0
++ case AVMXG:
++ return op_VMX, 3, 0
++ case AVMXL:
++ return op_VMXL, 0, 0
++ case AVMXLB:
++ return op_VMXL, 0, 0
++ case AVMXLH:
++ return op_VMXL, 1, 0
++ case AVMXLF:
++ return op_VMXL, 2, 0
++ case AVMXLG:
++ return op_VMXL, 3, 0
++ case AVMRH:
++ return op_VMRH, 0, 0
++ case AVMRHB:
++ return op_VMRH, 0, 0
++ case AVMRHH:
++ return op_VMRH, 1, 0
++ case AVMRHF:
++ return op_VMRH, 2, 0
++ case AVMRHG:
++ return op_VMRH, 3, 0
++ case AVMRL:
++ return op_VMRL, 0, 0
++ case AVMRLB:
++ return op_VMRL, 0, 0
++ case AVMRLH:
++ return op_VMRL, 1, 0
++ case AVMRLF:
++ return op_VMRL, 2, 0
++ case AVMRLG:
++ return op_VMRL, 3, 0
++ case AVMN:
++ return op_VMN, 0, 0
++ case AVMNB:
++ return op_VMN, 0, 0
++ case AVMNH:
++ return op_VMN, 1, 0
++ case AVMNF:
++ return op_VMN, 2, 0
++ case AVMNG:
++ return op_VMN, 3, 0
++ case AVMNL:
++ return op_VMNL, 0, 0
++ case AVMNLB:
++ return op_VMNL, 0, 0
++ case AVMNLH:
++ return op_VMNL, 1, 0
++ case AVMNLF:
++ return op_VMNL, 2, 0
++ case AVMNLG:
++ return op_VMNL, 3, 0
++ case AVMAE:
++ return op_VMAE, 0, 0
++ case AVMAEB:
++ return op_VMAE, 0, 0
++ case AVMAEH:
++ return op_VMAE, 1, 0
++ case AVMAEF:
++ return op_VMAE, 2, 0
++ case AVMAH:
++ return op_VMAH, 0, 0
++ case AVMAHB:
++ return op_VMAH, 0, 0
++ case AVMAHH:
++ return op_VMAH, 1, 0
++ case AVMAHF:
++ return op_VMAH, 2, 0
++ case AVMALE:
++ return op_VMALE, 0, 0
++ case AVMALEB:
++ return op_VMALE, 0, 0
++ case AVMALEH:
++ return op_VMALE, 1, 0
++ case AVMALEF:
++ return op_VMALE, 2, 0
++ case AVMALH:
++ return op_VMALH, 0, 0
++ case AVMALHB:
++ return op_VMALH, 0, 0
++ case AVMALHH:
++ return op_VMALH, 1, 0
++ case AVMALHF:
++ return op_VMALH, 2, 0
++ case AVMALO:
++ return op_VMALO, 0, 0
++ case AVMALOB:
++ return op_VMALO, 0, 0
++ case AVMALOH:
++ return op_VMALO, 1, 0
++ case AVMALOF:
++ return op_VMALO, 2, 0
++ case AVMAL:
++ return op_VMAL, 0, 0
++ case AVMALB:
++ return op_VMAL, 0, 0
++ case AVMALHW:
++ return op_VMAL, 1, 0
++ case AVMALF:
++ return op_VMAL, 2, 0
++ case AVMAO:
++ return op_VMAO, 0, 0
++ case AVMAOB:
++ return op_VMAO, 0, 0
++ case AVMAOH:
++ return op_VMAO, 1, 0
++ case AVMAOF:
++ return op_VMAO, 2, 0
++ case AVME:
++ return op_VME, 0, 0
++ case AVMEB:
++ return op_VME, 0, 0
++ case AVMEH:
++ return op_VME, 1, 0
++ case AVMEF:
++ return op_VME, 2, 0
++ case AVMH:
++ return op_VMH, 0, 0
++ case AVMHB:
++ return op_VMH, 0, 0
++ case AVMHH:
++ return op_VMH, 1, 0
++ case AVMHF:
++ return op_VMH, 2, 0
++ case AVMLE:
++ return op_VMLE, 0, 0
++ case AVMLEB:
++ return op_VMLE, 0, 0
++ case AVMLEH:
++ return op_VMLE, 1, 0
++ case AVMLEF:
++ return op_VMLE, 2, 0
++ case AVMLH:
++ return op_VMLH, 0, 0
++ case AVMLHB:
++ return op_VMLH, 0, 0
++ case AVMLHH:
++ return op_VMLH, 1, 0
++ case AVMLHF:
++ return op_VMLH, 2, 0
++ case AVMLO:
++ return op_VMLO, 0, 0
++ case AVMLOB:
++ return op_VMLO, 0, 0
++ case AVMLOH:
++ return op_VMLO, 1, 0
++ case AVMLOF:
++ return op_VMLO, 2, 0
++ case AVML:
++ return op_VML, 0, 0
++ case AVMLB:
++ return op_VML, 0, 0
++ case AVMLHW:
++ return op_VML, 1, 0
++ case AVMLF:
++ return op_VML, 2, 0
++ case AVMO:
++ return op_VMO, 0, 0
++ case AVMOB:
++ return op_VMO, 0, 0
++ case AVMOH:
++ return op_VMO, 1, 0
++ case AVMOF:
++ return op_VMO, 2, 0
++ case AVNO:
++ return op_VNO, 0, 0
++ case AVNOT:
++ return op_VNO, 0, 0
++ case AVO:
++ return op_VO, 0, 0
++ case AVPK:
++ return op_VPK, 0, 0
++ case AVPKH:
++ return op_VPK, 1, 0
++ case AVPKF:
++ return op_VPK, 2, 0
++ case AVPKG:
++ return op_VPK, 3, 0
++ case AVPKLS:
++ return op_VPKLS, 0, 0
++ case AVPKLSH:
++ return op_VPKLS, 1, 0
++ case AVPKLSF:
++ return op_VPKLS, 2, 0
++ case AVPKLSG:
++ return op_VPKLS, 3, 0
++ case AVPKLSHS:
++ return op_VPKLS, 1, 1
++ case AVPKLSFS:
++ return op_VPKLS, 2, 1
++ case AVPKLSGS:
++ return op_VPKLS, 3, 1
++ case AVPKS:
++ return op_VPKS, 0, 0
++ case AVPKSH:
++ return op_VPKS, 1, 0
++ case AVPKSF:
++ return op_VPKS, 2, 0
++ case AVPKSG:
++ return op_VPKS, 3, 0
++ case AVPKSHS:
++ return op_VPKS, 1, 1
++ case AVPKSFS:
++ return op_VPKS, 2, 1
++ case AVPKSGS:
++ return op_VPKS, 3, 1
++ case AVPERM:
++ return op_VPERM, 0, 0
++ case AVPDI:
++ return op_VPDI, 0, 0
++ case AVPOPCT:
++ return op_VPOPCT, 0, 0
++ case AVREP:
++ return op_VREP, 0, 0
++ case AVREPB:
++ return op_VREP, 0, 0
++ case AVREPH:
++ return op_VREP, 1, 0
++ case AVREPF:
++ return op_VREP, 2, 0
++ case AVREPG:
++ return op_VREP, 3, 0
++ case AVREPI:
++ return op_VREPI, 0, 0
++ case AVREPIB:
++ return op_VREPI, 0, 0
++ case AVREPIH:
++ return op_VREPI, 1, 0
++ case AVREPIF:
++ return op_VREPI, 2, 0
++ case AVREPIG:
++ return op_VREPI, 3, 0
++ case AVSCEF:
++ return op_VSCEF, 0, 0
++ case AVSCEG:
++ return op_VSCEG, 0, 0
++ case AVSEL:
++ return op_VSEL, 0, 0
++ case AVSL:
++ return op_VSL, 0, 0
++ case AVSLB:
++ return op_VSLB, 0, 0
++ case AVSLDB:
++ return op_VSLDB, 0, 0
++ case AVSRA:
++ return op_VSRA, 0, 0
++ case AVSRAB:
++ return op_VSRAB, 0, 0
++ case AVSRL:
++ return op_VSRL, 0, 0
++ case AVSRLB:
++ return op_VSRLB, 0, 0
++ case AVSEG:
++ return op_VSEG, 0, 0
++ case AVSEGB:
++ return op_VSEG, 0, 0
++ case AVSEGH:
++ return op_VSEG, 1, 0
++ case AVSEGF:
++ return op_VSEG, 2, 0
++ case AVST:
++ return op_VST, 0, 0
++ case AVSTEH:
++ return op_VSTEH, 0, 0
++ case AVSTEF:
++ return op_VSTEF, 0, 0
++ case AVSTEG:
++ return op_VSTEG, 0, 0
++ case AVSTEB:
++ return op_VSTEB, 0, 0
++ case AVSTM:
++ return op_VSTM, 0, 0
++ case AVSTL:
++ return op_VSTL, 0, 0
++ case AVSTRC:
++ return op_VSTRC, 0, 0
++ case AVSTRCB:
++ return op_VSTRC, 0, 0
++ case AVSTRCH:
++ return op_VSTRC, 1, 0
++ case AVSTRCF:
++ return op_VSTRC, 2, 0
++ case AVSTRCBS:
++ return op_VSTRC, 0, 1
++ case AVSTRCHS:
++ return op_VSTRC, 1, 1
++ case AVSTRCFS:
++ return op_VSTRC, 2, 1
++ case AVSTRCZB:
++ return op_VSTRC, 0, 2
++ case AVSTRCZH:
++ return op_VSTRC, 1, 2
++ case AVSTRCZF:
++ return op_VSTRC, 2, 2
++ case AVSTRCZBS:
++ return op_VSTRC, 0, 3
++ case AVSTRCZHS:
++ return op_VSTRC, 1, 3
++ case AVSTRCZFS:
++ return op_VSTRC, 2, 3
++ case AVS:
++ return op_VS, 0, 0
++ case AVSB:
++ return op_VS, 0, 0
++ case AVSH:
++ return op_VS, 1, 0
++ case AVSF:
++ return op_VS, 2, 0
++ case AVSG:
++ return op_VS, 3, 0
++ case AVSQ:
++ return op_VS, 4, 0
++ case AVSCBI:
++ return op_VSCBI, 0, 0
++ case AVSCBIB:
++ return op_VSCBI, 0, 0
++ case AVSCBIH:
++ return op_VSCBI, 1, 0
++ case AVSCBIF:
++ return op_VSCBI, 2, 0
++ case AVSCBIG:
++ return op_VSCBI, 3, 0
++ case AVSCBIQ:
++ return op_VSCBI, 4, 0
++ case AVSBCBI:
++ return op_VSBCBI, 0, 0
++ case AVSBCBIQ:
++ return op_VSBCBI, 4, 0
++ case AVSBI:
++ return op_VSBI, 0, 0
++ case AVSBIQ:
++ return op_VSBI, 4, 0
++ case AVSUMG:
++ return op_VSUMG, 0, 0
++ case AVSUMGH:
++ return op_VSUMG, 1, 0
++ case AVSUMGF:
++ return op_VSUMG, 2, 0
++ case AVSUMQ:
++ return op_VSUMQ, 0, 0
++ case AVSUMQF:
++ return op_VSUMQ, 1, 0
++ case AVSUMQG:
++ return op_VSUMQ, 2, 0
++ case AVSUM:
++ return op_VSUM, 0, 0
++ case AVSUMB:
++ return op_VSUM, 0, 0
++ case AVSUMH:
++ return op_VSUM, 1, 0
++ case AVTM:
++ return op_VTM, 0, 0
++ case AVUPH:
++ return op_VUPH, 0, 0
++ case AVUPHB:
++ return op_VUPH, 0, 0
++ case AVUPHH:
++ return op_VUPH, 1, 0
++ case AVUPHF:
++ return op_VUPH, 2, 0
++ case AVUPLH:
++ return op_VUPLH, 0, 0
++ case AVUPLHB:
++ return op_VUPLH, 0, 0
++ case AVUPLHH:
++ return op_VUPLH, 1, 0
++ case AVUPLHF:
++ return op_VUPLH, 2, 0
++ case AVUPLL:
++ return op_VUPLL, 0, 0
++ case AVUPLLB:
++ return op_VUPLL, 0, 0
++ case AVUPLLH:
++ return op_VUPLL, 1, 0
++ case AVUPLLF:
++ return op_VUPLL, 2, 0
++ case AVUPL:
++ return op_VUPL, 0, 0
++ case AVUPLB:
++ return op_VUPL, 0, 0
++ case AVUPLHW:
++ return op_VUPL, 1, 0
++ case AVUPLF:
++ return op_VUPL, 2, 0
++ }
++}
++
++// singleElementMask returns the single element mask bits required for the
++// given instruction.
++func singleElementMask(as int16) uint32 {
++ switch as {
++ case AWFADB,
++ AWFK,
++ AWFKDB,
++ AWFCEDB,
++ AWFCEDBS,
++ AWFCHDB,
++ AWFCHDBS,
++ AWFCHEDB,
++ AWFCHEDBS,
++ AWFC,
++ AWFCDB,
++ AWCDGB,
++ AWCDLGB,
++ AWCGDB,
++ AWCLGDB,
++ AWFDDB,
++ AWLDEB,
++ AWLEDB,
++ AWFMDB,
++ AWFMADB,
++ AWFMSDB,
++ AWFPSODB,
++ AWFLCDB,
++ AWFLNDB,
++ AWFLPDB,
++ AWFSQDB,
++ AWFSDB,
++ AWFTCIDB,
++ AWFIDB:
++ return 8
++ }
++ return 0
++}
+--- a/src/cmd/internal/obj/util.go
++++ b/src/cmd/internal/obj/util.go
+@@ -530,6 +530,7 @@
+ RBasePPC64 = 4 * 1024 // range [4k, 8k)
+ RBaseARM64 = 8 * 1024 // range [8k, 13k)
+ RBaseMIPS64 = 13 * 1024 // range [13k, 14k)
++ RBaseS390X = 14 * 1024 // range [14k, 15k)
+ )
+
+ // RegisterRegister binds a pretty-printer (Rconv) for register
+@@ -590,6 +591,7 @@
+ ABasePPC64
+ ABaseARM64
+ ABaseMIPS64
++ ABaseS390X
+ AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
+ )
+
+--- a/src/cmd/internal/objfile/disasm.go
++++ b/src/cmd/internal/objfile/disasm.go
+@@ -245,4 +245,5 @@
+ "arm": binary.LittleEndian,
+ "ppc64": binary.BigEndian,
+ "ppc64le": binary.LittleEndian,
++ "s390x": binary.BigEndian,
+ }
+--- a/src/cmd/internal/objfile/elf.go
++++ b/src/cmd/internal/objfile/elf.go
+@@ -99,6 +99,8 @@
+ return "arm"
+ case elf.EM_PPC64:
+ return "ppc64"
++ case elf.EM_S390:
++ return "s390x"
+ }
+ return ""
+ }
+--- a/src/cmd/link/internal/ld/arch.go
++++ b/src/cmd/link/internal/ld/arch.go
+@@ -86,3 +86,12 @@
+ Ptrsize: 8,
+ Regsize: 8,
+ }
++
++var Links390x = LinkArch{
++ ByteOrder: binary.BigEndian,
++ Name: "s390x",
++ Thechar: 'z',
++ Minlc: 2,
++ Ptrsize: 8,
++ Regsize: 8,
++}
+--- a/src/cmd/link/internal/ld/data.go
++++ b/src/cmd/link/internal/ld/data.go
+@@ -148,6 +148,9 @@
+ r.Add = add
+ r.Type = obj.R_PCREL
+ r.Siz = 4
++ if Thearch.Thechar == 'z' {
++ r.Variant = RV_390_DBL
++ }
+ return i + int64(r.Siz)
+ }
+
+@@ -364,6 +367,18 @@
+ Diag("unreachable sym in relocation: %s %s", s.Name, r.Sym.Name)
+ }
+
++ // TODO(mundaym): Move this conversion somewhere more appropriate.
++ // Ideally the obj relocations would support variants.
++ if Thearch.Thechar == 'z' {
++ switch r.Type {
++ case obj.R_PCRELDBL:
++ r.Type = obj.R_PCREL
++ r.Variant = RV_390_DBL
++ case obj.R_CALL:
++ r.Variant = RV_390_DBL
++ }
++ }
++
+ switch r.Type {
+ default:
+ switch siz {
+@@ -454,7 +469,7 @@
+
+ o = r.Xadd
+ if Iself {
+- if Thearch.Thechar == '6' {
++ if Thearch.Thechar == '6' || Thearch.Thechar == 'z' {
+ o = 0
+ }
+ } else if HEADTYPE == obj.Hdarwin {
+@@ -514,7 +529,7 @@
+
+ o = r.Xadd
+ if Iself {
+- if Thearch.Thechar == '6' {
++ if Thearch.Thechar == '6' || Thearch.Thechar == 'z' {
+ o = 0
+ }
+ } else if HEADTYPE == obj.Hdarwin {
+@@ -1030,6 +1045,13 @@
+ if align < s.Align {
+ align = s.Align
+ }
++
++ // TODO(mundaym): Minalign should probably be a new attribute on 'Thearch'
++ if Thearch.Thechar == 'z' && align < 2 {
++ // Relative addressing requires a 2 byte alignment on s390x.
++ align = 2
++ }
++
+ return align
+ }
+
+@@ -1159,6 +1181,7 @@
+ for s := datap; s != nil; s = s.Next {
+ if int64(len(s.P)) > s.Size {
+ Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
++ s.Size = int64(len(s.P)) // hack to allow linking of asm into go // TODO(WGO)
+ }
+ }
+
+--- a/src/cmd/link/internal/ld/dwarf.go
++++ b/src/cmd/link/internal/ld/dwarf.go
+@@ -1715,7 +1715,7 @@
+ * Emit .debug_frame
+ */
+ const (
+- CIERESERVE = 16
++ CIERESERVE = 32
+ DATAALIGNMENTFACTOR = -4
+ )
+
+@@ -1754,7 +1754,6 @@
+ uleb128put(int64(Thearch.Dwarfreglr)) // return_address_register
+
+ Cput(DW_CFA_def_cfa)
+-
+ uleb128put(int64(Thearch.Dwarfregsp)) // register SP (**ABI-dependent, defined in l.h)
+ if haslinkregister() {
+ uleb128put(int64(0)) // offset
+@@ -1762,14 +1761,21 @@
+ uleb128put(int64(Thearch.Ptrsize)) // offset
+ }
+
+- Cput(DW_CFA_offset_extended)
+- uleb128put(int64(Thearch.Dwarfreglr)) // return address
+ if haslinkregister() {
+- uleb128put(int64(0) / DATAALIGNMENTFACTOR) // at cfa - 0
++ Cput(DW_CFA_same_value)
++ uleb128put(int64(Thearch.Dwarfreglr))
+ } else {
++ Cput(DW_CFA_offset_extended)
++ uleb128put(int64(Thearch.Dwarfreglr)) // return address
+ uleb128put(int64(-Thearch.Ptrsize) / DATAALIGNMENTFACTOR) // at cfa - x*4
+ }
+
++ if haslinkregister() {
++ Cput(DW_CFA_val_offset)
++ uleb128put(int64(Thearch.Dwarfregsp))
++ uleb128put(int64(0))
++ }
++
+ // 4 is to exclude the length field.
+ pad := CIERESERVE + frameo + 4 - Cpos()
+
+@@ -1788,7 +1794,7 @@
+
+ fdeo := Cpos()
+
+- // Emit a FDE, Section 6.4.1, starting wit a placeholder.
++ // Emit a FDE, Section 6.4.1, starting with a placeholder.
+ Thearch.Lput(0) // length, must be multiple of thearch.ptrsize
+ Thearch.Lput(0) // Pointer to the CIE above, at offset 0
+ addrput(0) // initial location
+@@ -1807,6 +1813,21 @@
+ }
+
+ if haslinkregister() {
++ // TODO(bryanpkc): This is imprecise. In general, the instruction
++ // that stores the return address to the stack frame is not the
++ // same one that allocates the frame.
++ if pcsp.value > 0 {
++ // The return address is preserved at (CFA-frame_size)
++ // after a stack frame has been allocated.
++ Cput(DW_CFA_offset_extended_sf)
++ uleb128put(int64(Thearch.Dwarfreglr))
++ sleb128put(-int64(pcsp.value) / DATAALIGNMENTFACTOR)
++ } else {
++ // The return address is restored into the link register
++ // when a stack frame has been de-allocated.
++ Cput(DW_CFA_same_value)
++ uleb128put(int64(Thearch.Dwarfreglr))
++ }
+ putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(pcsp.value))
+ } else {
+ putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(Thearch.Ptrsize)+int64(pcsp.value))
+@@ -2237,7 +2258,7 @@
+ elfstrdbg[ElfStrGDBScripts] = Addstring(shstrtab, ".debug_gdb_scripts")
+ if Linkmode == LinkExternal {
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ elfstrdbg[ElfStrRelDebugInfo] = Addstring(shstrtab, ".rela.debug_info")
+ elfstrdbg[ElfStrRelDebugAranges] = Addstring(shstrtab, ".rela.debug_aranges")
+ elfstrdbg[ElfStrRelDebugLine] = Addstring(shstrtab, ".rela.debug_line")
+@@ -2290,7 +2311,7 @@
+ func dwarfaddelfrelocheader(elfstr int, shdata *ElfShdr, off int64, size int64) {
+ sh := newElfShdr(elfstrdbg[elfstr])
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ sh.type_ = SHT_RELA
+ default:
+ sh.type_ = SHT_REL
+--- a/src/cmd/link/internal/ld/elf.go
++++ b/src/cmd/link/internal/ld/elf.go
+@@ -646,6 +646,68 @@
+ R_SPARC_UA64 = 54
+ R_SPARC_UA16 = 55
+
++ R_390_NONE = 0
++ R_390_8 = 1
++ R_390_12 = 2
++ R_390_16 = 3
++ R_390_32 = 4
++ R_390_PC32 = 5
++ R_390_GOT12 = 6
++ R_390_GOT32 = 7
++ R_390_PLT32 = 8
++ R_390_COPY = 9
++ R_390_GLOB_DAT = 10
++ R_390_JMP_SLOT = 11
++ R_390_RELATIVE = 12
++ R_390_GOTOFF = 13
++ R_390_GOTPC = 14
++ R_390_GOT16 = 15
++ R_390_PC16 = 16
++ R_390_PC16DBL = 17
++ R_390_PLT16DBL = 18
++ R_390_PC32DBL = 19
++ R_390_PLT32DBL = 20
++ R_390_GOTPCDBL = 21
++ R_390_64 = 22
++ R_390_PC64 = 23
++ R_390_GOT64 = 24
++ R_390_PLT64 = 25
++ R_390_GOTENT = 26
++ R_390_GOTOFF16 = 27
++ R_390_GOTOFF64 = 28
++ R_390_GOTPLT12 = 29
++ R_390_GOTPLT16 = 30
++ R_390_GOTPLT32 = 31
++ R_390_GOTPLT64 = 32
++ R_390_GOTPLTENT = 33
++ R_390_GOTPLTOFF16 = 34
++ R_390_GOTPLTOFF32 = 35
++ R_390_GOTPLTOFF64 = 36
++ R_390_TLS_LOAD = 37
++ R_390_TLS_GDCALL = 38
++ R_390_TLS_LDCALL = 39
++ R_390_TLS_GD32 = 40
++ R_390_TLS_GD64 = 41
++ R_390_TLS_GOTIE12 = 42
++ R_390_TLS_GOTIE32 = 43
++ R_390_TLS_GOTIE64 = 44
++ R_390_TLS_LDM32 = 45
++ R_390_TLS_LDM64 = 46
++ R_390_TLS_IE32 = 47
++ R_390_TLS_IE64 = 48
++ R_390_TLS_IEENT = 49
++ R_390_TLS_LE32 = 50
++ R_390_TLS_LE64 = 51
++ R_390_TLS_LDO32 = 52
++ R_390_TLS_LDO64 = 53
++ R_390_TLS_DTPMOD = 54
++ R_390_TLS_DTPOFF = 55
++ R_390_TLS_TPOFF = 56
++ R_390_20 = 57
++ R_390_GOT20 = 58
++ R_390_GOTPLT20 = 59
++ R_390_TLS_GOTIE20 = 60
++
+ ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
+ )
+
+@@ -803,7 +865,7 @@
+
+ switch Thearch.Thechar {
+ // 64-bit architectures
+- case '9':
++ case '9', 'z':
+ if Ctxt.Arch.ByteOrder == binary.BigEndian {
+ ehdr.flags = 1 /* Version 1 ABI */
+ } else {
+@@ -1380,13 +1442,25 @@
+ buckets[b] = uint32(sy.Dynid)
+ }
+
+- Adduint32(Ctxt, s, uint32(nbucket))
+- Adduint32(Ctxt, s, uint32(nsym))
+- for i := 0; i < nbucket; i++ {
+- Adduint32(Ctxt, s, buckets[i])
+- }
+- for i := 0; i < nsym; i++ {
+- Adduint32(Ctxt, s, chain[i])
++ // s390x hash table entries are 8 bytes
++ if Thearch.Thechar == 'z' && elf64 {
++ Adduint64(Ctxt, s, uint64(nbucket))
++ Adduint64(Ctxt, s, uint64(nsym))
++ for i := 0; i < nbucket; i++ {
++ Adduint64(Ctxt, s, uint64(buckets[i]))
++ }
++ for i := 0; i < nsym; i++ {
++ Adduint64(Ctxt, s, uint64(chain[i]))
++ }
++ } else {
++ Adduint32(Ctxt, s, uint32(nbucket))
++ Adduint32(Ctxt, s, uint32(nsym))
++ for i := 0; i < nbucket; i++ {
++ Adduint32(Ctxt, s, buckets[i])
++ }
++ for i := 0; i < nsym; i++ {
++ Adduint32(Ctxt, s, chain[i])
++ }
+ }
+
+ // version symbols
+@@ -1454,7 +1528,7 @@
+ }
+
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ sy := Linklookup(Ctxt, ".rela.plt", 0)
+ if sy.Size > 0 {
+ Elfwritedynent(s, DT_PLTREL, DT_RELA)
+@@ -1594,7 +1668,7 @@
+ var prefix string
+ var typ int
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ prefix = ".rela"
+ typ = SHT_RELA
+ default:
+@@ -1767,7 +1841,7 @@
+ Debug['d'] = 1
+
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ Addstring(shstrtab, ".rela.text")
+ Addstring(shstrtab, ".rela.rodata")
+ Addstring(shstrtab, ".rela"+relro_prefix+".typelink")
+@@ -1813,7 +1887,7 @@
+ if hasinitarr {
+ Addstring(shstrtab, ".init_array")
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ Addstring(shstrtab, ".rela.init_array")
+ default:
+ Addstring(shstrtab, ".rel.init_array")
+@@ -1840,7 +1914,7 @@
+ Addstring(shstrtab, ".dynsym")
+ Addstring(shstrtab, ".dynstr")
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ Addstring(shstrtab, ".rela")
+ Addstring(shstrtab, ".rela.plt")
+ default:
+@@ -1858,7 +1932,7 @@
+ s.Type = obj.SELFROSECT
+ s.Reachable = true
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ s.Size += ELF64SYMSIZE
+ default:
+ s.Size += ELF32SYMSIZE
+@@ -1876,7 +1950,7 @@
+
+ /* relocation table */
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ s = Linklookup(Ctxt, ".rela", 0)
+ default:
+ s = Linklookup(Ctxt, ".rel", 0)
+@@ -1921,7 +1995,7 @@
+ Thearch.Elfsetupplt()
+
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ s = Linklookup(Ctxt, ".rela.plt", 0)
+ default:
+ s = Linklookup(Ctxt, ".rel.plt", 0)
+@@ -1950,7 +2024,7 @@
+
+ elfwritedynentsym(s, DT_SYMTAB, Linklookup(Ctxt, ".dynsym", 0))
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ Elfwritedynent(s, DT_SYMENT, ELF64SYMSIZE)
+ default:
+ Elfwritedynent(s, DT_SYMENT, ELF32SYMSIZE)
+@@ -1958,7 +2032,7 @@
+ elfwritedynentsym(s, DT_STRTAB, Linklookup(Ctxt, ".dynstr", 0))
+ elfwritedynentsymsize(s, DT_STRSZ, Linklookup(Ctxt, ".dynstr", 0))
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ elfwritedynentsym(s, DT_RELA, Linklookup(Ctxt, ".rela", 0))
+ elfwritedynentsymsize(s, DT_RELASZ, Linklookup(Ctxt, ".rela", 0))
+ Elfwritedynent(s, DT_RELAENT, ELF64RELASIZE)
+@@ -1974,6 +2048,8 @@
+
+ if Thearch.Thechar == '9' {
+ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".plt", 0))
++ } else if Thearch.Thechar == 'z' {
++ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got", 0))
+ } else {
+ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got.plt", 0))
+ }
+@@ -2069,6 +2145,8 @@
+ eh.machine = EM_386
+ case '9':
+ eh.machine = EM_PPC64
++ case 'z':
++ eh.machine = EM_S390
+ }
+
+ elfreserve := int64(ELFRESERVE)
+@@ -2254,7 +2332,7 @@
+ }
+
+ switch eh.machine {
+- case EM_X86_64, EM_PPC64, EM_AARCH64:
++ case EM_X86_64, EM_PPC64, EM_AARCH64, EM_S390:
+ sh := elfshname(".rela.plt")
+ sh.type_ = SHT_RELA
+ sh.flags = SHF_ALLOC
+@@ -2303,6 +2381,8 @@
+ sh.flags = SHF_ALLOC + SHF_EXECINSTR
+ if eh.machine == EM_X86_64 {
+ sh.entsize = 16
++ } else if eh.machine == EM_S390 {
++ sh.entsize = 32
+ } else if eh.machine == EM_PPC64 {
+ // On ppc64, this is just a table of addresses
+ // filled by the dynamic linker
+--- a/src/cmd/link/internal/ld/ldelf.go
++++ b/src/cmd/link/internal/ld/ldelf.go
+@@ -586,6 +586,11 @@
+ Diag("%s: elf object but not ppc64", pn)
+ return
+ }
++ case 'z':
++ if elfobj.machine != ElfMachS390 || hdr.Ident[4] != ElfClass64 {
++ Diag("%s: elf object but not s390x", pn)
++ return
++ }
+ }
+
+ // load section list into memory.
+@@ -778,6 +783,9 @@
+ continue
+ }
+
++ if strings.HasPrefix(sym.name, ".LASF") { // gcc on s390x does this
++ continue
++ }
+ Diag("%s: sym#%d: ignoring %s in section %d (type %d)", pn, i, sym.name, sym.shndx, sym.type_)
+ continue
+ }
+@@ -1124,6 +1132,14 @@
+ Diag("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)
+ fallthrough
+
++ case 'z' | R_390_NONE<<24,
++ 'z' | R_390_COPY<<24,
++ 'z' | R_390_JMP_SLOT<<24:
++ *siz = 0
++
++ case 'z' | R_390_8:
++ *siz = 1
++
+ case '9' | R_PPC64_TOC16<<24,
+ '9' | R_PPC64_TOC16_LO<<24,
+ '9' | R_PPC64_TOC16_HI<<24,
+@@ -1132,7 +1148,14 @@
+ '9' | R_PPC64_TOC16_LO_DS<<24,
+ '9' | R_PPC64_REL16_LO<<24,
+ '9' | R_PPC64_REL16_HI<<24,
+- '9' | R_PPC64_REL16_HA<<24:
++ '9' | R_PPC64_REL16_HA<<24,
++ 'z' | R_390_12<<24,
++ 'z' | R_390_16<<24,
++ 'z' | R_390_GOT12<<24,
++ 'z' | R_390_GOT16<<24,
++ 'z' | R_390_PC16<<24,
++ 'z' | R_390_PC16DBL<<24,
++ 'z' | R_390_PLT16DBL<<24:
+ *siz = 2
+
+ case '5' | R_ARM_ABS32<<24,
+@@ -1160,11 +1183,27 @@
+ '8' | R_386_GOTPC<<24,
+ '8' | R_386_GOT32X<<24,
+ '9' | R_PPC64_REL24<<24,
+- '9' | R_PPC_REL32<<24:
++ '9' | R_PPC_REL32<<24,
++ 'z' | R_390_32<<24,
++ 'z' | R_390_PC32<<24,
++ 'z' | R_390_GOT32<<24,
++ 'z' | R_390_PLT32<<24,
++ 'z' | R_390_PC32DBL<<24,
++ 'z' | R_390_PLT32DBL<<24,
++ 'z' | R_390_GOTPCDBL<<24,
++ 'z' | R_390_GOTENT<<24:
+ *siz = 4
+
+ case '6' | R_X86_64_64<<24,
+- '9' | R_PPC64_ADDR64<<24:
++ '9' | R_PPC64_ADDR64<<24,
++ 'z' | R_390_GLOB_DAT<<24,
++ 'z' | R_390_RELATIVE<<24,
++ 'z' | R_390_GOTOFF<<24,
++ 'z' | R_390_GOTPC<<24,
++ 'z' | R_390_64<<24,
++ 'z' | R_390_PC64<<24,
++ 'z' | R_390_GOT64<<24,
++ 'z' | R_390_PLT64<<24:
+ *siz = 8
+ }
+
+--- a/src/cmd/link/internal/ld/lib.go
++++ b/src/cmd/link/internal/ld/lib.go
+@@ -329,7 +329,7 @@
+ switch goos {
+ case "linux":
+ switch goarch {
+- case "386", "amd64", "arm", "arm64", "ppc64le":
++ case "386", "amd64", "arm", "arm64", "ppc64le", "s390x":
+ default:
+ return badmode()
+ }
+--- a/src/cmd/link/internal/ld/link.go
++++ b/src/cmd/link/internal/ld/link.go
+@@ -233,6 +233,7 @@
+ RV_POWER_HI
+ RV_POWER_HA
+ RV_POWER_DS
++ RV_390_DBL
+ RV_CHECK_OVERFLOW = 1 << 8
+ RV_TYPE_MASK = RV_CHECK_OVERFLOW - 1
+ )
+--- a/src/cmd/link/internal/ld/symtab.go
++++ b/src/cmd/link/internal/ld/symtab.go
+@@ -67,7 +67,7 @@
+
+ func putelfsyment(off int, addr int64, size int64, info int, shndx int, other int) {
+ switch Thearch.Thechar {
+- case '0', '6', '7', '9':
++ case '0', '6', '7', '9', 'z':
+ Thearch.Lput(uint32(off))
+ Cput(uint8(info))
+ Cput(uint8(other))
+@@ -562,6 +562,7 @@
+ adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
+ adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
+ }
++
+ // The rest of moduledata is zero initialized.
+ // When linking an object that does not contain the runtime we are
+ // creating the moduledata from scratch and it does not have a
+--- /dev/null
++++ b/src/cmd/link/internal/s390x/asm.go
+@@ -0,0 +1,643 @@
++// Inferno utils/5l/asm.c
++// http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/internal/obj"
++ "cmd/link/internal/ld"
++ "debug/elf"
++ "fmt"
++ "log"
++)
++
++// gentext generates assembly to append the local moduledata to the global
++// moduledata linked list at initialization time. This is only done if the runtime
++// is in a different module.
++//
++// <go.link.addmoduledata>:
++// larl %r2, <local.moduledata>
++// jg <runtime.addmoduledata>
++// undef
++//
++// The job of appending the moduledata is delegated to runtime.addmoduledata.
++func gentext() {
++ if !ld.DynlinkingGo() {
++ return
++ }
++ addmoduledata := ld.Linklookup(ld.Ctxt, "runtime.addmoduledata", 0)
++ if addmoduledata.Type == obj.STEXT {
++ // we're linking a module containing the runtime -> no need for
++ // an init function
++ return
++ }
++ addmoduledata.Reachable = true
++ initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
++ initfunc.Type = obj.STEXT
++ initfunc.Local = true
++ initfunc.Reachable = true
++
++ // larl %r2, <local.moduledata>
++ ld.Adduint8(ld.Ctxt, initfunc, 0xc0)
++ ld.Adduint8(ld.Ctxt, initfunc, 0x20)
++ lmd := ld.Addrel(initfunc)
++ lmd.Off = int32(initfunc.Size)
++ lmd.Siz = 4
++ lmd.Sym = ld.Ctxt.Moduledata
++ lmd.Type = obj.R_PCREL
++ lmd.Variant = ld.RV_390_DBL
++ lmd.Add = 2 + int64(lmd.Siz)
++ ld.Adduint32(ld.Ctxt, initfunc, 0)
++
++ // jg <runtime.addmoduledata>
++ ld.Adduint8(ld.Ctxt, initfunc, 0xc0)
++ ld.Adduint8(ld.Ctxt, initfunc, 0xf4)
++ rel := ld.Addrel(initfunc)
++ rel.Off = int32(initfunc.Size)
++ rel.Siz = 4
++ rel.Sym = ld.Linklookup(ld.Ctxt, "runtime.addmoduledata", 0)
++ rel.Type = obj.R_CALL
++ rel.Variant = ld.RV_390_DBL
++ rel.Add = 2 + int64(rel.Siz)
++ ld.Adduint32(ld.Ctxt, initfunc, 0)
++
++ // undef (for debugging)
++ ld.Adduint32(ld.Ctxt, initfunc, 0)
++
++ if ld.Ctxt.Etextp != nil {
++ ld.Ctxt.Etextp.Next = initfunc
++ } else {
++ ld.Ctxt.Textp = initfunc
++ }
++ ld.Ctxt.Etextp = initfunc
++ initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
++ initarray_entry.Reachable = true
++ initarray_entry.Local = true
++ initarray_entry.Type = obj.SINITARR
++ ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
++}
++
++func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) {
++ log.Fatalf("adddynrela not implemented")
++}
++
++func adddynrel(s *ld.LSym, r *ld.Reloc) {
++ targ := r.Sym
++ ld.Ctxt.Cursym = s
++
++ switch r.Type {
++ default:
++ if r.Type >= 256 {
++ ld.Diag("unexpected relocation type %d", r.Type)
++ return
++ }
++
++ // Handle relocations found in ELF object files.
++ case 256 + ld.R_390_12,
++ 256 + ld.R_390_GOT12:
++ ld.Diag("s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type-256)
++ return
++
++ case 256 + ld.R_390_8,
++ 256 + ld.R_390_16,
++ 256 + ld.R_390_32,
++ 256 + ld.R_390_64:
++ if targ.Type == obj.SDYNIMPORT {
++ ld.Diag("unexpected R_390_nn relocation for dynamic symbol %s", targ.Name)
++ }
++ r.Type = obj.R_ADDR
++ return
++
++ case 256 + ld.R_390_PC16,
++ 256 + ld.R_390_PC32,
++ 256 + ld.R_390_PC64:
++ if targ.Type == obj.SDYNIMPORT {
++ ld.Diag("unexpected R_390_PCnn relocation for dynamic symbol %s", targ.Name)
++ }
++ if targ.Type == 0 || targ.Type == obj.SXREF {
++ ld.Diag("unknown symbol %s in pcrel", targ.Name)
++ }
++ r.Type = obj.R_PCREL
++ r.Add += int64(r.Siz)
++ return
++
++ case 256 + ld.R_390_GOT16,
++ 256 + ld.R_390_GOT32,
++ 256 + ld.R_390_GOT64:
++ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
++ return
++
++ case 256 + ld.R_390_PLT16DBL,
++ 256 + ld.R_390_PLT32DBL:
++ r.Type = obj.R_PCREL
++ r.Variant = ld.RV_390_DBL
++ r.Add += int64(r.Siz)
++ if targ.Type == obj.SDYNIMPORT {
++ addpltsym(ld.Ctxt, targ)
++ r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
++ r.Add += int64(targ.Plt)
++ }
++ return
++
++ case 256 + ld.R_390_PLT32,
++ 256 + ld.R_390_PLT64:
++ r.Type = obj.R_PCREL
++ r.Add += int64(r.Siz)
++ if targ.Type == obj.SDYNIMPORT {
++ addpltsym(ld.Ctxt, targ)
++ r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
++ r.Add += int64(targ.Plt)
++ }
++ return
++
++ case 256 + ld.R_390_COPY:
++ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
++
++ case 256 + ld.R_390_GLOB_DAT:
++ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
++
++ case 256 + ld.R_390_JMP_SLOT:
++ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
++
++ case 256 + ld.R_390_RELATIVE:
++ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
++
++ case 256 + ld.R_390_GOTOFF:
++ if targ.Type == obj.SDYNIMPORT {
++ ld.Diag("unexpected R_390_GOTOFF relocation for dynamic symbol %s", targ.Name)
++ }
++ r.Type = obj.R_GOTOFF
++ return
++
++ case 256 + ld.R_390_GOTPC:
++ r.Type = obj.R_PCREL
++ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
++ r.Add += int64(r.Siz)
++ return
++
++ case 256 + ld.R_390_PC16DBL,
++ 256 + ld.R_390_PC32DBL:
++ r.Type = obj.R_PCREL
++ r.Variant = ld.RV_390_DBL
++ r.Add += int64(r.Siz)
++ if targ.Type == obj.SDYNIMPORT {
++ ld.Diag("unexpected R_390_PCnnDBL relocation for dynamic symbol %s", targ.Name)
++ }
++ return
++
++ case 256 + ld.R_390_GOTPCDBL:
++ r.Type = obj.R_PCREL
++ r.Variant = ld.RV_390_DBL
++ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
++ r.Add += int64(r.Siz)
++ return
++
++ case 256 + ld.R_390_GOTENT:
++ // if targ.Type != obj.SDYNIMPORT {
++ // ld.Diag("unexpected R_390_GOTENT relocation for non-dynamic symbol %s", targ.Name)
++ // }
++ addgotsym(targ)
++
++ r.Type = obj.R_PCREL
++ r.Variant = ld.RV_390_DBL
++ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
++ r.Add += int64(targ.Got)
++ r.Add += int64(r.Siz)
++ return
++ }
++ // Handle references to ELF symbols from our own object files.
++ if targ.Type != obj.SDYNIMPORT {
++ return
++ }
++
++ ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
++}
++
++func elfreloc1(r *ld.Reloc, sectoff int64) int {
++ ld.Thearch.Vput(uint64(sectoff))
++
++ elfsym := r.Xsym.ElfsymForReloc()
++ switch r.Type {
++ default:
++ return -1
++
++ case obj.R_TLS_LE:
++ switch r.Siz {
++ default:
++ return -1
++ case 4:
++ // WARNING - silently ignored by linker in ELF64
++ ld.Thearch.Vput(ld.R_390_TLS_LE32 | uint64(elfsym)<<32)
++ case 8:
++ // WARNING - silently ignored by linker in ELF32
++ ld.Thearch.Vput(ld.R_390_TLS_LE64 | uint64(elfsym)<<32)
++ }
++
++ case obj.R_TLS_IE:
++ switch r.Siz {
++ default:
++ return -1
++ case 4:
++ ld.Thearch.Vput(ld.R_390_TLS_IEENT | uint64(elfsym)<<32)
++ }
++
++ case obj.R_ADDR:
++ switch r.Siz {
++ default:
++ return -1
++ case 4:
++ ld.Thearch.Vput(ld.R_390_32 | uint64(elfsym)<<32)
++ case 8:
++ ld.Thearch.Vput(ld.R_390_64 | uint64(elfsym)<<32)
++ }
++
++ case obj.R_GOTPCREL:
++ if r.Siz == 4 {
++ ld.Thearch.Vput(ld.R_390_GOTENT | uint64(elfsym)<<32)
++ } else {
++ return -1
++ }
++
++ case obj.R_PCREL, obj.R_PCRELDBL, obj.R_CALL:
++ elfrel := ld.R_390_NONE
++ isdbl := r.Variant&ld.RV_TYPE_MASK == ld.RV_390_DBL
++ // TODO(mundaym): all DBL style relocations should be signalled using the variant.
++ switch r.Type {
++ case obj.R_PCRELDBL, obj.R_CALL:
++ isdbl = true
++ }
++ if r.Xsym.Type == obj.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == obj.R_CALL) {
++ if isdbl {
++ switch r.Siz {
++ case 2:
++ elfrel = ld.R_390_PLT16DBL
++ case 4:
++ elfrel = ld.R_390_PLT32DBL
++ }
++ } else {
++ switch r.Siz {
++ case 4:
++ elfrel = ld.R_390_PLT32
++ case 8:
++ elfrel = ld.R_390_PLT64
++ }
++ }
++ } else {
++ if isdbl {
++ switch r.Siz {
++ case 2:
++ elfrel = ld.R_390_PC16DBL
++ case 4:
++ elfrel = ld.R_390_PC32DBL
++ }
++ } else {
++ switch r.Siz {
++ case 2:
++ elfrel = ld.R_390_PC16
++ case 4:
++ elfrel = ld.R_390_PC32
++ case 8:
++ elfrel = ld.R_390_PC64
++ }
++ }
++ }
++ if elfrel == ld.R_390_NONE {
++ return -1 // unsupported size/dbl combination
++ }
++ ld.Thearch.Vput(uint64(elfrel) | uint64(elfsym)<<32)
++ }
++
++ ld.Thearch.Vput(uint64(r.Xadd))
++ return 0
++}
++
++func elfsetupplt() {
++ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
++ got := ld.Linklookup(ld.Ctxt, ".got", 0)
++ if plt.Size == 0 {
++ // stg %r1,56(%r15)
++ ld.Adduint8(ld.Ctxt, plt, 0xe3)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Adduint8(ld.Ctxt, plt, 0xf0)
++ ld.Adduint8(ld.Ctxt, plt, 0x38)
++ ld.Adduint8(ld.Ctxt, plt, 0x00)
++ ld.Adduint8(ld.Ctxt, plt, 0x24)
++ // larl %r1,_GLOBAL_OFFSET_TABLE_
++ ld.Adduint8(ld.Ctxt, plt, 0xc0)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Addpcrelplus(ld.Ctxt, plt, got, 6)
++ // mvc 48(8,%r15),8(%r1)
++ ld.Adduint8(ld.Ctxt, plt, 0xd2)
++ ld.Adduint8(ld.Ctxt, plt, 0x07)
++ ld.Adduint8(ld.Ctxt, plt, 0xf0)
++ ld.Adduint8(ld.Ctxt, plt, 0x30)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Adduint8(ld.Ctxt, plt, 0x08)
++ // lg %r1,16(%r1)
++ ld.Adduint8(ld.Ctxt, plt, 0xe3)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Adduint8(ld.Ctxt, plt, 0x10)
++ ld.Adduint8(ld.Ctxt, plt, 0x00)
++ ld.Adduint8(ld.Ctxt, plt, 0x04)
++ // br %r1
++ ld.Adduint8(ld.Ctxt, plt, 0x07)
++ ld.Adduint8(ld.Ctxt, plt, 0xf1)
++ // nopr %r0
++ ld.Adduint8(ld.Ctxt, plt, 0x07)
++ ld.Adduint8(ld.Ctxt, plt, 0x00)
++ // nopr %r0
++ ld.Adduint8(ld.Ctxt, plt, 0x07)
++ ld.Adduint8(ld.Ctxt, plt, 0x00)
++ // nopr %r0
++ ld.Adduint8(ld.Ctxt, plt, 0x07)
++ ld.Adduint8(ld.Ctxt, plt, 0x00)
++
++ // assume got->size == 0 too
++ ld.Addaddrplus(ld.Ctxt, got, ld.Linklookup(ld.Ctxt, ".dynamic", 0), 0)
++
++ ld.Adduint64(ld.Ctxt, got, 0)
++ ld.Adduint64(ld.Ctxt, got, 0)
++ }
++}
++
++func machoreloc1(r *ld.Reloc, sectoff int64) int {
++ return -1
++}
++
++func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
++ if ld.Linkmode == ld.LinkExternal {
++ return -1
++ }
++
++ switch r.Type {
++ case obj.R_CONST:
++ *val = r.Add
++ return 0
++
++ case obj.R_GOTOFF:
++ *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
++ return 0
++ }
++
++ return -1
++}
++
++func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
++ switch r.Variant & ld.RV_TYPE_MASK {
++ default:
++ ld.Diag("unexpected relocation variant %d", r.Variant)
++ return t
++
++ case ld.RV_NONE:
++ return t
++
++ case ld.RV_390_DBL:
++ if (t & 1) != 0 {
++ ld.Diag("%s+%v is not 2-byte aligned", r.Sym.Name, r.Sym.Value)
++ }
++ return t >> 1
++ }
++}
++
++func addpltsym(ctxt *ld.Link, s *ld.LSym) {
++ if s.Plt >= 0 {
++ return
++ }
++
++ ld.Adddynsym(ctxt, s)
++
++ if ld.Iself {
++ plt := ld.Linklookup(ctxt, ".plt", 0)
++ got := ld.Linklookup(ctxt, ".got", 0)
++ rela := ld.Linklookup(ctxt, ".rela.plt", 0)
++ if plt.Size == 0 {
++ elfsetupplt()
++ }
++ // larl %r1,_GLOBAL_OFFSET_TABLE_+index
++
++ ld.Adduint8(ctxt, plt, 0xc0)
++ ld.Adduint8(ctxt, plt, 0x10)
++ ld.Addpcrelplus(ctxt, plt, got, got.Size+6) // need variant?
++
++ // add to got: pointer to current pos in plt
++ ld.Addaddrplus(ctxt, got, plt, plt.Size+8) // weird but correct
++ // lg %r1,0(%r1)
++ ld.Adduint8(ctxt, plt, 0xe3)
++ ld.Adduint8(ctxt, plt, 0x10)
++ ld.Adduint8(ctxt, plt, 0x10)
++ ld.Adduint8(ctxt, plt, 0x00)
++ ld.Adduint8(ctxt, plt, 0x00)
++ ld.Adduint8(ctxt, plt, 0x04)
++ // br %r1
++ ld.Adduint8(ctxt, plt, 0x07)
++ ld.Adduint8(ctxt, plt, 0xf1)
++ // basr %r1,%r0
++ ld.Adduint8(ctxt, plt, 0x0d)
++ ld.Adduint8(ctxt, plt, 0x10)
++ // lgf %r1,12(%r1)
++ ld.Adduint8(ctxt, plt, 0xe3)
++ ld.Adduint8(ctxt, plt, 0x10)
++ ld.Adduint8(ctxt, plt, 0x10)
++ ld.Adduint8(ctxt, plt, 0x0c)
++ ld.Adduint8(ctxt, plt, 0x00)
++ ld.Adduint8(ctxt, plt, 0x14)
++ // jg .plt
++ ld.Adduint8(ctxt, plt, 0xc0)
++ ld.Adduint8(ctxt, plt, 0xf4)
++
++ ld.Adduint32(ctxt, plt, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation
++ //.plt index
++ ld.Adduint32(ctxt, plt, uint32(rela.Size)) // rela size before current entry
++
++ // rela
++ ld.Addaddrplus(ctxt, rela, got, got.Size-8)
++
++ ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_JMP_SLOT))
++ ld.Adduint64(ctxt, rela, 0)
++
++ s.Plt = int32(plt.Size - 32)
++
++ } else {
++ ld.Diag("addpltsym: unsupported binary format")
++ }
++}
++
++func addgotsym(s *ld.LSym) {
++ if s.Got >= 0 {
++ return
++ }
++
++ ld.Adddynsym(ld.Ctxt, s)
++ got := ld.Linklookup(ld.Ctxt, ".got", 0)
++ s.Got = int32(got.Size)
++ ld.Adduint64(ld.Ctxt, got, 0)
++
++ if ld.Iself {
++ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
++ ld.Addaddrplus(ld.Ctxt, rela, got, int64(s.Got))
++ ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_GLOB_DAT))
++ ld.Adduint64(ld.Ctxt, rela, 0)
++ } else {
++ ld.Diag("addgotsym: unsupported binary format")
++ }
++}
++
++func asmb() {
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
++ }
++ ld.Bso.Flush()
++
++ if ld.Iself {
++ ld.Asmbelfsetup()
++ }
++
++ sect := ld.Segtext.Sect
++ ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
++ ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
++ for sect = sect.Next; sect != nil; sect = sect.Next {
++ ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
++ ld.Datblk(int64(sect.Vaddr), int64(sect.Length))
++ }
++
++ if ld.Segrodata.Filelen > 0 {
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f rodatblk\n", obj.Cputime())
++ }
++ ld.Bso.Flush()
++
++ ld.Cseek(int64(ld.Segrodata.Fileoff))
++ ld.Datblk(int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
++ }
++
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f datblk\n", obj.Cputime())
++ }
++ ld.Bso.Flush()
++
++ ld.Cseek(int64(ld.Segdata.Fileoff))
++ ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
++
++ /* output symbol table */
++ ld.Symsize = 0
++
++ ld.Lcsize = 0
++ symo := uint32(0)
++ if ld.Debug['s'] == 0 {
++ // TODO: rationalize
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime())
++ }
++ ld.Bso.Flush()
++ switch ld.HEADTYPE {
++ default:
++ if ld.Iself {
++ symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
++ symo = uint32(ld.Rnd(int64(symo), int64(ld.INITRND)))
++ }
++
++ case obj.Hplan9:
++ symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
++ }
++
++ ld.Cseek(int64(symo))
++ switch ld.HEADTYPE {
++ default:
++ if ld.Iself {
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f elfsym\n", obj.Cputime())
++ }
++ ld.Asmelfsym()
++ ld.Cflush()
++ ld.Cwrite(ld.Elfstrdat)
++
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
++ }
++ ld.Dwarfemitdebugsections()
++
++ if ld.Linkmode == ld.LinkExternal {
++ ld.Elfemitreloc()
++ }
++ }
++
++ case obj.Hplan9:
++ ld.Asmplan9sym()
++ ld.Cflush()
++
++ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
++ if sym != nil {
++ ld.Lcsize = int32(len(sym.P))
++ for i := 0; int32(i) < ld.Lcsize; i++ {
++ ld.Cput(uint8(sym.P[i]))
++ }
++
++ ld.Cflush()
++ }
++ }
++ }
++
++ ld.Ctxt.Cursym = nil
++ if ld.Debug['v'] != 0 {
++ fmt.Fprintf(&ld.Bso, "%5.2f header\n", obj.Cputime())
++ }
++ ld.Bso.Flush()
++ ld.Cseek(0)
++ switch ld.HEADTYPE {
++ default:
++ case obj.Hplan9: /* plan 9 */
++ ld.Thearch.Lput(0x647) /* magic */
++ ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */
++ ld.Thearch.Lput(uint32(ld.Segdata.Filelen))
++ ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
++ ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */
++ ld.Thearch.Lput(uint32(ld.Entryvalue())) /* va of entry */
++ ld.Thearch.Lput(0)
++ ld.Thearch.Lput(uint32(ld.Lcsize))
++
++ case obj.Hlinux,
++ obj.Hfreebsd,
++ obj.Hnetbsd,
++ obj.Hopenbsd,
++ obj.Hnacl:
++ ld.Asmbelf(int64(symo))
++ }
++
++ ld.Cflush()
++ if ld.Debug['c'] != 0 {
++ fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
++ fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
++ fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
++ fmt.Printf("symsize=%d\n", ld.Symsize)
++ fmt.Printf("lcsize=%d\n", ld.Lcsize)
++ fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
++ }
++}
+--- /dev/null
++++ b/src/cmd/link/internal/s390x/l.go
+@@ -0,0 +1,78 @@
++// Inferno utils/5l/asm.c
++// http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++// Writing object files.
++
++// cmd/9l/l.h from Vita Nuova.
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++const (
++ thechar = 'z'
++ PtrSize = 8
++ IntSize = 8
++ RegSize = 8
++ MaxAlign = 32 // max data alignment
++ FuncAlign = 8
++ MINLC = 2
++)
++
++/* Used by ../internal/ld/dwarf.go */
++const (
++ DWARFREGSP = 15
++ DWARFREGLR = 14
++)
+--- /dev/null
++++ b/src/cmd/link/internal/s390x/obj.go
+@@ -0,0 +1,115 @@
++// Inferno utils/5l/obj.c
++// http://code.google.com/p/inferno-os/source/browse/utils/5l/obj.c
++//
++// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
++// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
++// Portions Copyright © 1997-1999 Vita Nuova Limited
++// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
++// Portions Copyright © 2004,2006 Bruce Ellis
++// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
++// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
++// Portions Copyright © 2009 The Go Authors. All rights reserved.
++//
++// Permission is hereby granted, free of charge, to any person obtaining a copy
++// of this software and associated documentation files (the "Software"), to deal
++// in the Software without restriction, including without limitation the rights
++// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
++// copies of the Software, and to permit persons to whom the Software is
++// furnished to do so, subject to the following conditions:
++//
++// The above copyright notice and this permission notice shall be included in
++// all copies or substantial portions of the Software.
++//
++// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
++// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
++// THE SOFTWARE.
++
++package s390x
++
++import (
++ "cmd/internal/obj"
++ "cmd/link/internal/ld"
++ "fmt"
++)
++
++// Reading object files.
++
++func Main() {
++ linkarchinit()
++ ld.Ldmain()
++}
++
++func linkarchinit() {
++ ld.Thestring = obj.Getgoarch()
++ ld.Thelinkarch = &ld.Links390x
++
++ ld.Thearch.Thechar = thechar
++ ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize
++ ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize
++ ld.Thearch.Regsize = ld.Thelinkarch.Regsize
++ ld.Thearch.Funcalign = FuncAlign
++ ld.Thearch.Maxalign = MaxAlign
++ ld.Thearch.Minlc = MINLC
++ ld.Thearch.Dwarfregsp = DWARFREGSP
++ ld.Thearch.Dwarfreglr = DWARFREGLR
++
++ ld.Thearch.Adddynrel = adddynrel
++ ld.Thearch.Archinit = archinit
++ ld.Thearch.Archreloc = archreloc
++ ld.Thearch.Archrelocvariant = archrelocvariant
++ ld.Thearch.Asmb = asmb // in asm.go
++ ld.Thearch.Elfreloc1 = elfreloc1
++ ld.Thearch.Elfsetupplt = elfsetupplt
++ ld.Thearch.Gentext = gentext
++ ld.Thearch.Machoreloc1 = machoreloc1
++ ld.Thearch.Lput = ld.Lputb
++ ld.Thearch.Wput = ld.Wputb
++ ld.Thearch.Vput = ld.Vputb
++
++ ld.Thearch.Linuxdynld = "/lib64/ld64.so.1"
++
++ // not relevant for s390x
++ ld.Thearch.Freebsddynld = "XXX"
++ ld.Thearch.Openbsddynld = "XXX"
++ ld.Thearch.Netbsddynld = "XXX"
++ ld.Thearch.Dragonflydynld = "XXX"
++ ld.Thearch.Solarisdynld = "XXX"
++}
++
++func archinit() {
++ // getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
++ // Go was built; see ../../make.bash.
++ if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
++ ld.Linkmode = ld.LinkInternal
++ }
++
++ if ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.DynlinkingGo() {
++ ld.Linkmode = ld.LinkExternal
++ }
++
++ switch ld.HEADTYPE {
++ default:
++ ld.Exitf("unknown -H option: %v", ld.HEADTYPE)
++
++ case obj.Hlinux: /* s390x elf */
++ ld.Elfinit()
++ ld.HEADR = ld.ELFRESERVE
++ if ld.INITTEXT == -1 {
++ ld.INITTEXT = 0x10000 + int64(ld.HEADR)
++ }
++ if ld.INITDAT == -1 {
++ ld.INITDAT = 0
++ }
++ if ld.INITRND == -1 {
++ ld.INITRND = 0x10000
++ }
++ }
++
++ if ld.INITDAT != 0 && ld.INITRND != 0 {
++ fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(ld.INITDAT), uint32(ld.INITRND))
++ }
++}
+--- a/src/cmd/link/main.go
++++ b/src/cmd/link/main.go
+@@ -11,6 +11,7 @@
+ "cmd/link/internal/arm64"
+ "cmd/link/internal/mips64"
+ "cmd/link/internal/ppc64"
++ "cmd/link/internal/s390x"
+ "cmd/link/internal/x86"
+ "fmt"
+ "os"
+@@ -33,5 +34,7 @@
+ mips64.Main()
+ case "ppc64", "ppc64le":
+ ppc64.Main()
++ case "s390x":
++ s390x.Main()
+ }
+ }
+--- a/src/cmd/objdump/objdump_test.go
++++ b/src/cmd/objdump/objdump_test.go
+@@ -107,6 +107,8 @@
+ t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
+ case "mips64", "mips64le":
+ t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
++ case "s390x":
++ t.Skipf("skipping on %s", runtime.GOARCH)
+ }
+ testDisasm(t)
+ }
+@@ -123,6 +125,8 @@
+ t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
+ case "mips64", "mips64le":
+ t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH)
++ case "s390x":
++ t.Skipf("skipping on %s", runtime.GOARCH)
+ }
+ // TODO(jsing): Reenable once openbsd/arm has external linking support.
+ if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" {
+--- a/src/cmd/vet/asmdecl.go
++++ b/src/cmd/vet/asmdecl.go
+@@ -65,6 +65,7 @@
+ asmArchAmd64p32 = asmArch{"amd64p32", 4, 4, 8, false, "SP", false}
+ asmArchPpc64 = asmArch{"ppc64", 8, 8, 8, true, "R1", true}
+ asmArchPpc64LE = asmArch{"ppc64le", 8, 8, 8, false, "R1", true}
++ asmArchS390x = asmArch{"s390x", 8, 8, 8, true, "R15", true}
+
+ arches = []*asmArch{
+ &asmArch386,
+@@ -74,6 +75,7 @@
+ &asmArchAmd64p32,
+ &asmArchPpc64,
+ &asmArchPpc64LE,
++ &asmArchS390x,
+ }
+ )
+
+--- /dev/null
++++ b/src/crypto/aes/asm_s390x.s
+@@ -0,0 +1,97 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func hasAsm() bool
++// returns whether the AES-128, AES-192 and AES-256
++// cipher message functions are supported.
++TEXT ·hasAsm(SB),NOSPLIT,$16-1
++ XOR R0, R0 // set function code to 0 (query)
++ LA 8(R15), R1
++ WORD $0xB92E0024 // KM-Query
++
++ // check if bits 18-20 are set
++ MOVD 8(R15), R2
++ SRD $40, R2
++ AND $0x38, R2 // mask bits 18-20 (00111000)
++ CMPBNE R2, $0x38, notfound
++ MOVBZ $1, R1
++ MOVB R1, ret+0(FP)
++ RET
++notfound:
++ MOVBZ R0, ret+0(FP)
++ MOVD $0, 0(R0)
++ RET
++
++// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
++TEXT ·encryptBlockAsm(SB),NOSPLIT,$0-32
++ MOVD nr+0(FP), R7
++ MOVD xk+8(FP), R1
++ MOVD dst+16(FP), R2
++ MOVD src+24(FP), R4
++ MOVD $16, R5
++ CMPBEQ R7, $14, aes256
++ CMPBEQ R7, $12, aes192
++aes128:
++ MOVBZ $18, R0
++ BR enc
++aes192:
++ MOVBZ $19, R0
++ BR enc
++aes256:
++ MOVBZ $20, R0
++enc:
++ WORD $0xB92E0024 // KM-AES
++ BVS enc
++ XOR R0, R0
++ RET
++
++// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
++TEXT ·decryptBlockAsm(SB),NOSPLIT,$0-32
++ MOVD nr+0(FP), R7
++ MOVD xk+8(FP), R1
++ MOVD dst+16(FP), R2
++ MOVD src+24(FP), R4
++ MOVD $16, R5
++ CMPBEQ R7, $14, aes256
++ CMPBEQ R7, $12, aes192
++aes128:
++ MOVBZ $(128+18), R0
++ BR dec
++aes192:
++ MOVBZ $(128+19), R0
++ BR dec
++aes256:
++ MOVBZ $(128+20), R0
++dec:
++ WORD $0xB92E0024 // KM-AES
++ BVS dec
++ XOR R0, R0
++ RET
++
++// func expandKeyAsm(nr int, key *byte, enc, dec *uint32)
++// We do NOT expand the keys here as the KM command just
++// expects the cryptographic key.
++// Instead just copy the needed bytes from the key into
++// the encryption/decryption expanded keys.
++TEXT ·expandKeyAsm(SB),NOSPLIT,$0-32
++ MOVD nr+0(FP), R1
++ MOVD key+8(FP), R2
++ MOVD enc+16(FP), R3
++ MOVD dec+24(FP), R4
++ CMPBEQ R1, $14, aes256
++ CMPBEQ R1, $12, aes192
++aes128:
++ MVC $(128/8), 0(R2), 0(R3)
++ MVC $(128/8), 0(R2), 0(R4)
++ RET
++aes192:
++ MVC $(192/8), 0(R2), 0(R3)
++ MVC $(192/8), 0(R2), 0(R4)
++ RET
++aes256:
++ MVC $(256/8), 0(R2), 0(R3)
++ MVC $(256/8), 0(R2), 0(R4)
++ RET
+--- a/src/crypto/aes/cipher_asm.go
++++ b/src/crypto/aes/cipher_asm.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build amd64
++// +build amd64 s390x
+
+ package aes
+
+--- a/src/crypto/aes/cipher_generic.go
++++ b/src/crypto/aes/cipher_generic.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build !amd64
++// +build !amd64,!s390x
+
+ package aes
+
+@@ -17,11 +17,3 @@
+ func expandKey(key []byte, enc, dec []uint32) {
+ expandKeyGo(key, enc, dec)
+ }
+-
+-func hasGCMAsm() bool {
+- return false
+-}
+-
+-type aesCipherGCM struct {
+- aesCipher
+-}
+--- /dev/null
++++ b/src/crypto/aes/gcm_generic.go
+@@ -0,0 +1,15 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build !amd64
++
++package aes
++
++func hasGCMAsm() bool {
++ return false
++}
++
++type aesCipherGCM struct {
++ aesCipher
++}
+--- a/src/crypto/cipher/xor.go
++++ b/src/crypto/cipher/xor.go
+@@ -10,7 +10,7 @@
+ )
+
+ const wordSize = int(unsafe.Sizeof(uintptr(0)))
+-const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64"
++const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "s390x"
+
+ // fastXORBytes xors in bulk. It only works on architectures that
+ // support unaligned read/writes.
+--- a/src/crypto/md5/md5block_decl.go
++++ b/src/crypto/md5/md5block_decl.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build amd64 amd64p32 386 arm
++// +build amd64 amd64p32 386 arm s390x
+
+ package md5
+
+--- a/src/crypto/md5/md5block_generic.go
++++ b/src/crypto/md5/md5block_generic.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build !amd64,!amd64p32,!386,!arm
++// +build !amd64,!amd64p32,!386,!arm,!s390x
+
+ package md5
+
+--- /dev/null
++++ b/src/crypto/md5/md5block_s390x.s
+@@ -0,0 +1,177 @@
++// Adapted from md5block_amd64.s by the Go Authors.
++//
++// Original source:
++// http://www.zorinaq.com/papers/md5-amd64.html
++// http://www.zorinaq.com/papers/md5-amd64.tar.bz2
++//
++// Translated from Perl generating GNU assembly into
++// #defines generating 6a assembly by the Go Authors.
++//
++// MD5 optimized for AMD64.
++//
++// Author: Marc Bevand
++// Licence: I hereby disclaim the copyright on this code and place it
++// in the public domain.
++
++#include "textflag.h"
++
++TEXT ·block(SB),NOSPLIT,$16-32
++ MOVD dig+0(FP), R1
++ MOVD p+8(FP), R6
++ MOVD p_len+16(FP), R5
++ AND $-64, R5
++ LAY (R6)(R5*1), R7
++
++ LMY 0(R1), R2, R5
++ CMPBEQ R6, R7, end
++
++loop:
++ STMY R2, R5, tmp-16(SP)
++
++ MOVWBR 0(R6), R8
++ MOVWZ R5, R9
++
++#define ROUND1(a, b, c, d, index, const, shift) \
++ XOR c, R9; \
++ ADD $const, a; \
++ ADD R8, a; \
++ AND b, R9; \
++ XOR d, R9; \
++ MOVWBR (index*4)(R6), R8; \
++ ADD R9, a; \
++ RLL $shift, a; \
++ MOVWZ c, R9; \
++ ADD b, a
++
++ ROUND1(R2,R3,R4,R5, 1,0xd76aa478, 7);
++ ROUND1(R5,R2,R3,R4, 2,0xe8c7b756,12);
++ ROUND1(R4,R5,R2,R3, 3,0x242070db,17);
++ ROUND1(R3,R4,R5,R2, 4,0xc1bdceee,22);
++ ROUND1(R2,R3,R4,R5, 5,0xf57c0faf, 7);
++ ROUND1(R5,R2,R3,R4, 6,0x4787c62a,12);
++ ROUND1(R4,R5,R2,R3, 7,0xa8304613,17);
++ ROUND1(R3,R4,R5,R2, 8,0xfd469501,22);
++ ROUND1(R2,R3,R4,R5, 9,0x698098d8, 7);
++ ROUND1(R5,R2,R3,R4,10,0x8b44f7af,12);
++ ROUND1(R4,R5,R2,R3,11,0xffff5bb1,17);
++ ROUND1(R3,R4,R5,R2,12,0x895cd7be,22);
++ ROUND1(R2,R3,R4,R5,13,0x6b901122, 7);
++ ROUND1(R5,R2,R3,R4,14,0xfd987193,12);
++ ROUND1(R4,R5,R2,R3,15,0xa679438e,17);
++ ROUND1(R3,R4,R5,R2, 0,0x49b40821,22);
++
++ MOVWBR (1*4)(R6), R8
++ MOVWZ R5, R9
++ MOVWZ R5, R1
++
++#define ROUND2(a, b, c, d, index, const, shift) \
++ XOR $0xffffffff, R9; \ // NOTW R9
++ ADD $const, a; \
++ ADD R8, a; \
++ AND b, R1; \
++ AND c, R9; \
++ MOVWBR (index*4)(R6), R8; \
++ OR R9, R1; \
++ MOVWZ c, R9; \
++ ADD R1, a; \
++ MOVWZ c, R1; \
++ RLL $shift, a; \
++ ADD b, a
++
++ ROUND2(R2,R3,R4,R5, 6,0xf61e2562, 5);
++ ROUND2(R5,R2,R3,R4,11,0xc040b340, 9);
++ ROUND2(R4,R5,R2,R3, 0,0x265e5a51,14);
++ ROUND2(R3,R4,R5,R2, 5,0xe9b6c7aa,20);
++ ROUND2(R2,R3,R4,R5,10,0xd62f105d, 5);
++ ROUND2(R5,R2,R3,R4,15, 0x2441453, 9);
++ ROUND2(R4,R5,R2,R3, 4,0xd8a1e681,14);
++ ROUND2(R3,R4,R5,R2, 9,0xe7d3fbc8,20);
++ ROUND2(R2,R3,R4,R5,14,0x21e1cde6, 5);
++ ROUND2(R5,R2,R3,R4, 3,0xc33707d6, 9);
++ ROUND2(R4,R5,R2,R3, 8,0xf4d50d87,14);
++ ROUND2(R3,R4,R5,R2,13,0x455a14ed,20);
++ ROUND2(R2,R3,R4,R5, 2,0xa9e3e905, 5);
++ ROUND2(R5,R2,R3,R4, 7,0xfcefa3f8, 9);
++ ROUND2(R4,R5,R2,R3,12,0x676f02d9,14);
++ ROUND2(R3,R4,R5,R2, 0,0x8d2a4c8a,20);
++
++ MOVWBR (5*4)(R6), R8
++ MOVWZ R4, R9
++
++#define ROUND3(a, b, c, d, index, const, shift) \
++ ADD $const, a; \
++ ADD R8, a; \
++ MOVWBR (index*4)(R6), R8; \
++ XOR d, R9; \
++ XOR b, R9; \
++ ADD R9, a; \
++ RLL $shift, a; \
++ MOVWZ b, R9; \
++ ADD b, a
++
++ ROUND3(R2,R3,R4,R5, 8,0xfffa3942, 4);
++ ROUND3(R5,R2,R3,R4,11,0x8771f681,11);
++ ROUND3(R4,R5,R2,R3,14,0x6d9d6122,16);
++ ROUND3(R3,R4,R5,R2, 1,0xfde5380c,23);
++ ROUND3(R2,R3,R4,R5, 4,0xa4beea44, 4);
++ ROUND3(R5,R2,R3,R4, 7,0x4bdecfa9,11);
++ ROUND3(R4,R5,R2,R3,10,0xf6bb4b60,16);
++ ROUND3(R3,R4,R5,R2,13,0xbebfbc70,23);
++ ROUND3(R2,R3,R4,R5, 0,0x289b7ec6, 4);
++ ROUND3(R5,R2,R3,R4, 3,0xeaa127fa,11);
++ ROUND3(R4,R5,R2,R3, 6,0xd4ef3085,16);
++ ROUND3(R3,R4,R5,R2, 9, 0x4881d05,23);
++ ROUND3(R2,R3,R4,R5,12,0xd9d4d039, 4);
++ ROUND3(R5,R2,R3,R4,15,0xe6db99e5,11);
++ ROUND3(R4,R5,R2,R3, 2,0x1fa27cf8,16);
++ ROUND3(R3,R4,R5,R2, 0,0xc4ac5665,23);
++
++ MOVWBR (0*4)(R6), R8
++ MOVWZ $0xffffffff, R9
++ XOR R5, R9
++
++#define ROUND4(a, b, c, d, index, const, shift) \
++ ADD $const, a; \
++ ADD R8, a; \
++ OR b, R9; \
++ XOR c, R9; \
++ ADD R9, a; \
++ MOVWBR (index*4)(R6), R8; \
++ MOVWZ $0xffffffff, R9; \
++ RLL $shift, a; \
++ XOR c, R9; \
++ ADD b, a
++
++ ROUND4(R2,R3,R4,R5, 7,0xf4292244, 6);
++ ROUND4(R5,R2,R3,R4,14,0x432aff97,10);
++ ROUND4(R4,R5,R2,R3, 5,0xab9423a7,15);
++ ROUND4(R3,R4,R5,R2,12,0xfc93a039,21);
++ ROUND4(R2,R3,R4,R5, 3,0x655b59c3, 6);
++ ROUND4(R5,R2,R3,R4,10,0x8f0ccc92,10);
++ ROUND4(R4,R5,R2,R3, 1,0xffeff47d,15);
++ ROUND4(R3,R4,R5,R2, 8,0x85845dd1,21);
++ ROUND4(R2,R3,R4,R5,15,0x6fa87e4f, 6);
++ ROUND4(R5,R2,R3,R4, 6,0xfe2ce6e0,10);
++ ROUND4(R4,R5,R2,R3,13,0xa3014314,15);
++ ROUND4(R3,R4,R5,R2, 4,0x4e0811a1,21);
++ ROUND4(R2,R3,R4,R5,11,0xf7537e82, 6);
++ ROUND4(R5,R2,R3,R4, 2,0xbd3af235,10);
++ ROUND4(R4,R5,R2,R3, 9,0x2ad7d2bb,15);
++ ROUND4(R3,R4,R5,R2, 0,0xeb86d391,21);
++
++ MOVWZ tmp-16(SP), R1
++ ADD R1, R2
++ MOVWZ tmp-12(SP), R1
++ ADD R1, R3
++ MOVWZ tmp-8(SP), R1
++ ADD R1, R4
++ MOVWZ tmp-4(SP), R1
++ ADD R1, R5
++
++ ADD $64, R6
++ CMPBLT R6, R7, loop
++
++end:
++ MOVD dig+0(FP), R1
++ STMY R2, R5, 0(R1)
++ RET
+--- a/src/crypto/sha1/sha1block_decl.go
++++ b/src/crypto/sha1/sha1block_decl.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build amd64 amd64p32 arm 386
++// +build amd64 amd64p32 arm 386 s390x
+
+ package sha1
+
+--- a/src/crypto/sha1/sha1block_generic.go
++++ b/src/crypto/sha1/sha1block_generic.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build !amd64,!amd64p32,!386,!arm
++// +build !amd64,!amd64p32,!386,!arm,!s390x
+
+ package sha1
+
+--- /dev/null
++++ b/src/crypto/sha1/sha1block_s390x.s
+@@ -0,0 +1,37 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func block(dig *digest, p []byte)
++TEXT ·block(SB),NOSPLIT,$0-32
++start:
++ // Check that we have the SHA-1 function
++ MOVD ·kimdQueryResult(SB), R4
++ SRD $56, R4 // Get the first byte
++ AND $0x40, R4, R5 // Bit 1 for SHA-1
++ BNE hardware
++ AND $0x80, R4, R5 // Bit 0 for Query
++ BNE generic
++ MOVD $·kimdQueryResult(SB), R1
++ XOR R0, R0 // Query function code
++ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
++ BR start
++
++hardware:
++ MOVD dig+0(FP), R1
++ MOVD p_base+8(FP), R2
++ MOVD p_len+16(FP), R3
++ MOVBZ $1, R0 // SHA-1 function code
++kimd:
++ WORD $0xB93E0002 // KIMD R2
++ BVS kimd // interrupted -- continue
++done:
++ XOR R0, R0 // Restore R0
++ RET
++
++generic:
++ BR ·blockGeneric(SB)
++
++GLOBL ·kimdQueryResult(SB), NOPTR, $16
+--- a/src/crypto/sha256/sha256block.go
++++ b/src/crypto/sha256/sha256block.go
+@@ -77,7 +77,7 @@
+ 0xc67178f2,
+ }
+
+-func block(dig *digest, p []byte) {
++func blockGeneric(dig *digest, p []byte) {
+ var w [64]uint32
+ h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
+ for len(p) >= chunk {
+--- a/src/crypto/sha256/sha256block_decl.go
++++ b/src/crypto/sha256/sha256block_decl.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build 386 amd64
++// +build 386 amd64 s390x
+
+ package sha256
+
+--- /dev/null
++++ b/src/crypto/sha256/sha256block_generic.go
+@@ -0,0 +1,9 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build !amd64,!386,!s390x
++
++package sha256
++
++var block = blockGeneric
+--- /dev/null
++++ b/src/crypto/sha256/sha256block_s390x.s
+@@ -0,0 +1,37 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func block(dig *digest, p []byte)
++TEXT ·block(SB),NOSPLIT,$0-32
++start:
++ // Check that we have the SHA-256 function
++ MOVD ·kimdQueryResult(SB), R4
++ SRD $56, R4 // Get the first byte
++ AND $0x20, R4, R5 // Bit 2 for SHA-256
++ BNE hardware
++ AND $0x80, R4, R5 // Bit 0 for Query
++ BNE generic
++ MOVD $·kimdQueryResult(SB), R1
++ XOR R0, R0 // Query function code
++ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
++ BR start
++
++hardware:
++ MOVD dig+0(FP), R1
++ MOVD p_base+8(FP), R2
++ MOVD p_len+16(FP), R3
++ MOVBZ $2, R0 // SHA-256 function code
++kimd:
++ WORD $0xB93E0002 // KIMD R2
++ BVS kimd // interrupted -- continue
++done:
++ XOR R0, R0 // Restore R0
++ RET
++
++generic:
++ BR ·blockGeneric(SB)
++
++GLOBL ·kimdQueryResult(SB), NOPTR, $16
+--- a/src/crypto/sha512/sha512block.go
++++ b/src/crypto/sha512/sha512block.go
+@@ -93,7 +93,7 @@
+ 0x6c44198c4a475817,
+ }
+
+-func block(dig *digest, p []byte) {
++func blockGeneric(dig *digest, p []byte) {
+ var w [80]uint64
+ h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
+ for len(p) >= chunk {
+--- a/src/crypto/sha512/sha512block_decl.go
++++ b/src/crypto/sha512/sha512block_decl.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build amd64
++// +build amd64 s390x
+
+ package sha512
+
+--- /dev/null
++++ b/src/crypto/sha512/sha512block_generic.go
+@@ -0,0 +1,9 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build !amd64,!s390x
++
++package sha512
++
++var block = blockGeneric
+--- /dev/null
++++ b/src/crypto/sha512/sha512block_s390x.s
+@@ -0,0 +1,37 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func block(dig *digest, p []byte)
++TEXT ·block(SB),NOSPLIT,$0-32
++start:
++ // Check that we have the SHA-512 function
++ MOVD ·kimdQueryResult(SB), R4
++ SRD $56, R4 // Get the first byte
++ AND $0x10, R4, R5 // Bit 3 for SHA-512
++ BNE hardware
++ AND $0x80, R4, R5 // Bit 0 for Query
++ BNE generic
++ MOVD $·kimdQueryResult(SB), R1
++ XOR R0, R0 // Query function code
++ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
++ BR start
++
++hardware:
++ MOVD dig+0(FP), R1
++ MOVD p_base+8(FP), R2
++ MOVD p_len+16(FP), R3
++ MOVBZ $3, R0 // SHA-512 function code
++kimd:
++ WORD $0xB93E0002 // KIMD R2
++ BVS kimd // interrupted -- continue
++done:
++ XOR R0, R0 // Restore R0
++ RET
++
++generic:
++ BR ·blockGeneric(SB)
++
++GLOBL ·kimdQueryResult(SB), NOPTR, $16
+--- a/src/crypto/x509/sec1.go
++++ b/src/crypto/x509/sec1.go
+@@ -41,8 +41,8 @@
+ }
+
+ privateKeyBytes := key.D.Bytes()
+- paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen() + 7) / 8)
+- copy(paddedPrivateKey[len(paddedPrivateKey) - len(privateKeyBytes):], privateKeyBytes)
++ paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
++ copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
+
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+@@ -84,7 +84,7 @@
+ priv.Curve = curve
+ priv.D = k
+
+- privateKey := make([]byte, (curveOrder.BitLen() + 7) / 8)
++ privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
+
+ // Some private keys have leading zero padding. This is invalid
+ // according to [SEC1], but this code will ignore it.
+@@ -98,7 +98,7 @@
+ // Some private keys remove all leading zeros, this is also invalid
+ // according to [SEC1] but since OpenSSL used to do this, we ignore
+ // this too.
+- copy(privateKey[len(privateKey) - len(privKey.PrivateKey):], privKey.PrivateKey)
++ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
+ priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
+
+ return priv, nil
+--- a/src/crypto/x509/sec1_test.go
++++ b/src/crypto/x509/sec1_test.go
+@@ -10,8 +10,8 @@
+ "testing"
+ )
+
+-var ecKeyTests = []struct{
+- derHex string
++var ecKeyTests = []struct {
++ derHex string
+ shouldReserialize bool
+ }{
+ // Generated using:
+--- a/src/debug/elf/elf.go
++++ b/src/debug/elf/elf.go
+@@ -1725,6 +1725,140 @@
+ func (i R_PPC64) String() string { return stringName(uint32(i), rppc64Strings, false) }
+ func (i R_PPC64) GoString() string { return stringName(uint32(i), rppc64Strings, true) }
+
++// Relocation types for s390x processors.
++type R_390 int
++
++const (
++ R_390_NONE R_390 = 0
++ R_390_8 R_390 = 1
++ R_390_12 R_390 = 2
++ R_390_16 R_390 = 3
++ R_390_32 R_390 = 4
++ R_390_PC32 R_390 = 5
++ R_390_GOT12 R_390 = 6
++ R_390_GOT32 R_390 = 7
++ R_390_PLT32 R_390 = 8
++ R_390_COPY R_390 = 9
++ R_390_GLOB_DAT R_390 = 10
++ R_390_JMP_SLOT R_390 = 11
++ R_390_RELATIVE R_390 = 12
++ R_390_GOTOFF R_390 = 13
++ R_390_GOTPC R_390 = 14
++ R_390_GOT16 R_390 = 15
++ R_390_PC16 R_390 = 16
++ R_390_PC16DBL R_390 = 17
++ R_390_PLT16DBL R_390 = 18
++ R_390_PC32DBL R_390 = 19
++ R_390_PLT32DBL R_390 = 20
++ R_390_GOTPCDBL R_390 = 21
++ R_390_64 R_390 = 22
++ R_390_PC64 R_390 = 23
++ R_390_GOT64 R_390 = 24
++ R_390_PLT64 R_390 = 25
++ R_390_GOTENT R_390 = 26
++ R_390_GOTOFF16 R_390 = 27
++ R_390_GOTOFF64 R_390 = 28
++ R_390_GOTPLT12 R_390 = 29
++ R_390_GOTPLT16 R_390 = 30
++ R_390_GOTPLT32 R_390 = 31
++ R_390_GOTPLT64 R_390 = 32
++ R_390_GOTPLTENT R_390 = 33
++ R_390_GOTPLTOFF16 R_390 = 34
++ R_390_GOTPLTOFF32 R_390 = 35
++ R_390_GOTPLTOFF64 R_390 = 36
++ R_390_TLS_LOAD R_390 = 37
++ R_390_TLS_GDCALL R_390 = 38
++ R_390_TLS_LDCALL R_390 = 39
++ R_390_TLS_GD32 R_390 = 40
++ R_390_TLS_GD64 R_390 = 41
++ R_390_TLS_GOTIE12 R_390 = 42
++ R_390_TLS_GOTIE32 R_390 = 43
++ R_390_TLS_GOTIE64 R_390 = 44
++ R_390_TLS_LDM32 R_390 = 45
++ R_390_TLS_LDM64 R_390 = 46
++ R_390_TLS_IE32 R_390 = 47
++ R_390_TLS_IE64 R_390 = 48
++ R_390_TLS_IEENT R_390 = 49
++ R_390_TLS_LE32 R_390 = 50
++ R_390_TLS_LE64 R_390 = 51
++ R_390_TLS_LDO32 R_390 = 52
++ R_390_TLS_LDO64 R_390 = 53
++ R_390_TLS_DTPMOD R_390 = 54
++ R_390_TLS_DTPOFF R_390 = 55
++ R_390_TLS_TPOFF R_390 = 56
++ R_390_20 R_390 = 57
++ R_390_GOT20 R_390 = 58
++ R_390_GOTPLT20 R_390 = 59
++ R_390_TLS_GOTIE20 R_390 = 60
++)
++
++var r390Strings = []intName{
++ {0, "R_390_NONE"},
++ {1, "R_390_8"},
++ {2, "R_390_12"},
++ {3, "R_390_16"},
++ {4, "R_390_32"},
++ {5, "R_390_PC32"},
++ {6, "R_390_GOT12"},
++ {7, "R_390_GOT32"},
++ {8, "R_390_PLT32"},
++ {9, "R_390_COPY"},
++ {10, "R_390_GLOB_DAT"},
++ {11, "R_390_JMP_SLOT"},
++ {12, "R_390_RELATIVE"},
++ {13, "R_390_GOTOFF"},
++ {14, "R_390_GOTPC"},
++ {15, "R_390_GOT16"},
++ {16, "R_390_PC16"},
++ {17, "R_390_PC16DBL"},
++ {18, "R_390_PLT16DBL"},
++ {19, "R_390_PC32DBL"},
++ {20, "R_390_PLT32DBL"},
++ {21, "R_390_GOTPCDBL"},
++ {22, "R_390_64"},
++ {23, "R_390_PC64"},
++ {24, "R_390_GOT64"},
++ {25, "R_390_PLT64"},
++ {26, "R_390_GOTENT"},
++ {27, "R_390_GOTOFF16"},
++ {28, "R_390_GOTOFF64"},
++ {29, "R_390_GOTPLT12"},
++ {30, "R_390_GOTPLT16"},
++ {31, "R_390_GOTPLT32"},
++ {32, "R_390_GOTPLT64"},
++ {33, "R_390_GOTPLTENT"},
++ {34, "R_390_GOTPLTOFF16"},
++ {35, "R_390_GOTPLTOFF32"},
++ {36, "R_390_GOTPLTOFF64"},
++ {37, "R_390_TLS_LOAD"},
++ {38, "R_390_TLS_GDCALL"},
++ {39, "R_390_TLS_LDCALL"},
++ {40, "R_390_TLS_GD32"},
++ {41, "R_390_TLS_GD64"},
++ {42, "R_390_TLS_GOTIE12"},
++ {43, "R_390_TLS_GOTIE32"},
++ {44, "R_390_TLS_GOTIE64"},
++ {45, "R_390_TLS_LDM32"},
++ {46, "R_390_TLS_LDM64"},
++ {47, "R_390_TLS_IE32"},
++ {48, "R_390_TLS_IE64"},
++ {49, "R_390_TLS_IEENT"},
++ {50, "R_390_TLS_LE32"},
++ {51, "R_390_TLS_LE64"},
++ {52, "R_390_TLS_LDO32"},
++ {53, "R_390_TLS_LDO64"},
++ {54, "R_390_TLS_DTPMOD"},
++ {55, "R_390_TLS_DTPOFF"},
++ {56, "R_390_TLS_TPOFF"},
++ {57, "R_390_20"},
++ {58, "R_390_GOT20"},
++ {59, "R_390_GOTPLT20"},
++ {60, "R_390_TLS_GOTIE20"},
++}
++
++func (i R_390) String() string { return stringName(uint32(i), r390Strings, false) }
++func (i R_390) GoString() string { return stringName(uint32(i), r390Strings, true) }
++
+ // Relocation types for SPARC.
+ type R_SPARC int
+
+--- a/src/debug/elf/file.go
++++ b/src/debug/elf/file.go
+@@ -596,6 +596,8 @@
+ return f.applyRelocationsPPC64(dst, rels)
+ case f.Class == ELFCLASS64 && f.Machine == EM_MIPS:
+ return f.applyRelocationsMIPS64(dst, rels)
++ case f.Class == ELFCLASS64 && f.Machine == EM_S390:
++ return f.applyRelocationss390x(dst, rels)
+ default:
+ return errors.New("applyRelocations: not implemented")
+ }
+@@ -908,6 +910,55 @@
+ }
+ }
+
++ return nil
++}
++
++func (f *File) applyRelocationss390x(dst []byte, rels []byte) error {
++ // 24 is the size of Rela64.
++ if len(rels)%24 != 0 {
++ return errors.New("length of relocation section is not a multiple of 24")
++ }
++
++ symbols, _, err := f.getSymbols(SHT_SYMTAB)
++ if err != nil {
++ return err
++ }
++
++ b := bytes.NewReader(rels)
++ var rela Rela64
++
++ for b.Len() > 0 {
++ binary.Read(b, f.ByteOrder, &rela)
++ symNo := rela.Info >> 32
++ t := R_390(rela.Info & 0xffff)
++
++ if symNo == 0 || symNo > uint64(len(symbols)) {
++ continue
++ }
++ sym := &symbols[symNo-1]
++ switch SymType(sym.Info & 0xf) {
++ case STT_SECTION, STT_NOTYPE:
++ break
++ default:
++ continue
++ }
++
++ switch t {
++ case R_390_64:
++ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
++ continue
++ }
++ val := sym.Value + uint64(rela.Addend)
++ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val)
++ case R_390_32:
++ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
++ continue
++ }
++ val := uint32(sym.Value) + uint32(rela.Addend)
++ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val)
++ }
++ }
++
+ return nil
+ }
+
+--- a/src/debug/elf/file_test.go
++++ b/src/debug/elf/file_test.go
+@@ -473,6 +473,25 @@
+ },
+ },
+ {
++ "testdata/go-relocation-test-gcc531-s390x.obj",
++ []relocationTestEntry{
++ {0, &dwarf.Entry{
++ Offset: 0xb,
++ Tag: dwarf.TagCompileUnit,
++ Children: true,
++ Field: []dwarf.Field{
++ {Attr: dwarf.AttrProducer, Val: "GNU C11 5.3.1 20160316 -march=zEC12 -m64 -mzarch -g -fstack-protector-strong", Class: dwarf.ClassString},
++ {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant},
++ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString},
++ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
++ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
++ {Attr: dwarf.AttrHighpc, Val: int64(58), Class: dwarf.ClassConstant},
++ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
++ },
++ }},
++ },
++ },
++ {
+ "testdata/go-relocation-test-gcc493-mips64le.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+--- a/src/debug/gosym/pclntab.go
++++ b/src/debug/gosym/pclntab.go
+@@ -167,7 +167,7 @@
+ // Check header: 4-byte magic, two zeros, pc quantum, pointer size.
+ t.go12 = -1 // not Go 1.2 until proven otherwise
+ if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
+- (t.Data[6] != 1 && t.Data[6] != 4) || // pc quantum
++ (t.Data[6] != 1 && t.Data[6] != 2 && t.Data[6] != 4) || // pc quantum
+ (t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
+ return
+ }
+--- a/src/go/build/build.go
++++ b/src/go/build/build.go
+@@ -282,6 +282,7 @@
+ "solaris/amd64": true,
+ "windows/386": true,
+ "windows/amd64": true,
++ "linux/s390x": true,
+ }
+
+ func defaultContext() Context {
+--- a/src/hash/crc32/crc32_generic.go
++++ b/src/hash/crc32/crc32_generic.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build 386 arm arm64 mips64 mips64le ppc64 ppc64le
++// +build 386 arm arm64 mips64 mips64le ppc64 ppc64le s390x
+
+ package crc32
+
+--- /dev/null
++++ b/src/internal/syscall/unix/getrandom_linux_s390x.go
+@@ -0,0 +1,7 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package unix
++
++const randomTrap uintptr = 349
+--- /dev/null
++++ b/src/math/big/arith_s390x.s
+@@ -0,0 +1,565 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build !math_big_pure_go,s390x
++
++#include "textflag.h"
++
++// This file provides fast assembly versions for the elementary
++// arithmetic operations on vectors implemented in arith.go.
++
++TEXT ·mulWW(SB),NOSPLIT,$0
++ MOVD x+0(FP), R3
++ MOVD y+8(FP), R4
++ MULHDU R3, R4
++ MOVD R10, z1+16(FP)
++ MOVD R11, z0+24(FP)
++ RET
++
++// func divWW(x1, x0, y Word) (q, r Word)
++TEXT ·divWW(SB),NOSPLIT,$0
++ MOVD x1+0(FP), R10
++ MOVD x0+8(FP), R11
++ MOVD y+16(FP), R5
++ WORD $0xb98700a5 // dlgr r10,r5
++ MOVD R11, q+24(FP)
++ MOVD R10, r+32(FP)
++ RET
++
++// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
++// func addVV(z, x, y []Word) (c Word)
++TEXT ·addVV(SB),NOSPLIT,$0
++ MOVD z_len+8(FP), R3
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R9
++ MOVD z+0(FP), R2
++
++ MOVD $0, R4 // c = 0
++ MOVD $0, R0 // make sure it's zero
++ MOVD $0, R10 // i = 0
++
++ // s/JL/JMP/ below to disable the unrolled loop
++ SUB $4, R3 // n -= 4
++ BLT v1 // if n < 0 goto v1
++
++U1: // n >= 0
++ // regular loop body unrolled 4x
++ MOVD 0(R8)(R10*1), R5
++ MOVD 8(R8)(R10*1), R6
++ MOVD 16(R8)(R10*1), R7
++ MOVD 24(R8)(R10*1), R1
++ ADDC R4, R4 // restore CF
++ MOVD 0(R9)(R10*1), R11
++ ADDE R11, R5
++ MOVD 8(R9)(R10*1), R11
++ ADDE R11, R6
++ MOVD 16(R9)(R10*1), R11
++ ADDE R11, R7
++ MOVD 24(R9)(R10*1), R11
++ ADDE R11, R1
++ MOVD R0, R4
++ ADDE R4, R4 // save CF
++ NEG R4, R4
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R6, 8(R2)(R10*1)
++ MOVD R7, 16(R2)(R10*1)
++ MOVD R1, 24(R2)(R10*1)
++
++
++ ADD $32, R10 // i += 4
++ SUB $4, R3 // n -= 4
++ BGE U1 // if n >= 0 goto U1
++
++v1: ADD $4, R3 // n += 4
++ BLE E1 // if n <= 0 goto E1
++
++L1: // n > 0
++ ADDC R4, R4 // restore CF
++ MOVD 0(R8)(R10*1), R5
++ MOVD 0(R9)(R10*1), R11
++ ADDE R11, R5
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R0, R4
++ ADDE R4, R4 // save CF
++ NEG R4, R4
++
++ ADD $8, R10 // i++
++ SUB $1, R3 // n--
++ BGT L1 // if n > 0 goto L1
++
++E1: NEG R4, R4
++ MOVD R4, c+72(FP) // return c
++ RET
++
++// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
++// func subVV(z, x, y []Word) (c Word)
++// (same as addVV except for SUBC/SUBE instead of ADDC/ADDE and label names)
++TEXT ·subVV(SB),NOSPLIT,$0
++ MOVD z_len+8(FP), R3
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R9
++ MOVD z+0(FP), R2
++
++ MOVD $0, R4 // c = 0
++ MOVD $0, R0 // make sure it's zero
++ MOVD $0, R10 // i = 0
++
++ // s/JL/JMP/ below to disable the unrolled loop
++ SUB $4, R3 // n -= 4
++ BLT v1 // if n < 0 goto v1
++
++U1: // n >= 0
++ // regular loop body unrolled 4x
++ MOVD 0(R8)(R10*1), R5
++ MOVD 8(R8)(R10*1), R6
++ MOVD 16(R8)(R10*1), R7
++ MOVD 24(R8)(R10*1), R1
++ MOVD R0, R11
++ SUBC R4, R11 // restore CF
++ MOVD 0(R9)(R10*1), R11
++ SUBE R11, R5
++ MOVD 8(R9)(R10*1), R11
++ SUBE R11, R6
++ MOVD 16(R9)(R10*1), R11
++ SUBE R11, R7
++ MOVD 24(R9)(R10*1), R11
++ SUBE R11, R1
++ MOVD R0, R4
++ SUBE R4, R4 // save CF
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R6, 8(R2)(R10*1)
++ MOVD R7, 16(R2)(R10*1)
++ MOVD R1, 24(R2)(R10*1)
++
++
++ ADD $32, R10 // i += 4
++ SUB $4, R3 // n -= 4
++ BGE U1 // if n >= 0 goto U1
++
++v1: ADD $4, R3 // n += 4
++ BLE E1 // if n <= 0 goto E1
++
++L1: // n > 0
++ MOVD R0, R11
++ SUBC R4, R11 // restore CF
++ MOVD 0(R8)(R10*1), R5
++ MOVD 0(R9)(R10*1), R11
++ SUBE R11, R5
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R0, R4
++ SUBE R4, R4 // save CF
++
++ ADD $8, R10 // i++
++ SUB $1, R3 // n--
++ BGT L1 // if n > 0 goto L1
++
++E1: NEG R4, R4
++ MOVD R4, c+72(FP) // return c
++ RET
++
++
++// func addVW(z, x []Word, y Word) (c Word)
++TEXT ·addVW(SB),NOSPLIT,$0
++//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
++ MOVD z_len+8(FP), R3
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R4 // c = y
++ MOVD z+0(FP), R2
++ MOVD $0, R0 // make sure it's 0
++ MOVD $0, R10 // i = 0
++
++ // s/JL/JMP/ below to disable the unrolled loop
++ SUB $4, R3 // n -= 4
++ BLT v4 // if n < 4 goto v4
++
++U4: // n >= 0
++ // regular loop body unrolled 4x
++ MOVD 0(R8)(R10*1), R5
++ MOVD 8(R8)(R10*1), R6
++ MOVD 16(R8)(R10*1), R7
++ MOVD 24(R8)(R10*1), R1
++ ADDC R4, R5
++ ADDE R0, R6
++ ADDE R0, R7
++ ADDE R0, R1
++ ADDE R0, R0
++ MOVD R0, R4 // save CF
++ SUB R0, R0
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R6, 8(R2)(R10*1)
++ MOVD R7, 16(R2)(R10*1)
++ MOVD R1, 24(R2)(R10*1)
++
++ ADD $32, R10 // i += 4 -> i +=32
++ SUB $4, R3 // n -= 4
++ BGE U4 // if n >= 0 goto U4
++
++v4: ADD $4, R3 // n += 4
++ BLE E4 // if n <= 0 goto E4
++
++L4: // n > 0
++ MOVD 0(R8)(R10*1), R5
++ ADDC R4, R5
++ ADDE R0, R0
++ MOVD R0, R4 // save CF
++ SUB R0, R0
++ MOVD R5, 0(R2)(R10*1)
++
++ ADD $8, R10 // i++
++ SUB $1, R3 // n--
++ BGT L4 // if n > 0 goto L4
++
++E4: MOVD R4, c+56(FP) // return c
++
++ RET
++
++//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
++// func subVW(z, x []Word, y Word) (c Word)
++// (same as addVW except for SUBC/SUBE instead of ADDC/ADDE and label names)
++TEXT ·subVW(SB),NOSPLIT,$0
++ MOVD z_len+8(FP), R3
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R4 // c = y
++ MOVD z+0(FP), R2
++ MOVD $0, R0 // make sure it's 0
++ MOVD $0, R10 // i = 0
++
++ // s/JL/JMP/ below to disable the unrolled loop
++ SUB $4, R3 // n -= 4
++ BLT v4 // if n < 4 goto v4
++
++U4: // n >= 0
++ // regular loop body unrolled 4x
++ MOVD 0(R8)(R10*1), R5
++ MOVD 8(R8)(R10*1), R6
++ MOVD 16(R8)(R10*1), R7
++ MOVD 24(R8)(R10*1), R1
++ SUBC R4, R5 //SLGR -> SUBC
++ SUBE R0, R6 //SLBGR -> SUBE
++ SUBE R0, R7
++ SUBE R0, R1
++ SUBE R4, R4 // save CF
++ NEG R4, R4
++ MOVD R5, 0(R2)(R10*1)
++ MOVD R6, 8(R2)(R10*1)
++ MOVD R7, 16(R2)(R10*1)
++ MOVD R1, 24(R2)(R10*1)
++
++ ADD $32, R10 // i += 4 -> i +=32
++ SUB $4, R3 // n -= 4
++ BGE U4 // if n >= 0 goto U4
++
++v4: ADD $4, R3 // n += 4
++ BLE E4 // if n <= 0 goto E4
++
++L4: // n > 0
++ MOVD 0(R8)(R10*1), R5
++ SUBC R4, R5
++ SUBE R4, R4 // save CF
++ NEG R4, R4
++ MOVD R5, 0(R2)(R10*1)
++
++ ADD $8, R10 // i++
++ SUB $1, R3 // n--
++ BGT L4 // if n > 0 goto L4
++
++E4: MOVD R4, c+56(FP) // return c
++
++ RET
++
++// func shlVU(z, x []Word, s uint) (c Word)
++TEXT ·shlVU(SB),NOSPLIT,$0
++ MOVD z_len+8(FP), R5
++ SUB $1, R5 // n--
++ BLT X8b // n < 0 (n <= 0)
++
++ // n > 0
++ MOVD s+48(FP), R4
++ CMPBEQ R0, R4, Z80 //handle 0 case beq
++ MOVD $64, R6
++ CMPBEQ R6, R4, Z864 //handle 64 case beq
++ MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++ SUB R4, R6, R7
++ MOVD (R8)(R5*1), R10 // w1 = x[i-1]
++ SRD R7, R10, R3
++ MOVD R3, c+56(FP)
++
++ MOVD $0, R1 // i = 0
++ BR E8
++
++ // i < n-1
++L8: MOVD R10, R3 // w = w1
++ MOVD -8(R8)(R5*1), R10 // w1 = x[i+1]
++
++ SLD R4, R3 // w<<s
++ SRD R7, R10, R6
++ OR R6, R3
++ MOVD R3, (R2)(R5*1) // z[i] = w<<s | w1>>ŝ
++ SUB $8, R5 // i--
++
++E8: CMPBGT R5, R0, L8 // i < n-1
++
++ // i >= n-1
++X8a: SLD R4, R10 // w1<<s
++ MOVD R10, (R2)(R5*1) // z[0] = w1<<s
++ RET
++
++X8b: MOVD $0, c+56(FP)
++ RET
++
++Z80: MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++
++ MOVD (R8)(R5*1), R10 // w1 = x[n-1]
++ MOVD $0, R3
++ MOVD R3, c+56(FP) // c = 0 for shift of 0
++
++ BR E80
++
++ // i > 0
++L80: MOVD R10, R3 // w = w1
++ MOVD -8(R8)(R5*1), R10 // w1 = x[i-1]
++
++ MOVD R3, (R2)(R5*1) // z[i] = w
++ SUB $8, R5 // i--
++
++E80: CMPBGT R5, R0, L80 // i > 0
++
++ MOVD R10, (R2) // z[0] = w1
++ RET
++
++Z864: MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++ MOVD (R8)(R5*1), R3 // w1 = x[n-1]
++ MOVD R3, c+56(FP) // z[i] = x[n-1]
++
++ BR E864
++
++ // i < n-1
++L864: MOVD -8(R8)(R5*1), R3
++
++ MOVD R3, (R2)(R5*1) // z[i] = x[n-1]
++ SUB $8, R5 // i--
++
++E864: CMPBGT R5, R0, L864 // i < n-1
++
++ MOVD R0, (R2) // z[n-1] = 0
++ RET
++
++
++// CX = R4, r8 = r8, r10 = r2 , r11 = r5, DX = r3, AX = r10 , BX = R1 , 64-count = r7 (R0 set to 0) temp = R6
++// func shrVU(z, x []Word, s uint) (c Word)
++TEXT ·shrVU(SB),NOSPLIT,$0
++ MOVD z_len+8(FP), R5
++ SUB $1, R5 // n--
++ BLT X9b // n < 0 (n <= 0)
++
++ // n > 0
++ MOVD s+48(FP), R4
++ CMPBEQ R0, R4, ZB0 //handle 0 case beq
++ MOVD $64, R6
++ CMPBEQ R6, R4, ZB64 //handle 64 case beq
++ MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++ SUB R4, R6, R7
++ MOVD (R8), R10 // w1 = x[0]
++ SLD R7, R10, R3
++ MOVD R3, c+56(FP)
++
++ MOVD $0, R1 // i = 0
++ BR E9
++
++ // i < n-1
++L9: MOVD R10, R3 // w = w1
++ MOVD 8(R8)(R1*1), R10 // w1 = x[i+1]
++
++ SRD R4, R3 // w>>s
++ SLD R7, R10, R6 // w1<<ŝ
++ OR R6, R3
++ MOVD R3, (R2)(R1*1) // z[i] = w>>s | w1<<ŝ
++ ADD $8, R1 // i++
++
++E9: CMPBGT R5, R1, L9 // i < n-1
++
++ // i >= n-1
++X9a: SRD R4, R10 // w1>>s
++ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
++ RET
++
++X9b: MOVD R0, c+56(FP)
++ RET
++
++ZB0: MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++
++ MOVD (R8), R10 // w1 = x[0]
++ MOVD $0, R3 // R10 << 64
++ MOVD R3, c+56(FP)
++
++ MOVD $0, R1 // i = 0
++ BR E9Z
++
++ // i < n-1
++L9Z: MOVD R10, R3 // w = w1
++ MOVD 8(R8)(R1*1), R10 // w1 = x[i+1]
++
++ MOVD R3, (R2)(R1*1) // z[i] = w (shift of 0)
++ ADD $8, R1 // i++
++
++E9Z: CMPBGT R5, R1, L9Z // i < n-1
++
++ // i >= n-1
++ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
++ RET
++
++ZB64: MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ SLD $3, R5 // n = n*8
++ MOVD (R8), R3 // w1 = x[0]
++ MOVD R3, c+56(FP)
++
++ MOVD $0, R1 // i = 0
++ BR E964
++
++ // i < n-1
++L964: MOVD 8(R8)(R1*1), R3 // w1 = x[i+1]
++
++ MOVD R3, (R2)(R1*1) // z[i] = x[i+1] (shift of 64)
++ ADD $8, R1 // i++
++
++E964: CMPBGT R5, R1, L964 // i < n-1
++
++ // i >= n-1
++ MOVD $0, R10 // w1>>s
++ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
++ RET
++
++// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, DX = r3, AX = r6 , BX = R1 , (R0 set to 0) + use R11 + use R7 for i
++// func mulAddVWW(z, x []Word, y, r Word) (c Word)
++TEXT ·mulAddVWW(SB),NOSPLIT,$0
++ MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R9
++ MOVD r+56(FP), R4 // c = r
++ MOVD z_len+8(FP), R5
++ MOVD $0, R1 // i = 0
++ MOVD $0, R7 // i*8 = 0
++ MOVD $0, R0 // make sure it's zero
++ BR E5
++
++L5: MOVD (R8)(R1*1), R6
++ MULHDU R9, R6
++ ADDC R4, R11 //add to low order bits
++ ADDE R0, R6
++ MOVD R11, (R2)(R1*1)
++ MOVD R6, R4
++ ADD $8, R1 // i*8 + 8
++ ADD $1, R7 // i++
++
++E5: CMPBLT R7, R5, L5 // i < n
++
++ MOVD R4, c+64(FP)
++ RET
++
++// func addMulVVW(z, x []Word, y Word) (c Word)
++// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1 , (R0 set to 0) + use R11 + use R7 for i
++TEXT ·addMulVVW(SB),NOSPLIT,$0
++ MOVD z+0(FP), R2
++ MOVD x+24(FP), R8
++ MOVD y+48(FP), R9
++ MOVD z_len+8(FP), R5
++
++ MOVD $0, R1 // i*8 = 0
++ MOVD $0, R7 // i = 0
++ MOVD $0, R0 // make sure it's zero
++ MOVD $0, R4 // c = 0
++
++ MOVD R5, R12
++ AND $-2, R12
++ CMPBGE R5, $2, A6
++ BR E6
++
++A6: MOVD (R8)(R1*1), R6
++ MULHDU R9, R6
++ MOVD (R2)(R1*1), R10
++ ADDC R10, R11 //add to low order bits
++ ADDE R0, R6
++ ADDC R4, R11
++ ADDE R0, R6
++ MOVD R6, R4
++ MOVD R11, (R2)(R1*1)
++
++ MOVD (8)(R8)(R1*1), R6
++ MULHDU R9, R6
++ MOVD (8)(R2)(R1*1), R10
++ ADDC R10, R11 //add to low order bits
++ ADDE R0, R6
++ ADDC R4, R11
++ ADDE R0, R6
++ MOVD R6, R4
++ MOVD R11, (8)(R2)(R1*1)
++
++ ADD $16, R1 // i*8 + 8
++ ADD $2, R7 // i++
++
++ CMPBLT R7, R12, A6
++ BR E6
++
++L6: MOVD (R8)(R1*1), R6
++ MULHDU R9, R6
++ MOVD (R2)(R1*1), R10
++ ADDC R10, R11 //add to low order bits
++ ADDE R0, R6
++ ADDC R4, R11
++ ADDE R0, R6
++ MOVD R6, R4
++ MOVD R11, (R2)(R1*1)
++
++ ADD $8, R1 // i*8 + 8
++ ADD $1, R7 // i++
++
++E6: CMPBLT R7, R5, L6 // i < n
++
++ MOVD R4, c+56(FP)
++ RET
++
++// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
++// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1(*8) , (R0 set to 0) + use R11 + use R7 for i
++TEXT ·divWVW(SB),NOSPLIT,$0
++ MOVD z+0(FP), R2
++ MOVD xn+24(FP), R10 // r = xn
++ MOVD x+32(FP), R8
++ MOVD y+56(FP), R9
++ MOVD z_len+8(FP), R7 // i = z
++ SLD $3, R7, R1 // i*8
++ MOVD $0, R0 // make sure it's zero
++ BR E7
++
++L7: MOVD (R8)(R1*1), R11
++ WORD $0xB98700A9 //DLGR R10,R9
++ MOVD R11, (R2)(R1*1)
++
++E7: SUB $1, R7 // i--
++ SUB $8, R1
++ BGE L7 // i >= 0
++
++ MOVD R10, r+64(FP)
++ RET
++
++// func bitLen(x Word) (n int)
++TEXT ·bitLen(SB),NOSPLIT,$0
++ MOVD x+0(FP), R2
++ WORD $0xb9830022 // FLOGR R2,R2
++ MOVD $64, R3
++ SUB R2, R3
++ MOVD R3, n+8(FP)
++ RET
+--- /dev/null
++++ b/src/math/dim_s390x.s
+@@ -0,0 +1,132 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// Based on dim_amd64.s
++
++#include "textflag.h"
++
++#define PosInf 0x7FF0000000000000
++#define NaN 0x7FF8000000000001
++#define NegInf 0xFFF0000000000000
++
++// func Dim(x, y float64) float64
++TEXT ·Dim(SB),NOSPLIT,$0
++ // (+Inf, +Inf) special case
++ MOVD x+0(FP), R2
++ MOVD y+8(FP), R3
++ MOVD $PosInf, R4
++ CMPUBNE R4, R2, dim2
++ CMPUBEQ R4, R3, bothInf
++dim2: // (-Inf, -Inf) special case
++ MOVD $NegInf, R4
++ CMPUBNE R4, R2, dim3
++ CMPUBEQ R4, R3, bothInf
++dim3: // (NaN, x) or (x, NaN)
++ MOVD $~(1<<63), R5
++ MOVD $PosInf, R4
++ AND R5, R2 // x = |x|
++ CMPUBLT R4, R2, isDimNaN
++ AND R5, R3 // y = |y|
++ CMPUBLT R4, R3, isDimNaN
++
++ FMOVD x+0(FP), F1
++ FMOVD y+8(FP), F2
++ FSUB F2, F1
++ FMOVD $(0.0), F2
++ FCMPU F2, F1
++ BGE +3(PC)
++ FMOVD F1, ret+16(FP)
++ RET
++ FMOVD F2, ret+16(FP)
++ RET
++bothInf: // Dim(-Inf, -Inf) or Dim(+Inf, +Inf)
++isDimNaN:
++ MOVD $NaN, R4
++ MOVD R4, ret+16(FP)
++ RET
++
++// func ·Max(x, y float64) float64
++TEXT ·Max(SB),NOSPLIT,$0
++ // +Inf special cases
++ MOVD $PosInf, R4
++ MOVD x+0(FP), R8
++ CMPUBEQ R4, R8, isPosInf
++ MOVD y+8(FP), R9
++ CMPUBEQ R4, R9, isPosInf
++ // NaN special cases
++ MOVD $~(1<<63), R5 // bit mask
++ MOVD $PosInf, R4
++ MOVD R8, R2
++ AND R5, R2 // x = |x|
++ CMPUBLT R4, R2, isMaxNaN
++ MOVD R9, R3
++ AND R5, R3 // y = |y|
++ CMPUBLT R4, R3, isMaxNaN
++ // ±0 special cases
++ OR R3, R2
++ BEQ isMaxZero
++
++ FMOVD x+0(FP), F1
++ FMOVD y+8(FP), F2
++ FCMPU F2, F1
++ BGT +3(PC)
++ FMOVD F1, ret+16(FP)
++ RET
++ FMOVD F2, ret+16(FP)
++ RET
++isMaxNaN: // return NaN
++ MOVD $NaN, R4
++isPosInf: // return +Inf
++ MOVD R4, ret+16(FP)
++ RET
++isMaxZero:
++ MOVD $(1<<63), R4 // -0.0
++ CMPUBEQ R4, R8, +3(PC)
++ MOVD R8, ret+16(FP) // return 0
++ RET
++ MOVD R9, ret+16(FP) // return other 0
++ RET
++
++// func Min(x, y float64) float64
++TEXT ·Min(SB),NOSPLIT,$0
++ // -Inf special cases
++ MOVD $NegInf, R4
++ MOVD x+0(FP), R8
++ CMPUBEQ R4, R8, isNegInf
++ MOVD y+8(FP), R9
++ CMPUBEQ R4, R9, isNegInf
++ // NaN special cases
++ MOVD $~(1<<63), R5
++ MOVD $PosInf, R4
++ MOVD R8, R2
++ AND R5, R2 // x = |x|
++ CMPUBLT R4, R2, isMinNaN
++ MOVD R9, R3
++ AND R5, R3 // y = |y|
++ CMPUBLT R4, R3, isMinNaN
++ // ±0 special cases
++ OR R3, R2
++ BEQ isMinZero
++
++ FMOVD x+0(FP), F1
++ FMOVD y+8(FP), F2
++ FCMPU F2, F1
++ BLT +3(PC)
++ FMOVD F1, ret+16(FP)
++ RET
++ FMOVD F2, ret+16(FP)
++ RET
++isMinNaN: // return NaN
++ MOVD $NaN, R4
++isNegInf: // return -Inf
++ MOVD R4, ret+16(FP)
++ RET
++isMinZero:
++ MOVD $(1<<63), R4 // -0.0
++ CMPUBEQ R4, R8, +3(PC)
++ MOVD R9, ret+16(FP) // return other 0
++ RET
++ MOVD R8, ret+16(FP) // return -0
++ RET
++
+--- /dev/null
++++ b/src/math/sqrt_s390x.s
+@@ -0,0 +1,12 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func Sqrt(x float64) float64
++TEXT ·Sqrt(SB),NOSPLIT,$0
++ FMOVD x+0(FP), F1
++ FSQRT F1, F1
++ FMOVD F1, ret+8(FP)
++ RET
+--- /dev/null
++++ b/src/math/stubs_s390x.s
+@@ -0,0 +1,77 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "../runtime/textflag.h"
++
++TEXT ·Asin(SB),NOSPLIT,$0
++ BR ·asin(SB)
++
++TEXT ·Acos(SB),NOSPLIT,$0
++ BR ·acos(SB)
++
++TEXT ·Atan2(SB),NOSPLIT,$0
++ BR ·atan2(SB)
++
++TEXT ·Atan(SB),NOSPLIT,$0
++ BR ·atan(SB)
++
++TEXT ·Exp2(SB),NOSPLIT,$0
++ BR ·exp2(SB)
++
++TEXT ·Expm1(SB),NOSPLIT,$0
++ BR ·expm1(SB)
++
++TEXT ·Exp(SB),NOSPLIT,$0
++ BR ·exp(SB)
++
++TEXT ·Floor(SB),NOSPLIT,$0
++ BR ·floor(SB)
++
++TEXT ·Ceil(SB),NOSPLIT,$0
++ BR ·ceil(SB)
++
++TEXT ·Trunc(SB),NOSPLIT,$0
++ BR ·trunc(SB)
++
++TEXT ·Frexp(SB),NOSPLIT,$0
++ BR ·frexp(SB)
++
++TEXT ·Hypot(SB),NOSPLIT,$0
++ BR ·hypot(SB)
++
++TEXT ·Ldexp(SB),NOSPLIT,$0
++ BR ·ldexp(SB)
++
++TEXT ·Log10(SB),NOSPLIT,$0
++ BR ·log10(SB)
++
++TEXT ·Log2(SB),NOSPLIT,$0
++ BR ·log2(SB)
++
++TEXT ·Log1p(SB),NOSPLIT,$0
++ BR ·log1p(SB)
++
++TEXT ·Log(SB),NOSPLIT,$0
++ BR ·log(SB)
++
++TEXT ·Modf(SB),NOSPLIT,$0
++ BR ·modf(SB)
++
++TEXT ·Mod(SB),NOSPLIT,$0
++ BR ·mod(SB)
++
++TEXT ·Remainder(SB),NOSPLIT,$0
++ BR ·remainder(SB)
++
++TEXT ·Sincos(SB),NOSPLIT,$0
++ BR ·sincos(SB)
++
++TEXT ·Sin(SB),NOSPLIT,$0
++ BR ·sin(SB)
++
++TEXT ·Cos(SB),NOSPLIT,$0
++ BR ·cos(SB)
++
++TEXT ·Tan(SB),NOSPLIT,$0
++ BR ·tan(SB)
+--- a/src/net/http/fs_test.go
++++ b/src/net/http/fs_test.go
+@@ -963,9 +963,9 @@
+
+ syscalls := "sendfile,sendfile64"
+ switch runtime.GOARCH {
+- case "mips64", "mips64le":
+- // mips64 strace doesn't support sendfile64 and will error out
+- // if we specify that with `-e trace='.
++ case "mips64", "mips64le", "s390x":
++ // strace on the above platforms doesn't support sendfile64
++ // and will error out if we specify that with `-e trace='.
+ syscalls = "sendfile"
+ }
+
+--- a/src/net/lookup_test.go
++++ b/src/net/lookup_test.go
+@@ -626,6 +626,11 @@
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
++ switch runtime.GOARCH {
++ case "s390x":
++ t.Skipf("services not all known on %s", runtime.GOARCH)
++ }
++
+ for _, tt := range lookupPortTests {
+ if port, err := LookupPort(tt.network, tt.name); port != tt.port || (err == nil) != tt.ok {
+ t.Errorf("LookupPort(%q, %q) = %d, %v; want %d", tt.network, tt.name, port, err, tt.port)
+--- /dev/null
++++ b/src/reflect/asm_s390x.s
+@@ -0,0 +1,30 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++#include "funcdata.h"
++
++// makeFuncStub is the code half of the function returned by MakeFunc.
++// See the comment on the declaration of makeFuncStub in makefunc.go
++// for more details.
++// No arg size here, runtime pulls arg map out of the func value.
++TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
++ NO_LOCAL_POINTERS
++ MOVD R12, 8(R15)
++ MOVD $argframe+0(FP), R3
++ MOVD R3, 16(R15)
++ BL ·callReflect(SB)
++ RET
++
++// methodValueCall is the code half of the function returned by makeMethodValue.
++// See the comment on the declaration of methodValueCall in makefunc.go
++// for more details.
++// No arg size here; runtime pulls arg map out of the func value.
++TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
++ NO_LOCAL_POINTERS
++ MOVD R12, 8(R15)
++ MOVD $argframe+0(FP), R3
++ MOVD R3, 16(R15)
++ BL ·callMethod(SB)
++ RET
+--- /dev/null
++++ b/src/runtime/asm_s390x.s
+@@ -0,0 +1,1129 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "go_asm.h"
++#include "go_tls.h"
++#include "funcdata.h"
++#include "textflag.h"
++
++// Indicate the status of vector facility
++// -1: init value
++// 0: vector not installed
++// 1: vector installed and enabled
++// 2: vector installed but not enabled
++
++DATA runtime·vectorfacility+0x00(SB)/4, $-1
++GLOBL runtime·vectorfacility(SB), NOPTR, $4
++
++TEXT runtime·checkvectorfacility(SB),NOSPLIT,$32-0
++ MOVD $2, R0
++ MOVD R1, tmp-32(SP)
++ MOVD $x-24(SP), R1
++// STFLE 0(R1)
++ WORD $0xB2B01000
++ MOVBZ z-8(SP), R1
++ AND $0x40, R1
++ BNE vectorinstalled
++ MOVB $0, runtime·vectorfacility(SB) //Vector not installed
++ MOVD tmp-32(SP), R1
++ MOVD $0, R0
++ RET
++vectorinstalled:
++ // check if the vector instruction has been enabled
++ VLEIB $0, $0xF, V16
++ VLGVB $0, V16, R0
++ CMPBEQ R0, $0xF, vectorenabled
++ MOVB $2, runtime·vectorfacility(SB) //Vector installed but not enabled
++ MOVD tmp-32(SP), R1
++ MOVD $0, R0
++ RET
++vectorenabled:
++ MOVB $1, runtime·vectorfacility(SB) //Vector installed and enabled
++ MOVD tmp-32(SP), R1
++ MOVD $0, R0
++ RET
++
++TEXT runtime·rt0_go(SB),NOSPLIT,$0
++ // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
++ // C TLS base pointer in AR0:AR1
++
++ // initialize essential registers
++ XOR R0, R0
++
++ SUB $24, R15
++ MOVW R2, 8(R15) // argc
++ MOVD R3, 16(R15) // argv
++
++ // create istack out of the given (operating system) stack.
++ // _cgo_init may update stackguard.
++ MOVD $runtime·g0(SB), g
++ MOVD R15, R11
++ SUB $(64*1024), R11
++ MOVD R11, g_stackguard0(g)
++ MOVD R11, g_stackguard1(g)
++ MOVD R11, (g_stack+stack_lo)(g)
++ MOVD R15, (g_stack+stack_hi)(g)
++
++ // if there is a _cgo_init, call it using the gcc ABI.
++ MOVD _cgo_init(SB), R11
++ CMPBEQ R11, $0, nocgo
++ MOVW AR0, R4 // (AR0 << 32 | AR1) is the TLS base pointer; MOVD is translated to EAR
++ SLD $32, R4, R4
++ MOVW AR1, R4 // arg 2: TLS base pointer
++ MOVD $setg_gcc<>(SB), R3 // arg 1: setg
++ MOVD g, R2 // arg 0: G
++ // C functions expect 160 bytes of space on caller stack frame
++ // and an 8-byte aligned stack pointer
++ MOVD R15, R9 // save current stack (R9 is preserved in the Linux ABI)
++ SUB $160, R15 // reserve 160 bytes
++ MOVD $~7, R6
++ AND R6, R15 // 8-byte align
++ BL R11 // this call clobbers volatile registers according to Linux ABI (R0-R5, R14)
++ MOVD R9, R15 // restore stack
++ XOR R0, R0 // zero R0
++
++nocgo:
++ // update stackguard after _cgo_init
++ MOVD (g_stack+stack_lo)(g), R2
++ ADD $const__StackGuard, R2
++ MOVD R2, g_stackguard0(g)
++ MOVD R2, g_stackguard1(g)
++
++ // set the per-goroutine and per-mach "registers"
++ MOVD $runtime·m0(SB), R2
++
++ // save m->g0 = g0
++ MOVD g, m_g0(R2)
++ // save m0 to g0->m
++ MOVD R2, g_m(g)
++
++ BL runtime·check(SB)
++
++ // argc/argv are already prepared on stack
++ BL runtime·args(SB)
++ BL runtime·osinit(SB)
++ BL runtime·schedinit(SB)
++
++ // create a new goroutine to start program
++ MOVD $runtime·mainPC(SB), R2 // entry
++ SUB $24, R15
++ MOVD R2, 16(R15)
++ MOVD R0, 8(R15)
++ MOVD R0, 0(R15)
++ BL runtime·newproc(SB)
++ ADD $24, R15
++
++ // start this M
++ BL runtime·mstart(SB)
++
++ MOVD R0, 1(R0)
++ RET
++
++DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
++GLOBL runtime·mainPC(SB),RODATA,$8
++
++TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
++ MOVD R0, 2(R0)
++ RET
++
++TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
++ RET
++
++/*
++ * go-routine
++ */
++
++// void gosave(Gobuf*)
++// save state in Gobuf; setjmp
++TEXT runtime·gosave(SB), NOSPLIT, $-8-8
++ MOVD buf+0(FP), R3
++ MOVD R15, gobuf_sp(R3)
++ MOVD LR, gobuf_pc(R3)
++ MOVD g, gobuf_g(R3)
++ MOVD $0, gobuf_lr(R3)
++ MOVD $0, gobuf_ret(R3)
++ MOVD $0, gobuf_ctxt(R3)
++ RET
++
++// void gogo(Gobuf*)
++// restore state from Gobuf; longjmp
++TEXT runtime·gogo(SB), NOSPLIT, $-8-8
++ MOVD buf+0(FP), R5
++ MOVD gobuf_g(R5), g // make sure g is not nil
++ BL runtime·save_g(SB)
++
++ MOVD 0(g), R4
++ MOVD gobuf_sp(R5), R15
++ MOVD gobuf_lr(R5), LR
++ MOVD gobuf_ret(R5), R3
++ MOVD gobuf_ctxt(R5), R12
++ MOVD $0, gobuf_sp(R5)
++ MOVD $0, gobuf_ret(R5)
++ MOVD $0, gobuf_lr(R5)
++ MOVD $0, gobuf_ctxt(R5)
++ CMP R0, R0 // set condition codes for == test, needed by stack split
++ MOVD gobuf_pc(R5), R6
++ BR (R6)
++
++// void mcall(fn func(*g))
++// Switch to m->g0's stack, call fn(g).
++// Fn must never return. It should gogo(&g->sched)
++// to keep running g.
++TEXT runtime·mcall(SB), NOSPLIT, $-8-8
++ // Save caller state in g->sched
++ MOVD R15, (g_sched+gobuf_sp)(g)
++ MOVD LR, (g_sched+gobuf_pc)(g)
++ MOVD R0, (g_sched+gobuf_lr)(g)
++ MOVD g, (g_sched+gobuf_g)(g)
++
++ // Switch to m->g0 & its stack, call fn.
++ MOVD g, R3
++ MOVD g_m(g), R8
++ MOVD m_g0(R8), g
++ BL runtime·save_g(SB)
++ CMP g, R3
++ BNE 2(PC)
++ BR runtime·badmcall(SB)
++ MOVD fn+0(FP), R12 // context
++ MOVD 0(R12), R4 // code pointer
++ MOVD (g_sched+gobuf_sp)(g), R15 // sp = m->g0->sched.sp
++ SUB $16, R15
++ MOVD R3, 8(R15)
++ MOVD $0, 0(R15)
++ BL (R4)
++ BR runtime·badmcall2(SB)
++
++// systemstack_switch is a dummy routine that systemstack leaves at the bottom
++// of the G stack. We need to distinguish the routine that
++// lives at the bottom of the G stack from the one that lives
++// at the top of the system stack because the one at the top of
++// the system stack terminates the stack walk (see topofstack()).
++TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
++ UNDEF
++ BL (LR) // make sure this function is not leaf
++ RET
++
++// func systemstack(fn func())
++TEXT runtime·systemstack(SB), NOSPLIT, $0-8
++ MOVD fn+0(FP), R3 // R3 = fn
++ MOVD R3, R12 // context
++ MOVD g_m(g), R4 // R4 = m
++
++ MOVD m_gsignal(R4), R5 // R5 = gsignal
++ CMPBEQ g, R5, noswitch
++
++ MOVD m_g0(R4), R5 // R5 = g0
++ CMPBEQ g, R5, noswitch
++
++ MOVD m_curg(R4), R6
++ CMPBEQ g, R6, switch
++
++ // Bad: g is not gsignal, not g0, not curg. What is it?
++ // Hide call from linker nosplit analysis.
++ MOVD $runtime·badsystemstack(SB), R3
++ BL (R3)
++
++switch:
++ // save our state in g->sched. Pretend to
++ // be systemstack_switch if the G stack is scanned.
++ MOVD $runtime·systemstack_switch(SB), R6
++ ADD $16, R6 // get past prologue
++ MOVD R6, (g_sched+gobuf_pc)(g)
++ MOVD R15, (g_sched+gobuf_sp)(g)
++ MOVD R0, (g_sched+gobuf_lr)(g)
++ MOVD g, (g_sched+gobuf_g)(g)
++
++ // switch to g0
++ MOVD R5, g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R3
++ // make it look like mstart called systemstack on g0, to stop traceback
++ SUB $8, R3
++ MOVD $runtime·mstart(SB), R4
++ MOVD R4, 0(R3)
++ MOVD R3, R15
++
++ // call target function
++ MOVD 0(R12), R3 // code pointer
++ BL (R3)
++
++ // switch back to g
++ MOVD g_m(g), R3
++ MOVD m_curg(R3), g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R15
++ MOVD $0, (g_sched+gobuf_sp)(g)
++ RET
++
++noswitch:
++ // already on m stack, just call directly
++ MOVD 0(R12), R3 // code pointer
++ BL (R3)
++ RET
++
++/*
++ * support for morestack
++ */
++
++// Called during function prolog when more stack is needed.
++// Caller has already loaded:
++// R3: framesize, R4: argsize, R5: LR
++//
++// The traceback routines see morestack on a g0 as being
++// the top of a stack (for example, morestack calling newstack
++// calling the scheduler calling newm calling gc), so we must
++// record an argument size. For that purpose, it has no arguments.
++TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
++ // Cannot grow scheduler stack (m->g0).
++ MOVD g_m(g), R7
++ MOVD m_g0(R7), R8
++ CMPBNE g, R8, 2(PC)
++ BL runtime·abort(SB)
++
++ // Cannot grow signal stack (m->gsignal).
++ MOVD m_gsignal(R7), R8
++ CMP g, R8
++ BNE 2(PC)
++ BL runtime·abort(SB)
++
++ // Called from f.
++ // Set g->sched to context in f.
++ MOVD R12, (g_sched+gobuf_ctxt)(g)
++ MOVD R15, (g_sched+gobuf_sp)(g)
++ MOVD LR, R8
++ MOVD R8, (g_sched+gobuf_pc)(g)
++ MOVD R5, (g_sched+gobuf_lr)(g)
++
++ // Called from f.
++ // Set m->morebuf to f's caller.
++ MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
++ MOVD R15, (m_morebuf+gobuf_sp)(R7) // f's caller's SP
++ MOVD g, (m_morebuf+gobuf_g)(R7)
++
++ // Call newstack on m->g0's stack.
++ MOVD m_g0(R7), g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R15
++ BL runtime·newstack(SB)
++
++ // Not reached, but make sure the return PC from the call to newstack
++ // is still in this function, and not the beginning of the next.
++ UNDEF
++
++TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
++ MOVD $0, R12
++ BR runtime·morestack(SB)
++
++TEXT runtime·stackBarrier(SB),NOSPLIT,$0
++ // We came here via a RET to an overwritten LR.
++ // R3 may be live. Other registers are available.
++
++ // Get the original return PC, g.stkbar[g.stkbarPos].savedLRVal.
++ MOVD (g_stkbar+slice_array)(g), R4
++ MOVD g_stkbarPos(g), R5
++ MOVD $stkbar__size, R6
++ MULLD R5, R6
++ ADD R4, R6
++ MOVD stkbar_savedLRVal(R6), R6
++ // Record that this stack barrier was hit.
++ ADD $1, R5
++ MOVD R5, g_stkbarPos(g)
++ // Jump to the original return PC.
++ BR (R6)
++
++// reflectcall: call a function with the given argument list
++// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
++// we don't have variable-sized frames, so we use a small number
++// of constant-sized-frame functions to encode a few bits of size in the pc.
++// Caution: ugly multiline assembly macros in your future!
++
++#define DISPATCH(NAME,MAXSIZE) \
++ MOVD $MAXSIZE, R4; \
++ CMP R3, R4; \
++ BGT 3(PC); \
++ MOVD $NAME(SB), R5; \
++ BR (R5)
++// Note: can't just "BR NAME(SB)" - bad inlining results.
++
++TEXT reflect·call(SB), NOSPLIT, $0-0
++ BR ·reflectcall(SB)
++
++TEXT ·reflectcall(SB), NOSPLIT, $-8-32
++ MOVWZ argsize+24(FP), R3
++ // NOTE(rsc): No call16, because CALLFN needs four words
++ // of argument space to invoke callwritebarrier.
++ DISPATCH(runtime·call32, 32)
++ DISPATCH(runtime·call64, 64)
++ DISPATCH(runtime·call128, 128)
++ DISPATCH(runtime·call256, 256)
++ DISPATCH(runtime·call512, 512)
++ DISPATCH(runtime·call1024, 1024)
++ DISPATCH(runtime·call2048, 2048)
++ DISPATCH(runtime·call4096, 4096)
++ DISPATCH(runtime·call8192, 8192)
++ DISPATCH(runtime·call16384, 16384)
++ DISPATCH(runtime·call32768, 32768)
++ DISPATCH(runtime·call65536, 65536)
++ DISPATCH(runtime·call131072, 131072)
++ DISPATCH(runtime·call262144, 262144)
++ DISPATCH(runtime·call524288, 524288)
++ DISPATCH(runtime·call1048576, 1048576)
++ DISPATCH(runtime·call2097152, 2097152)
++ DISPATCH(runtime·call4194304, 4194304)
++ DISPATCH(runtime·call8388608, 8388608)
++ DISPATCH(runtime·call16777216, 16777216)
++ DISPATCH(runtime·call33554432, 33554432)
++ DISPATCH(runtime·call67108864, 67108864)
++ DISPATCH(runtime·call134217728, 134217728)
++ DISPATCH(runtime·call268435456, 268435456)
++ DISPATCH(runtime·call536870912, 536870912)
++ DISPATCH(runtime·call1073741824, 1073741824)
++ MOVD $runtime·badreflectcall(SB), R5
++ BR (R5)
++
++#define CALLFN(NAME,MAXSIZE) \
++TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
++ NO_LOCAL_POINTERS; \
++ /* copy arguments to stack */ \
++ MOVD arg+16(FP), R3; \
++ MOVWZ argsize+24(FP), R4; \
++ MOVD R15, R5; \
++ ADD $(8-1), R5; \
++ SUB $1, R3; \
++ ADD R5, R4; \
++ CMP R5, R4; \
++ BEQ 6(PC); \
++ ADD $1, R3; \
++ ADD $1, R5; \
++ MOVBZ 0(R3), R6; \
++ MOVBZ R6, 0(R5); \
++ BR -6(PC); \
++ /* call function */ \
++ MOVD f+8(FP), R12; \
++ MOVD (R12), R8; \
++ PCDATA $PCDATA_StackMapIndex, $0; \
++ BL (R8); \
++ /* copy return values back */ \
++ MOVD arg+16(FP), R3; \
++ MOVWZ n+24(FP), R4; \
++ MOVWZ retoffset+28(FP), R6; \
++ MOVD R15, R5; \
++ ADD R6, R5; \
++ ADD R6, R3; \
++ SUB R6, R4; \
++ ADD $(8-1), R5; \
++ SUB $1, R3; \
++ ADD R5, R4; \
++loop: \
++ CMP R5, R4; \
++ BEQ end; \
++ ADD $1, R5; \
++ ADD $1, R3; \
++ MOVBZ 0(R5), R6; \
++ MOVBZ R6, 0(R3); \
++ BR loop; \
++end: \
++ /* execute write barrier updates */ \
++ MOVD argtype+0(FP), R7; \
++ MOVD arg+16(FP), R3; \
++ MOVWZ n+24(FP), R4; \
++ MOVWZ retoffset+28(FP), R6; \
++ MOVD R7, 8(R15); \
++ MOVD R3, 16(R15); \
++ MOVD R4, 24(R15); \
++ MOVD R6, 32(R15); \
++ BL runtime·callwritebarrier(SB); \
++ RET
++
++CALLFN(·call32, 32)
++CALLFN(·call64, 64)
++CALLFN(·call128, 128)
++CALLFN(·call256, 256)
++CALLFN(·call512, 512)
++CALLFN(·call1024, 1024)
++CALLFN(·call2048, 2048)
++CALLFN(·call4096, 4096)
++CALLFN(·call8192, 8192)
++CALLFN(·call16384, 16384)
++CALLFN(·call32768, 32768)
++CALLFN(·call65536, 65536)
++CALLFN(·call131072, 131072)
++CALLFN(·call262144, 262144)
++CALLFN(·call524288, 524288)
++CALLFN(·call1048576, 1048576)
++CALLFN(·call2097152, 2097152)
++CALLFN(·call4194304, 4194304)
++CALLFN(·call8388608, 8388608)
++CALLFN(·call16777216, 16777216)
++CALLFN(·call33554432, 33554432)
++CALLFN(·call67108864, 67108864)
++CALLFN(·call134217728, 134217728)
++CALLFN(·call268435456, 268435456)
++CALLFN(·call536870912, 536870912)
++CALLFN(·call1073741824, 1073741824)
++
++TEXT runtime·procyield(SB),NOSPLIT,$0-0
++ RET
++
++// void jmpdefer(fv, sp);
++// called from deferreturn.
++// 1. grab stored LR for caller
++// 2. sub 6 bytes to get back to BL deferreturn (size of BRASL instruction)
++// 3. BR to fn
++TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16
++ MOVD 0(R15), R1
++ SUB $6, R1, LR
++
++ MOVD fv+0(FP), R12
++ MOVD argp+8(FP), R15
++ SUB $8, R15
++ MOVD 0(R12), R3
++ BR (R3)
++
++// Save state of caller into g->sched. Smashes R31.
++TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
++ MOVD LR, (g_sched+gobuf_pc)(g)
++ MOVD R15, (g_sched+gobuf_sp)(g)
++ MOVD $0, (g_sched+gobuf_lr)(g)
++ MOVD $0, (g_sched+gobuf_ret)(g)
++ MOVD $0, (g_sched+gobuf_ctxt)(g)
++ RET
++
++// func asmcgocall(fn, arg unsafe.Pointer) int32
++// Call fn(arg) on the scheduler stack,
++// aligned appropriately for the gcc ABI.
++// See cgocall.go for more details.
++TEXT ·asmcgocall(SB),NOSPLIT,$0-20
++ // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
++ // C TLS base pointer in AR0:AR1
++ MOVD fn+0(FP), R3
++ MOVD arg+8(FP), R4
++
++ MOVD R15, R2 // save original stack pointer
++ MOVD g, R5
++
++ // Figure out if we need to switch to m->g0 stack.
++ // We get called to create new OS threads too, and those
++ // come in on the m->g0 stack already.
++ MOVD g_m(g), R6
++ MOVD m_g0(R6), R6
++ CMPBEQ R6, g, g0
++ BL gosave<>(SB)
++ MOVD R6, g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R15
++
++ // Now on a scheduling stack (a pthread-created stack).
++g0:
++ // Save room for two of our pointers, plus 160 bytes of callee
++ // save area that lives on the caller stack.
++ SUB $176, R15
++ MOVD $~7, R6
++ AND R6, R15 // 8-byte alignment for gcc ABI
++ MOVD R5, 168(R15) // save old g on stack
++ MOVD (g_stack+stack_hi)(R5), R5
++ SUB R2, R5
++ MOVD R5, 160(R15) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
++ MOVD R0, 0(R15) // clear back chain pointer (TODO can we give it real back trace information?)
++ MOVD R4, R2 // arg in R2
++ BL R3 // can clobber: R0-R5, R14, F0-F3, F5, F7-F15
++
++ XOR R0, R0 // set R0 back to 0.
++ // Restore g, stack pointer.
++ MOVD 168(R15), g
++ BL runtime·save_g(SB)
++ MOVD (g_stack+stack_hi)(g), R5
++ MOVD 160(R15), R6
++ SUB R6, R5
++ MOVD R5, R15
++
++ MOVW R2, ret+16(FP)
++ RET
++
++// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
++// Turn the fn into a Go func (by taking its address) and call
++// cgocallback_gofunc.
++TEXT runtime·cgocallback(SB),NOSPLIT,$24-24
++ MOVD $fn+0(FP), R3
++ MOVD R3, 8(R15)
++ MOVD frame+8(FP), R3
++ MOVD R3, 16(R15)
++ MOVD framesize+16(FP), R3
++ MOVD R3, 24(R15)
++ MOVD $runtime·cgocallback_gofunc(SB), R3
++ BL (R3)
++ RET
++
++// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
++// See cgocall.go for more details.
++TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-24
++ NO_LOCAL_POINTERS
++
++ // Load m and g from thread-local storage.
++ MOVB runtime·iscgo(SB), R3
++ CMPBEQ R3, $0, nocgo
++ BL runtime·load_g(SB)
++
++nocgo:
++ // If g is nil, Go did not create the current thread.
++ // Call needm to obtain one for temporary use.
++ // In this case, we're running on the thread stack, so there's
++ // lots of space, but the linker doesn't know. Hide the call from
++ // the linker analysis by using an indirect call.
++ CMPBEQ g, $0, needm
++
++ MOVD g_m(g), R8
++ MOVD R8, savedm-8(SP)
++ BR havem
++
++needm:
++ MOVD g, savedm-8(SP) // g is zero, so is m.
++ MOVD $runtime·needm(SB), R3
++ BL (R3)
++
++ // Set m->sched.sp = SP, so that if a panic happens
++ // during the function we are about to execute, it will
++ // have a valid SP to run on the g0 stack.
++ // The next few lines (after the havem label)
++ // will save this SP onto the stack and then write
++ // the same SP back to m->sched.sp. That seems redundant,
++ // but if an unrecovered panic happens, unwindm will
++ // restore the g->sched.sp from the stack location
++ // and then systemstack will try to use it. If we don't set it here,
++ // that restored SP will be uninitialized (typically 0) and
++ // will not be usable.
++ MOVD g_m(g), R8
++ MOVD m_g0(R8), R3
++ MOVD R15, (g_sched+gobuf_sp)(R3)
++
++havem:
++ // Now there's a valid m, and we're running on its m->g0.
++ // Save current m->g0->sched.sp on stack and then set it to SP.
++ // Save current sp in m->g0->sched.sp in preparation for
++ // switch back to m->curg stack.
++ // NOTE: unwindm knows that the saved g->sched.sp is at 8(R1) aka savedsp-16(SP).
++ MOVD m_g0(R8), R3
++ MOVD (g_sched+gobuf_sp)(R3), R4
++ MOVD R4, savedsp-16(SP)
++ MOVD R15, (g_sched+gobuf_sp)(R3)
++
++ // Switch to m->curg stack and call runtime.cgocallbackg.
++ // Because we are taking over the execution of m->curg
++ // but *not* resuming what had been running, we need to
++ // save that information (m->curg->sched) so we can restore it.
++ // We can restore m->curg->sched.sp easily, because calling
++ // runtime.cgocallbackg leaves SP unchanged upon return.
++ // To save m->curg->sched.pc, we push it onto the stack.
++ // This has the added benefit that it looks to the traceback
++ // routine like cgocallbackg is going to return to that
++ // PC (because the frame we allocate below has the same
++ // size as cgocallback_gofunc's frame declared above)
++ // so that the traceback will seamlessly trace back into
++ // the earlier calls.
++ //
++ // In the new goroutine, -16(SP) and -8(SP) are unused.
++ MOVD m_curg(R8), g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4
++ MOVD (g_sched+gobuf_pc)(g), R5
++ MOVD R5, -24(R4)
++ MOVD $-24(R4), R15
++ BL runtime·cgocallbackg(SB)
++
++ // Restore g->sched (== m->curg->sched) from saved values.
++ MOVD 0(R15), R5
++ MOVD R5, (g_sched+gobuf_pc)(g)
++ MOVD $24(R15), R4
++ MOVD R4, (g_sched+gobuf_sp)(g)
++
++ // Switch back to m->g0's stack and restore m->g0->sched.sp.
++ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
++ // so we do not have to restore it.)
++ MOVD g_m(g), R8
++ MOVD m_g0(R8), g
++ BL runtime·save_g(SB)
++ MOVD (g_sched+gobuf_sp)(g), R15
++ MOVD savedsp-16(SP), R4
++ MOVD R4, (g_sched+gobuf_sp)(g)
++
++ // If the m on entry was nil, we called needm above to borrow an m
++ // for the duration of the call. Since the call is over, return it with dropm.
++ MOVD savedm-8(SP), R6
++ CMPBNE R6, $0, droppedm
++ MOVD $runtime·dropm(SB), R3
++ BL (R3)
++droppedm:
++
++ // Done!
++ RET
++
++// void setg(G*); set g. for use by needm.
++TEXT runtime·setg(SB), NOSPLIT, $0-8
++ MOVD gg+0(FP), g
++ // This only happens if iscgo, so jump straight to save_g
++ BL runtime·save_g(SB)
++ RET
++
++// void setg_gcc(G*); set g in C TLS.
++// Must obey the gcc calling convention.
++TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0
++ // The standard prologue clobbers LR (R14), which is callee-save in
++ // the C ABI, so we have to use NOFRAME and save LR ourselves.
++ MOVD LR, R1
++ // Also save g, R10, and R11 since they're callee-save in C ABI
++ MOVD R10, R3
++ MOVD g, R4
++ MOVD R11, R5
++
++ MOVD R2, g
++ BL runtime·save_g(SB)
++
++ MOVD R5, R11
++ MOVD R4, g
++ MOVD R3, R10
++ MOVD R1, LR
++ RET
++
++TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
++ MOVD 16(R15), R3 // LR saved by caller
++ MOVD runtime·stackBarrierPC(SB), R4
++ CMPBNE R3, R4, nobar
++ // Get original return PC.
++ BL runtime·nextBarrierPC(SB)
++ MOVD 8(R15), R3
++nobar:
++ MOVD R3, ret+8(FP)
++ RET
++
++TEXT runtime·setcallerpc(SB),NOSPLIT,$8-16
++ MOVD pc+8(FP), R3
++ MOVD 16(R15), R4
++ MOVD runtime·stackBarrierPC(SB), R5
++ CMPBEQ R4, R5, setbar
++ MOVD R3, 16(R15) // set LR in caller
++ RET
++setbar:
++ // Set the stack barrier return PC.
++ MOVD R3, 8(R15)
++ BL runtime·setNextBarrierPC(SB)
++ RET
++
++TEXT runtime·getcallersp(SB),NOSPLIT,$0-16
++ MOVD argp+0(FP), R3
++ SUB $8, R3
++ MOVD R3, ret+8(FP)
++ RET
++
++TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW (R0), R0
++ UNDEF
++
++// int64 runtime·cputicks(void)
++TEXT runtime·cputicks(SB),NOSPLIT,$0-8
++ // The TOD clock on s390 counts from the year 1900 in ~250ps intervals.
++ // This means that since about 1972 the msb has been set, making the
++ // result of a call to STORE CLOCK (stck) a negative number.
++ // We clear the msb to make it positive.
++ STCK ret+0(FP) // serialises before and after call
++ MOVD ret+0(FP), R3 // R3 will wrap to 0 in the year 2043
++ SLD $1, R3
++ SRD $1, R3
++ MOVD R3, ret+0(FP)
++ RET
++
++// memhash_varlen(p unsafe.Pointer, h seed) uintptr
++// redirects to memhash(p, h, size) using the size
++// stored in the closure.
++TEXT runtime·memhash_varlen(SB),NOSPLIT,$40-24
++ GO_ARGS
++ NO_LOCAL_POINTERS
++ MOVD p+0(FP), R3
++ MOVD h+8(FP), R4
++ MOVD 8(R12), R5
++ MOVD R3, 8(R15)
++ MOVD R4, 16(R15)
++ MOVD R5, 24(R15)
++ BL runtime·memhash(SB)
++ MOVD 32(R15), R3
++ MOVD R3, ret+16(FP)
++ RET
++
++// AES hashing not implemented for s390x
++TEXT runtime·aeshash(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW (R0), R15
++TEXT runtime·aeshash32(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW (R0), R15
++TEXT runtime·aeshash64(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW (R0), R15
++TEXT runtime·aeshashstr(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW (R0), R15
++
++TEXT runtime·memeq(SB),NOSPLIT|NOFRAME,$0-25
++ MOVD a+0(FP), R3
++ MOVD b+8(FP), R5
++ MOVD size+16(FP), R6
++ LA ret+24(FP), R7
++ BR runtime·memeqbody(SB)
++
++// memequal_varlen(a, b unsafe.Pointer) bool
++TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-17
++ MOVD a+0(FP), R3
++ MOVD b+8(FP), R5
++ MOVD 8(R12), R6 // compiler stores size at offset 8 in the closure
++ LA ret+16(FP), R7
++ BR runtime·memeqbody(SB)
++
++// eqstring tests whether two strings are equal.
++// The compiler guarantees that strings passed
++// to eqstring have equal length.
++// See runtime_test.go:eqstring_generic for
++// equivalent Go code.
++TEXT runtime·eqstring(SB),NOSPLIT|NOFRAME,$0-33
++ MOVD s1str+0(FP), R3
++ MOVD s1len+8(FP), R6
++ MOVD s2str+16(FP), R5
++ LA ret+32(FP), R7
++ BR runtime·memeqbody(SB)
++
++TEXT bytes·Equal(SB),NOSPLIT|NOFRAME,$0-49
++ MOVD a_len+8(FP), R2
++ MOVD b_len+32(FP), R6
++ MOVD a+0(FP), R3
++ MOVD b+24(FP), R5
++ LA ret+48(FP), R7
++ CMPBNE R2, R6, notequal
++ BR runtime·memeqbody(SB)
++notequal:
++ MOVB $0, ret+48(FP)
++ RET
++
++// input:
++// R3 = a
++// R5 = b
++// R6 = len
++// R7 = address of output byte (stores 0 or 1 here)
++// a and b have the same length
++TEXT runtime·memeqbody(SB),NOSPLIT|NOFRAME,$0-0
++ CMPBEQ R3, R5, equal
++loop:
++ CMPBEQ R6, $0, equal
++ CMPBLT R6, $32, tiny
++ CMP R6, $256
++ BLT tail
++ CLC $256, 0(R3), 0(R5)
++ BNE notequal
++ SUB $256, R6
++ LA 256(R3), R3
++ LA 256(R5), R5
++ BR loop
++tail:
++ SUB $1, R6, R8
++ EXRL $runtime·memeqbodyclc(SB), R8
++ BEQ equal
++notequal:
++ MOVB $0, 0(R7)
++ RET
++equal:
++ MOVB $1, 0(R7)
++ RET
++tiny:
++ MOVD $0, R2
++ CMPBLT R6, $16, lt16
++ MOVD 0(R3), R8
++ MOVD 0(R5), R9
++ CMPBNE R8, R9, notequal
++ MOVD 8(R3), R8
++ MOVD 8(R5), R9
++ CMPBNE R8, R9, notequal
++ LA 16(R2), R2
++ SUB $16, R6
++lt16:
++ CMPBLT R6, $8, lt8
++ MOVD 0(R3)(R2*1), R8
++ MOVD 0(R5)(R2*1), R9
++ CMPBNE R8, R9, notequal
++ LA 8(R2), R2
++ SUB $8, R6
++lt8:
++ CMPBLT R6, $4, lt4
++ MOVWZ 0(R3)(R2*1), R8
++ MOVWZ 0(R5)(R2*1), R9
++ CMPBNE R8, R9, notequal
++ LA 4(R2), R2
++ SUB $4, R6
++lt4:
++#define CHECK(n) \
++ CMPBEQ R6, $n, equal \
++ MOVB n(R3)(R2*1), R8 \
++ MOVB n(R5)(R2*1), R9 \
++ CMPBNE R8, R9, notequal
++ CHECK(0)
++ CHECK(1)
++ CHECK(2)
++ CHECK(3)
++ BR equal
++
++TEXT runtime·memeqbodyclc(SB),NOSPLIT|NOFRAME,$0-0
++ CLC $1, 0(R3), 0(R5)
++ RET
++
++TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
++ MOVD g_m(g), R4
++ MOVWZ m_fastrand(R4), R3
++ ADD R3, R3
++ CMPW R3, $0
++ BGE 2(PC)
++ XOR $0x88888eef, R3
++ MOVW R3, m_fastrand(R4)
++ MOVW R3, ret+0(FP)
++ RET
++
++TEXT bytes·IndexByte(SB),NOSPLIT,$0-40
++ MOVD s+0(FP), R3 // s => R3
++ MOVD s_len+8(FP), R4 // s_len => R4
++ MOVBZ c+24(FP), R5 // c => R5
++ MOVD $ret+32(FP), R2 // &ret => R9
++ BR runtime·indexbytebody(SB)
++
++TEXT strings·IndexByte(SB),NOSPLIT,$0-32
++ MOVD s+0(FP), R3 // s => R3
++ MOVD s_len+8(FP), R4 // s_len => R4
++ MOVBZ c+16(FP), R5 // c => R5
++ MOVD $ret+24(FP), R2 // &ret => R9
++ BR runtime·indexbytebody(SB)
++
++// input:
++// R3: s
++// R4: s_len
++// R5: c -- byte sought
++// R2: &ret -- address to put index into
++TEXT runtime·indexbytebody(SB),NOSPLIT,$0
++ CMPBEQ R4, $0, notfound
++ MOVD R3, R6 // store base for later
++ ADD R3, R4, R8 // the address after the end of the string
++ //if the length is small, use loop; otherwise, use vector or srst search
++ CMPBGE R4, $16, large
++
++residual:
++ CMPBEQ R3, R8, notfound
++ MOVBZ 0(R3), R7
++ LA 1(R3), R3
++ CMPBNE R7, R5, residual
++
++found:
++ SUB R6, R3
++ SUB $1, R3
++ MOVD R3, 0(R2)
++ RET
++
++notfound:
++ MOVD $-1, 0(R2)
++ RET
++
++large:
++ MOVB runtime·vectorfacility(SB), R1
++ CMPBEQ R1, $-1, checkvector // vectorfacility = -1, vector not checked yet
++vectorchecked:
++ CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported
++
++srstimpl: // vectorfacility != 1, not support or enable vector
++ MOVBZ R5, R0 // c needs to be in R0, leave until last minute as currently R0 is expected to be 0
++srstloop:
++ WORD $0xB25E0083 // srst %r8, %r3 (search the range [R3, R8))
++ BVS srstloop // interrupted - continue
++ BGT notfoundr0
++foundr0:
++ XOR R0, R0 // reset R0
++ SUB R6, R8 // remove base
++ MOVD R8, 0(R2)
++ RET
++notfoundr0:
++ XOR R0, R0 // reset R0
++ MOVD $-1, 0(R2)
++ RET
++
++vectorimpl:
++ //if the address is not 16byte aligned, use loop for the header
++ AND $15, R3, R8
++ CMPBGT R8, $0, notaligned
++
++aligned:
++ ADD R6, R4, R8
++ AND $-16, R8, R7
++ // replicate c across V17
++ VLVGB $0, R5, V19
++ VREPB $0, V19, V17
++
++vectorloop:
++ CMPBGE R3, R7, residual
++ VL 0(R3), V16 // load string to be searched into V16
++ ADD $16, R3
++ VFEEBS V16, V17, V18 // search V17 in V16 and set conditional code accordingly
++ BVS vectorloop
++
++ // when vector search found c in the string
++ VLGVB $7, V18, R7 // load 7th element of V18 containing index into R7
++ SUB $16, R3
++ SUB R6, R3
++ ADD R3, R7
++ MOVD R7, 0(R2)
++ RET
++
++notaligned:
++ AND $-16, R3, R8
++ ADD $16, R8
++notalignedloop:
++ CMPBEQ R3, R8, aligned
++ MOVBZ 0(R3), R7
++ LA 1(R3), R3
++ CMPBNE R7, R5, notalignedloop
++ BR found
++
++checkvector:
++ CALL runtime·checkvectorfacility(SB)
++ MOVB runtime·vectorfacility(SB), R1
++ BR vectorchecked
++
++TEXT runtime·return0(SB), NOSPLIT, $0
++ MOVW $0, R3
++ RET
++
++// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
++// Must obey the gcc calling convention.
++TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
++ // g (R13), R10, R11 and LR (R14) are callee-save in the C ABI, so save them
++ MOVD g, R1
++ MOVD R10, R3
++ MOVD LR, R4
++ MOVD R11, R5
++
++ BL runtime·load_g(SB) // clobbers g (R13), R10, R11
++ MOVD g_m(g), R2
++ MOVD m_curg(R2), R2
++ MOVD (g_stack+stack_hi)(R2), R2
++
++ MOVD R1, g
++ MOVD R3, R10
++ MOVD R4, LR
++ MOVD R5, R11
++ RET
++
++// The top-most function running on a goroutine
++// returns to goexit+PCQuantum.
++TEXT runtime·goexit(SB),NOSPLIT|NOFRAME,$0-0
++ BYTE $0x07; BYTE $0x00; // 2-byte nop
++ BL runtime·goexit1(SB) // does not return
++ // traceback from goexit1 must hit code range of goexit
++ BYTE $0x07; BYTE $0x00; // 2-byte nop
++
++TEXT runtime·prefetcht0(SB),NOSPLIT,$0-8
++ RET
++
++TEXT runtime·prefetcht1(SB),NOSPLIT,$0-8
++ RET
++
++TEXT runtime·prefetcht2(SB),NOSPLIT,$0-8
++ RET
++
++TEXT runtime·prefetchnta(SB),NOSPLIT,$0-8
++ RET
++
++TEXT runtime·sigreturn(SB),NOSPLIT,$0-8
++ RET
++
++TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
++ SYNC
++ RET
++
++TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
++ MOVD s1_base+0(FP), R3
++ MOVD s1_len+8(FP), R4
++ MOVD s2_base+16(FP), R5
++ MOVD s2_len+24(FP), R6
++ LA ret+32(FP), R7
++ BR runtime·cmpbody(SB)
++
++TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-56
++ MOVD s1+0(FP), R3
++ MOVD s1+8(FP), R4
++ MOVD s2+24(FP), R5
++ MOVD s2+32(FP), R6
++ LA res+48(FP), R7
++ BR runtime·cmpbody(SB)
++
++// input:
++// R3 = a
++// R4 = alen
++// R5 = b
++// R6 = blen
++// R7 = address of output word (stores -1/0/1 here)
++TEXT runtime·cmpbody(SB),NOSPLIT|NOFRAME,$0-0
++ CMPBEQ R3, R5, cmplengths
++ MOVD R4, R8
++ CMPBLE R4, R6, amin
++ MOVD R6, R8
++amin:
++ CMPBEQ R8, $0, cmplengths
++ CMP R8, $256
++ BLE tail
++loop:
++ CLC $256, 0(R3), 0(R5)
++ BGT gt
++ BLT lt
++ SUB $256, R8
++ CMP R8, $256
++ BGT loop
++tail:
++ SUB $1, R8
++ EXRL $runtime·cmpbodyclc(SB), R8
++ BGT gt
++ BLT lt
++cmplengths:
++ CMP R4, R6
++ BEQ eq
++ BLT lt
++gt:
++ MOVD $1, 0(R7)
++ RET
++lt:
++ MOVD $-1, 0(R7)
++ RET
++eq:
++ MOVD $0, 0(R7)
++ RET
++
++TEXT runtime·cmpbodyclc(SB),NOSPLIT|NOFRAME,$0-0
++ CLC $1, 0(R3), 0(R5)
++ RET
++
++// This is called from .init_array and follows the platform, not Go, ABI.
++// We are overly conservative. We could only save the registers we use.
++// However, since this function is only called once per loaded module
++// performance is unimportant.
++TEXT runtime·addmoduledata(SB),NOSPLIT|NOFRAME,$0-0
++ // Save R6-R15, F0, F2, F4 and F6 in the
++ // register save area of the calling function
++ STMG R6, R15, 48(R15)
++ FMOVD F0, 128(R15)
++ FMOVD F2, 136(R15)
++ FMOVD F4, 144(R15)
++ FMOVD F6, 152(R15)
++
++ // append the argument (passed in R2, as per the ELF ABI) to the
++ // moduledata linked list.
++ MOVD runtime·lastmoduledatap(SB), R1
++ MOVD R2, moduledata_next(R1)
++ MOVD R2, runtime·lastmoduledatap(SB)
++
++ // Restore R6-R15, F0, F2, F4 and F6
++ LMG 48(R15), R6, R15
++	FMOVD	128(R15), F0
++	FMOVD	136(R15), F2
++	FMOVD	144(R15), F4
++	FMOVD	152(R15), F6
++ RET
++
++TEXT ·checkASM(SB),NOSPLIT,$0-1
++ MOVB $1, ret+0(FP)
++ RET
+--- /dev/null
++++ b/src/runtime/cgo/asm_s390x.s
+@@ -0,0 +1,44 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++/*
++ * void crosscall2(void (*fn)(void*, int32), void*, int32)
++ * Save registers and call fn with two arguments.
++ * crosscall2 obeys the C ABI; fn obeys the Go ABI.
++ */
++TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0
++ // Start with standard C stack frame layout and linkage
++
++ // Save R6-R15, F0, F2, F4 and F6 in the
++ // register save area of the calling function
++ STMG R6, R15, 48(R15)
++ FMOVD F0, 128(R15)
++ FMOVD F2, 136(R15)
++ FMOVD F4, 144(R15)
++ FMOVD F6, 152(R15)
++
++ // Initialize Go ABI environment
++ XOR R0, R0
++ BL runtime·load_g(SB)
++
++ // Allocate 24 bytes on the stack
++ SUB $24, R15
++
++ MOVD R3, 8(R15) // arg1
++ MOVW R4, 16(R15) // arg2
++ BL (R2) // fn(arg1, arg2)
++
++ ADD $24, R15
++
++ // Restore R6-R15, F0, F2, F4 and F6
++ LMG 48(R15), R6, R15
++ FMOVD F0, 128(R15)
++ FMOVD F2, 136(R15)
++ FMOVD F4, 144(R15)
++ FMOVD F6, 152(R15)
++
++ RET
++
+--- /dev/null
++++ b/src/runtime/cgo/gcc_linux_s390x.c
+@@ -0,0 +1,68 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include <pthread.h>
++#include <string.h>
++#include <signal.h>
++#include "libcgo.h"
++
++static void *threadentry(void*);
++
++void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
++static void (*setg_gcc)(void*);
++
++void
++x_cgo_init(G *g, void (*setg)(void*), void **tlsbase)
++{
++ pthread_attr_t attr;
++ size_t size;
++
++ setg_gcc = setg;
++ pthread_attr_init(&attr);
++ pthread_attr_getstacksize(&attr, &size);
++ g->stacklo = (uintptr)&attr - size + 4096;
++ pthread_attr_destroy(&attr);
++}
++
++void
++_cgo_sys_thread_start(ThreadStart *ts)
++{
++ pthread_attr_t attr;
++ sigset_t ign, oset;
++ pthread_t p;
++ size_t size;
++ int err;
++
++ sigfillset(&ign);
++ pthread_sigmask(SIG_SETMASK, &ign, &oset);
++
++ pthread_attr_init(&attr);
++ pthread_attr_getstacksize(&attr, &size);
++ // Leave stacklo=0 and set stackhi=size; mstack will do the rest.
++ ts->g->stackhi = size;
++ err = pthread_create(&p, &attr, threadentry, ts);
++
++ pthread_sigmask(SIG_SETMASK, &oset, nil);
++
++ if (err != 0) {
++ fatalf("pthread_create failed: %s", strerror(err));
++ }
++}
++
++extern void crosscall_s390x(void (*fn)(void), void *g);
++
++static void*
++threadentry(void *v)
++{
++ ThreadStart ts;
++
++ ts = *(ThreadStart*)v;
++ free(v);
++
++ // Save g for this thread in C TLS
++ setg_gcc((void*)ts.g);
++
++ crosscall_s390x(ts.fn, (void*)ts.g);
++ return nil;
++}
+--- /dev/null
++++ b/src/runtime/cgo/gcc_s390x.S
+@@ -0,0 +1,46 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++/*
++ * void crosscall_s390x(void (*fn)(void), void *g)
++ *
++ * Calling into the go tool chain, where all registers are caller save.
++ * Called from standard s390x C ABI, where r6-r13, r15, and f0, f2, f4 and f6 are
++ * callee-save, so they must be saved explicitly.
++ */
++.globl crosscall_s390x
++crosscall_s390x:
++ /*
++ * save r6-r15, f0, f2, f4 and f6 in the
++ * register save area of the calling function
++ */
++ stmg %r6, %r15, 48(%r15)
++ stdy %f0, 128(%r15)
++ stdy %f2, 136(%r15)
++ stdy %f4, 144(%r15)
++ stdy %f6, 152(%r15)
++
++ /* set r0 to 0 */
++ xgr %r0, %r0
++
++ /* restore g pointer */
++ lgr %r13, %r3
++
++ /* grow stack 8 bytes and call fn */
++ agfi %r15, -8
++ basr %r14, %r2
++ agfi %r15, 8
++
++ /* restore registers */
++ lmg %r6, %r15, 48(%r15)
++ ldy %f0, 128(%r15)
++ ldy %f2, 136(%r15)
++ ldy %f4, 144(%r15)
++ ldy %f6, 152(%r15)
++
++ br %r14 /* restored by lmg */
++
++#ifdef __ELF__
++.section .note.GNU-stack,"",%progbits
++#endif
+--- a/src/runtime/cgocall.go
++++ b/src/runtime/cgocall.go
+@@ -239,8 +239,8 @@
+ case "386":
+ // On 386, stack frame is three words, plus caller PC.
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
+- case "ppc64", "ppc64le":
+- // On ppc64, the callback arguments are in the arguments area of
++ case "ppc64", "ppc64le", "s390x":
++ // On ppc64 and s390x, the callback arguments are in the arguments area of
+ // cgocallback's stack frame. The stack looks like this:
+ // +--------------------+------------------------------+
+ // | | ... |
+@@ -293,7 +293,7 @@
+ switch GOARCH {
+ default:
+ throw("unwindm not implemented")
+- case "386", "amd64", "arm", "ppc64", "ppc64le":
++ case "386", "amd64", "arm", "ppc64", "ppc64le", "s390x":
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
+ case "arm64":
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
+--- /dev/null
++++ b/src/runtime/defs_linux_s390x.go
+@@ -0,0 +1,167 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++const (
++ _EINTR = 0x4
++ _EAGAIN = 0xb
++ _ENOMEM = 0xc
++
++ _PROT_NONE = 0x0
++ _PROT_READ = 0x1
++ _PROT_WRITE = 0x2
++ _PROT_EXEC = 0x4
++
++ _MAP_ANON = 0x20
++ _MAP_PRIVATE = 0x2
++ _MAP_FIXED = 0x10
++
++ _MADV_DONTNEED = 0x4
++ _MADV_HUGEPAGE = 0xe
++ _MADV_NOHUGEPAGE = 0xf
++
++ _SA_RESTART = 0x10000000
++ _SA_ONSTACK = 0x8000000
++ _SA_SIGINFO = 0x4
++
++ _SIGHUP = 0x1
++ _SIGINT = 0x2
++ _SIGQUIT = 0x3
++ _SIGILL = 0x4
++ _SIGTRAP = 0x5
++ _SIGABRT = 0x6
++ _SIGBUS = 0x7
++ _SIGFPE = 0x8
++ _SIGKILL = 0x9
++ _SIGUSR1 = 0xa
++ _SIGSEGV = 0xb
++ _SIGUSR2 = 0xc
++ _SIGPIPE = 0xd
++ _SIGALRM = 0xe
++ _SIGSTKFLT = 0x10
++ _SIGCHLD = 0x11
++ _SIGCONT = 0x12
++ _SIGSTOP = 0x13
++ _SIGTSTP = 0x14
++ _SIGTTIN = 0x15
++ _SIGTTOU = 0x16
++ _SIGURG = 0x17
++ _SIGXCPU = 0x18
++ _SIGXFSZ = 0x19
++ _SIGVTALRM = 0x1a
++ _SIGPROF = 0x1b
++ _SIGWINCH = 0x1c
++ _SIGIO = 0x1d
++ _SIGPWR = 0x1e
++ _SIGSYS = 0x1f
++
++ _FPE_INTDIV = 0x1
++ _FPE_INTOVF = 0x2
++ _FPE_FLTDIV = 0x3
++ _FPE_FLTOVF = 0x4
++ _FPE_FLTUND = 0x5
++ _FPE_FLTRES = 0x6
++ _FPE_FLTINV = 0x7
++ _FPE_FLTSUB = 0x8
++
++ _BUS_ADRALN = 0x1
++ _BUS_ADRERR = 0x2
++ _BUS_OBJERR = 0x3
++
++ _SEGV_MAPERR = 0x1
++ _SEGV_ACCERR = 0x2
++
++ _ITIMER_REAL = 0x0
++ _ITIMER_VIRTUAL = 0x1
++ _ITIMER_PROF = 0x2
++
++ _EPOLLIN = 0x1
++ _EPOLLOUT = 0x4
++ _EPOLLERR = 0x8
++ _EPOLLHUP = 0x10
++ _EPOLLRDHUP = 0x2000
++ _EPOLLET = 0x80000000
++ _EPOLL_CLOEXEC = 0x80000
++ _EPOLL_CTL_ADD = 0x1
++ _EPOLL_CTL_DEL = 0x2
++ _EPOLL_CTL_MOD = 0x3
++)
++
++type timespec struct {
++ tv_sec int64
++ tv_nsec int64
++}
++
++func (ts *timespec) set_sec(x int64) {
++ ts.tv_sec = x
++}
++
++func (ts *timespec) set_nsec(x int32) {
++ ts.tv_nsec = int64(x)
++}
++
++type timeval struct {
++ tv_sec int64
++ tv_usec int64
++}
++
++func (tv *timeval) set_usec(x int32) {
++ tv.tv_usec = int64(x)
++}
++
++type sigactiont struct {
++ sa_handler uintptr
++ sa_flags uint64
++ sa_restorer uintptr
++ sa_mask uint64
++}
++
++type siginfo struct {
++ si_signo int32
++ si_errno int32
++ si_code int32
++ // below here is a union; si_addr is the only field we use
++ si_addr uint64
++}
++
++type itimerval struct {
++ it_interval timeval
++ it_value timeval
++}
++
++type epollevent struct {
++ events uint32
++ pad_cgo_0 [4]byte
++ data [8]byte // unaligned uintptr
++}
++
++const (
++ _O_RDONLY = 0x0
++ _O_CLOEXEC = 0x80000
++ _SA_RESTORER = 0
++)
++
++type sigaltstackt struct {
++ ss_sp *byte
++ ss_flags int32
++ ss_size uintptr
++}
++
++type sigcontext struct {
++ psw_mask uint64
++ psw_addr uint64
++ gregs [16]uint64
++ aregs [16]uint32
++ fpc uint32
++ fpregs [16]uint64
++}
++
++type ucontext struct {
++ uc_flags uint64
++ uc_link *ucontext
++ uc_stack sigaltstackt
++ uc_mcontext sigcontext
++ uc_sigmask uint64
++}
+--- a/src/runtime/extern.go
++++ b/src/runtime/extern.go
+@@ -230,5 +230,5 @@
+ const GOOS string = sys.TheGoos
+
+ // GOARCH is the running program's architecture target:
+-// 386, amd64, or arm.
++// 386, amd64, arm, or s390x.
+ const GOARCH string = sys.TheGoarch
+--- a/src/runtime/gcinfo_test.go
++++ b/src/runtime/gcinfo_test.go
+@@ -144,7 +144,7 @@
+ typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
+ }
+- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le":
++ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x":
+ return []byte{
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+--- a/src/runtime/hash64.go
++++ b/src/runtime/hash64.go
+@@ -6,7 +6,7 @@
+ // xxhash: https://code.google.com/p/xxhash/
+ // cityhash: https://code.google.com/p/cityhash/
+
+-// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le
++// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x
+
+ package runtime
+
+--- /dev/null
++++ b/src/runtime/internal/atomic/asm_s390x.s
+@@ -0,0 +1,194 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++// func Cas(ptr *uint32, old, new uint32) bool
++// Atomically:
++// if *ptr == old {
++// *val = new
++// return 1
++// } else {
++// return 0
++// }
++TEXT ·Cas(SB), NOSPLIT, $0-17
++ MOVD ptr+0(FP), R3
++ MOVWZ old+8(FP), R4
++ MOVWZ new+12(FP), R5
++ CS R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
++ BNE cas_fail
++ MOVB $1, ret+16(FP)
++ RET
++cas_fail:
++ MOVB $0, ret+16(FP)
++ RET
++
++// func Cas64(ptr *uint64, old, new uint64) bool
++// Atomically:
++// if *ptr == old {
++// *ptr = new
++// return 1
++// } else {
++// return 0
++// }
++TEXT ·Cas64(SB), NOSPLIT, $0-25
++ MOVD ptr+0(FP), R3
++ MOVD old+8(FP), R4
++ MOVD new+16(FP), R5
++ CSG R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
++ BNE cas64_fail
++ MOVB $1, ret+24(FP)
++ RET
++cas64_fail:
++ MOVB $0, ret+24(FP)
++ RET
++
++// func Casuintptr(ptr *uintptr, old, new uintptr) bool
++TEXT ·Casuintptr(SB), NOSPLIT, $0-25
++ BR ·Cas64(SB)
++
++// func Loaduintptr(ptr *uintptr) uintptr
++TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
++ BR ·Load64(SB)
++
++// func Loaduint(ptr *uint) uint
++TEXT ·Loaduint(SB), NOSPLIT, $0-16
++ BR ·Load64(SB)
++
++// func Storeuintptr(ptr *uintptr, new uintptr)
++TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
++ BR ·Store64(SB)
++
++// func Loadint64(ptr *int64) int64
++TEXT ·Loadint64(SB), NOSPLIT, $0-16
++ BR ·Load64(SB)
++
++// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
++TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
++ BR ·Xadd64(SB)
++
++// func Xaddint64(ptr *int64, delta int64) int64
++TEXT ·Xaddint64(SB), NOSPLIT, $0-16
++ BR ·Xadd64(SB)
++
++// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
++// Atomically:
++// if *ptr == old {
++// *ptr = new
++// return 1
++// } else {
++// return 0
++// }
++TEXT ·Casp1(SB), NOSPLIT, $0-25
++ BR ·Cas64(SB)
++
++// func Xadd(ptr *uint32, delta int32) uint32
++// Atomically:
++// *ptr += delta
++// return *ptr
++TEXT ·Xadd(SB), NOSPLIT, $0-20
++ MOVD ptr+0(FP), R4
++ MOVW delta+8(FP), R5
++ MOVW (R4), R3
++repeat:
++ ADD R5, R3, R6
++ CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
++ BNE repeat
++ MOVW R6, ret+16(FP)
++ RET
++
++// func Xadd64(ptr *uint64, delta int64) uint64
++TEXT ·Xadd64(SB), NOSPLIT, $0-24
++ MOVD ptr+0(FP), R4
++ MOVD delta+8(FP), R5
++ MOVD (R4), R3
++repeat:
++ ADD R5, R3, R6
++ CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
++ BNE repeat
++ MOVD R6, ret+16(FP)
++ RET
++
++// func Xchg(ptr *uint32, new uint32) uint32
++TEXT ·Xchg(SB), NOSPLIT, $0-20
++ MOVD ptr+0(FP), R4
++ MOVW new+8(FP), R3
++ MOVW (R4), R6
++repeat:
++ CS R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
++ BNE repeat
++ MOVW R6, ret+16(FP)
++ RET
++
++// func Xchg64(ptr *uint64, new uint64) uint64
++TEXT ·Xchg64(SB), NOSPLIT, $0-24
++ MOVD ptr+0(FP), R4
++ MOVD new+8(FP), R3
++ MOVD (R4), R6
++repeat:
++ CSG R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
++ BNE repeat
++ MOVD R6, ret+16(FP)
++ RET
++
++// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
++TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
++ BR ·Xchg64(SB)
++
++// on s390x load & store are both atomic operations
++
++// func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
++TEXT ·Storep1(SB), NOSPLIT, $0-16
++ BR ·Store64(SB)
++
++// func Store(ptr *uint32, val uint32)
++TEXT ·Store(SB), NOSPLIT, $0-12
++ MOVD ptr+0(FP), R3
++ MOVW val+8(FP), R4
++ MOVW R4, 0(R3)
++ RET
++
++// func Store64(ptr *uint64, val uint64)
++TEXT ·Store64(SB), NOSPLIT, $0-16
++ MOVD ptr+0(FP), R3
++ MOVD val+8(FP), R4
++ MOVD R4, 0(R3)
++ RET
++
++// func Or8(addr *uint8, v uint8)
++TEXT ·Or8(SB), NOSPLIT, $0-9
++ MOVD ptr+0(FP), R3
++ MOVBZ val+8(FP), R4
++ // Calculate shift.
++ AND $3, R3, R5
++ XOR $3, R5 // big endian - flip direction
++ SLD $3, R5 // MUL $8, R5
++ SLD R5, R4
++ // Align ptr down to 4 bytes so we can use 32-bit load/store.
++ AND $-4, R3
++ MOVWZ 0(R3), R6
++again:
++ OR R4, R6, R7
++ CS R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
++ BNE again
++ RET
++
++// func And8(addr *uint8, v uint8)
++TEXT ·And8(SB), NOSPLIT, $0-9
++ MOVD ptr+0(FP), R3
++ MOVBZ val+8(FP), R4
++ // Calculate shift.
++ AND $3, R3, R5
++ XOR $3, R5 // big endian - flip direction
++ SLD $3, R5 // MUL $8, R5
++ OR $-256, R4 // create 0xffffffffffffffxx
++ RLLG R5, R4
++ // Align ptr down to 4 bytes so we can use 32-bit load/store.
++ AND $-4, R3
++ MOVWZ 0(R3), R6
++again:
++ AND R4, R6, R7
++ CS R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
++ BNE again
++ RET
+--- /dev/null
++++ b/src/runtime/internal/atomic/atomic_s390x.go
+@@ -0,0 +1,63 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package atomic
++
++import "unsafe"
++
++//go:nosplit
++//go:noinline
++func Load(ptr *uint32) uint32 {
++ return *ptr
++}
++
++//go:nosplit
++//go:noinline
++func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
++ return *(*unsafe.Pointer)(ptr)
++}
++
++//go:nosplit
++//go:noinline
++func Load64(ptr *uint64) uint64 {
++ return *ptr
++}
++
++//go:noescape
++func And8(ptr *uint8, val uint8)
++
++//go:noescape
++func Or8(ptr *uint8, val uint8)
++
++// NOTE: Do not add atomicxor8 (XOR is not idempotent).
++
++//go:noescape
++func Xadd(ptr *uint32, delta int32) uint32
++
++//go:noescape
++func Xadd64(ptr *uint64, delta int64) uint64
++
++//go:noescape
++func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
++
++//go:noescape
++func Xchg(ptr *uint32, new uint32) uint32
++
++//go:noescape
++func Xchg64(ptr *uint64, new uint64) uint64
++
++//go:noescape
++func Xchguintptr(ptr *uintptr, new uintptr) uintptr
++
++//go:noescape
++func Cas64(ptr *uint64, old, new uint64) bool
++
++//go:noescape
++func Store(ptr *uint32, val uint32)
++
++//go:noescape
++func Store64(ptr *uint64, val uint64)
++
++// NO go:noescape annotation; see atomic_pointer.go.
++func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
+--- /dev/null
++++ b/src/runtime/internal/sys/arch_s390x.go
+@@ -0,0 +1,18 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package sys
++
++const (
++ TheChar = 'z'
++ BigEndian = 1
++ CacheLineSize = 256
++ PhysPageSize = 4096
++ PCQuantum = 2
++ Int64Align = 8
++ HugePageSize = 0
++ MinFrameSize = 8
++)
++
++type Uintreg uint64
+--- /dev/null
++++ b/src/runtime/internal/sys/zgoarch_s390x.go
+@@ -0,0 +1,26 @@
++// generated by gengoos.go using 'go generate'
++
++package sys
++
++const TheGoarch = `s390x`
++
++const Goarch386 = 0
++const GoarchAmd64 = 0
++const GoarchAmd64p32 = 0
++const GoarchArm = 0
++const GoarchArmbe = 0
++const GoarchArm64 = 0
++const GoarchArm64be = 0
++const GoarchPpc64 = 0
++const GoarchPpc64le = 0
++const GoarchMips = 0
++const GoarchMipsle = 0
++const GoarchMips64 = 0
++const GoarchMips64le = 0
++const GoarchMips64p32 = 0
++const GoarchMips64p32le = 0
++const GoarchPpc = 0
++const GoarchS390 = 0
++const GoarchS390x = 1
++const GoarchSparc = 0
++const GoarchSparc64 = 0
+--- /dev/null
++++ b/src/runtime/lfstack_linux_s390x.go
+@@ -0,0 +1,25 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++import "unsafe"
++
++// In addition to the 16 bits taken from the top, we can take 3 from the
++// bottom, because node must be pointer-aligned, giving a total of 19 bits
++// of count.
++const (
++ addrBits = 48
++ cntBits = 64 - addrBits + 3
++)
++
++func lfstackPack(node *lfnode, cnt uintptr) uint64 {
++	return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
++}
++
++func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
++	node = (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
++	cnt = uintptr(val & (1<<cntBits - 1))
++	return
++}
+--- /dev/null
++++ b/src/runtime/os1_linux_s390x.go
+@@ -0,0 +1,29 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++var sigset_all = sigset(^uint64(0))
++
++func sigaddset(mask *sigset, i int) {
++	if i > 64 {
++ throw("unexpected signal greater than 64")
++ }
++ *mask |= 1 << (uint(i) - 1)
++}
++
++func sigdelset(mask *sigset, i int) {
++ if i > 64 {
++ throw("unexpected signal greater than 64")
++ }
++ *mask &^= 1 << (uint(i) - 1)
++}
++
++func sigfillset(mask *uint64) {
++ *mask = ^uint64(0)
++}
++
++func sigcopyset(mask *sigset, m sigmask) {
++ *mask = sigset(uint64(m[0]) | uint64(m[1])<<32)
++}
+--- a/src/runtime/os2_linux_generic.go
++++ b/src/runtime/os2_linux_generic.go
+@@ -4,6 +4,7 @@
+
+ // +build !mips64
+ // +build !mips64le
++// +build !s390x
+ // +build linux
+
+ package runtime
+--- /dev/null
++++ b/src/runtime/os2_linux_s390x.go
+@@ -0,0 +1,22 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++const (
++ _SS_DISABLE = 2
++ _NSIG = 65
++ _SI_USER = 0
++ _SIG_BLOCK = 0
++ _SIG_UNBLOCK = 1
++ _SIG_SETMASK = 2
++ _RLIMIT_AS = 9
++)
++
++type sigset uint64
++
++type rlimit struct {
++ rlim_cur uintptr
++ rlim_max uintptr
++}
+--- /dev/null
++++ b/src/runtime/rt0_linux_s390x.s
+@@ -0,0 +1,20 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++TEXT _rt0_s390x_linux(SB),NOSPLIT|NOFRAME,$0
++ // In a statically linked binary, the stack contains argc,
++ // argv as argc string pointers followed by a NULL, envv as a
++ // sequence of string pointers followed by a NULL, and auxv.
++ // There is no TLS base pointer.
++ //
++ // TODO: Support dynamic linking entry point
++ MOVD 0(R15), R2 // argc
++ ADD $8, R15, R3 // argv
++ BR main(SB)
++
++TEXT main(SB),NOSPLIT|NOFRAME,$0
++ MOVD $runtime·rt0_go(SB), R11
++ BR R11
+--- a/src/runtime/runtime-gdb_test.go
++++ b/src/runtime/runtime-gdb_test.go
+@@ -107,7 +107,7 @@
+ // stack frames on RISC architectures.
+ canBackTrace := false
+ switch runtime.GOARCH {
+- case "amd64", "386", "ppc64", "ppc64le", "arm", "arm64", "mips64", "mips64le":
++ case "amd64", "386", "ppc64", "ppc64le", "arm", "arm64", "mips64", "mips64le", "s390x":
+ canBackTrace = true
+ args = append(args,
+ "-ex", "echo BEGIN goroutine 2 bt\n",
+--- /dev/null
++++ b/src/runtime/signal_linux_s390x.go
+@@ -0,0 +1,50 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++import (
++ "runtime/internal/sys"
++ "unsafe"
++)
++
++type sigctxt struct {
++ info *siginfo
++ ctxt unsafe.Pointer
++}
++
++func (c *sigctxt) regs() *sigcontext {
++ return (*sigcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
++}
++func (c *sigctxt) r0() uint64 { return c.regs().gregs[0] }
++func (c *sigctxt) r1() uint64 { return c.regs().gregs[1] }
++func (c *sigctxt) r2() uint64 { return c.regs().gregs[2] }
++func (c *sigctxt) r3() uint64 { return c.regs().gregs[3] }
++func (c *sigctxt) r4() uint64 { return c.regs().gregs[4] }
++func (c *sigctxt) r5() uint64 { return c.regs().gregs[5] }
++func (c *sigctxt) r6() uint64 { return c.regs().gregs[6] }
++func (c *sigctxt) r7() uint64 { return c.regs().gregs[7] }
++func (c *sigctxt) r8() uint64 { return c.regs().gregs[8] }
++func (c *sigctxt) r9() uint64 { return c.regs().gregs[9] }
++func (c *sigctxt) r10() uint64 { return c.regs().gregs[10] }
++func (c *sigctxt) r11() uint64 { return c.regs().gregs[11] }
++func (c *sigctxt) r12() uint64 { return c.regs().gregs[12] }
++func (c *sigctxt) r13() uint64 { return c.regs().gregs[13] }
++func (c *sigctxt) r14() uint64 { return c.regs().gregs[14] }
++func (c *sigctxt) r15() uint64 { return c.regs().gregs[15] }
++func (c *sigctxt) link() uint64 { return c.regs().gregs[14] }
++func (c *sigctxt) sp() uint64 { return c.regs().gregs[15] }
++func (c *sigctxt) pc() uint64 { return c.regs().psw_addr }
++func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
++func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
++
++func (c *sigctxt) set_r0(x uint64) { c.regs().gregs[0] = x }
++func (c *sigctxt) set_r13(x uint64) { c.regs().gregs[13] = x }
++func (c *sigctxt) set_link(x uint64) { c.regs().gregs[14] = x }
++func (c *sigctxt) set_sp(x uint64) { c.regs().gregs[15] = x }
++func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
++func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
++func (c *sigctxt) set_sigaddr(x uint64) {
++ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
++}
+--- /dev/null
++++ b/src/runtime/signal_s390x.go
+@@ -0,0 +1,170 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build linux
++
++package runtime
++
++import (
++ "runtime/internal/sys"
++ "unsafe"
++)
++
++func dumpregs(c *sigctxt) {
++ print("r0 ", hex(c.r0()), "\t")
++ print("r1 ", hex(c.r1()), "\n")
++ print("r2 ", hex(c.r2()), "\t")
++ print("r3 ", hex(c.r3()), "\n")
++ print("r4 ", hex(c.r4()), "\t")
++ print("r5 ", hex(c.r5()), "\n")
++ print("r6 ", hex(c.r6()), "\t")
++ print("r7 ", hex(c.r7()), "\n")
++ print("r8 ", hex(c.r8()), "\t")
++ print("r9 ", hex(c.r9()), "\n")
++ print("r10 ", hex(c.r10()), "\t")
++ print("r11 ", hex(c.r11()), "\n")
++ print("r12 ", hex(c.r12()), "\t")
++ print("r13 ", hex(c.r13()), "\n")
++ print("r14 ", hex(c.r14()), "\t")
++ print("r15 ", hex(c.r15()), "\n")
++ print("pc ", hex(c.pc()), "\t")
++ print("link ", hex(c.link()), "\n")
++}
++
++var crashing int32
++
++// May run during STW, so write barriers are not allowed.
++//
++//go:nowritebarrierrec
++func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
++ _g_ := getg()
++ c := &sigctxt{info, ctxt}
++
++ if sig == _SIGPROF {
++ sigprof(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp, _g_.m)
++ return
++ }
++ flags := int32(_SigThrow)
++ if sig < uint32(len(sigtable)) {
++ flags = sigtable[sig].flags
++ }
++ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
++ // Make it look like a call to the signal func.
++ // Have to pass arguments out of band since
++ // augmenting the stack frame would break
++ // the unwinding code.
++ gp.sig = sig
++ gp.sigcode0 = uintptr(c.sigcode())
++ gp.sigcode1 = uintptr(c.sigaddr())
++ gp.sigpc = uintptr(c.pc())
++
++ // We arrange link, and pc to pretend the panicking
++ // function calls sigpanic directly.
++ // Always save LINK to stack so that panics in leaf
++ // functions are correctly handled. This smashes
++ // the stack frame but we're not going back there
++ // anyway.
++ sp := c.sp() - sys.MinFrameSize
++ c.set_sp(sp)
++ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
++
++ pc := uintptr(gp.sigpc)
++
++ // If we don't recognize the PC as code
++ // but we do recognize the link register as code,
++ // then assume this was a call to non-code and treat like
++ // pc == 0, to make unwinding show the context.
++ if pc != 0 && findfunc(pc) == nil && findfunc(uintptr(c.link())) != nil {
++ pc = 0
++ }
++
++ // Don't bother saving PC if it's zero, which is
++ // probably a call to a nil func: the old link register
++ // is more useful in the stack trace.
++ if pc != 0 {
++ c.set_link(uint64(pc))
++ }
++
++ // In case we are panicking from external C code
++ c.set_r0(0)
++ c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
++ c.set_pc(uint64(funcPC(sigpanic)))
++ return
++ }
++
++ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
++ if sigsend(sig) {
++ return
++ }
++ }
++
++ if c.sigcode() == _SI_USER && signal_ignored(sig) {
++ return
++ }
++
++ if flags&_SigKill != 0 {
++ dieFromSignal(int32(sig))
++ }
++
++ if flags&_SigThrow == 0 {
++ return
++ }
++
++ _g_.m.throwing = 1
++ _g_.m.caughtsig.set(gp)
++
++ if crashing == 0 {
++ startpanic()
++ }
++
++ if sig < uint32(len(sigtable)) {
++ print(sigtable[sig].name, "\n")
++ } else {
++ print("Signal ", sig, "\n")
++ }
++
++ print("PC=", hex(c.pc()), " m=", _g_.m.id, "\n")
++ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
++ print("signal arrived during cgo execution\n")
++ gp = _g_.m.lockedg
++ }
++ print("\n")
++
++ level, _, docrash := gotraceback()
++ if level > 0 {
++ goroutineheader(gp)
++ tracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp)
++ if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
++ // tracebackothers on original m skipped this one; trace it now.
++ goroutineheader(_g_.m.curg)
++ traceback(^uintptr(0), ^uintptr(0), 0, gp)
++ } else if crashing == 0 {
++ tracebackothers(gp)
++ print("\n")
++ }
++ dumpregs(c)
++ }
++
++ if docrash {
++ crashing++
++ if crashing < sched.mcount {
++ // There are other m's that need to dump their stacks.
++ // Relay SIGQUIT to the next m by sending it to the current process.
++ // All m's that have already received SIGQUIT have signal masks blocking
++ // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
++ // When the last m receives the SIGQUIT, it will fall through to the call to
++ // crash below. Just in case the relaying gets botched, each m involved in
++ // the relay sleeps for 5 seconds and then does the crash/exit itself.
++ // In expected operation, the last m has received the SIGQUIT and run
++ // crash/exit and the process is gone, all long before any of the
++ // 5-second sleeps have finished.
++ print("\n-----\n\n")
++ raiseproc(_SIGQUIT)
++ usleep(5 * 1000 * 1000)
++ }
++ crash()
++ }
++
++ exit(2)
++}
+--- /dev/null
++++ b/src/runtime/sys_linux_s390x.s
+@@ -0,0 +1,440 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// System calls and other system stuff for Linux s390x; see
++// /usr/include/asm-s390/unistd.h for the syscall number definitions.
++
++#include "go_asm.h"
++#include "go_tls.h"
++#include "textflag.h"
++
++#define SYS_exit 1
++#define SYS_read 3
++#define SYS_write 4
++#define SYS_open 5
++#define SYS_close 6
++#define SYS_getpid 20
++#define SYS_kill 37
++#define SYS_fcntl 55
++#define SYS_gettimeofday 78
++#define SYS_select 142 // always return -ENOSYS // *****
++#define SYS_mmap 90
++#define SYS_munmap 91
++#define SYS_setitimer 104
++#define SYS_clone 120
++#define SYS_newselect 142 // ******
++#define SYS_sched_yield 158
++#define SYS_rt_sigreturn 173 // changed
++#define SYS_rt_sigaction 174 //
++#define SYS_rt_sigprocmask 175 //
++#define SYS_sigaltstack 186 //
++#define SYS_ugetrlimit 191 //
++#define SYS_madvise 219 //
++#define SYS_mincore 218 //
++#define SYS_gettid 236 //
++#define SYS_tkill 237 //
++#define SYS_futex 238 //
++#define SYS_sched_getaffinity 240 //
++#define SYS_exit_group 248 //
++#define SYS_epoll_create 249 //
++#define SYS_epoll_ctl 250 //
++#define SYS_epoll_wait 251 //
++#define SYS_clock_gettime 260 //
++#define SYS_epoll_create1 327 //
++
++TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
++ MOVW code+0(FP), R2
++ MOVW $SYS_exit_group, R1
++ SYSCALL
++ RET
++
++TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
++ MOVW code+0(FP), R2
++ MOVW $SYS_exit, R1
++ SYSCALL
++ RET
++
++TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
++ MOVD name+0(FP), R2
++ MOVW mode+8(FP), R3
++ MOVW perm+12(FP), R4
++ MOVW $SYS_open, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVW $-1, R2
++ MOVW R2, ret+16(FP)
++ RET
++
++TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12
++ MOVW fd+0(FP), R2
++ MOVW $SYS_close, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVW $-1, R2
++ MOVW R2, ret+8(FP)
++ RET
++
++TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
++ MOVD fd+0(FP), R2
++ MOVD p+8(FP), R3
++ MOVW n+16(FP), R4
++ MOVW $SYS_write, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVW $-1, R2
++ MOVW R2, ret+24(FP)
++ RET
++
++TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
++ MOVW fd+0(FP), R2
++ MOVD p+8(FP), R3
++ MOVW n+16(FP), R4
++ MOVW $SYS_read, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVW $-1, R2
++ MOVW R2, ret+24(FP)
++ RET
++
++TEXT runtime·getrlimit(SB),NOSPLIT|NOFRAME,$0-20
++ MOVW kind+0(FP), R2
++ MOVD limit+8(FP), R3
++ MOVW $SYS_ugetrlimit, R1
++ SYSCALL
++ MOVW R2, ret+16(FP)
++ RET
++
++TEXT runtime·usleep(SB),NOSPLIT,$16-4
++ MOVW usec+0(FP), R2
++ MOVD R2, R4
++ MOVW $1000000, R3
++ DIVD R3, R2
++ MOVD R2, 8(R15)
++ MULLD R2, R3
++ SUB R3, R4
++ MOVD R4, 16(R15)
++
++ // select(0, 0, 0, 0, &tv)
++ MOVW $0, R2
++ MOVW $0, R3
++ MOVW $0, R4
++ MOVW $0, R5
++ ADD $8, R15, R6
++ MOVW $SYS_newselect, R1
++ SYSCALL
++ RET
++
++TEXT runtime·gettid(SB),NOSPLIT,$0-4
++ MOVW $SYS_gettid, R1
++ SYSCALL
++ MOVW R2, ret+0(FP)
++ RET
++
++TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
++ MOVW $SYS_gettid, R1
++ SYSCALL
++ MOVW R2, R2 // arg 1 tid
++ MOVW sig+0(FP), R3 // arg 2
++ MOVW $SYS_tkill, R1
++ SYSCALL
++ RET
++
++TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
++ MOVW $SYS_getpid, R1
++ SYSCALL
++ MOVW R2, R2 // arg 1 pid
++ MOVW sig+0(FP), R3 // arg 2
++ MOVW $SYS_kill, R1
++ SYSCALL
++ RET
++
++TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
++ MOVW mode+0(FP), R2
++ MOVD new+8(FP), R3
++ MOVD old+16(FP), R4
++ MOVW $SYS_setitimer, R1
++ SYSCALL
++ RET
++
++TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
++ MOVD addr+0(FP), R2
++ MOVD n+8(FP), R3
++ MOVD dst+16(FP), R4
++ MOVW $SYS_mincore, R1
++ SYSCALL
++ MOVW R2, ret+24(FP)
++ RET
++
++// func now() (sec int64, nsec int32)
++TEXT time·now(SB),NOSPLIT,$16
++ MOVD $0(R15), R2
++ MOVD $0, R3
++ MOVW $SYS_gettimeofday, R1
++ SYSCALL
++ MOVD 0(R15), R2 // sec
++ MOVD 8(R15), R4 // usec
++ MOVD $1000, R3
++ MULLD R3, R4
++ MOVD R2, sec+0(FP)
++ MOVW R4, nsec+8(FP)
++ RET
++
++TEXT runtime·nanotime(SB),NOSPLIT,$16
++ MOVW $1, R2 // CLOCK_MONOTONIC
++ MOVD $0(R15), R3
++ MOVW $SYS_clock_gettime, R1
++ SYSCALL
++ MOVD 0(R15), R2 // sec
++ MOVD 8(R15), R4 // nsec
++ // sec is in R2, nsec in R4
++ // return nsec in R2
++ MOVD $1000000000, R3
++ MULLD R3, R2
++ ADD R4, R2
++ MOVD R2, ret+0(FP)
++ RET
++
++TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28
++ MOVW sig+0(FP), R2
++ MOVD new+8(FP), R3
++ MOVD old+16(FP), R4
++ MOVW size+24(FP), R5
++ MOVW $SYS_rt_sigprocmask, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVD R0, 0(R0) // crash
++ RET
++
++TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36
++ MOVD sig+0(FP), R2
++ MOVD new+8(FP), R3
++ MOVD old+16(FP), R4
++ MOVD size+24(FP), R5
++ MOVW $SYS_rt_sigaction, R1
++ SYSCALL
++ MOVW R2, ret+32(FP)
++ RET
++
++TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
++ MOVW sig+8(FP), R2
++ MOVD info+16(FP), R3
++ MOVD ctx+24(FP), R4
++ MOVD fn+0(FP), R5
++ BL R5
++ RET
++
++TEXT runtime·sigtramp(SB),NOSPLIT,$64
++ // initialize essential registers (just in case)
++ XOR R0, R0
++
++ // this might be called in external code context,
++ // where g is not set.
++ MOVB runtime·iscgo(SB), R6
++ CMPBEQ R6, $0, 2(PC)
++ BL runtime·load_g(SB)
++
++ MOVW R2, 8(R15)
++ MOVD R3, 16(R15)
++ MOVD R4, 24(R15)
++ MOVD $runtime·sigtrampgo(SB), R5
++ BL R5
++ RET
++
++// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
++TEXT runtime·mmap(SB),NOSPLIT,$48-40
++ MOVD addr+0(FP), R2
++ MOVD n+8(FP), R3
++ MOVW prot+16(FP), R4
++ MOVW flags+20(FP), R5
++ MOVW fd+24(FP), R6
++ MOVWZ off+28(FP), R7
++
++ // s390x uses old_mmap, so the arguments need to be placed into
++ // a struct and a pointer to the struct passed to mmap.
++ MOVD R2, addr-48(SP)
++ MOVD R3, n-40(SP)
++ MOVD R4, prot-32(SP)
++ MOVD R5, flags-24(SP)
++ MOVD R6, fd-16(SP)
++ MOVD R7, off-8(SP)
++
++ MOVD $addr-48(SP), R2
++ MOVW $SYS_mmap, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ NEG R2
++ MOVD R2, ret+32(FP)
++ RET
++
++TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
++ MOVD addr+0(FP), R2
++ MOVD n+8(FP), R3
++ MOVW $SYS_munmap, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVD R0, 0(R0) // crash
++ RET
++
++TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
++ MOVD addr+0(FP), R2
++ MOVD n+8(FP), R3
++ MOVW flags+16(FP), R4
++ MOVW $SYS_madvise, R1
++ SYSCALL
++ // ignore failure - maybe pages are locked
++ RET
++
++// int64 futex(int32 *uaddr, int32 op, int32 val,
++// struct timespec *timeout, int32 *uaddr2, int32 val2);
++TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0
++ MOVD addr+0(FP), R2
++ MOVW op+8(FP), R3
++ MOVW val+12(FP), R4
++ MOVD ts+16(FP), R5
++ MOVD addr2+24(FP), R6
++ MOVW val3+32(FP), R7
++ MOVW $SYS_futex, R1
++ SYSCALL
++ MOVW R2, ret+40(FP)
++ RET
++
++// int32 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
++TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0
++ MOVW flags+0(FP), R3
++ MOVD stk+8(FP), R2
++
++ // Copy mp, gp, fn off parent stack for use by child.
++ // Careful: Linux system call clobbers ???.
++ MOVD mm+16(FP), R7
++ MOVD gg+24(FP), R8
++ MOVD fn+32(FP), R9
++
++ MOVD R7, -8(R2)
++ MOVD R8, -16(R2)
++ MOVD R9, -24(R2)
++ MOVD $1234, R7
++ MOVD R7, -32(R2)
++
++ SYSCALL $SYS_clone
++
++ // In parent, return.
++ CMPBEQ R2, $0, 3(PC)
++ MOVW R2, ret+40(FP)
++ RET
++
++ // In child, on new stack.
++ // initialize essential registers
++ XOR R0, R0
++ MOVD -32(R15), R7
++ CMP R7, $1234
++ BEQ 2(PC)
++ MOVD R0, 0(R0)
++
++ // Initialize m->procid to Linux tid
++ SYSCALL $SYS_gettid
++
++ MOVD -24(R15), R9 // fn
++ MOVD -16(R15), R8 // g
++ MOVD -8(R15), R7 // m
++
++ CMPBEQ R7, $0, nog
++ CMP R8, $0
++ BEQ nog
++
++ MOVD R2, m_procid(R7)
++
++ // TODO: setup TLS.
++
++ // In child, set up new stack
++ MOVD R7, g_m(R8)
++ MOVD R8, g
++ //CALL runtime·stackcheck(SB)
++
++nog:
++ // Call fn
++ BL R9
++
++ // It shouldn't return. If it does, exit that thread.
++ MOVW $111, R2
++ MOVW $SYS_exit, R1
++ SYSCALL
++ BR -2(PC) // keep exiting
++
++TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0
++ MOVD new+0(FP), R2
++ MOVD old+8(FP), R3
++ MOVW $SYS_sigaltstack, R1
++ SYSCALL
++ MOVD $-4095, R3
++ CMPUBLT R2, R3, 2(PC)
++ MOVD R0, 0(R0) // crash
++ RET
++
++TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0
++ MOVW $SYS_sched_yield, R1
++ SYSCALL
++ RET
++
++TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0
++ MOVD pid+0(FP), R2
++ MOVD len+8(FP), R3
++ MOVD buf+16(FP), R4
++ MOVW $SYS_sched_getaffinity, R1
++ SYSCALL
++ MOVW R2, ret+24(FP)
++ RET
++
++// int32 runtime·epollcreate(int32 size);
++TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0
++ MOVW size+0(FP), R2
++ MOVW $SYS_epoll_create, R1
++ SYSCALL
++ MOVW R2, ret+8(FP)
++ RET
++
++// int32 runtime·epollcreate1(int32 flags);
++TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0
++ MOVW flags+0(FP), R2
++ MOVW $SYS_epoll_create1, R1
++ SYSCALL
++ MOVW R2, ret+8(FP)
++ RET
++
++// func epollctl(epfd, op, fd int32, ev *epollEvent) int
++TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0
++ MOVW epfd+0(FP), R2
++ MOVW op+4(FP), R3
++ MOVW fd+8(FP), R4
++ MOVD ev+16(FP), R5
++ MOVW $SYS_epoll_ctl, R1
++ SYSCALL
++ MOVW R2, ret+24(FP)
++ RET
++
++// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
++TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0
++ MOVW epfd+0(FP), R2
++ MOVD ev+8(FP), R3
++ MOVW nev+16(FP), R4
++ MOVW timeout+20(FP), R5
++ MOVW $SYS_epoll_wait, R1
++ SYSCALL
++ MOVW R2, ret+24(FP)
++ RET
++
++// void runtime·closeonexec(int32 fd);
++TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
++ MOVW fd+0(FP), R2 // fd
++ MOVD $2, R3 // F_SETFD
++ MOVD $1, R4 // FD_CLOEXEC
++ MOVW $SYS_fcntl, R1
++ SYSCALL
++ RET
+--- /dev/null
++++ b/src/runtime/sys_s390x.go
+@@ -0,0 +1,48 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package runtime
++
++import "unsafe"
++
++// adjust Gobuf as if it executed a call to fn with context ctxt
++// and then did an immediate Gosave.
++func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
++ if buf.lr != 0 {
++ throw("invalid use of gostartcall")
++ }
++ buf.lr = buf.pc
++ buf.pc = uintptr(fn)
++ buf.ctxt = ctxt
++}
++
++// Called to rewind context saved during morestack back to beginning of function.
++// To help us, the linker emits a jmp back to the beginning right after the
++// call to morestack. We just have to decode and apply that jump.
++func rewindmorestack(buf *gobuf) {
++ var inst uint64
++ if buf.pc&1 == 0 && buf.pc != 0 {
++ inst = *(*uint64)(unsafe.Pointer(buf.pc))
++ //print("runtime: rewind pc=", hex(buf.pc), " to pc=", hex(inst), "\n");
++ if inst>>48 == 0xa7f4 {
++ inst >>= 32
++ inst &= 0xFFFF
++ offset := int64(int16(inst))
++ offset <<= 1
++ buf.pc += uintptr(offset)
++ return
++ } else if inst>>48 == 0xc0f4 {
++ inst >>= 16
++ inst = inst & 0xFFFFFFFF
++ //print("runtime: rewind inst1 = ",hex(inst),"\n")
++ inst = (inst << 1) & 0xFFFFFFFF
++ //print("runtime: rewind inst2 = ",hex(inst),"\n")
++ buf.pc += uintptr(int32(inst))
++ //print("runtime: rewind pc = ",hex(buf.pc),"\n")
++ return
++ }
++ }
++ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
++ throw("runtime: misuse of rewindmorestack")
++}
+--- /dev/null
++++ b/src/runtime/tls_s390x.s
+@@ -0,0 +1,51 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "go_asm.h"
++#include "go_tls.h"
++#include "funcdata.h"
++#include "textflag.h"
++
++// We have to resort to TLS variable to save g (R13).
++// One reason is that external code might trigger
++// SIGSEGV, and our runtime.sigtramp don't even know we
++// are in external code, and will continue to use R13,
++// this might well result in another SIGSEGV.
++
++// save_g saves the g register into pthread-provided
++// thread-local memory, so that we can call externally compiled
++// s390x code that will overwrite this register.
++//
++// If !iscgo, this is a no-op.
++//
++// NOTE: setg_gcc<> assume this clobbers only R10 and R11.
++TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
++ MOVB runtime·iscgo(SB), R10
++ CMPBEQ R10, $0, nocgo
++ MOVW AR0, R11
++ SLD $32, R11
++ MOVW AR1, R11
++ MOVD runtime·tls_g(SB), R10
++ MOVD g, 0(R10)(R11*1)
++nocgo:
++ RET
++
++// load_g loads the g register from pthread-provided
++// thread-local memory, for use after calling externally compiled
++// s390x code that overwrote those registers.
++//
++// This is never called directly from C code (it doesn't have to
++// follow the C ABI), but it may be called from a C context, where the
++// usual Go registers aren't set up.
++//
++// NOTE: _cgo_topofstack assumes this only clobbers g (R13), R10 and R11.
++TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
++ MOVW AR0, R11
++ SLD $32, R11
++ MOVW AR1, R11
++ MOVD runtime·tls_g(SB), R10
++ MOVD 0(R10)(R11*1), g
++ RET
++
++GLOBL runtime·tls_g+0(SB),TLSBSS,$8
+--- a/src/runtime/unaligned1.go
++++ b/src/runtime/unaligned1.go
+@@ -2,7 +2,7 @@
+ // Use of this source code is governed by a BSD-style
+ // license that can be found in the LICENSE file.
+
+-// +build 386 amd64 amd64p32 arm64
++// +build 386 amd64 amd64p32 arm64 s390x
+
+ package runtime
+
+--- /dev/null
++++ b/src/sync/atomic/asm_s390x.s
+@@ -0,0 +1,143 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++TEXT ·SwapInt32(SB),NOSPLIT,$0-20
++ BR ·SwapUint32(SB)
++
++TEXT ·SwapUint32(SB),NOSPLIT,$0-20
++ MOVD addr+0(FP), R3
++ MOVWZ new+8(FP), R4
++ MOVWZ (R3), R5
++repeat:
++ CS R5, R4, (R3) // if (R3)==R5 then (R3)=R4 else R5=(R3)
++ BNE repeat
++ MOVW R5, old+16(FP)
++ RET
++
++TEXT ·SwapInt64(SB),NOSPLIT,$0-24
++ BR ·SwapUint64(SB)
++
++TEXT ·SwapUint64(SB),NOSPLIT,$0-24
++ MOVD addr+0(FP), R3
++ MOVD new+8(FP), R4
++ MOVD (R3), R5
++repeat:
++ CSG R5, R4, (R3) // if (R3)==R5 then (R3)=R4 else R5=(R3)
++ BNE repeat
++ MOVD R5, old+16(FP)
++ RET
++
++TEXT ·SwapUintptr(SB),NOSPLIT,$0-24
++ BR ·SwapUint64(SB)
++
++TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0-17
++ BR ·CompareAndSwapUint32(SB)
++
++TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-17
++ MOVD ptr+0(FP), R3
++ MOVWZ old+8(FP), R4
++ MOVWZ new+12(FP), R5
++ CS R4, R5, 0(R3) // if R4==(R3) then (R3)=R5 else R4=(R3)
++ BNE cas_fail
++ MOVB $1, ret+16(FP)
++ RET
++cas_fail:
++ MOVB $0, ret+16(FP)
++ RET
++
++TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0-25
++ BR ·CompareAndSwapUint64(SB)
++
++TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25
++ BR ·CompareAndSwapUint64(SB)
++
++TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25
++ MOVD ptr+0(FP), R3
++ MOVD old+8(FP), R4
++ MOVD new+16(FP), R5
++ CSG R4, R5, 0(R3) // if R4==(R3) then (R3)=R5 else R4=(R3)
++ BNE cas64_fail
++ MOVB $1, ret+24(FP)
++ RET
++cas64_fail:
++ MOVB $0, ret+24(FP)
++ RET
++
++TEXT ·AddInt32(SB),NOSPLIT,$0-20
++ BR ·AddUint32(SB)
++
++TEXT ·AddUint32(SB),NOSPLIT,$0-20
++ MOVD ptr+0(FP), R4
++ MOVWZ delta+8(FP), R5
++ MOVWZ (R4), R3
++repeat:
++ ADD R3, R5, R6
++ CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
++ BNE repeat
++ MOVW R6, ret+16(FP)
++ RET
++
++TEXT ·AddUintptr(SB),NOSPLIT,$0-24
++ BR ·AddUint64(SB)
++
++TEXT ·AddInt64(SB),NOSPLIT,$0-24
++ BR ·AddUint64(SB)
++
++TEXT ·AddUint64(SB),NOSPLIT,$0-24
++ MOVD ptr+0(FP), R4
++ MOVD delta+8(FP), R5
++ MOVD (R4), R3
++repeat:
++ ADD R3, R5, R6
++ CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
++ BNE repeat
++ MOVD R6, ret+16(FP)
++ RET
++
++TEXT ·LoadInt32(SB),NOSPLIT,$0-12
++ BR ·LoadUint32(SB)
++
++TEXT ·LoadUint32(SB),NOSPLIT,$0-12
++ MOVD addr+0(FP), R3
++ MOVW 0(R3), R4
++ MOVW R4, val+8(FP)
++ RET
++
++TEXT ·LoadInt64(SB),NOSPLIT,$0-16
++ BR ·LoadUint64(SB)
++
++TEXT ·LoadUint64(SB),NOSPLIT,$0-16
++ MOVD addr+0(FP), R3
++ MOVD 0(R3), R4
++ MOVD R4, val+8(FP)
++ RET
++
++TEXT ·LoadUintptr(SB),NOSPLIT,$0-16
++ BR ·LoadPointer(SB)
++
++TEXT ·LoadPointer(SB),NOSPLIT,$0-16
++ BR ·LoadUint64(SB)
++
++TEXT ·StoreInt32(SB),NOSPLIT,$0-12
++ BR ·StoreUint32(SB)
++
++TEXT ·StoreUint32(SB),NOSPLIT,$0-12
++ MOVD ptr+0(FP), R3
++ MOVW val+8(FP), R4
++ MOVW R4, 0(R3)
++ RET
++
++TEXT ·StoreInt64(SB),NOSPLIT,$0-16
++ BR ·StoreUint64(SB)
++
++TEXT ·StoreUint64(SB),NOSPLIT,$0-16
++ MOVD addr+0(FP), R3
++ MOVD val+8(FP), R4
++ MOVD R4, 0(R3)
++ RET
++
++TEXT ·StoreUintptr(SB),NOSPLIT,$0-16
++ BR ·StoreUint64(SB)
+--- /dev/null
++++ b/src/syscall/asm_linux_s390x.s
+@@ -0,0 +1,156 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++#include "textflag.h"
++
++//
++// System calls for s390x, Linux
++//
++
++// func Syscall(trap int64, a1, a2, a3 int64) (r1, r2, err int64)
++TEXT ·Syscall(SB),NOSPLIT,$0-56
++ BL runtime·entersyscall(SB)
++ MOVD a1+8(FP), R2
++ MOVD a2+16(FP), R3
++ MOVD a3+24(FP), R4
++ MOVD $0, R5
++ MOVD $0, R6
++ MOVD $0, R7
++ MOVD trap+0(FP), R1 // syscall entry
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, ok
++ MOVD $-1, r1+32(FP)
++ MOVD $0, r2+40(FP)
++ NEG R2, R2
++ MOVD R2, err+48(FP) // errno
++ BL runtime·exitsyscall(SB)
++ RET
++ok:
++ MOVD R2, r1+32(FP)
++ MOVD R3, r2+40(FP)
++ MOVD $0, err+48(FP) // errno
++ BL runtime·exitsyscall(SB)
++ RET
++
++// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
++TEXT ·Syscall6(SB),NOSPLIT,$0-80
++ BL runtime·entersyscall(SB)
++ MOVD a1+8(FP), R2
++ MOVD a2+16(FP), R3
++ MOVD a3+24(FP), R4
++ MOVD a4+32(FP), R5
++ MOVD a5+40(FP), R6
++ MOVD a6+48(FP), R7
++ MOVD trap+0(FP), R1 // syscall entry
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, ok6
++ MOVD $-1, r1+56(FP)
++ MOVD $0, r2+64(FP)
++ NEG R2, R2
++ MOVD R2, err+72(FP) // errno
++ BL runtime·exitsyscall(SB)
++ RET
++ok6:
++ MOVD R2, r1+56(FP)
++ MOVD R3, r2+64(FP)
++ MOVD $0, err+72(FP) // errno
++ BL runtime·exitsyscall(SB)
++ RET
++
++// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
++TEXT ·RawSyscall(SB),NOSPLIT,$0-56
++ MOVD a1+8(FP), R2
++ MOVD a2+16(FP), R3
++ MOVD a3+24(FP), R4
++ MOVD $0, R5
++ MOVD $0, R6
++ MOVD $0, R7
++ MOVD trap+0(FP), R1 // syscall entry
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, ok1
++ MOVD $-1, r1+32(FP)
++ MOVD $0, r2+40(FP)
++ NEG R2, R2
++ MOVD R2, err+48(FP) // errno
++ RET
++ok1:
++ MOVD R2, r1+32(FP)
++ MOVD R3, r2+40(FP)
++ MOVD $0, err+48(FP) // errno
++ RET
++
++// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
++TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
++ MOVD a1+8(FP), R2
++ MOVD a2+16(FP), R3
++ MOVD a3+24(FP), R4
++ MOVD a4+32(FP), R5
++ MOVD a5+40(FP), R6
++ MOVD a6+48(FP), R7
++ MOVD trap+0(FP), R1 // syscall entry
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, ok2
++ MOVD $-1, r1+56(FP)
++ MOVD $0, r2+64(FP)
++ NEG R2, R2
++ MOVD R2, err+72(FP) // errno
++ RET
++ok2:
++ MOVD R2, r1+56(FP)
++ MOVD R3, r2+64(FP)
++ MOVD $0, err+72(FP) // errno
++ RET
++
++#define SYS_SOCKETCALL 102 /* from zsysnum_linux_s390x.go */
++
++// func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err int)
++// Kernel interface gets call sub-number and pointer to a0.
++TEXT ·socketcall(SB),NOSPLIT,$0-72
++ BL runtime·entersyscall(SB)
++ MOVD $SYS_SOCKETCALL, R1 // syscall entry
++ MOVD call+0(FP), R2 // socket call number
++ MOVD $a0+8(FP), R3 // pointer to call arguments
++ MOVD $0, R4
++ MOVD $0, R5
++ MOVD $0, R6
++ MOVD $0, R7
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, oksock
++ MOVD $-1, n+56(FP)
++ NEG R2, R2
++ MOVD R2, err+64(FP)
++ BL runtime·exitsyscall(SB)
++ RET
++oksock:
++ MOVD R2, n+56(FP)
++ MOVD $0, err+64(FP)
++ CALL runtime·exitsyscall(SB)
++ RET
++
++// func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err int)
++// Kernel interface gets call sub-number and pointer to a0.
++TEXT ·rawsocketcall(SB),NOSPLIT,$0-72
++ MOVD $SYS_SOCKETCALL, R1 // syscall entry
++ MOVD call+0(FP), R2 // socket call number
++ MOVD $a0+8(FP), R3 // pointer to call arguments
++ MOVD $0, R4
++ MOVD $0, R5
++ MOVD $0, R6
++ MOVD $0, R7
++ SYSCALL
++ MOVD $0xfffffffffffff001, R8
++ CMPUBLT R2, R8, oksock1
++ MOVD $-1, n+56(FP)
++ NEG R2, R2
++ MOVD R2, err+64(FP)
++ RET
++oksock1:
++ MOVD R2, n+56(FP)
++ MOVD $0, err+64(FP)
++ RET
+--- a/src/syscall/exec_linux.go
++++ b/src/syscall/exec_linux.go
+@@ -7,6 +7,7 @@
+ package syscall
+
+ import (
++ "runtime"
+ "unsafe"
+ )
+
+@@ -93,7 +94,11 @@
+ // About to call fork.
+ // No more allocation or calls of non-assembly functions.
+ runtime_BeforeFork()
+- r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
++ if runtime.GOARCH == "s390x" {
++ r1, _, err1 = RawSyscall6(SYS_CLONE, 0, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0)
++ } else {
++ r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
++ }
+ if err1 != 0 {
+ runtime_AfterFork()
+ return 0, err1
+--- a/src/syscall/mkall.sh
++++ b/src/syscall/mkall.sh
+@@ -207,6 +207,13 @@
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
++linux_s390x)
++ GOOSARCH_in=syscall_linux_s390x.go
++ unistd_h=/usr/include/asm/unistd.h
++ mkerrors="$mkerrors -m64"
++ mksysnum="./mksysnum_linux.pl $unistd_h"
++ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
++ ;;
+ nacl_386)
+ mkerrors=""
+ mksyscall="./mksyscall.pl -l32 -nacl"
+@@ -288,5 +295,5 @@
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+- if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
++ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go"; fi
+ ) | $run
+--- /dev/null
++++ b/src/syscall/mkpost.go
+@@ -0,0 +1,63 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++// +build ignore
++
++// mkpost processes the output of cgo -godefs to
++// modify the generated types. It is used to clean up
++// the syscall API in an architecture specific manner.
++//
++// mkpost is run after cgo -godefs by mkall.sh.
++package main
++
++import (
++ "fmt"
++ "go/format"
++ "io/ioutil"
++ "log"
++ "os"
++ "regexp"
++)
++
++func main() {
++ b, err := ioutil.ReadAll(os.Stdin)
++ if err != nil {
++ log.Fatal(err)
++ }
++ s := string(b)
++
++ goarch := os.Getenv("GOARCH")
++ goos := os.Getenv("GOOS")
++ if goarch == "s390x" && goos == "linux" {
++ // Export the types of PtraceRegs fields.
++ re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
++ s = re.ReplaceAllString(s, "Ptrace$1")
++
++ // Replace padding fields inserted by cgo with blank identifiers.
++ re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
++ s = re.ReplaceAllString(s, "_")
++
++ // Replace other unwanted fields with blank identifiers.
++ re = regexp.MustCompile("X_[A-Za-z0-9_]*")
++ s = re.ReplaceAllString(s, "_")
++
++ // Force the type of RawSockaddr.Data to [14]int8 to match
++ // the existing gccgo API.
++ re = regexp.MustCompile("(Data\\s+\\[14\\])uint8")
++ s = re.ReplaceAllString(s, "${1}int8")
++ }
++
++ // gofmt
++ b, err = format.Source([]byte(s))
++ if err != nil {
++ log.Fatal(err)
++ }
++
++ // Append this command to the header to show where the new file
++ // came from.
++ re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
++ s = re.ReplaceAllString(string(b), "$1 | go run mkpost.go")
++
++ fmt.Print(s)
++}
+--- a/src/syscall/mksyscall.pl
++++ b/src/syscall/mksyscall.pl
+@@ -100,7 +100,7 @@
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, errno error)
+ # Split into name, in params, out params.
+- if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
++ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)_?SYS_[A-Z0-9_]+))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+--- a/src/syscall/syscall_linux.go
++++ b/src/syscall/syscall_linux.go
+@@ -301,7 +301,8 @@
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+- sa.raw.Path[i] = int8(name[i])
++ bp := (*byte)(unsafe.Pointer(&sa.raw.Path[i]))
++ *bp = name[i]
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+--- /dev/null
++++ b/src/syscall/syscall_linux_s390x.go
+@@ -0,0 +1,299 @@
++// Copyright 2016 The Go Authors. All rights reserved.
++// Use of this source code is governed by a BSD-style
++// license that can be found in the LICENSE file.
++
++package syscall
++
++import "unsafe"
++
++const (
++ _SYS_dup = SYS_DUP2
++ _SYS_getdents = SYS_GETDENTS64
++)
++
++//sys Dup2(oldfd int, newfd int) (err error)
++//sys Fchown(fd int, uid int, gid int) (err error)
++//sys Fstat(fd int, stat *Stat_t) (err error)
++//sys Fstatfs(fd int, buf *Statfs_t) (err error)
++//sys Ftruncate(fd int, length int64) (err error)
++//sysnb Getegid() (egid int)
++//sysnb Geteuid() (euid int)
++//sysnb Getgid() (gid int)
++//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT
++//sysnb Getuid() (uid int)
++//sysnb InotifyInit() (fd int, err error)
++//sys Lchown(path string, uid int, gid int) (err error)
++//sys Lstat(path string, stat *Stat_t) (err error)
++//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
++//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
++//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
++//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
++//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
++//sys Setfsgid(gid int) (err error)
++//sys Setfsuid(uid int) (err error)
++//sysnb Setregid(rgid int, egid int) (err error)
++//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
++//sysnb Setresuid(ruid int, euid int, suid int) (err error)
++//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
++//sysnb Setreuid(ruid int, euid int) (err error)
++//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
++//sys Stat(path string, stat *Stat_t) (err error)
++//sys Statfs(path string, buf *Statfs_t) (err error)
++//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE
++//sys Truncate(path string, length int64) (err error)
++//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
++//sysnb setgroups(n int, list *_Gid_t) (err error)
++
++func Getpagesize() int { return 4096 }
++
++//sysnb Gettimeofday(tv *Timeval) (err error)
++
++func Time(t *Time_t) (tt Time_t, err error) {
++ var tv Timeval
++ err = Gettimeofday(&tv)
++ if err != nil {
++ return 0, err
++ }
++ if t != nil {
++ *t = Time_t(tv.Sec)
++ }
++ return Time_t(tv.Sec), nil
++}
++
++func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
++
++func NsecToTimespec(nsec int64) (ts Timespec) {
++ ts.Sec = nsec / 1e9
++ ts.Nsec = nsec % 1e9
++ return
++}
++
++func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
++
++func NsecToTimeval(nsec int64) (tv Timeval) {
++ nsec += 999 // round up to microsecond
++ tv.Sec = nsec / 1e9
++ tv.Usec = nsec % 1e9 / 1e3
++ return
++}
++
++func Pipe(p []int) (err error) {
++ if len(p) != 2 {
++ return EINVAL
++ }
++ var pp [2]_C_int
++ err = pipe2(&pp, 0)
++ p[0] = int(pp[0])
++ p[1] = int(pp[1])
++ return
++}
++
++//sysnb pipe2(p *[2]_C_int, flags int) (err error)
++
++func Pipe2(p []int, flags int) (err error) {
++ if len(p) != 2 {
++ return EINVAL
++ }
++ var pp [2]_C_int
++ err = pipe2(&pp, flags)
++ p[0] = int(pp[0])
++ p[1] = int(pp[1])
++ return
++}
++
++// Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct.
++// mmap2 also requires arguments to be passed in a struct; it is currently not exposed in .
++func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
++ mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)}
++ r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0)
++ use(unsafe.Pointer(&mmap_args[0]))
++ xaddr = uintptr(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// On s390x Linux, all the socket calls go through an extra indirection.
++// The arguments to the underlying system call are the number below
++// and a pointer to an array of uintptr. We hide the pointer in the
++// socketcall assembly to avoid allocation on every system call.
++
++const (
++ // see linux/net.h
++ _SOCKET = 1
++ _BIND = 2
++ _CONNECT = 3
++ _LISTEN = 4
++ _ACCEPT = 5
++ _GETSOCKNAME = 6
++ _GETPEERNAME = 7
++ _SOCKETPAIR = 8
++ _SEND = 9
++ _RECV = 10
++ _SENDTO = 11
++ _RECVFROM = 12
++ _SHUTDOWN = 13
++ _SETSOCKOPT = 14
++ _GETSOCKOPT = 15
++ _SENDMSG = 16
++ _RECVMSG = 17
++ _ACCEPT4 = 18
++ _RECVMMSG = 19
++ _SENDMMSG = 20
++)
++
++func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
++func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
++
++func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
++ fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
++ fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
++ _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
++ _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) {
++ _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
++ _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
++ _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func socket(domain int, typ int, proto int) (fd int, err error) {
++ fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
++ _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
++ _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
++ var base uintptr
++ if len(p) > 0 {
++ base = uintptr(unsafe.Pointer(&p[0]))
++ }
++ n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
++ var base uintptr
++ if len(p) > 0 {
++ base = uintptr(unsafe.Pointer(&p[0]))
++ }
++ _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen))
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
++ n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
++ n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func Listen(s int, n int) (err error) {
++ _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func Shutdown(s, how int) (err error) {
++ _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0)
++ if e != 0 {
++ err = e
++ }
++ return
++}
++
++func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr }
++
++func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc }
++
++func (iov *Iovec) SetLen(length int) {
++ iov.Len = uint64(length)
++}
++
++func (msghdr *Msghdr) SetControllen(length int) {
++ msghdr.Controllen = uint64(length)
++}
++
++func (cmsg *Cmsghdr) SetLen(length int) {
++ cmsg.Len = uint64(length)
++}
+--- a/src/syscall/types_linux.go
++++ b/src/syscall/types_linux.go
+@@ -77,8 +77,8 @@
+ // copied from /usr/include/linux/un.h
+ struct my_sockaddr_un {
+ sa_family_t sun_family;
+-#if defined(__ARM_EABI__) || defined(__powerpc64__)
+- // on ARM and PPC char is by default unsigned
++#if defined(__ARM_EABI__) || defined(__powerpc64__) || defined(__s390x__)
++ // on ARM, PPC and s390x char is by default unsigned
+ signed char sun_path[108];
+ #else
+ char sun_path[108];
+@@ -93,10 +93,22 @@
+ typedef struct pt_regs PtraceRegs;
+ #elif defined(__mips__)
+ typedef struct user PtraceRegs;
++#elif defined(__s390x__)
++typedef struct _user_regs_struct PtraceRegs;
+ #else
+ typedef struct user_regs_struct PtraceRegs;
+ #endif
+
++#if defined(__s390x__)
++typedef struct _user_psw_struct ptracePsw;
++typedef struct _user_fpregs_struct ptraceFpregs;
++typedef struct _user_per_struct ptracePer;
++#else
++typedef struct {} ptracePsw;
++typedef struct {} ptraceFpregs;
++typedef struct {} ptracePer;
++#endif
++
+ // The real epoll_event is a union, and godefs doesn't handle it well.
+ struct my_epoll_event {
+ uint32_t events;
+@@ -105,7 +117,7 @@
+ // alignment requirements of EABI
+ int32_t padFd;
+ #endif
+-#ifdef __powerpc64__
++#if defined(__powerpc64__) || defined(__s390x__)
+ int32_t _padFd;
+ #endif
+ int32_t fd;
+@@ -370,6 +382,13 @@
+ // Register structures
+ type PtraceRegs C.PtraceRegs
+
++// Structures contained in PtraceRegs on s390x (exported by post.go)
++type ptracePsw C.ptracePsw
++
++type ptraceFpregs C.ptraceFpregs
++
++type ptracePer C.ptracePer
++
+ // Misc
+
+ type FdSet C.fd_set
+--- /dev/null
++++ b/src/syscall/zerrors_linux_s390x.go
+@@ -0,0 +1,1942 @@
++// mkerrors.sh -m64
++// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
++
++// Created by cgo -godefs - DO NOT EDIT
++// cgo -godefs -- -m64 _const.go
++
++// +build s390x,linux
++
++package syscall
++
++const (
++ AF_ALG = 0x26
++ AF_APPLETALK = 0x5
++ AF_ASH = 0x12
++ AF_ATMPVC = 0x8
++ AF_ATMSVC = 0x14
++ AF_AX25 = 0x3
++ AF_BLUETOOTH = 0x1f
++ AF_BRIDGE = 0x7
++ AF_CAIF = 0x25
++ AF_CAN = 0x1d
++ AF_DECnet = 0xc
++ AF_ECONET = 0x13
++ AF_FILE = 0x1
++ AF_IEEE802154 = 0x24
++ AF_INET = 0x2
++ AF_INET6 = 0xa
++ AF_IPX = 0x4
++ AF_IRDA = 0x17
++ AF_ISDN = 0x22
++ AF_IUCV = 0x20
++ AF_KEY = 0xf
++ AF_LLC = 0x1a
++ AF_LOCAL = 0x1
++ AF_MAX = 0x29
++ AF_NETBEUI = 0xd
++ AF_NETLINK = 0x10
++ AF_NETROM = 0x6
++ AF_NFC = 0x27
++ AF_PACKET = 0x11
++ AF_PHONET = 0x23
++ AF_PPPOX = 0x18
++ AF_RDS = 0x15
++ AF_ROSE = 0xb
++ AF_ROUTE = 0x10
++ AF_RXRPC = 0x21
++ AF_SECURITY = 0xe
++ AF_SNA = 0x16
++ AF_TIPC = 0x1e
++ AF_UNIX = 0x1
++ AF_UNSPEC = 0x0
++ AF_VSOCK = 0x28
++ AF_WANPIPE = 0x19
++ AF_X25 = 0x9
++ ARPHRD_6LOWPAN = 0x339
++ ARPHRD_ADAPT = 0x108
++ ARPHRD_APPLETLK = 0x8
++ ARPHRD_ARCNET = 0x7
++ ARPHRD_ASH = 0x30d
++ ARPHRD_ATM = 0x13
++ ARPHRD_AX25 = 0x3
++ ARPHRD_BIF = 0x307
++ ARPHRD_CAIF = 0x336
++ ARPHRD_CAN = 0x118
++ ARPHRD_CHAOS = 0x5
++ ARPHRD_CISCO = 0x201
++ ARPHRD_CSLIP = 0x101
++ ARPHRD_CSLIP6 = 0x103
++ ARPHRD_DDCMP = 0x205
++ ARPHRD_DLCI = 0xf
++ ARPHRD_ECONET = 0x30e
++ ARPHRD_EETHER = 0x2
++ ARPHRD_ETHER = 0x1
++ ARPHRD_EUI64 = 0x1b
++ ARPHRD_FCAL = 0x311
++ ARPHRD_FCFABRIC = 0x313
++ ARPHRD_FCPL = 0x312
++ ARPHRD_FCPP = 0x310
++ ARPHRD_FDDI = 0x306
++ ARPHRD_FRAD = 0x302
++ ARPHRD_HDLC = 0x201
++ ARPHRD_HIPPI = 0x30c
++ ARPHRD_HWX25 = 0x110
++ ARPHRD_IEEE1394 = 0x18
++ ARPHRD_IEEE802 = 0x6
++ ARPHRD_IEEE80211 = 0x321
++ ARPHRD_IEEE80211_PRISM = 0x322
++ ARPHRD_IEEE80211_RADIOTAP = 0x323
++ ARPHRD_IEEE802154 = 0x324
++ ARPHRD_IEEE802154_MONITOR = 0x325
++ ARPHRD_IEEE802_TR = 0x320
++ ARPHRD_INFINIBAND = 0x20
++ ARPHRD_IP6GRE = 0x337
++ ARPHRD_IPDDP = 0x309
++ ARPHRD_IPGRE = 0x30a
++ ARPHRD_IRDA = 0x30f
++ ARPHRD_LAPB = 0x204
++ ARPHRD_LOCALTLK = 0x305
++ ARPHRD_LOOPBACK = 0x304
++ ARPHRD_METRICOM = 0x17
++ ARPHRD_NETLINK = 0x338
++ ARPHRD_NETROM = 0x0
++ ARPHRD_NONE = 0xfffe
++ ARPHRD_PHONET = 0x334
++ ARPHRD_PHONET_PIPE = 0x335
++ ARPHRD_PIMREG = 0x30b
++ ARPHRD_PPP = 0x200
++ ARPHRD_PRONET = 0x4
++ ARPHRD_RAWHDLC = 0x206
++ ARPHRD_ROSE = 0x10e
++ ARPHRD_RSRVD = 0x104
++ ARPHRD_SIT = 0x308
++ ARPHRD_SKIP = 0x303
++ ARPHRD_SLIP = 0x100
++ ARPHRD_SLIP6 = 0x102
++ ARPHRD_TUNNEL = 0x300
++ ARPHRD_TUNNEL6 = 0x301
++ ARPHRD_VOID = 0xffff
++ ARPHRD_X25 = 0x10f
++ B0 = 0x0
++ B1000000 = 0x1008
++ B110 = 0x3
++ B115200 = 0x1002
++ B1152000 = 0x1009
++ B1200 = 0x9
++ B134 = 0x4
++ B150 = 0x5
++ B1500000 = 0x100a
++ B1800 = 0xa
++ B19200 = 0xe
++ B200 = 0x6
++ B2000000 = 0x100b
++ B230400 = 0x1003
++ B2400 = 0xb
++ B2500000 = 0x100c
++ B300 = 0x7
++ B3000000 = 0x100d
++ B3500000 = 0x100e
++ B38400 = 0xf
++ B4000000 = 0x100f
++ B460800 = 0x1004
++ B4800 = 0xc
++ B50 = 0x1
++ B500000 = 0x1005
++ B57600 = 0x1001
++ B576000 = 0x1006
++ B600 = 0x8
++ B75 = 0x2
++ B921600 = 0x1007
++ B9600 = 0xd
++ BPF_A = 0x10
++ BPF_ABS = 0x20
++ BPF_ADD = 0x0
++ BPF_ALU = 0x4
++ BPF_AND = 0x50
++ BPF_B = 0x10
++ BPF_DIV = 0x30
++ BPF_H = 0x8
++ BPF_IMM = 0x0
++ BPF_IND = 0x40
++ BPF_JA = 0x0
++ BPF_JEQ = 0x10
++ BPF_JGE = 0x30
++ BPF_JGT = 0x20
++ BPF_JMP = 0x5
++ BPF_JSET = 0x40
++ BPF_K = 0x0
++ BPF_LD = 0x0
++ BPF_LDX = 0x1
++ BPF_LEN = 0x80
++ BPF_LL_OFF = -0x200000
++ BPF_LSH = 0x60
++ BPF_MAJOR_VERSION = 0x1
++ BPF_MAXINSNS = 0x1000
++ BPF_MEM = 0x60
++ BPF_MEMWORDS = 0x10
++ BPF_MINOR_VERSION = 0x1
++ BPF_MISC = 0x7
++ BPF_MOD = 0x90
++ BPF_MSH = 0xa0
++ BPF_MUL = 0x20
++ BPF_NEG = 0x80
++ BPF_NET_OFF = -0x100000
++ BPF_OR = 0x40
++ BPF_RET = 0x6
++ BPF_RSH = 0x70
++ BPF_ST = 0x2
++ BPF_STX = 0x3
++ BPF_SUB = 0x10
++ BPF_TAX = 0x0
++ BPF_TXA = 0x80
++ BPF_W = 0x0
++ BPF_X = 0x8
++ BPF_XOR = 0xa0
++ BRKINT = 0x2
++ CFLUSH = 0xf
++ CLOCAL = 0x800
++ CLONE_CHILD_CLEARTID = 0x200000
++ CLONE_CHILD_SETTID = 0x1000000
++ CLONE_DETACHED = 0x400000
++ CLONE_FILES = 0x400
++ CLONE_FS = 0x200
++ CLONE_IO = 0x80000000
++ CLONE_NEWCGROUP = 0x2000000
++ CLONE_NEWIPC = 0x8000000
++ CLONE_NEWNET = 0x40000000
++ CLONE_NEWNS = 0x20000
++ CLONE_NEWPID = 0x20000000
++ CLONE_NEWUSER = 0x10000000
++ CLONE_NEWUTS = 0x4000000
++ CLONE_PARENT = 0x8000
++ CLONE_PARENT_SETTID = 0x100000
++ CLONE_PTRACE = 0x2000
++ CLONE_SETTLS = 0x80000
++ CLONE_SIGHAND = 0x800
++ CLONE_SYSVSEM = 0x40000
++ CLONE_THREAD = 0x10000
++ CLONE_UNTRACED = 0x800000
++ CLONE_VFORK = 0x4000
++ CLONE_VM = 0x100
++ CREAD = 0x80
++ CS5 = 0x0
++ CS6 = 0x10
++ CS7 = 0x20
++ CS8 = 0x30
++ CSIGNAL = 0xff
++ CSIZE = 0x30
++ CSTART = 0x11
++ CSTATUS = 0x0
++ CSTOP = 0x13
++ CSTOPB = 0x40
++ CSUSP = 0x1a
++ DT_BLK = 0x6
++ DT_CHR = 0x2
++ DT_DIR = 0x4
++ DT_FIFO = 0x1
++ DT_LNK = 0xa
++ DT_REG = 0x8
++ DT_SOCK = 0xc
++ DT_UNKNOWN = 0x0
++ DT_WHT = 0xe
++ ECHO = 0x8
++ ECHOCTL = 0x200
++ ECHOE = 0x10
++ ECHOK = 0x20
++ ECHOKE = 0x800
++ ECHONL = 0x40
++ ECHOPRT = 0x400
++ ENCODING_DEFAULT = 0x0
++ ENCODING_FM_MARK = 0x3
++ ENCODING_FM_SPACE = 0x4
++ ENCODING_MANCHESTER = 0x5
++ ENCODING_NRZ = 0x1
++ ENCODING_NRZI = 0x2
++ EPOLLERR = 0x8
++ EPOLLET = 0x80000000
++ EPOLLHUP = 0x10
++ EPOLLIN = 0x1
++ EPOLLMSG = 0x400
++ EPOLLONESHOT = 0x40000000
++ EPOLLOUT = 0x4
++ EPOLLPRI = 0x2
++ EPOLLRDBAND = 0x80
++ EPOLLRDHUP = 0x2000
++ EPOLLRDNORM = 0x40
++ EPOLLWAKEUP = 0x20000000
++ EPOLLWRBAND = 0x200
++ EPOLLWRNORM = 0x100
++ EPOLL_CLOEXEC = 0x80000
++ EPOLL_CTL_ADD = 0x1
++ EPOLL_CTL_DEL = 0x2
++ EPOLL_CTL_MOD = 0x3
++ ETH_P_1588 = 0x88f7
++ ETH_P_8021AD = 0x88a8
++ ETH_P_8021AH = 0x88e7
++ ETH_P_8021Q = 0x8100
++ ETH_P_80221 = 0x8917
++ ETH_P_802_2 = 0x4
++ ETH_P_802_3 = 0x1
++ ETH_P_802_3_MIN = 0x600
++ ETH_P_802_EX1 = 0x88b5
++ ETH_P_AARP = 0x80f3
++ ETH_P_AF_IUCV = 0xfbfb
++ ETH_P_ALL = 0x3
++ ETH_P_AOE = 0x88a2
++ ETH_P_ARCNET = 0x1a
++ ETH_P_ARP = 0x806
++ ETH_P_ATALK = 0x809b
++ ETH_P_ATMFATE = 0x8884
++ ETH_P_ATMMPOA = 0x884c
++ ETH_P_AX25 = 0x2
++ ETH_P_BATMAN = 0x4305
++ ETH_P_BPQ = 0x8ff
++ ETH_P_CAIF = 0xf7
++ ETH_P_CAN = 0xc
++ ETH_P_CANFD = 0xd
++ ETH_P_CONTROL = 0x16
++ ETH_P_CUST = 0x6006
++ ETH_P_DDCMP = 0x6
++ ETH_P_DEC = 0x6000
++ ETH_P_DIAG = 0x6005
++ ETH_P_DNA_DL = 0x6001
++ ETH_P_DNA_RC = 0x6002
++ ETH_P_DNA_RT = 0x6003
++ ETH_P_DSA = 0x1b
++ ETH_P_ECONET = 0x18
++ ETH_P_EDSA = 0xdada
++ ETH_P_FCOE = 0x8906
++ ETH_P_FIP = 0x8914
++ ETH_P_HDLC = 0x19
++ ETH_P_IEEE802154 = 0xf6
++ ETH_P_IEEEPUP = 0xa00
++ ETH_P_IEEEPUPAT = 0xa01
++ ETH_P_IP = 0x800
++ ETH_P_IPV6 = 0x86dd
++ ETH_P_IPX = 0x8137
++ ETH_P_IRDA = 0x17
++ ETH_P_LAT = 0x6004
++ ETH_P_LINK_CTL = 0x886c
++ ETH_P_LOCALTALK = 0x9
++ ETH_P_LOOP = 0x60
++ ETH_P_LOOPBACK = 0x9000
++ ETH_P_MOBITEX = 0x15
++ ETH_P_MPLS_MC = 0x8848
++ ETH_P_MPLS_UC = 0x8847
++ ETH_P_MVRP = 0x88f5
++ ETH_P_PAE = 0x888e
++ ETH_P_PAUSE = 0x8808
++ ETH_P_PHONET = 0xf5
++ ETH_P_PPPTALK = 0x10
++ ETH_P_PPP_DISC = 0x8863
++ ETH_P_PPP_MP = 0x8
++ ETH_P_PPP_SES = 0x8864
++ ETH_P_PRP = 0x88fb
++ ETH_P_PUP = 0x200
++ ETH_P_PUPAT = 0x201
++ ETH_P_QINQ1 = 0x9100
++ ETH_P_QINQ2 = 0x9200
++ ETH_P_QINQ3 = 0x9300
++ ETH_P_RARP = 0x8035
++ ETH_P_SCA = 0x6007
++ ETH_P_SLOW = 0x8809
++ ETH_P_SNAP = 0x5
++ ETH_P_TDLS = 0x890d
++ ETH_P_TEB = 0x6558
++ ETH_P_TIPC = 0x88ca
++ ETH_P_TRAILER = 0x1c
++ ETH_P_TR_802_2 = 0x11
++ ETH_P_TSN = 0x22f0
++ ETH_P_WAN_PPP = 0x7
++ ETH_P_WCCP = 0x883e
++ ETH_P_X25 = 0x805
++ ETH_P_XDSA = 0xf8
++ EXTA = 0xe
++ EXTB = 0xf
++ EXTPROC = 0x10000
++ FD_CLOEXEC = 0x1
++ FD_SETSIZE = 0x400
++ FLUSHO = 0x1000
++ F_DUPFD = 0x0
++ F_DUPFD_CLOEXEC = 0x406
++ F_EXLCK = 0x4
++ F_GETFD = 0x1
++ F_GETFL = 0x3
++ F_GETLEASE = 0x401
++ F_GETLK = 0x5
++ F_GETLK64 = 0x5
++ F_GETOWN = 0x9
++ F_GETOWN_EX = 0x10
++ F_GETPIPE_SZ = 0x408
++ F_GETSIG = 0xb
++ F_LOCK = 0x1
++ F_NOTIFY = 0x402
++ F_OFD_GETLK = 0x24
++ F_OFD_SETLK = 0x25
++ F_OFD_SETLKW = 0x26
++ F_OK = 0x0
++ F_RDLCK = 0x0
++ F_SETFD = 0x2
++ F_SETFL = 0x4
++ F_SETLEASE = 0x400
++ F_SETLK = 0x6
++ F_SETLK64 = 0x6
++ F_SETLKW = 0x7
++ F_SETLKW64 = 0x7
++ F_SETOWN = 0x8
++ F_SETOWN_EX = 0xf
++ F_SETPIPE_SZ = 0x407
++ F_SETSIG = 0xa
++ F_SHLCK = 0x8
++ F_TEST = 0x3
++ F_TLOCK = 0x2
++ F_ULOCK = 0x0
++ F_UNLCK = 0x2
++ F_WRLCK = 0x1
++ HUPCL = 0x400
++ ICANON = 0x2
++ ICMPV6_FILTER = 0x1
++ ICRNL = 0x100
++ IEXTEN = 0x8000
++ IFA_F_DADFAILED = 0x8
++ IFA_F_DEPRECATED = 0x20
++ IFA_F_HOMEADDRESS = 0x10
++ IFA_F_MANAGETEMPADDR = 0x100
++ IFA_F_MCAUTOJOIN = 0x400
++ IFA_F_NODAD = 0x2
++ IFA_F_NOPREFIXROUTE = 0x200
++ IFA_F_OPTIMISTIC = 0x4
++ IFA_F_PERMANENT = 0x80
++ IFA_F_SECONDARY = 0x1
++ IFA_F_STABLE_PRIVACY = 0x800
++ IFA_F_TEMPORARY = 0x1
++ IFA_F_TENTATIVE = 0x40
++ IFA_MAX = 0x8
++ IFF_ALLMULTI = 0x200
++ IFF_ATTACH_QUEUE = 0x200
++ IFF_AUTOMEDIA = 0x4000
++ IFF_BROADCAST = 0x2
++ IFF_DEBUG = 0x4
++ IFF_DETACH_QUEUE = 0x400
++ IFF_DORMANT = 0x20000
++ IFF_DYNAMIC = 0x8000
++ IFF_ECHO = 0x40000
++ IFF_LOOPBACK = 0x8
++ IFF_LOWER_UP = 0x10000
++ IFF_MASTER = 0x400
++ IFF_MULTICAST = 0x1000
++ IFF_MULTI_QUEUE = 0x100
++ IFF_NOARP = 0x80
++ IFF_NOFILTER = 0x1000
++ IFF_NOTRAILERS = 0x20
++ IFF_NO_PI = 0x1000
++ IFF_ONE_QUEUE = 0x2000
++ IFF_PERSIST = 0x800
++ IFF_POINTOPOINT = 0x10
++ IFF_PORTSEL = 0x2000
++ IFF_PROMISC = 0x100
++ IFF_RUNNING = 0x40
++ IFF_SLAVE = 0x800
++ IFF_TAP = 0x2
++ IFF_TUN = 0x1
++ IFF_TUN_EXCL = 0x8000
++ IFF_UP = 0x1
++ IFF_VNET_HDR = 0x4000
++ IFF_VOLATILE = 0x70c5a
++ IFNAMSIZ = 0x10
++ IGNBRK = 0x1
++ IGNCR = 0x80
++ IGNPAR = 0x4
++ IMAXBEL = 0x2000
++ INLCR = 0x40
++ INPCK = 0x10
++ IN_ACCESS = 0x1
++ IN_ALL_EVENTS = 0xfff
++ IN_ATTRIB = 0x4
++ IN_CLASSA_HOST = 0xffffff
++ IN_CLASSA_MAX = 0x80
++ IN_CLASSA_NET = 0xff000000
++ IN_CLASSA_NSHIFT = 0x18
++ IN_CLASSB_HOST = 0xffff
++ IN_CLASSB_MAX = 0x10000
++ IN_CLASSB_NET = 0xffff0000
++ IN_CLASSB_NSHIFT = 0x10
++ IN_CLASSC_HOST = 0xff
++ IN_CLASSC_NET = 0xffffff00
++ IN_CLASSC_NSHIFT = 0x8
++ IN_CLOEXEC = 0x80000
++ IN_CLOSE = 0x18
++ IN_CLOSE_NOWRITE = 0x10
++ IN_CLOSE_WRITE = 0x8
++ IN_CREATE = 0x100
++ IN_DELETE = 0x200
++ IN_DELETE_SELF = 0x400
++ IN_DONT_FOLLOW = 0x2000000
++ IN_EXCL_UNLINK = 0x4000000
++ IN_IGNORED = 0x8000
++ IN_ISDIR = 0x40000000
++ IN_LOOPBACKNET = 0x7f
++ IN_MASK_ADD = 0x20000000
++ IN_MODIFY = 0x2
++ IN_MOVE = 0xc0
++ IN_MOVED_FROM = 0x40
++ IN_MOVED_TO = 0x80
++ IN_MOVE_SELF = 0x800
++ IN_NONBLOCK = 0x800
++ IN_ONESHOT = 0x80000000
++ IN_ONLYDIR = 0x1000000
++ IN_OPEN = 0x20
++ IN_Q_OVERFLOW = 0x4000
++ IN_UNMOUNT = 0x2000
++ IPPROTO_AH = 0x33
++ IPPROTO_BEETPH = 0x5e
++ IPPROTO_COMP = 0x6c
++ IPPROTO_DCCP = 0x21
++ IPPROTO_DSTOPTS = 0x3c
++ IPPROTO_EGP = 0x8
++ IPPROTO_ENCAP = 0x62
++ IPPROTO_ESP = 0x32
++ IPPROTO_FRAGMENT = 0x2c
++ IPPROTO_GRE = 0x2f
++ IPPROTO_HOPOPTS = 0x0
++ IPPROTO_ICMP = 0x1
++ IPPROTO_ICMPV6 = 0x3a
++ IPPROTO_IDP = 0x16
++ IPPROTO_IGMP = 0x2
++ IPPROTO_IP = 0x0
++ IPPROTO_IPIP = 0x4
++ IPPROTO_IPV6 = 0x29
++ IPPROTO_MH = 0x87
++ IPPROTO_MTP = 0x5c
++ IPPROTO_NONE = 0x3b
++ IPPROTO_PIM = 0x67
++ IPPROTO_PUP = 0xc
++ IPPROTO_RAW = 0xff
++ IPPROTO_ROUTING = 0x2b
++ IPPROTO_RSVP = 0x2e
++ IPPROTO_SCTP = 0x84
++ IPPROTO_TCP = 0x6
++ IPPROTO_TP = 0x1d
++ IPPROTO_UDP = 0x11
++ IPPROTO_UDPLITE = 0x88
++ IPV6_2292DSTOPTS = 0x4
++ IPV6_2292HOPLIMIT = 0x8
++ IPV6_2292HOPOPTS = 0x3
++ IPV6_2292PKTINFO = 0x2
++ IPV6_2292PKTOPTIONS = 0x6
++ IPV6_2292RTHDR = 0x5
++ IPV6_ADDRFORM = 0x1
++ IPV6_ADD_MEMBERSHIP = 0x14
++ IPV6_AUTHHDR = 0xa
++ IPV6_CHECKSUM = 0x7
++ IPV6_DROP_MEMBERSHIP = 0x15
++ IPV6_DSTOPTS = 0x3b
++ IPV6_HOPLIMIT = 0x34
++ IPV6_HOPOPTS = 0x36
++ IPV6_IPSEC_POLICY = 0x22
++ IPV6_JOIN_ANYCAST = 0x1b
++ IPV6_JOIN_GROUP = 0x14
++ IPV6_LEAVE_ANYCAST = 0x1c
++ IPV6_LEAVE_GROUP = 0x15
++ IPV6_MTU = 0x18
++ IPV6_MTU_DISCOVER = 0x17
++ IPV6_MULTICAST_HOPS = 0x12
++ IPV6_MULTICAST_IF = 0x11
++ IPV6_MULTICAST_LOOP = 0x13
++ IPV6_NEXTHOP = 0x9
++ IPV6_PKTINFO = 0x32
++ IPV6_PMTUDISC_DO = 0x2
++ IPV6_PMTUDISC_DONT = 0x0
++ IPV6_PMTUDISC_INTERFACE = 0x4
++ IPV6_PMTUDISC_OMIT = 0x5
++ IPV6_PMTUDISC_PROBE = 0x3
++ IPV6_PMTUDISC_WANT = 0x1
++ IPV6_RECVDSTOPTS = 0x3a
++ IPV6_RECVERR = 0x19
++ IPV6_RECVHOPLIMIT = 0x33
++ IPV6_RECVHOPOPTS = 0x35
++ IPV6_RECVPKTINFO = 0x31
++ IPV6_RECVRTHDR = 0x38
++ IPV6_RECVTCLASS = 0x42
++ IPV6_ROUTER_ALERT = 0x16
++ IPV6_RTHDR = 0x39
++ IPV6_RTHDRDSTOPTS = 0x37
++ IPV6_RTHDR_LOOSE = 0x0
++ IPV6_RTHDR_STRICT = 0x1
++ IPV6_RTHDR_TYPE_0 = 0x0
++ IPV6_RXDSTOPTS = 0x3b
++ IPV6_RXHOPOPTS = 0x36
++ IPV6_TCLASS = 0x43
++ IPV6_UNICAST_HOPS = 0x10
++ IPV6_V6ONLY = 0x1a
++ IPV6_XFRM_POLICY = 0x23
++ IP_ADD_MEMBERSHIP = 0x23
++ IP_ADD_SOURCE_MEMBERSHIP = 0x27
++ IP_BLOCK_SOURCE = 0x26
++ IP_DEFAULT_MULTICAST_LOOP = 0x1
++ IP_DEFAULT_MULTICAST_TTL = 0x1
++ IP_DF = 0x4000
++ IP_DROP_MEMBERSHIP = 0x24
++ IP_DROP_SOURCE_MEMBERSHIP = 0x28
++ IP_FREEBIND = 0xf
++ IP_HDRINCL = 0x3
++ IP_IPSEC_POLICY = 0x10
++ IP_MAXPACKET = 0xffff
++ IP_MAX_MEMBERSHIPS = 0x14
++ IP_MF = 0x2000
++ IP_MINTTL = 0x15
++ IP_MSFILTER = 0x29
++ IP_MSS = 0x240
++ IP_MTU = 0xe
++ IP_MTU_DISCOVER = 0xa
++ IP_MULTICAST_ALL = 0x31
++ IP_MULTICAST_IF = 0x20
++ IP_MULTICAST_LOOP = 0x22
++ IP_MULTICAST_TTL = 0x21
++ IP_NODEFRAG = 0x16
++ IP_OFFMASK = 0x1fff
++ IP_OPTIONS = 0x4
++ IP_ORIGDSTADDR = 0x14
++ IP_PASSSEC = 0x12
++ IP_PKTINFO = 0x8
++ IP_PKTOPTIONS = 0x9
++ IP_PMTUDISC = 0xa
++ IP_PMTUDISC_DO = 0x2
++ IP_PMTUDISC_DONT = 0x0
++ IP_PMTUDISC_INTERFACE = 0x4
++ IP_PMTUDISC_OMIT = 0x5
++ IP_PMTUDISC_PROBE = 0x3
++ IP_PMTUDISC_WANT = 0x1
++ IP_RECVERR = 0xb
++ IP_RECVOPTS = 0x6
++ IP_RECVORIGDSTADDR = 0x14
++ IP_RECVRETOPTS = 0x7
++ IP_RECVTOS = 0xd
++ IP_RECVTTL = 0xc
++ IP_RETOPTS = 0x7
++ IP_RF = 0x8000
++ IP_ROUTER_ALERT = 0x5
++ IP_TOS = 0x1
++ IP_TRANSPARENT = 0x13
++ IP_TTL = 0x2
++ IP_UNBLOCK_SOURCE = 0x25
++ IP_UNICAST_IF = 0x32
++ IP_XFRM_POLICY = 0x11
++ ISIG = 0x1
++ ISTRIP = 0x20
++ IUTF8 = 0x4000
++ IXANY = 0x800
++ IXOFF = 0x1000
++ IXON = 0x400
++ LINUX_REBOOT_CMD_CAD_OFF = 0x0
++ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
++ LINUX_REBOOT_CMD_HALT = 0xcdef0123
++ LINUX_REBOOT_CMD_KEXEC = 0x45584543
++ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc
++ LINUX_REBOOT_CMD_RESTART = 0x1234567
++ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4
++ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
++ LINUX_REBOOT_MAGIC1 = 0xfee1dead
++ LINUX_REBOOT_MAGIC2 = 0x28121969
++ LOCK_EX = 0x2
++ LOCK_NB = 0x4
++ LOCK_SH = 0x1
++ LOCK_UN = 0x8
++ MADV_DODUMP = 0x11
++ MADV_DOFORK = 0xb
++ MADV_DONTDUMP = 0x10
++ MADV_DONTFORK = 0xa
++ MADV_DONTNEED = 0x4
++ MADV_HUGEPAGE = 0xe
++ MADV_HWPOISON = 0x64
++ MADV_MERGEABLE = 0xc
++ MADV_NOHUGEPAGE = 0xf
++ MADV_NORMAL = 0x0
++ MADV_RANDOM = 0x1
++ MADV_REMOVE = 0x9
++ MADV_SEQUENTIAL = 0x2
++ MADV_UNMERGEABLE = 0xd
++ MADV_WILLNEED = 0x3
++ MAP_ANON = 0x20
++ MAP_ANONYMOUS = 0x20
++ MAP_DENYWRITE = 0x800
++ MAP_EXECUTABLE = 0x1000
++ MAP_FILE = 0x0
++ MAP_FIXED = 0x10
++ MAP_GROWSDOWN = 0x100
++ MAP_HUGETLB = 0x40000
++ MAP_HUGE_MASK = 0x3f
++ MAP_HUGE_SHIFT = 0x1a
++ MAP_LOCKED = 0x2000
++ MAP_NONBLOCK = 0x10000
++ MAP_NORESERVE = 0x4000
++ MAP_POPULATE = 0x8000
++ MAP_PRIVATE = 0x2
++ MAP_SHARED = 0x1
++ MAP_STACK = 0x20000
++ MAP_TYPE = 0xf
++ MCL_CURRENT = 0x1
++ MCL_FUTURE = 0x2
++ MNT_DETACH = 0x2
++ MNT_EXPIRE = 0x4
++ MNT_FORCE = 0x1
++ MSG_CMSG_CLOEXEC = 0x40000000
++ MSG_CONFIRM = 0x800
++ MSG_CTRUNC = 0x8
++ MSG_DONTROUTE = 0x4
++ MSG_DONTWAIT = 0x40
++ MSG_EOR = 0x80
++ MSG_ERRQUEUE = 0x2000
++ MSG_FASTOPEN = 0x20000000
++ MSG_FIN = 0x200
++ MSG_MORE = 0x8000
++ MSG_NOSIGNAL = 0x4000
++ MSG_OOB = 0x1
++ MSG_PEEK = 0x2
++ MSG_PROXY = 0x10
++ MSG_RST = 0x1000
++ MSG_SYN = 0x400
++ MSG_TRUNC = 0x20
++ MSG_TRYHARD = 0x4
++ MSG_WAITALL = 0x100
++ MSG_WAITFORONE = 0x10000
++ MS_ACTIVE = 0x40000000
++ MS_ASYNC = 0x1
++ MS_BIND = 0x1000
++ MS_DIRSYNC = 0x80
++ MS_INVALIDATE = 0x2
++ MS_I_VERSION = 0x800000
++ MS_KERNMOUNT = 0x400000
++ MS_MANDLOCK = 0x40
++ MS_MGC_MSK = 0xffff0000
++ MS_MGC_VAL = 0xc0ed0000
++ MS_MOVE = 0x2000
++ MS_NOATIME = 0x400
++ MS_NODEV = 0x4
++ MS_NODIRATIME = 0x800
++ MS_NOEXEC = 0x8
++ MS_NOSUID = 0x2
++ MS_NOUSER = -0x80000000
++ MS_POSIXACL = 0x10000
++ MS_PRIVATE = 0x40000
++ MS_RDONLY = 0x1
++ MS_REC = 0x4000
++ MS_RELATIME = 0x200000
++ MS_REMOUNT = 0x20
++ MS_RMT_MASK = 0x800051
++ MS_SHARED = 0x100000
++ MS_SILENT = 0x8000
++ MS_SLAVE = 0x80000
++ MS_STRICTATIME = 0x1000000
++ MS_SYNC = 0x4
++ MS_SYNCHRONOUS = 0x10
++ MS_UNBINDABLE = 0x20000
++ NAME_MAX = 0xff
++ NETLINK_ADD_MEMBERSHIP = 0x1
++ NETLINK_AUDIT = 0x9
++ NETLINK_BROADCAST_ERROR = 0x4
++ NETLINK_CAP_ACK = 0xa
++ NETLINK_CONNECTOR = 0xb
++ NETLINK_CRYPTO = 0x15
++ NETLINK_DNRTMSG = 0xe
++ NETLINK_DROP_MEMBERSHIP = 0x2
++ NETLINK_ECRYPTFS = 0x13
++ NETLINK_FIB_LOOKUP = 0xa
++ NETLINK_FIREWALL = 0x3
++ NETLINK_GENERIC = 0x10
++ NETLINK_INET_DIAG = 0x4
++ NETLINK_IP6_FW = 0xd
++ NETLINK_ISCSI = 0x8
++ NETLINK_KOBJECT_UEVENT = 0xf
++ NETLINK_LISTEN_ALL_NSID = 0x8
++ NETLINK_LIST_MEMBERSHIPS = 0x9
++ NETLINK_NETFILTER = 0xc
++ NETLINK_NFLOG = 0x5
++ NETLINK_NO_ENOBUFS = 0x5
++ NETLINK_PKTINFO = 0x3
++ NETLINK_RDMA = 0x14
++ NETLINK_ROUTE = 0x0
++ NETLINK_RX_RING = 0x6
++ NETLINK_SCSITRANSPORT = 0x12
++ NETLINK_SELINUX = 0x7
++ NETLINK_SOCK_DIAG = 0x4
++ NETLINK_TX_RING = 0x7
++ NETLINK_UNUSED = 0x1
++ NETLINK_USERSOCK = 0x2
++ NETLINK_XFRM = 0x6
++ NLA_ALIGNTO = 0x4
++ NLA_F_NESTED = 0x8000
++ NLA_F_NET_BYTEORDER = 0x4000
++ NLA_HDRLEN = 0x4
++ NLMSG_ALIGNTO = 0x4
++ NLMSG_DONE = 0x3
++ NLMSG_ERROR = 0x2
++ NLMSG_HDRLEN = 0x10
++ NLMSG_MIN_TYPE = 0x10
++ NLMSG_NOOP = 0x1
++ NLMSG_OVERRUN = 0x4
++ NLM_F_ACK = 0x4
++ NLM_F_APPEND = 0x800
++ NLM_F_ATOMIC = 0x400
++ NLM_F_CREATE = 0x400
++ NLM_F_DUMP = 0x300
++ NLM_F_DUMP_FILTERED = 0x20
++ NLM_F_DUMP_INTR = 0x10
++ NLM_F_ECHO = 0x8
++ NLM_F_EXCL = 0x200
++ NLM_F_MATCH = 0x200
++ NLM_F_MULTI = 0x2
++ NLM_F_REPLACE = 0x100
++ NLM_F_REQUEST = 0x1
++ NLM_F_ROOT = 0x100
++ NOFLSH = 0x80
++ OCRNL = 0x8
++ OFDEL = 0x80
++ OFILL = 0x40
++ ONLCR = 0x4
++ ONLRET = 0x20
++ ONOCR = 0x10
++ OPOST = 0x1
++ O_ACCMODE = 0x3
++ O_APPEND = 0x400
++ O_ASYNC = 0x2000
++ O_CLOEXEC = 0x80000
++ O_CREAT = 0x40
++ O_DIRECT = 0x4000
++ O_DIRECTORY = 0x10000
++ O_DSYNC = 0x1000
++ O_EXCL = 0x80
++ O_FSYNC = 0x101000
++ O_LARGEFILE = 0x0
++ O_NDELAY = 0x800
++ O_NOATIME = 0x40000
++ O_NOCTTY = 0x100
++ O_NOFOLLOW = 0x20000
++ O_NONBLOCK = 0x800
++ O_PATH = 0x200000
++ O_RDONLY = 0x0
++ O_RDWR = 0x2
++ O_RSYNC = 0x101000
++ O_SYNC = 0x101000
++ O_TMPFILE = 0x410000
++ O_TRUNC = 0x200
++ O_WRONLY = 0x1
++ PACKET_ADD_MEMBERSHIP = 0x1
++ PACKET_AUXDATA = 0x8
++ PACKET_BROADCAST = 0x1
++ PACKET_COPY_THRESH = 0x7
++ PACKET_DROP_MEMBERSHIP = 0x2
++ PACKET_FANOUT = 0x12
++ PACKET_FANOUT_CBPF = 0x6
++ PACKET_FANOUT_CPU = 0x2
++ PACKET_FANOUT_DATA = 0x16
++ PACKET_FANOUT_EBPF = 0x7
++ PACKET_FANOUT_FLAG_DEFRAG = 0x8000
++ PACKET_FANOUT_FLAG_ROLLOVER = 0x1000
++ PACKET_FANOUT_HASH = 0x0
++ PACKET_FANOUT_LB = 0x1
++ PACKET_FANOUT_QM = 0x5
++ PACKET_FANOUT_RND = 0x4
++ PACKET_FANOUT_ROLLOVER = 0x3
++ PACKET_FASTROUTE = 0x6
++ PACKET_HDRLEN = 0xb
++ PACKET_HOST = 0x0
++ PACKET_KERNEL = 0x7
++ PACKET_LOOPBACK = 0x5
++ PACKET_LOSS = 0xe
++ PACKET_MR_ALLMULTI = 0x2
++ PACKET_MR_MULTICAST = 0x0
++ PACKET_MR_PROMISC = 0x1
++ PACKET_MR_UNICAST = 0x3
++ PACKET_MULTICAST = 0x2
++ PACKET_ORIGDEV = 0x9
++ PACKET_OTHERHOST = 0x3
++ PACKET_OUTGOING = 0x4
++ PACKET_QDISC_BYPASS = 0x14
++ PACKET_RECV_OUTPUT = 0x3
++ PACKET_RESERVE = 0xc
++ PACKET_ROLLOVER_STATS = 0x15
++ PACKET_RX_RING = 0x5
++ PACKET_STATISTICS = 0x6
++ PACKET_TIMESTAMP = 0x11
++ PACKET_TX_HAS_OFF = 0x13
++ PACKET_TX_RING = 0xd
++ PACKET_TX_TIMESTAMP = 0x10
++ PACKET_USER = 0x6
++ PACKET_VERSION = 0xa
++ PACKET_VNET_HDR = 0xf
++ PARENB = 0x100
++ PARITY_CRC16_PR0 = 0x2
++ PARITY_CRC16_PR0_CCITT = 0x4
++ PARITY_CRC16_PR1 = 0x3
++ PARITY_CRC16_PR1_CCITT = 0x5
++ PARITY_CRC32_PR0_CCITT = 0x6
++ PARITY_CRC32_PR1_CCITT = 0x7
++ PARITY_DEFAULT = 0x0
++ PARITY_NONE = 0x1
++ PARMRK = 0x8
++ PARODD = 0x200
++ PENDIN = 0x4000
++ PRIO_PGRP = 0x1
++ PRIO_PROCESS = 0x0
++ PRIO_USER = 0x2
++ PROT_EXEC = 0x4
++ PROT_GROWSDOWN = 0x1000000
++ PROT_GROWSUP = 0x2000000
++ PROT_NONE = 0x0
++ PROT_READ = 0x1
++ PROT_WRITE = 0x2
++ PR_CAPBSET_DROP = 0x18
++ PR_CAPBSET_READ = 0x17
++ PR_CAP_AMBIENT = 0x2f
++ PR_CAP_AMBIENT_CLEAR_ALL = 0x4
++ PR_CAP_AMBIENT_IS_SET = 0x1
++ PR_CAP_AMBIENT_LOWER = 0x3
++ PR_CAP_AMBIENT_RAISE = 0x2
++ PR_ENDIAN_BIG = 0x0
++ PR_ENDIAN_LITTLE = 0x1
++ PR_ENDIAN_PPC_LITTLE = 0x2
++ PR_FPEMU_NOPRINT = 0x1
++ PR_FPEMU_SIGFPE = 0x2
++ PR_FP_EXC_ASYNC = 0x2
++ PR_FP_EXC_DISABLED = 0x0
++ PR_FP_EXC_DIV = 0x10000
++ PR_FP_EXC_INV = 0x100000
++ PR_FP_EXC_NONRECOV = 0x1
++ PR_FP_EXC_OVF = 0x20000
++ PR_FP_EXC_PRECISE = 0x3
++ PR_FP_EXC_RES = 0x80000
++ PR_FP_EXC_SW_ENABLE = 0x80
++ PR_FP_EXC_UND = 0x40000
++ PR_FP_MODE_FR = 0x1
++ PR_FP_MODE_FRE = 0x2
++ PR_GET_CHILD_SUBREAPER = 0x25
++ PR_GET_DUMPABLE = 0x3
++ PR_GET_ENDIAN = 0x13
++ PR_GET_FPEMU = 0x9
++ PR_GET_FPEXC = 0xb
++ PR_GET_FP_MODE = 0x2e
++ PR_GET_KEEPCAPS = 0x7
++ PR_GET_NAME = 0x10
++ PR_GET_NO_NEW_PRIVS = 0x27
++ PR_GET_PDEATHSIG = 0x2
++ PR_GET_SECCOMP = 0x15
++ PR_GET_SECUREBITS = 0x1b
++ PR_GET_THP_DISABLE = 0x2a
++ PR_GET_TID_ADDRESS = 0x28
++ PR_GET_TIMERSLACK = 0x1e
++ PR_GET_TIMING = 0xd
++ PR_GET_TSC = 0x19
++ PR_GET_UNALIGN = 0x5
++ PR_MCE_KILL = 0x21
++ PR_MCE_KILL_CLEAR = 0x0
++ PR_MCE_KILL_DEFAULT = 0x2
++ PR_MCE_KILL_EARLY = 0x1
++ PR_MCE_KILL_GET = 0x22
++ PR_MCE_KILL_LATE = 0x0
++ PR_MCE_KILL_SET = 0x1
++ PR_MPX_DISABLE_MANAGEMENT = 0x2c
++ PR_MPX_ENABLE_MANAGEMENT = 0x2b
++ PR_SET_CHILD_SUBREAPER = 0x24
++ PR_SET_DUMPABLE = 0x4
++ PR_SET_ENDIAN = 0x14
++ PR_SET_FPEMU = 0xa
++ PR_SET_FPEXC = 0xc
++ PR_SET_FP_MODE = 0x2d
++ PR_SET_KEEPCAPS = 0x8
++ PR_SET_MM = 0x23
++ PR_SET_MM_ARG_END = 0x9
++ PR_SET_MM_ARG_START = 0x8
++ PR_SET_MM_AUXV = 0xc
++ PR_SET_MM_BRK = 0x7
++ PR_SET_MM_END_CODE = 0x2
++ PR_SET_MM_END_DATA = 0x4
++ PR_SET_MM_ENV_END = 0xb
++ PR_SET_MM_ENV_START = 0xa
++ PR_SET_MM_EXE_FILE = 0xd
++ PR_SET_MM_MAP = 0xe
++ PR_SET_MM_MAP_SIZE = 0xf
++ PR_SET_MM_START_BRK = 0x6
++ PR_SET_MM_START_CODE = 0x1
++ PR_SET_MM_START_DATA = 0x3
++ PR_SET_MM_START_STACK = 0x5
++ PR_SET_NAME = 0xf
++ PR_SET_NO_NEW_PRIVS = 0x26
++ PR_SET_PDEATHSIG = 0x1
++ PR_SET_PTRACER = 0x59616d61
++ PR_SET_PTRACER_ANY = -0x1
++ PR_SET_SECCOMP = 0x16
++ PR_SET_SECUREBITS = 0x1c
++ PR_SET_THP_DISABLE = 0x29
++ PR_SET_TIMERSLACK = 0x1d
++ PR_SET_TIMING = 0xe
++ PR_SET_TSC = 0x1a
++ PR_SET_UNALIGN = 0x6
++ PR_TASK_PERF_EVENTS_DISABLE = 0x1f
++ PR_TASK_PERF_EVENTS_ENABLE = 0x20
++ PR_TIMING_STATISTICAL = 0x0
++ PR_TIMING_TIMESTAMP = 0x1
++ PR_TSC_ENABLE = 0x1
++ PR_TSC_SIGSEGV = 0x2
++ PR_UNALIGN_NOPRINT = 0x1
++ PR_UNALIGN_SIGBUS = 0x2
++ PTRACE_ATTACH = 0x10
++ PTRACE_CONT = 0x7
++ PTRACE_DETACH = 0x11
++ PTRACE_DISABLE_TE = 0x5010
++ PTRACE_ENABLE_TE = 0x5009
++ PTRACE_EVENT_CLONE = 0x3
++ PTRACE_EVENT_EXEC = 0x4
++ PTRACE_EVENT_EXIT = 0x6
++ PTRACE_EVENT_FORK = 0x1
++ PTRACE_EVENT_SECCOMP = 0x7
++ PTRACE_EVENT_STOP = 0x80
++ PTRACE_EVENT_VFORK = 0x2
++ PTRACE_EVENT_VFORK_DONE = 0x5
++ PTRACE_GETEVENTMSG = 0x4201
++ PTRACE_GETREGS = 0xc
++ PTRACE_GETREGSET = 0x4204
++ PTRACE_GETSIGINFO = 0x4202
++ PTRACE_GETSIGMASK = 0x420a
++ PTRACE_GET_LAST_BREAK = 0x5006
++ PTRACE_INTERRUPT = 0x4207
++ PTRACE_KILL = 0x8
++ PTRACE_LISTEN = 0x4208
++ PTRACE_OLDSETOPTIONS = 0x15
++ PTRACE_O_EXITKILL = 0x100000
++ PTRACE_O_MASK = 0x3000ff
++ PTRACE_O_SUSPEND_SECCOMP = 0x200000
++ PTRACE_O_TRACECLONE = 0x8
++ PTRACE_O_TRACEEXEC = 0x10
++ PTRACE_O_TRACEEXIT = 0x40
++ PTRACE_O_TRACEFORK = 0x2
++ PTRACE_O_TRACESECCOMP = 0x80
++ PTRACE_O_TRACESYSGOOD = 0x1
++ PTRACE_O_TRACEVFORK = 0x4
++ PTRACE_O_TRACEVFORKDONE = 0x20
++ PTRACE_PEEKDATA = 0x2
++ PTRACE_PEEKDATA_AREA = 0x5003
++ PTRACE_PEEKSIGINFO = 0x4209
++ PTRACE_PEEKSIGINFO_SHARED = 0x1
++ PTRACE_PEEKTEXT = 0x1
++ PTRACE_PEEKTEXT_AREA = 0x5002
++ PTRACE_PEEKUSR = 0x3
++ PTRACE_PEEKUSR_AREA = 0x5000
++ PTRACE_PEEK_SYSTEM_CALL = 0x5007
++ PTRACE_POKEDATA = 0x5
++ PTRACE_POKEDATA_AREA = 0x5005
++ PTRACE_POKETEXT = 0x4
++ PTRACE_POKETEXT_AREA = 0x5004
++ PTRACE_POKEUSR = 0x6
++ PTRACE_POKEUSR_AREA = 0x5001
++ PTRACE_POKE_SYSTEM_CALL = 0x5008
++ PTRACE_PROT = 0x15
++ PTRACE_SECCOMP_GET_FILTER = 0x420c
++ PTRACE_SEIZE = 0x4206
++ PTRACE_SETOPTIONS = 0x4200
++ PTRACE_SETREGS = 0xd
++ PTRACE_SETREGSET = 0x4205
++ PTRACE_SETSIGINFO = 0x4203
++ PTRACE_SETSIGMASK = 0x420b
++ PTRACE_SINGLEBLOCK = 0xc
++ PTRACE_SINGLESTEP = 0x9
++ PTRACE_SYSCALL = 0x18
++ PTRACE_TE_ABORT_RAND = 0x5011
++ PTRACE_TRACEME = 0x0
++ PT_ACR0 = 0x90
++ PT_ACR1 = 0x94
++ PT_ACR10 = 0xb8
++ PT_ACR11 = 0xbc
++ PT_ACR12 = 0xc0
++ PT_ACR13 = 0xc4
++ PT_ACR14 = 0xc8
++ PT_ACR15 = 0xcc
++ PT_ACR2 = 0x98
++ PT_ACR3 = 0x9c
++ PT_ACR4 = 0xa0
++ PT_ACR5 = 0xa4
++ PT_ACR6 = 0xa8
++ PT_ACR7 = 0xac
++ PT_ACR8 = 0xb0
++ PT_ACR9 = 0xb4
++ PT_CR_10 = 0x168
++ PT_CR_11 = 0x170
++ PT_CR_9 = 0x160
++ PT_ENDREGS = 0x1af
++ PT_FPC = 0xd8
++ PT_FPR0 = 0xe0
++ PT_FPR1 = 0xe8
++ PT_FPR10 = 0x130
++ PT_FPR11 = 0x138
++ PT_FPR12 = 0x140
++ PT_FPR13 = 0x148
++ PT_FPR14 = 0x150
++ PT_FPR15 = 0x158
++ PT_FPR2 = 0xf0
++ PT_FPR3 = 0xf8
++ PT_FPR4 = 0x100
++ PT_FPR5 = 0x108
++ PT_FPR6 = 0x110
++ PT_FPR7 = 0x118
++ PT_FPR8 = 0x120
++ PT_FPR9 = 0x128
++ PT_GPR0 = 0x10
++ PT_GPR1 = 0x18
++ PT_GPR10 = 0x60
++ PT_GPR11 = 0x68
++ PT_GPR12 = 0x70
++ PT_GPR13 = 0x78
++ PT_GPR14 = 0x80
++ PT_GPR15 = 0x88
++ PT_GPR2 = 0x20
++ PT_GPR3 = 0x28
++ PT_GPR4 = 0x30
++ PT_GPR5 = 0x38
++ PT_GPR6 = 0x40
++ PT_GPR7 = 0x48
++ PT_GPR8 = 0x50
++ PT_GPR9 = 0x58
++ PT_IEEE_IP = 0x1a8
++ PT_LASTOFF = 0x1a8
++ PT_ORIGGPR2 = 0xd0
++ PT_PSWADDR = 0x8
++ PT_PSWMASK = 0x0
++ RLIMIT_AS = 0x9
++ RLIMIT_CORE = 0x4
++ RLIMIT_CPU = 0x0
++ RLIMIT_DATA = 0x2
++ RLIMIT_FSIZE = 0x1
++ RLIMIT_NOFILE = 0x7
++ RLIMIT_STACK = 0x3
++ RLIM_INFINITY = -0x1
++ RTAX_ADVMSS = 0x8
++ RTAX_CC_ALGO = 0x10
++ RTAX_CWND = 0x7
++ RTAX_FEATURES = 0xc
++ RTAX_FEATURE_ALLFRAG = 0x8
++ RTAX_FEATURE_ECN = 0x1
++ RTAX_FEATURE_MASK = 0xf
++ RTAX_FEATURE_SACK = 0x2
++ RTAX_FEATURE_TIMESTAMP = 0x4
++ RTAX_HOPLIMIT = 0xa
++ RTAX_INITCWND = 0xb
++ RTAX_INITRWND = 0xe
++ RTAX_LOCK = 0x1
++ RTAX_MAX = 0x10
++ RTAX_MTU = 0x2
++ RTAX_QUICKACK = 0xf
++ RTAX_REORDERING = 0x9
++ RTAX_RTO_MIN = 0xd
++ RTAX_RTT = 0x4
++ RTAX_RTTVAR = 0x5
++ RTAX_SSTHRESH = 0x6
++ RTAX_UNSPEC = 0x0
++ RTAX_WINDOW = 0x3
++ RTA_ALIGNTO = 0x4
++ RTA_MAX = 0x16
++ RTCF_DIRECTSRC = 0x4000000
++ RTCF_DOREDIRECT = 0x1000000
++ RTCF_LOG = 0x2000000
++ RTCF_MASQ = 0x400000
++ RTCF_NAT = 0x800000
++ RTCF_VALVE = 0x200000
++ RTF_ADDRCLASSMASK = 0xf8000000
++ RTF_ADDRCONF = 0x40000
++ RTF_ALLONLINK = 0x20000
++ RTF_BROADCAST = 0x10000000
++ RTF_CACHE = 0x1000000
++ RTF_DEFAULT = 0x10000
++ RTF_DYNAMIC = 0x10
++ RTF_FLOW = 0x2000000
++ RTF_GATEWAY = 0x2
++ RTF_HOST = 0x4
++ RTF_INTERFACE = 0x40000000
++ RTF_IRTT = 0x100
++ RTF_LINKRT = 0x100000
++ RTF_LOCAL = 0x80000000
++ RTF_MODIFIED = 0x20
++ RTF_MSS = 0x40
++ RTF_MTU = 0x40
++ RTF_MULTICAST = 0x20000000
++ RTF_NAT = 0x8000000
++ RTF_NOFORWARD = 0x1000
++ RTF_NONEXTHOP = 0x200000
++ RTF_NOPMTUDISC = 0x4000
++ RTF_POLICY = 0x4000000
++ RTF_REINSTATE = 0x8
++ RTF_REJECT = 0x200
++ RTF_STATIC = 0x400
++ RTF_THROW = 0x2000
++ RTF_UP = 0x1
++ RTF_WINDOW = 0x80
++ RTF_XRESOLVE = 0x800
++ RTM_BASE = 0x10
++ RTM_DELACTION = 0x31
++ RTM_DELADDR = 0x15
++ RTM_DELADDRLABEL = 0x49
++ RTM_DELLINK = 0x11
++ RTM_DELMDB = 0x55
++ RTM_DELNEIGH = 0x1d
++ RTM_DELNSID = 0x59
++ RTM_DELQDISC = 0x25
++ RTM_DELROUTE = 0x19
++ RTM_DELRULE = 0x21
++ RTM_DELTCLASS = 0x29
++ RTM_DELTFILTER = 0x2d
++ RTM_F_CLONED = 0x200
++ RTM_F_EQUALIZE = 0x400
++ RTM_F_LOOKUP_TABLE = 0x1000
++ RTM_F_NOTIFY = 0x100
++ RTM_F_PREFIX = 0x800
++ RTM_GETACTION = 0x32
++ RTM_GETADDR = 0x16
++ RTM_GETADDRLABEL = 0x4a
++ RTM_GETANYCAST = 0x3e
++ RTM_GETDCB = 0x4e
++ RTM_GETLINK = 0x12
++ RTM_GETMDB = 0x56
++ RTM_GETMULTICAST = 0x3a
++ RTM_GETNEIGH = 0x1e
++ RTM_GETNEIGHTBL = 0x42
++ RTM_GETNETCONF = 0x52
++ RTM_GETNSID = 0x5a
++ RTM_GETQDISC = 0x26
++ RTM_GETROUTE = 0x1a
++ RTM_GETRULE = 0x22
++ RTM_GETTCLASS = 0x2a
++ RTM_GETTFILTER = 0x2e
++ RTM_MAX = 0x5b
++ RTM_NEWACTION = 0x30
++ RTM_NEWADDR = 0x14
++ RTM_NEWADDRLABEL = 0x48
++ RTM_NEWLINK = 0x10
++ RTM_NEWMDB = 0x54
++ RTM_NEWNDUSEROPT = 0x44
++ RTM_NEWNEIGH = 0x1c
++ RTM_NEWNEIGHTBL = 0x40
++ RTM_NEWNETCONF = 0x50
++ RTM_NEWNSID = 0x58
++ RTM_NEWPREFIX = 0x34
++ RTM_NEWQDISC = 0x24
++ RTM_NEWROUTE = 0x18
++ RTM_NEWRULE = 0x20
++ RTM_NEWTCLASS = 0x28
++ RTM_NEWTFILTER = 0x2c
++ RTM_NR_FAMILIES = 0x13
++ RTM_NR_MSGTYPES = 0x4c
++ RTM_SETDCB = 0x4f
++ RTM_SETLINK = 0x13
++ RTM_SETNEIGHTBL = 0x43
++ RTNH_ALIGNTO = 0x4
++ RTNH_COMPARE_MASK = 0x11
++ RTNH_F_DEAD = 0x1
++ RTNH_F_LINKDOWN = 0x10
++ RTNH_F_OFFLOAD = 0x8
++ RTNH_F_ONLINK = 0x4
++ RTNH_F_PERVASIVE = 0x2
++ RTN_MAX = 0xb
++ RTPROT_BABEL = 0x2a
++ RTPROT_BIRD = 0xc
++ RTPROT_BOOT = 0x3
++ RTPROT_DHCP = 0x10
++ RTPROT_DNROUTED = 0xd
++ RTPROT_GATED = 0x8
++ RTPROT_KERNEL = 0x2
++ RTPROT_MROUTED = 0x11
++ RTPROT_MRT = 0xa
++ RTPROT_NTK = 0xf
++ RTPROT_RA = 0x9
++ RTPROT_REDIRECT = 0x1
++ RTPROT_STATIC = 0x4
++ RTPROT_UNSPEC = 0x0
++ RTPROT_XORP = 0xe
++ RTPROT_ZEBRA = 0xb
++ RT_CLASS_DEFAULT = 0xfd
++ RT_CLASS_LOCAL = 0xff
++ RT_CLASS_MAIN = 0xfe
++ RT_CLASS_MAX = 0xff
++ RT_CLASS_UNSPEC = 0x0
++ RUSAGE_CHILDREN = -0x1
++ RUSAGE_SELF = 0x0
++ RUSAGE_THREAD = 0x1
++ SCM_CREDENTIALS = 0x2
++ SCM_RIGHTS = 0x1
++ SCM_TIMESTAMP = 0x1d
++ SCM_TIMESTAMPING = 0x25
++ SCM_TIMESTAMPNS = 0x23
++ SCM_WIFI_STATUS = 0x29
++ SHUT_RD = 0x0
++ SHUT_RDWR = 0x2
++ SHUT_WR = 0x1
++ SIOCADDDLCI = 0x8980
++ SIOCADDMULTI = 0x8931
++ SIOCADDRT = 0x890b
++ SIOCATMARK = 0x8905
++ SIOCDARP = 0x8953
++ SIOCDELDLCI = 0x8981
++ SIOCDELMULTI = 0x8932
++ SIOCDELRT = 0x890c
++ SIOCDEVPRIVATE = 0x89f0
++ SIOCDIFADDR = 0x8936
++ SIOCDRARP = 0x8960
++ SIOCGARP = 0x8954
++ SIOCGIFADDR = 0x8915
++ SIOCGIFBR = 0x8940
++ SIOCGIFBRDADDR = 0x8919
++ SIOCGIFCONF = 0x8912
++ SIOCGIFCOUNT = 0x8938
++ SIOCGIFDSTADDR = 0x8917
++ SIOCGIFENCAP = 0x8925
++ SIOCGIFFLAGS = 0x8913
++ SIOCGIFHWADDR = 0x8927
++ SIOCGIFINDEX = 0x8933
++ SIOCGIFMAP = 0x8970
++ SIOCGIFMEM = 0x891f
++ SIOCGIFMETRIC = 0x891d
++ SIOCGIFMTU = 0x8921
++ SIOCGIFNAME = 0x8910
++ SIOCGIFNETMASK = 0x891b
++ SIOCGIFPFLAGS = 0x8935
++ SIOCGIFSLAVE = 0x8929
++ SIOCGIFTXQLEN = 0x8942
++ SIOCGPGRP = 0x8904
++ SIOCGRARP = 0x8961
++ SIOCGSTAMP = 0x8906
++ SIOCGSTAMPNS = 0x8907
++ SIOCPROTOPRIVATE = 0x89e0
++ SIOCRTMSG = 0x890d
++ SIOCSARP = 0x8955
++ SIOCSIFADDR = 0x8916
++ SIOCSIFBR = 0x8941
++ SIOCSIFBRDADDR = 0x891a
++ SIOCSIFDSTADDR = 0x8918
++ SIOCSIFENCAP = 0x8926
++ SIOCSIFFLAGS = 0x8914
++ SIOCSIFHWADDR = 0x8924
++ SIOCSIFHWBROADCAST = 0x8937
++ SIOCSIFLINK = 0x8911
++ SIOCSIFMAP = 0x8971
++ SIOCSIFMEM = 0x8920
++ SIOCSIFMETRIC = 0x891e
++ SIOCSIFMTU = 0x8922
++ SIOCSIFNAME = 0x8923
++ SIOCSIFNETMASK = 0x891c
++ SIOCSIFPFLAGS = 0x8934
++ SIOCSIFSLAVE = 0x8930
++ SIOCSIFTXQLEN = 0x8943
++ SIOCSPGRP = 0x8902
++ SIOCSRARP = 0x8962
++ SOCK_CLOEXEC = 0x80000
++ SOCK_DCCP = 0x6
++ SOCK_DGRAM = 0x2
++ SOCK_NONBLOCK = 0x800
++ SOCK_PACKET = 0xa
++ SOCK_RAW = 0x3
++ SOCK_RDM = 0x4
++ SOCK_SEQPACKET = 0x5
++ SOCK_STREAM = 0x1
++ SOL_AAL = 0x109
++ SOL_ATM = 0x108
++ SOL_DECNET = 0x105
++ SOL_ICMPV6 = 0x3a
++ SOL_IP = 0x0
++ SOL_IPV6 = 0x29
++ SOL_IRDA = 0x10a
++ SOL_PACKET = 0x107
++ SOL_RAW = 0xff
++ SOL_SOCKET = 0x1
++ SOL_TCP = 0x6
++ SOL_X25 = 0x106
++ SOMAXCONN = 0x80
++ SO_ACCEPTCONN = 0x1e
++ SO_ATTACH_BPF = 0x32
++ SO_ATTACH_FILTER = 0x1a
++ SO_BINDTODEVICE = 0x19
++ SO_BPF_EXTENSIONS = 0x30
++ SO_BROADCAST = 0x6
++ SO_BSDCOMPAT = 0xe
++ SO_BUSY_POLL = 0x2e
++ SO_DEBUG = 0x1
++ SO_DETACH_BPF = 0x1b
++ SO_DETACH_FILTER = 0x1b
++ SO_DOMAIN = 0x27
++ SO_DONTROUTE = 0x5
++ SO_ERROR = 0x4
++ SO_GET_FILTER = 0x1a
++ SO_INCOMING_CPU = 0x31
++ SO_KEEPALIVE = 0x9
++ SO_LINGER = 0xd
++ SO_LOCK_FILTER = 0x2c
++ SO_MARK = 0x24
++ SO_MAX_PACING_RATE = 0x2f
++ SO_NOFCS = 0x2b
++ SO_NO_CHECK = 0xb
++ SO_OOBINLINE = 0xa
++ SO_PASSCRED = 0x10
++ SO_PASSSEC = 0x22
++ SO_PEEK_OFF = 0x2a
++ SO_PEERCRED = 0x11
++ SO_PEERNAME = 0x1c
++ SO_PEERSEC = 0x1f
++ SO_PRIORITY = 0xc
++ SO_PROTOCOL = 0x26
++ SO_RCVBUF = 0x8
++ SO_RCVBUFFORCE = 0x21
++ SO_RCVLOWAT = 0x12
++ SO_RCVTIMEO = 0x14
++ SO_REUSEADDR = 0x2
++ SO_REUSEPORT = 0xf
++ SO_RXQ_OVFL = 0x28
++ SO_SECURITY_AUTHENTICATION = 0x16
++ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
++ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
++ SO_SELECT_ERR_QUEUE = 0x2d
++ SO_SNDBUF = 0x7
++ SO_SNDBUFFORCE = 0x20
++ SO_SNDLOWAT = 0x13
++ SO_SNDTIMEO = 0x15
++ SO_TIMESTAMP = 0x1d
++ SO_TIMESTAMPING = 0x25
++ SO_TIMESTAMPNS = 0x23
++ SO_TYPE = 0x3
++ SO_WIFI_STATUS = 0x29
++ S_BLKSIZE = 0x200
++ S_IEXEC = 0x40
++ S_IFBLK = 0x6000
++ S_IFCHR = 0x2000
++ S_IFDIR = 0x4000
++ S_IFIFO = 0x1000
++ S_IFLNK = 0xa000
++ S_IFMT = 0xf000
++ S_IFREG = 0x8000
++ S_IFSOCK = 0xc000
++ S_IREAD = 0x100
++ S_IRGRP = 0x20
++ S_IROTH = 0x4
++ S_IRUSR = 0x100
++ S_IRWXG = 0x38
++ S_IRWXO = 0x7
++ S_IRWXU = 0x1c0
++ S_ISGID = 0x400
++ S_ISUID = 0x800
++ S_ISVTX = 0x200
++ S_IWGRP = 0x10
++ S_IWOTH = 0x2
++ S_IWRITE = 0x80
++ S_IWUSR = 0x80
++ S_IXGRP = 0x8
++ S_IXOTH = 0x1
++ S_IXUSR = 0x40
++ TCFLSH = 0x540b
++ TCIFLUSH = 0x0
++ TCIOFLUSH = 0x2
++ TCOFLUSH = 0x1
++ TCP_CONGESTION = 0xd
++ TCP_COOKIE_IN_ALWAYS = 0x1
++ TCP_COOKIE_MAX = 0x10
++ TCP_COOKIE_MIN = 0x8
++ TCP_COOKIE_OUT_NEVER = 0x2
++ TCP_COOKIE_PAIR_SIZE = 0x20
++ TCP_COOKIE_TRANSACTIONS = 0xf
++ TCP_CORK = 0x3
++ TCP_DEFER_ACCEPT = 0x9
++ TCP_FASTOPEN = 0x17
++ TCP_INFO = 0xb
++ TCP_KEEPCNT = 0x6
++ TCP_KEEPIDLE = 0x4
++ TCP_KEEPINTVL = 0x5
++ TCP_LINGER2 = 0x8
++ TCP_MAXSEG = 0x2
++ TCP_MAXWIN = 0xffff
++ TCP_MAX_WINSHIFT = 0xe
++ TCP_MD5SIG = 0xe
++ TCP_MD5SIG_MAXKEYLEN = 0x50
++ TCP_MSS = 0x200
++ TCP_MSS_DEFAULT = 0x218
++ TCP_MSS_DESIRED = 0x4c4
++ TCP_NODELAY = 0x1
++ TCP_QUEUE_SEQ = 0x15
++ TCP_QUICKACK = 0xc
++ TCP_REPAIR = 0x13
++ TCP_REPAIR_OPTIONS = 0x16
++ TCP_REPAIR_QUEUE = 0x14
++ TCP_SYNCNT = 0x7
++ TCP_S_DATA_IN = 0x4
++ TCP_S_DATA_OUT = 0x8
++ TCP_THIN_DUPACK = 0x11
++ TCP_THIN_LINEAR_TIMEOUTS = 0x10
++ TCP_TIMESTAMP = 0x18
++ TCP_USER_TIMEOUT = 0x12
++ TCP_WINDOW_CLAMP = 0xa
++ TCSAFLUSH = 0x2
++ TIOCCBRK = 0x5428
++ TIOCCONS = 0x541d
++ TIOCEXCL = 0x540c
++ TIOCGDEV = 0x80045432
++ TIOCGETD = 0x5424
++ TIOCGEXCL = 0x80045440
++ TIOCGICOUNT = 0x545d
++ TIOCGLCKTRMIOS = 0x5456
++ TIOCGPGRP = 0x540f
++ TIOCGPKT = 0x80045438
++ TIOCGPTLCK = 0x80045439
++ TIOCGPTN = 0x80045430
++ TIOCGRS485 = 0x542e
++ TIOCGSERIAL = 0x541e
++ TIOCGSID = 0x5429
++ TIOCGSOFTCAR = 0x5419
++ TIOCGWINSZ = 0x5413
++ TIOCINQ = 0x541b
++ TIOCLINUX = 0x541c
++ TIOCMBIC = 0x5417
++ TIOCMBIS = 0x5416
++ TIOCMGET = 0x5415
++ TIOCMIWAIT = 0x545c
++ TIOCMSET = 0x5418
++ TIOCM_CAR = 0x40
++ TIOCM_CD = 0x40
++ TIOCM_CTS = 0x20
++ TIOCM_DSR = 0x100
++ TIOCM_DTR = 0x2
++ TIOCM_LE = 0x1
++ TIOCM_RI = 0x80
++ TIOCM_RNG = 0x80
++ TIOCM_RTS = 0x4
++ TIOCM_SR = 0x10
++ TIOCM_ST = 0x8
++ TIOCNOTTY = 0x5422
++ TIOCNXCL = 0x540d
++ TIOCOUTQ = 0x5411
++ TIOCPKT = 0x5420
++ TIOCPKT_DATA = 0x0
++ TIOCPKT_DOSTOP = 0x20
++ TIOCPKT_FLUSHREAD = 0x1
++ TIOCPKT_FLUSHWRITE = 0x2
++ TIOCPKT_IOCTL = 0x40
++ TIOCPKT_NOSTOP = 0x10
++ TIOCPKT_START = 0x8
++ TIOCPKT_STOP = 0x4
++ TIOCSBRK = 0x5427
++ TIOCSCTTY = 0x540e
++ TIOCSERCONFIG = 0x5453
++ TIOCSERGETLSR = 0x5459
++ TIOCSERGETMULTI = 0x545a
++ TIOCSERGSTRUCT = 0x5458
++ TIOCSERGWILD = 0x5454
++ TIOCSERSETMULTI = 0x545b
++ TIOCSERSWILD = 0x5455
++ TIOCSER_TEMT = 0x1
++ TIOCSETD = 0x5423
++ TIOCSIG = 0x40045436
++ TIOCSLCKTRMIOS = 0x5457
++ TIOCSPGRP = 0x5410
++ TIOCSPTLCK = 0x40045431
++ TIOCSRS485 = 0x542f
++ TIOCSSERIAL = 0x541f
++ TIOCSSOFTCAR = 0x541a
++ TIOCSTI = 0x5412
++ TIOCSWINSZ = 0x5414
++ TIOCVHANGUP = 0x5437
++ TOSTOP = 0x100
++ TUNATTACHFILTER = 0x401054d5
++ TUNDETACHFILTER = 0x401054d6
++ TUNGETFEATURES = 0x800454cf
++ TUNGETFILTER = 0x801054db
++ TUNGETIFF = 0x800454d2
++ TUNGETSNDBUF = 0x800454d3
++ TUNGETVNETBE = 0x800454df
++ TUNGETVNETHDRSZ = 0x800454d7
++ TUNGETVNETLE = 0x800454dd
++ TUNSETDEBUG = 0x400454c9
++ TUNSETGROUP = 0x400454ce
++ TUNSETIFF = 0x400454ca
++ TUNSETIFINDEX = 0x400454da
++ TUNSETLINK = 0x400454cd
++ TUNSETNOCSUM = 0x400454c8
++ TUNSETOFFLOAD = 0x400454d0
++ TUNSETOWNER = 0x400454cc
++ TUNSETPERSIST = 0x400454cb
++ TUNSETQUEUE = 0x400454d9
++ TUNSETSNDBUF = 0x400454d4
++ TUNSETTXFILTER = 0x400454d1
++ TUNSETVNETBE = 0x400454de
++ TUNSETVNETHDRSZ = 0x400454d8
++ TUNSETVNETLE = 0x400454dc
++ VDISCARD = 0xd
++ VEOF = 0x4
++ VEOL = 0xb
++ VEOL2 = 0x10
++ VERASE = 0x2
++ VINTR = 0x0
++ VKILL = 0x3
++ VLNEXT = 0xf
++ VMIN = 0x6
++ VQUIT = 0x1
++ VREPRINT = 0xc
++ VSTART = 0x8
++ VSTOP = 0x9
++ VSUSP = 0xa
++ VSWTC = 0x7
++ VT0 = 0x0
++ VT1 = 0x4000
++ VTDLY = 0x4000
++ VTIME = 0x5
++ VWERASE = 0xe
++ WALL = 0x40000000
++ WCLONE = 0x80000000
++ WCONTINUED = 0x8
++ WEXITED = 0x4
++ WNOHANG = 0x1
++ WNOTHREAD = 0x20000000
++ WNOWAIT = 0x1000000
++ WORDSIZE = 0x40
++ WSTOPPED = 0x2
++ WUNTRACED = 0x2
++)
++
++// Errors
++const (
++ E2BIG = Errno(0x7)
++ EACCES = Errno(0xd)
++ EADDRINUSE = Errno(0x62)
++ EADDRNOTAVAIL = Errno(0x63)
++ EADV = Errno(0x44)
++ EAFNOSUPPORT = Errno(0x61)
++ EAGAIN = Errno(0xb)
++ EALREADY = Errno(0x72)
++ EBADE = Errno(0x34)
++ EBADF = Errno(0x9)
++ EBADFD = Errno(0x4d)
++ EBADMSG = Errno(0x4a)
++ EBADR = Errno(0x35)
++ EBADRQC = Errno(0x38)
++ EBADSLT = Errno(0x39)
++ EBFONT = Errno(0x3b)
++ EBUSY = Errno(0x10)
++ ECANCELED = Errno(0x7d)
++ ECHILD = Errno(0xa)
++ ECHRNG = Errno(0x2c)
++ ECOMM = Errno(0x46)
++ ECONNABORTED = Errno(0x67)
++ ECONNREFUSED = Errno(0x6f)
++ ECONNRESET = Errno(0x68)
++ EDEADLK = Errno(0x23)
++ EDEADLOCK = Errno(0x23)
++ EDESTADDRREQ = Errno(0x59)
++ EDOM = Errno(0x21)
++ EDOTDOT = Errno(0x49)
++ EDQUOT = Errno(0x7a)
++ EEXIST = Errno(0x11)
++ EFAULT = Errno(0xe)
++ EFBIG = Errno(0x1b)
++ EHOSTDOWN = Errno(0x70)
++ EHOSTUNREACH = Errno(0x71)
++ EHWPOISON = Errno(0x85)
++ EIDRM = Errno(0x2b)
++ EILSEQ = Errno(0x54)
++ EINPROGRESS = Errno(0x73)
++ EINTR = Errno(0x4)
++ EINVAL = Errno(0x16)
++ EIO = Errno(0x5)
++ EISCONN = Errno(0x6a)
++ EISDIR = Errno(0x15)
++ EISNAM = Errno(0x78)
++ EKEYEXPIRED = Errno(0x7f)
++ EKEYREJECTED = Errno(0x81)
++ EKEYREVOKED = Errno(0x80)
++ EL2HLT = Errno(0x33)
++ EL2NSYNC = Errno(0x2d)
++ EL3HLT = Errno(0x2e)
++ EL3RST = Errno(0x2f)
++ ELIBACC = Errno(0x4f)
++ ELIBBAD = Errno(0x50)
++ ELIBEXEC = Errno(0x53)
++ ELIBMAX = Errno(0x52)
++ ELIBSCN = Errno(0x51)
++ ELNRNG = Errno(0x30)
++ ELOOP = Errno(0x28)
++ EMEDIUMTYPE = Errno(0x7c)
++ EMFILE = Errno(0x18)
++ EMLINK = Errno(0x1f)
++ EMSGSIZE = Errno(0x5a)
++ EMULTIHOP = Errno(0x48)
++ ENAMETOOLONG = Errno(0x24)
++ ENAVAIL = Errno(0x77)
++ ENETDOWN = Errno(0x64)
++ ENETRESET = Errno(0x66)
++ ENETUNREACH = Errno(0x65)
++ ENFILE = Errno(0x17)
++ ENOANO = Errno(0x37)
++ ENOBUFS = Errno(0x69)
++ ENOCSI = Errno(0x32)
++ ENODATA = Errno(0x3d)
++ ENODEV = Errno(0x13)
++ ENOENT = Errno(0x2)
++ ENOEXEC = Errno(0x8)
++ ENOKEY = Errno(0x7e)
++ ENOLCK = Errno(0x25)
++ ENOLINK = Errno(0x43)
++ ENOMEDIUM = Errno(0x7b)
++ ENOMEM = Errno(0xc)
++ ENOMSG = Errno(0x2a)
++ ENONET = Errno(0x40)
++ ENOPKG = Errno(0x41)
++ ENOPROTOOPT = Errno(0x5c)
++ ENOSPC = Errno(0x1c)
++ ENOSR = Errno(0x3f)
++ ENOSTR = Errno(0x3c)
++ ENOSYS = Errno(0x26)
++ ENOTBLK = Errno(0xf)
++ ENOTCONN = Errno(0x6b)
++ ENOTDIR = Errno(0x14)
++ ENOTEMPTY = Errno(0x27)
++ ENOTNAM = Errno(0x76)
++ ENOTRECOVERABLE = Errno(0x83)
++ ENOTSOCK = Errno(0x58)
++ ENOTSUP = Errno(0x5f)
++ ENOTTY = Errno(0x19)
++ ENOTUNIQ = Errno(0x4c)
++ ENXIO = Errno(0x6)
++ EOPNOTSUPP = Errno(0x5f)
++ EOVERFLOW = Errno(0x4b)
++ EOWNERDEAD = Errno(0x82)
++ EPERM = Errno(0x1)
++ EPFNOSUPPORT = Errno(0x60)
++ EPIPE = Errno(0x20)
++ EPROTO = Errno(0x47)
++ EPROTONOSUPPORT = Errno(0x5d)
++ EPROTOTYPE = Errno(0x5b)
++ ERANGE = Errno(0x22)
++ EREMCHG = Errno(0x4e)
++ EREMOTE = Errno(0x42)
++ EREMOTEIO = Errno(0x79)
++ ERESTART = Errno(0x55)
++ ERFKILL = Errno(0x84)
++ EROFS = Errno(0x1e)
++ ESHUTDOWN = Errno(0x6c)
++ ESOCKTNOSUPPORT = Errno(0x5e)
++ ESPIPE = Errno(0x1d)
++ ESRCH = Errno(0x3)
++ ESRMNT = Errno(0x45)
++ ESTALE = Errno(0x74)
++ ESTRPIPE = Errno(0x56)
++ ETIME = Errno(0x3e)
++ ETIMEDOUT = Errno(0x6e)
++ ETOOMANYREFS = Errno(0x6d)
++ ETXTBSY = Errno(0x1a)
++ EUCLEAN = Errno(0x75)
++ EUNATCH = Errno(0x31)
++ EUSERS = Errno(0x57)
++ EWOULDBLOCK = Errno(0xb)
++ EXDEV = Errno(0x12)
++ EXFULL = Errno(0x36)
++)
++
++// Signals
++const (
++ SIGABRT = Signal(0x6)
++ SIGALRM = Signal(0xe)
++ SIGBUS = Signal(0x7)
++ SIGCHLD = Signal(0x11)
++ SIGCLD = Signal(0x11)
++ SIGCONT = Signal(0x12)
++ SIGFPE = Signal(0x8)
++ SIGHUP = Signal(0x1)
++ SIGILL = Signal(0x4)
++ SIGINT = Signal(0x2)
++ SIGIO = Signal(0x1d)
++ SIGIOT = Signal(0x6)
++ SIGKILL = Signal(0x9)
++ SIGPIPE = Signal(0xd)
++ SIGPOLL = Signal(0x1d)
++ SIGPROF = Signal(0x1b)
++ SIGPWR = Signal(0x1e)
++ SIGQUIT = Signal(0x3)
++ SIGSEGV = Signal(0xb)
++ SIGSTKFLT = Signal(0x10)
++ SIGSTOP = Signal(0x13)
++ SIGSYS = Signal(0x1f)
++ SIGTERM = Signal(0xf)
++ SIGTRAP = Signal(0x5)
++ SIGTSTP = Signal(0x14)
++ SIGTTIN = Signal(0x15)
++ SIGTTOU = Signal(0x16)
++ SIGUNUSED = Signal(0x1f)
++ SIGURG = Signal(0x17)
++ SIGUSR1 = Signal(0xa)
++ SIGUSR2 = Signal(0xc)
++ SIGVTALRM = Signal(0x1a)
++ SIGWINCH = Signal(0x1c)
++ SIGXCPU = Signal(0x18)
++ SIGXFSZ = Signal(0x19)
++)
++
++// Error table
++var errors = [...]string{
++ 1: "operation not permitted",
++ 2: "no such file or directory",
++ 3: "no such process",
++ 4: "interrupted system call",
++ 5: "input/output error",
++ 6: "no such device or address",
++ 7: "argument list too long",
++ 8: "exec format error",
++ 9: "bad file descriptor",
++ 10: "no child processes",
++ 11: "resource temporarily unavailable",
++ 12: "cannot allocate memory",
++ 13: "permission denied",
++ 14: "bad address",
++ 15: "block device required",
++ 16: "device or resource busy",
++ 17: "file exists",
++ 18: "invalid cross-device link",
++ 19: "no such device",
++ 20: "not a directory",
++ 21: "is a directory",
++ 22: "invalid argument",
++ 23: "too many open files in system",
++ 24: "too many open files",
++ 25: "inappropriate ioctl for device",
++ 26: "text file busy",
++ 27: "file too large",
++ 28: "no space left on device",
++ 29: "illegal seek",
++ 30: "read-only file system",
++ 31: "too many links",
++ 32: "broken pipe",
++ 33: "numerical argument out of domain",
++ 34: "numerical result out of range",
++ 35: "resource deadlock avoided",
++ 36: "file name too long",
++ 37: "no locks available",
++ 38: "function not implemented",
++ 39: "directory not empty",
++ 40: "too many levels of symbolic links",
++ 42: "no message of desired type",
++ 43: "identifier removed",
++ 44: "channel number out of range",
++ 45: "level 2 not synchronized",
++ 46: "level 3 halted",
++ 47: "level 3 reset",
++ 48: "link number out of range",
++ 49: "protocol driver not attached",
++ 50: "no CSI structure available",
++ 51: "level 2 halted",
++ 52: "invalid exchange",
++ 53: "invalid request descriptor",
++ 54: "exchange full",
++ 55: "no anode",
++ 56: "invalid request code",
++ 57: "invalid slot",
++ 59: "bad font file format",
++ 60: "device not a stream",
++ 61: "no data available",
++ 62: "timer expired",
++ 63: "out of streams resources",
++ 64: "machine is not on the network",
++ 65: "package not installed",
++ 66: "object is remote",
++ 67: "link has been severed",
++ 68: "advertise error",
++ 69: "srmount error",
++ 70: "communication error on send",
++ 71: "protocol error",
++ 72: "multihop attempted",
++ 73: "RFS specific error",
++ 74: "bad message",
++ 75: "value too large for defined data type",
++ 76: "name not unique on network",
++ 77: "file descriptor in bad state",
++ 78: "remote address changed",
++ 79: "can not access a needed shared library",
++ 80: "accessing a corrupted shared library",
++ 81: ".lib section in a.out corrupted",
++ 82: "attempting to link in too many shared libraries",
++ 83: "cannot exec a shared library directly",
++ 84: "invalid or incomplete multibyte or wide character",
++ 85: "interrupted system call should be restarted",
++ 86: "streams pipe error",
++ 87: "too many users",
++ 88: "socket operation on non-socket",
++ 89: "destination address required",
++ 90: "message too long",
++ 91: "protocol wrong type for socket",
++ 92: "protocol not available",
++ 93: "protocol not supported",
++ 94: "socket type not supported",
++ 95: "operation not supported",
++ 96: "protocol family not supported",
++ 97: "address family not supported by protocol",
++ 98: "address already in use",
++ 99: "cannot assign requested address",
++ 100: "network is down",
++ 101: "network is unreachable",
++ 102: "network dropped connection on reset",
++ 103: "software caused connection abort",
++ 104: "connection reset by peer",
++ 105: "no buffer space available",
++ 106: "transport endpoint is already connected",
++ 107: "transport endpoint is not connected",
++ 108: "cannot send after transport endpoint shutdown",
++ 109: "too many references: cannot splice",
++ 110: "connection timed out",
++ 111: "connection refused",
++ 112: "host is down",
++ 113: "no route to host",
++ 114: "operation already in progress",
++ 115: "operation now in progress",
++ 116: "stale file handle",
++ 117: "structure needs cleaning",
++ 118: "not a XENIX named type file",
++ 119: "no XENIX semaphores available",
++ 120: "is a named type file",
++ 121: "remote I/O error",
++ 122: "disk quota exceeded",
++ 123: "no medium found",
++ 124: "wrong medium type",
++ 125: "operation canceled",
++ 126: "required key not available",
++ 127: "key has expired",
++ 128: "key has been revoked",
++ 129: "key was rejected by service",
++ 130: "owner died",
++ 131: "state not recoverable",
++ 132: "operation not possible due to RF-kill",
++ 133: "memory page has hardware error",
++}
++
++// Signal table
++var signals = [...]string{
++ 1: "hangup",
++ 2: "interrupt",
++ 3: "quit",
++ 4: "illegal instruction",
++ 5: "trace/breakpoint trap",
++ 6: "aborted",
++ 7: "bus error",
++ 8: "floating point exception",
++ 9: "killed",
++ 10: "user defined signal 1",
++ 11: "segmentation fault",
++ 12: "user defined signal 2",
++ 13: "broken pipe",
++ 14: "alarm clock",
++ 15: "terminated",
++ 16: "stack fault",
++ 17: "child exited",
++ 18: "continued",
++ 19: "stopped (signal)",
++ 20: "stopped",
++ 21: "stopped (tty input)",
++ 22: "stopped (tty output)",
++ 23: "urgent I/O condition",
++ 24: "CPU time limit exceeded",
++ 25: "file size limit exceeded",
++ 26: "virtual timer expired",
++ 27: "profiling timer expired",
++ 28: "window changed",
++ 29: "I/O possible",
++ 30: "power failure",
++ 31: "bad system call",
++}
+--- /dev/null
++++ b/src/syscall/zsyscall_linux_s390x.go
+@@ -0,0 +1,1578 @@
++// mksyscall.pl syscall_linux.go syscall_linux_s390x.go
++// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
++
++// +build s390x,linux
++
++package syscall
++
++import "unsafe"
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(oldpath)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(newpath)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
++ use(unsafe.Pointer(_p0))
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ var _p1 unsafe.Pointer
++ if len(buf) > 0 {
++ _p1 = unsafe.Pointer(&buf[0])
++ } else {
++ _p1 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(oldpath)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(newpath)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func unlinkat(dirfd int, path string, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func utimes(path string, times *[2]Timeval) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func utimensat(dirfd int, path string, times *[2]Timespec) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)))
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
++ _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getcwd(buf []byte) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(buf) > 0 {
++ _p0 = unsafe.Pointer(&buf[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
++ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
++ wpid = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
++ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(arg)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(source)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(target)
++ if err != nil {
++ return
++ }
++ var _p2 *byte
++ _p2, err = BytePtrFromString(fstype)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ use(unsafe.Pointer(_p2))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Acct(path string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Adjtimex(buf *Timex) (state int, err error) {
++ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
++ state = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Chdir(path string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Chroot(path string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Close(fd int) (err error) {
++ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Dup(oldfd int) (fd int, err error) {
++ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Dup3(oldfd int, newfd int, flags int) (err error) {
++ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func EpollCreate(size int) (fd int, err error) {
++ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func EpollCreate1(flag int) (fd int, err error) {
++ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
++ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(events) > 0 {
++ _p0 = unsafe.Pointer(&events[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Exit(code int) {
++ Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
++ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fchdir(fd int) (err error) {
++ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fchmod(fd int, mode uint32) (err error) {
++ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func fcntl(fd int, cmd int, arg int) (val int, err error) {
++ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
++ val = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fdatasync(fd int) (err error) {
++ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Flock(fd int, how int) (err error) {
++ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fsync(fd int) (err error) {
++ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getdents(fd int, buf []byte) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(buf) > 0 {
++ _p0 = unsafe.Pointer(&buf[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(_SYS_getdents, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getpgid(pid int) (pgid int, err error) {
++ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
++ pgid = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getpid() (pid int) {
++ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
++ pid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getppid() (ppid int) {
++ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
++ ppid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getpriority(which int, who int) (prio int, err error) {
++ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
++ prio = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getrusage(who int, rusage *Rusage) (err error) {
++ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Gettid() (tid int) {
++ r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
++ tid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(attr)
++ if err != nil {
++ return
++ }
++ var _p2 unsafe.Pointer
++ if len(dest) > 0 {
++ _p2 = unsafe.Pointer(&dest[0])
++ } else {
++ _p2 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ sz = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(pathname)
++ if err != nil {
++ return
++ }
++ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
++ use(unsafe.Pointer(_p0))
++ watchdesc = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func InotifyInit1(flags int) (fd int, err error) {
++ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
++ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
++ success = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Kill(pid int, sig Signal) (err error) {
++ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Klogctl(typ int, buf []byte) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(buf) > 0 {
++ _p0 = unsafe.Pointer(&buf[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Listxattr(path string, dest []byte) (sz int, err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ var _p1 unsafe.Pointer
++ if len(dest) > 0 {
++ _p1 = unsafe.Pointer(&dest[0])
++ } else {
++ _p1 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
++ use(unsafe.Pointer(_p0))
++ sz = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Mkdirat(dirfd int, path string, mode uint32) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
++ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Pause() (err error) {
++ _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func PivotRoot(newroot string, putold string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(newroot)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(putold)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
++ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func read(fd int, p []byte) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Removexattr(path string, attr string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(attr)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(oldpath)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(newpath)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setdomainname(p []byte) (err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Sethostname(p []byte) (err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setpgid(pid int, pgid int) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setsid() (pid int, err error) {
++ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
++ pid = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Settimeofday(tv *Timeval) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setpriority(which int, who int, prio int) (err error) {
++ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setxattr(path string, attr string, data []byte, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ var _p1 *byte
++ _p1, err = BytePtrFromString(attr)
++ if err != nil {
++ return
++ }
++ var _p2 unsafe.Pointer
++ if len(data) > 0 {
++ _p2 = unsafe.Pointer(&data[0])
++ } else {
++ _p2 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
++ use(unsafe.Pointer(_p0))
++ use(unsafe.Pointer(_p1))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Sync() {
++ Syscall(SYS_SYNC, 0, 0, 0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Sysinfo(info *Sysinfo_t) (err error) {
++ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
++ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
++ n = int64(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Tgkill(tgid int, tid int, sig Signal) (err error) {
++ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Times(tms *Tms) (ticks uintptr, err error) {
++ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
++ ticks = uintptr(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Umask(mask int) (oldmask int) {
++ r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
++ oldmask = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Uname(buf *Utsname) (err error) {
++ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Unmount(target string, flags int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(target)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Unshare(flags int) (err error) {
++ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Ustat(dev int, ubuf *Ustat_t) (err error) {
++ _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Utime(path string, buf *Utimbuf) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func write(fd int, p []byte) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func exitThread(code int) (err error) {
++ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func readlen(fd int, p *byte, np int) (n int, err error) {
++ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func writelen(fd int, p *byte, np int) (n int, err error) {
++ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func munmap(addr uintptr, length uintptr) (err error) {
++ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Madvise(b []byte, advice int) (err error) {
++ var _p0 unsafe.Pointer
++ if len(b) > 0 {
++ _p0 = unsafe.Pointer(&b[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Mprotect(b []byte, prot int) (err error) {
++ var _p0 unsafe.Pointer
++ if len(b) > 0 {
++ _p0 = unsafe.Pointer(&b[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Mlock(b []byte) (err error) {
++ var _p0 unsafe.Pointer
++ if len(b) > 0 {
++ _p0 = unsafe.Pointer(&b[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Munlock(b []byte) (err error) {
++ var _p0 unsafe.Pointer
++ if len(b) > 0 {
++ _p0 = unsafe.Pointer(&b[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Mlockall(flags int) (err error) {
++ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Munlockall() (err error) {
++ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Dup2(oldfd int, newfd int) (err error) {
++ _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fchown(fd int, uid int, gid int) (err error) {
++ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fstat(fd int, stat *Stat_t) (err error) {
++ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Fstatfs(fd int, buf *Statfs_t) (err error) {
++ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Ftruncate(fd int, length int64) (err error) {
++ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getegid() (egid int) {
++ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
++ egid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Geteuid() (euid int) {
++ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
++ euid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getgid() (gid int) {
++ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
++ gid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getrlimit(resource int, rlim *Rlimit) (err error) {
++ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Getuid() (uid int) {
++ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
++ uid = int(r0)
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func InotifyInit() (fd int, err error) {
++ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
++ fd = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Lchown(path string, uid int, gid int) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Lstat(path string, stat *Stat_t) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Pread(fd int, p []byte, offset int64) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
++ var _p0 unsafe.Pointer
++ if len(p) > 0 {
++ _p0 = unsafe.Pointer(&p[0])
++ } else {
++ _p0 = unsafe.Pointer(&_zero)
++ }
++ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Seek(fd int, offset int64, whence int) (off int64, err error) {
++ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
++ off = int64(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
++ r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
++ n = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
++ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
++ written = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setfsgid(gid int) (err error) {
++ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setfsuid(uid int) (err error) {
++ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setregid(rgid int, egid int) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setresgid(rgid int, egid int, sgid int) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setresuid(ruid int, euid int, suid int) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setrlimit(resource int, rlim *Rlimit) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Setreuid(ruid int, euid int) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
++ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
++ n = int64(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Stat(path string, stat *Stat_t) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Statfs(path string, buf *Statfs_t) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
++ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Truncate(path string, length int64) (err error) {
++ var _p0 *byte
++ _p0, err = BytePtrFromString(path)
++ if err != nil {
++ return
++ }
++ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
++ use(unsafe.Pointer(_p0))
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func getgroups(n int, list *_Gid_t) (nn int, err error) {
++ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
++ nn = int(r0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func setgroups(n int, list *_Gid_t) (err error) {
++ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func Gettimeofday(tv *Timeval) (err error) {
++ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
++
++// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
++
++func pipe2(p *[2]_C_int, flags int) (err error) {
++ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
++ if e1 != 0 {
++ err = errnoErr(e1)
++ }
++ return
++}
+--- /dev/null
++++ b/src/syscall/zsysnum_linux_s390x.go
+@@ -0,0 +1,328 @@
++// mksysnum_linux.pl /usr/include/asm/unistd.h
++// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
++
++// +build s390x,linux
++
++package syscall
++
++const (
++ SYS_EXIT = 1
++ SYS_FORK = 2
++ SYS_READ = 3
++ SYS_WRITE = 4
++ SYS_OPEN = 5
++ SYS_CLOSE = 6
++ SYS_RESTART_SYSCALL = 7
++ SYS_CREAT = 8
++ SYS_LINK = 9
++ SYS_UNLINK = 10
++ SYS_EXECVE = 11
++ SYS_CHDIR = 12
++ SYS_MKNOD = 14
++ SYS_CHMOD = 15
++ SYS_LSEEK = 19
++ SYS_GETPID = 20
++ SYS_MOUNT = 21
++ SYS_UMOUNT = 22
++ SYS_PTRACE = 26
++ SYS_ALARM = 27
++ SYS_PAUSE = 29
++ SYS_UTIME = 30
++ SYS_ACCESS = 33
++ SYS_NICE = 34
++ SYS_SYNC = 36
++ SYS_KILL = 37
++ SYS_RENAME = 38
++ SYS_MKDIR = 39
++ SYS_RMDIR = 40
++ SYS_DUP = 41
++ SYS_PIPE = 42
++ SYS_TIMES = 43
++ SYS_BRK = 45
++ SYS_SIGNAL = 48
++ SYS_ACCT = 51
++ SYS_UMOUNT2 = 52
++ SYS_IOCTL = 54
++ SYS_FCNTL = 55
++ SYS_SETPGID = 57
++ SYS_UMASK = 60
++ SYS_CHROOT = 61
++ SYS_USTAT = 62
++ SYS_DUP2 = 63
++ SYS_GETPPID = 64
++ SYS_GETPGRP = 65
++ SYS_SETSID = 66
++ SYS_SIGACTION = 67
++ SYS_SIGSUSPEND = 72
++ SYS_SIGPENDING = 73
++ SYS_SETHOSTNAME = 74
++ SYS_SETRLIMIT = 75
++ SYS_GETRUSAGE = 77
++ SYS_GETTIMEOFDAY = 78
++ SYS_SETTIMEOFDAY = 79
++ SYS_SYMLINK = 83
++ SYS_READLINK = 85
++ SYS_USELIB = 86
++ SYS_SWAPON = 87
++ SYS_REBOOT = 88
++ SYS_READDIR = 89
++ SYS_MMAP = 90
++ SYS_MUNMAP = 91
++ SYS_TRUNCATE = 92
++ SYS_FTRUNCATE = 93
++ SYS_FCHMOD = 94
++ SYS_GETPRIORITY = 96
++ SYS_SETPRIORITY = 97
++ SYS_STATFS = 99
++ SYS_FSTATFS = 100
++ SYS_SOCKETCALL = 102
++ SYS_SYSLOG = 103
++ SYS_SETITIMER = 104
++ SYS_GETITIMER = 105
++ SYS_STAT = 106
++ SYS_LSTAT = 107
++ SYS_FSTAT = 108
++ SYS_LOOKUP_DCOOKIE = 110
++ SYS_VHANGUP = 111
++ SYS_IDLE = 112
++ SYS_WAIT4 = 114
++ SYS_SWAPOFF = 115
++ SYS_SYSINFO = 116
++ SYS_IPC = 117
++ SYS_FSYNC = 118
++ SYS_SIGRETURN = 119
++ SYS_CLONE = 120
++ SYS_SETDOMAINNAME = 121
++ SYS_UNAME = 122
++ SYS_ADJTIMEX = 124
++ SYS_MPROTECT = 125
++ SYS_SIGPROCMASK = 126
++ SYS_CREATE_MODULE = 127
++ SYS_INIT_MODULE = 128
++ SYS_DELETE_MODULE = 129
++ SYS_GET_KERNEL_SYMS = 130
++ SYS_QUOTACTL = 131
++ SYS_GETPGID = 132
++ SYS_FCHDIR = 133
++ SYS_BDFLUSH = 134
++ SYS_SYSFS = 135
++ SYS_PERSONALITY = 136
++ SYS_AFS_SYSCALL = 137
++ SYS_GETDENTS = 141
++ SYS_FLOCK = 143
++ SYS_MSYNC = 144
++ SYS_READV = 145
++ SYS_WRITEV = 146
++ SYS_GETSID = 147
++ SYS_FDATASYNC = 148
++ SYS__SYSCTL = 149
++ SYS_MLOCK = 150
++ SYS_MUNLOCK = 151
++ SYS_MLOCKALL = 152
++ SYS_MUNLOCKALL = 153
++ SYS_SCHED_SETPARAM = 154
++ SYS_SCHED_GETPARAM = 155
++ SYS_SCHED_SETSCHEDULER = 156
++ SYS_SCHED_GETSCHEDULER = 157
++ SYS_SCHED_YIELD = 158
++ SYS_SCHED_GET_PRIORITY_MAX = 159
++ SYS_SCHED_GET_PRIORITY_MIN = 160
++ SYS_SCHED_RR_GET_INTERVAL = 161
++ SYS_NANOSLEEP = 162
++ SYS_MREMAP = 163
++ SYS_QUERY_MODULE = 167
++ SYS_POLL = 168
++ SYS_NFSSERVCTL = 169
++ SYS_PRCTL = 172
++ SYS_RT_SIGRETURN = 173
++ SYS_RT_SIGACTION = 174
++ SYS_RT_SIGPROCMASK = 175
++ SYS_RT_SIGPENDING = 176
++ SYS_RT_SIGTIMEDWAIT = 177
++ SYS_RT_SIGQUEUEINFO = 178
++ SYS_RT_SIGSUSPEND = 179
++ SYS_PREAD64 = 180
++ SYS_PWRITE64 = 181
++ SYS_GETCWD = 183
++ SYS_CAPGET = 184
++ SYS_CAPSET = 185
++ SYS_SIGALTSTACK = 186
++ SYS_SENDFILE = 187
++ SYS_GETPMSG = 188
++ SYS_PUTPMSG = 189
++ SYS_VFORK = 190
++ SYS_PIVOT_ROOT = 217
++ SYS_MINCORE = 218
++ SYS_MADVISE = 219
++ SYS_GETDENTS64 = 220
++ SYS_READAHEAD = 222
++ SYS_SETXATTR = 224
++ SYS_LSETXATTR = 225
++ SYS_FSETXATTR = 226
++ SYS_GETXATTR = 227
++ SYS_LGETXATTR = 228
++ SYS_FGETXATTR = 229
++ SYS_LISTXATTR = 230
++ SYS_LLISTXATTR = 231
++ SYS_FLISTXATTR = 232
++ SYS_REMOVEXATTR = 233
++ SYS_LREMOVEXATTR = 234
++ SYS_FREMOVEXATTR = 235
++ SYS_GETTID = 236
++ SYS_TKILL = 237
++ SYS_FUTEX = 238
++ SYS_SCHED_SETAFFINITY = 239
++ SYS_SCHED_GETAFFINITY = 240
++ SYS_TGKILL = 241
++ SYS_IO_SETUP = 243
++ SYS_IO_DESTROY = 244
++ SYS_IO_GETEVENTS = 245
++ SYS_IO_SUBMIT = 246
++ SYS_IO_CANCEL = 247
++ SYS_EXIT_GROUP = 248
++ SYS_EPOLL_CREATE = 249
++ SYS_EPOLL_CTL = 250
++ SYS_EPOLL_WAIT = 251
++ SYS_SET_TID_ADDRESS = 252
++ SYS_FADVISE64 = 253
++ SYS_TIMER_CREATE = 254
++ SYS_TIMER_SETTIME = 255
++ SYS_TIMER_GETTIME = 256
++ SYS_TIMER_GETOVERRUN = 257
++ SYS_TIMER_DELETE = 258
++ SYS_CLOCK_SETTIME = 259
++ SYS_CLOCK_GETTIME = 260
++ SYS_CLOCK_GETRES = 261
++ SYS_CLOCK_NANOSLEEP = 262
++ SYS_STATFS64 = 265
++ SYS_FSTATFS64 = 266
++ SYS_REMAP_FILE_PAGES = 267
++ SYS_MBIND = 268
++ SYS_GET_MEMPOLICY = 269
++ SYS_SET_MEMPOLICY = 270
++ SYS_MQ_OPEN = 271
++ SYS_MQ_UNLINK = 272
++ SYS_MQ_TIMEDSEND = 273
++ SYS_MQ_TIMEDRECEIVE = 274
++ SYS_MQ_NOTIFY = 275
++ SYS_MQ_GETSETATTR = 276
++ SYS_KEXEC_LOAD = 277
++ SYS_ADD_KEY = 278
++ SYS_REQUEST_KEY = 279
++ SYS_KEYCTL = 280
++ SYS_WAITID = 281
++ SYS_IOPRIO_SET = 282
++ SYS_IOPRIO_GET = 283
++ SYS_INOTIFY_INIT = 284
++ SYS_INOTIFY_ADD_WATCH = 285
++ SYS_INOTIFY_RM_WATCH = 286
++ SYS_MIGRATE_PAGES = 287
++ SYS_OPENAT = 288
++ SYS_MKDIRAT = 289
++ SYS_MKNODAT = 290
++ SYS_FCHOWNAT = 291
++ SYS_FUTIMESAT = 292
++ SYS_UNLINKAT = 294
++ SYS_RENAMEAT = 295
++ SYS_LINKAT = 296
++ SYS_SYMLINKAT = 297
++ SYS_READLINKAT = 298
++ SYS_FCHMODAT = 299
++ SYS_FACCESSAT = 300
++ SYS_PSELECT6 = 301
++ SYS_PPOLL = 302
++ SYS_UNSHARE = 303
++ SYS_SET_ROBUST_LIST = 304
++ SYS_GET_ROBUST_LIST = 305
++ SYS_SPLICE = 306
++ SYS_SYNC_FILE_RANGE = 307
++ SYS_TEE = 308
++ SYS_VMSPLICE = 309
++ SYS_MOVE_PAGES = 310
++ SYS_GETCPU = 311
++ SYS_EPOLL_PWAIT = 312
++ SYS_UTIMES = 313
++ SYS_FALLOCATE = 314
++ SYS_UTIMENSAT = 315
++ SYS_SIGNALFD = 316
++ SYS_TIMERFD = 317
++ SYS_EVENTFD = 318
++ SYS_TIMERFD_CREATE = 319
++ SYS_TIMERFD_SETTIME = 320
++ SYS_TIMERFD_GETTIME = 321
++ SYS_SIGNALFD4 = 322
++ SYS_EVENTFD2 = 323
++ SYS_INOTIFY_INIT1 = 324
++ SYS_PIPE2 = 325
++ SYS_DUP3 = 326
++ SYS_EPOLL_CREATE1 = 327
++ SYS_PREADV = 328
++ SYS_PWRITEV = 329
++ SYS_RT_TGSIGQUEUEINFO = 330
++ SYS_PERF_EVENT_OPEN = 331
++ SYS_FANOTIFY_INIT = 332
++ SYS_FANOTIFY_MARK = 333
++ SYS_PRLIMIT64 = 334
++ SYS_NAME_TO_HANDLE_AT = 335
++ SYS_OPEN_BY_HANDLE_AT = 336
++ SYS_CLOCK_ADJTIME = 337
++ SYS_SYNCFS = 338
++ SYS_SETNS = 339
++ SYS_PROCESS_VM_READV = 340
++ SYS_PROCESS_VM_WRITEV = 341
++ SYS_S390_RUNTIME_INSTR = 342
++ SYS_KCMP = 343
++ SYS_FINIT_MODULE = 344
++ SYS_SCHED_SETATTR = 345
++ SYS_SCHED_GETATTR = 346
++ SYS_RENAMEAT2 = 347
++ SYS_SECCOMP = 348
++ SYS_GETRANDOM = 349
++ SYS_MEMFD_CREATE = 350
++ SYS_BPF = 351
++ SYS_S390_PCI_MMIO_WRITE = 352
++ SYS_S390_PCI_MMIO_READ = 353
++ SYS_EXECVEAT = 354
++ SYS_USERFAULTFD = 355
++ SYS_MEMBARRIER = 356
++ SYS_RECVMMSG = 357
++ SYS_SENDMMSG = 358
++ SYS_SOCKET = 359
++ SYS_SOCKETPAIR = 360
++ SYS_BIND = 361
++ SYS_CONNECT = 362
++ SYS_LISTEN = 363
++ SYS_ACCEPT4 = 364
++ SYS_GETSOCKOPT = 365
++ SYS_SETSOCKOPT = 366
++ SYS_GETSOCKNAME = 367
++ SYS_GETPEERNAME = 368
++ SYS_SENDTO = 369
++ SYS_SENDMSG = 370
++ SYS_RECVFROM = 371
++ SYS_RECVMSG = 372
++ SYS_SHUTDOWN = 373
++ SYS_MLOCK2 = 374
++ SYS_SELECT = 142
++ SYS_GETRLIMIT = 191
++ SYS_LCHOWN = 198
++ SYS_GETUID = 199
++ SYS_GETGID = 200
++ SYS_GETEUID = 201
++ SYS_GETEGID = 202
++ SYS_SETREUID = 203
++ SYS_SETREGID = 204
++ SYS_GETGROUPS = 205
++ SYS_SETGROUPS = 206
++ SYS_FCHOWN = 207
++ SYS_SETRESUID = 208
++ SYS_GETRESUID = 209
++ SYS_SETRESGID = 210
++ SYS_GETRESGID = 211
++ SYS_CHOWN = 212
++ SYS_SETUID = 213
++ SYS_SETGID = 214
++ SYS_SETFSUID = 215
++ SYS_SETFSGID = 216
++ SYS_NEWFSTATAT = 293
++)
+--- /dev/null
++++ b/src/syscall/ztypes_linux_s390x.go
+@@ -0,0 +1,622 @@
++// Created by cgo -godefs - DO NOT EDIT
++// cgo -godefs types_linux.go | go run mkpost.go
++
++// +build s390x,linux
++
++package syscall
++
++const (
++ sizeofPtr = 0x8
++ sizeofShort = 0x2
++ sizeofInt = 0x4
++ sizeofLong = 0x8
++ sizeofLongLong = 0x8
++ PathMax = 0x1000
++)
++
++type (
++ _C_short int16
++ _C_int int32
++ _C_long int64
++ _C_long_long int64
++)
++
++type Timespec struct {
++ Sec int64
++ Nsec int64
++}
++
++type Timeval struct {
++ Sec int64
++ Usec int64
++}
++
++type Timex struct {
++ Modes uint32
++ _ [4]byte
++ Offset int64
++ Freq int64
++ Maxerror int64
++ Esterror int64
++ Status int32
++ _ [4]byte
++ Constant int64
++ Precision int64
++ Tolerance int64
++ Time Timeval
++ Tick int64
++ Ppsfreq int64
++ Jitter int64
++ Shift int32
++ _ [4]byte
++ Stabil int64
++ Jitcnt int64
++ Calcnt int64
++ Errcnt int64
++ Stbcnt int64
++ Tai int32
++ _ [44]byte
++}
++
++type Time_t int64
++
++type Tms struct {
++ Utime int64
++ Stime int64
++ Cutime int64
++ Cstime int64
++}
++
++type Utimbuf struct {
++ Actime int64
++ Modtime int64
++}
++
++type Rusage struct {
++ Utime Timeval
++ Stime Timeval
++ Maxrss int64
++ Ixrss int64
++ Idrss int64
++ Isrss int64
++ Minflt int64
++ Majflt int64
++ Nswap int64
++ Inblock int64
++ Oublock int64
++ Msgsnd int64
++ Msgrcv int64
++ Nsignals int64
++ Nvcsw int64
++ Nivcsw int64
++}
++
++type Rlimit struct {
++ Cur uint64
++ Max uint64
++}
++
++type _Gid_t uint32
++
++type Stat_t struct {
++ Dev uint64
++ Ino uint64
++ Nlink uint64
++ Mode uint32
++ Uid uint32
++ Gid uint32
++ _ int32
++ Rdev uint64
++ Size int64
++ Atim Timespec
++ Mtim Timespec
++ Ctim Timespec
++ Blksize int64
++ Blocks int64
++ _ [3]int64
++}
++
++type Statfs_t struct {
++ Type uint32
++ Bsize uint32
++ Blocks uint64
++ Bfree uint64
++ Bavail uint64
++ Files uint64
++ Ffree uint64
++ Fsid Fsid
++ Namelen uint32
++ Frsize uint32
++ Flags uint32
++ Spare [4]uint32
++ _ [4]byte
++}
++
++type Dirent struct {
++ Ino uint64
++ Off int64
++ Reclen uint16
++ Type uint8
++ Name [256]uint8
++ _ [5]byte
++}
++
++type Fsid struct {
++ _ [2]int32
++}
++
++type Flock_t struct {
++ Type int16
++ Whence int16
++ _ [4]byte
++ Start int64
++ Len int64
++ Pid int32
++ _ [4]byte
++}
++
++type RawSockaddrInet4 struct {
++ Family uint16
++ Port uint16
++ Addr [4]byte /* in_addr */
++ Zero [8]uint8
++}
++
++type RawSockaddrInet6 struct {
++ Family uint16
++ Port uint16
++ Flowinfo uint32
++ Addr [16]byte /* in6_addr */
++ Scope_id uint32
++}
++
++type RawSockaddrUnix struct {
++ Family uint16
++ Path [108]int8
++}
++
++type RawSockaddrLinklayer struct {
++ Family uint16
++ Protocol uint16
++ Ifindex int32
++ Hatype uint16
++ Pkttype uint8
++ Halen uint8
++ Addr [8]uint8
++}
++
++type RawSockaddrNetlink struct {
++ Family uint16
++ Pad uint16
++ Pid uint32
++ Groups uint32
++}
++
++type RawSockaddr struct {
++ Family uint16
++ Data [14]int8
++}
++
++type RawSockaddrAny struct {
++ Addr RawSockaddr
++ Pad [96]uint8
++}
++
++type _Socklen uint32
++
++type Linger struct {
++ Onoff int32
++ Linger int32
++}
++
++type Iovec struct {
++ Base *byte
++ Len uint64
++}
++
++type IPMreq struct {
++ Multiaddr [4]byte /* in_addr */
++ Interface [4]byte /* in_addr */
++}
++
++type IPMreqn struct {
++ Multiaddr [4]byte /* in_addr */
++ Address [4]byte /* in_addr */
++ Ifindex int32
++}
++
++type IPv6Mreq struct {
++ Multiaddr [16]byte /* in6_addr */
++ Interface uint32
++}
++
++type Msghdr struct {
++ Name *byte
++ Namelen uint32
++ _ [4]byte
++ Iov *Iovec
++ Iovlen uint64
++ Control *byte
++ Controllen uint64
++ Flags int32
++ _ [4]byte
++}
++
++type Cmsghdr struct {
++ Len uint64
++ Level int32
++ Type int32
++}
++
++type Inet4Pktinfo struct {
++ Ifindex int32
++ Spec_dst [4]byte /* in_addr */
++ Addr [4]byte /* in_addr */
++}
++
++type Inet6Pktinfo struct {
++ Addr [16]byte /* in6_addr */
++ Ifindex uint32
++}
++
++type IPv6MTUInfo struct {
++ Addr RawSockaddrInet6
++ Mtu uint32
++}
++
++type ICMPv6Filter struct {
++ Data [8]uint32
++}
++
++type Ucred struct {
++ Pid int32
++ Uid uint32
++ Gid uint32
++}
++
++type TCPInfo struct {
++ State uint8
++ Ca_state uint8
++ Retransmits uint8
++ Probes uint8
++ Backoff uint8
++ Options uint8
++ _ [2]byte
++ Rto uint32
++ Ato uint32
++ Snd_mss uint32
++ Rcv_mss uint32
++ Unacked uint32
++ Sacked uint32
++ Lost uint32
++ Retrans uint32
++ Fackets uint32
++ Last_data_sent uint32
++ Last_ack_sent uint32
++ Last_data_recv uint32
++ Last_ack_recv uint32
++ Pmtu uint32
++ Rcv_ssthresh uint32
++ Rtt uint32
++ Rttvar uint32
++ Snd_ssthresh uint32
++ Snd_cwnd uint32
++ Advmss uint32
++ Reordering uint32
++ Rcv_rtt uint32
++ Rcv_space uint32
++ Total_retrans uint32
++}
++
++const (
++ SizeofSockaddrInet4 = 0x10
++ SizeofSockaddrInet6 = 0x1c
++ SizeofSockaddrAny = 0x70
++ SizeofSockaddrUnix = 0x6e
++ SizeofSockaddrLinklayer = 0x14
++ SizeofSockaddrNetlink = 0xc
++ SizeofLinger = 0x8
++ SizeofIPMreq = 0x8
++ SizeofIPMreqn = 0xc
++ SizeofIPv6Mreq = 0x14
++ SizeofMsghdr = 0x38
++ SizeofCmsghdr = 0x10
++ SizeofInet4Pktinfo = 0xc
++ SizeofInet6Pktinfo = 0x14
++ SizeofIPv6MTUInfo = 0x20
++ SizeofICMPv6Filter = 0x20
++ SizeofUcred = 0xc
++ SizeofTCPInfo = 0x68
++)
++
++const (
++ IFA_UNSPEC = 0x0
++ IFA_ADDRESS = 0x1
++ IFA_LOCAL = 0x2
++ IFA_LABEL = 0x3
++ IFA_BROADCAST = 0x4
++ IFA_ANYCAST = 0x5
++ IFA_CACHEINFO = 0x6
++ IFA_MULTICAST = 0x7
++ IFLA_UNSPEC = 0x0
++ IFLA_ADDRESS = 0x1
++ IFLA_BROADCAST = 0x2
++ IFLA_IFNAME = 0x3
++ IFLA_MTU = 0x4
++ IFLA_LINK = 0x5
++ IFLA_QDISC = 0x6
++ IFLA_STATS = 0x7
++ IFLA_COST = 0x8
++ IFLA_PRIORITY = 0x9
++ IFLA_MASTER = 0xa
++ IFLA_WIRELESS = 0xb
++ IFLA_PROTINFO = 0xc
++ IFLA_TXQLEN = 0xd
++ IFLA_MAP = 0xe
++ IFLA_WEIGHT = 0xf
++ IFLA_OPERSTATE = 0x10
++ IFLA_LINKMODE = 0x11
++ IFLA_LINKINFO = 0x12
++ IFLA_NET_NS_PID = 0x13
++ IFLA_IFALIAS = 0x14
++ IFLA_MAX = 0x27
++ RT_SCOPE_UNIVERSE = 0x0
++ RT_SCOPE_SITE = 0xc8
++ RT_SCOPE_LINK = 0xfd
++ RT_SCOPE_HOST = 0xfe
++ RT_SCOPE_NOWHERE = 0xff
++ RT_TABLE_UNSPEC = 0x0
++ RT_TABLE_COMPAT = 0xfc
++ RT_TABLE_DEFAULT = 0xfd
++ RT_TABLE_MAIN = 0xfe
++ RT_TABLE_LOCAL = 0xff
++ RT_TABLE_MAX = 0xffffffff
++ RTA_UNSPEC = 0x0
++ RTA_DST = 0x1
++ RTA_SRC = 0x2
++ RTA_IIF = 0x3
++ RTA_OIF = 0x4
++ RTA_GATEWAY = 0x5
++ RTA_PRIORITY = 0x6
++ RTA_PREFSRC = 0x7
++ RTA_METRICS = 0x8
++ RTA_MULTIPATH = 0x9
++ RTA_FLOW = 0xb
++ RTA_CACHEINFO = 0xc
++ RTA_TABLE = 0xf
++ RTN_UNSPEC = 0x0
++ RTN_UNICAST = 0x1
++ RTN_LOCAL = 0x2
++ RTN_BROADCAST = 0x3
++ RTN_ANYCAST = 0x4
++ RTN_MULTICAST = 0x5
++ RTN_BLACKHOLE = 0x6
++ RTN_UNREACHABLE = 0x7
++ RTN_PROHIBIT = 0x8
++ RTN_THROW = 0x9
++ RTN_NAT = 0xa
++ RTN_XRESOLVE = 0xb
++ RTNLGRP_NONE = 0x0
++ RTNLGRP_LINK = 0x1
++ RTNLGRP_NOTIFY = 0x2
++ RTNLGRP_NEIGH = 0x3
++ RTNLGRP_TC = 0x4
++ RTNLGRP_IPV4_IFADDR = 0x5
++ RTNLGRP_IPV4_MROUTE = 0x6
++ RTNLGRP_IPV4_ROUTE = 0x7
++ RTNLGRP_IPV4_RULE = 0x8
++ RTNLGRP_IPV6_IFADDR = 0x9
++ RTNLGRP_IPV6_MROUTE = 0xa
++ RTNLGRP_IPV6_ROUTE = 0xb
++ RTNLGRP_IPV6_IFINFO = 0xc
++ RTNLGRP_IPV6_PREFIX = 0x12
++ RTNLGRP_IPV6_RULE = 0x13
++ RTNLGRP_ND_USEROPT = 0x14
++ SizeofNlMsghdr = 0x10
++ SizeofNlMsgerr = 0x14
++ SizeofRtGenmsg = 0x1
++ SizeofNlAttr = 0x4
++ SizeofRtAttr = 0x4
++ SizeofIfInfomsg = 0x10
++ SizeofIfAddrmsg = 0x8
++ SizeofRtMsg = 0xc
++ SizeofRtNexthop = 0x8
++)
++
++type NlMsghdr struct {
++ Len uint32
++ Type uint16
++ Flags uint16
++ Seq uint32
++ Pid uint32
++}
++
++type NlMsgerr struct {
++ Error int32
++ Msg NlMsghdr
++}
++
++type RtGenmsg struct {
++ Family uint8
++}
++
++type NlAttr struct {
++ Len uint16
++ Type uint16
++}
++
++type RtAttr struct {
++ Len uint16
++ Type uint16
++}
++
++type IfInfomsg struct {
++ Family uint8
++ _ uint8
++ Type uint16
++ Index int32
++ Flags uint32
++ Change uint32
++}
++
++type IfAddrmsg struct {
++ Family uint8
++ Prefixlen uint8
++ Flags uint8
++ Scope uint8
++ Index uint32
++}
++
++type RtMsg struct {
++ Family uint8
++ Dst_len uint8
++ Src_len uint8
++ Tos uint8
++ Table uint8
++ Protocol uint8
++ Scope uint8
++ Type uint8
++ Flags uint32
++}
++
++type RtNexthop struct {
++ Len uint16
++ Flags uint8
++ Hops uint8
++ Ifindex int32
++}
++
++const (
++ SizeofSockFilter = 0x8
++ SizeofSockFprog = 0x10
++)
++
++type SockFilter struct {
++ Code uint16
++ Jt uint8
++ Jf uint8
++ K uint32
++}
++
++type SockFprog struct {
++ Len uint16
++ _ [6]byte
++ Filter *SockFilter
++}
++
++type InotifyEvent struct {
++ Wd int32
++ Mask uint32
++ Cookie uint32
++ Len uint32
++}
++
++const SizeofInotifyEvent = 0x10
++
++type PtraceRegs struct {
++ Psw PtracePsw
++ Gprs [16]uint64
++ Acrs [16]uint32
++ Orig_gpr2 uint64
++ Fp_regs PtraceFpregs
++ Per_info PtracePer
++ Ieee_instruction_pointer uint64
++}
++
++type PtracePsw struct {
++ Mask uint64
++ Addr uint64
++}
++
++type PtraceFpregs struct {
++ Fpc uint32
++ _ [4]byte
++ Fprs [16]float64
++}
++
++type PtracePer struct {
++ Control_regs [0]uint64
++ _ [24]byte
++ _ [8]byte
++ Starting_addr uint64
++ Ending_addr uint64
++ Perc_atmid uint16
++ _ [6]byte
++ Address uint64
++ Access_id uint8
++ _ [7]byte
++}
++
++type FdSet struct {
++ Bits [16]int64
++}
++
++type Sysinfo_t struct {
++ Uptime int64
++ Loads [3]uint64
++ Totalram uint64
++ Freeram uint64
++ Sharedram uint64
++ Bufferram uint64
++ Totalswap uint64
++ Freeswap uint64
++ Procs uint16
++ Pad uint16
++ _ [4]byte
++ Totalhigh uint64
++ Freehigh uint64
++ Unit uint32
++ _ [0]uint8
++ _ [4]byte
++}
++
++type Utsname struct {
++ Sysname [65]uint8
++ Nodename [65]uint8
++ Release [65]uint8
++ Version [65]uint8
++ Machine [65]uint8
++ Domainname [65]uint8
++}
++
++type Ustat_t struct {
++ Tfree int32
++ _ [4]byte
++ Tinode uint64
++ Fname [6]uint8
++ Fpack [6]uint8
++ _ [4]byte
++}
++
++type EpollEvent struct {
++ Events uint32
++ _ int32
++ Fd int32
++ Pad int32
++}
++
++const (
++ _AT_FDCWD = -0x64
++ _AT_REMOVEDIR = 0x200
++ _AT_SYMLINK_NOFOLLOW = 0x100
++)
++
++type Termios struct {
++ Iflag uint32
++ Oflag uint32
++ Cflag uint32
++ Lflag uint32
++ Line uint8
++ Cc [32]uint8
++ _ [3]byte
++ Ispeed uint32
++ Ospeed uint32
++}
++
++const (
++ IUCLC = 0x200
++ OLCUC = 0x2
++ TCGETS = 0x5401
++ TCSETS = 0x5402
++ XCASE = 0x4
++)
+--- a/test/fixedbugs/issue11656.go
++++ b/test/fixedbugs/issue11656.go
+@@ -65,6 +65,8 @@
+ binary.BigEndian.PutUint32(ill, 0x00000034) // trap
+ case "mips64le":
+ binary.LittleEndian.PutUint32(ill, 0x00000034) // trap
++ case "s390x":
++ binary.BigEndian.PutUint32(ill, 0) // undefined
+ default:
+ // Just leave it as 0 and hope for the best.
+ }
+--- a/test/init1.go
++++ b/test/init1.go
+@@ -40,7 +40,7 @@
+ sys1, numGC1 := memstats.Sys, memstats.NumGC
+ if sys1-sys >= N*MB || numGC1 == numGC {
+ println("allocated 1000 chunks of", MB, "and used ", sys1-sys, "memory")
+- println("numGC went", numGC, "to", numGC)
++ println("numGC went", numGC, "to", numGC1)
+ panic("init1")
+ }
+ }
+--- a/test/nilptr3.go
++++ b/test/nilptr3.go
+@@ -2,7 +2,8 @@
+ // Fails on ppc64x because of incomplete optimization.
+ // See issues 9058.
+ // Same reason for mips64x.
+-// +build !ppc64,!ppc64le,!mips64,!mips64le
++// Same reason for s390x.
++// +build !ppc64,!ppc64le,!mips64,!mips64le,!s390x
+
+ // Copyright 2013 The Go Authors. All rights reserved.
+ // Use of this source code is governed by a BSD-style
+--- a/test/nosplit.go
++++ b/test/nosplit.go
+@@ -275,6 +275,9 @@
+ case "amd64":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define REGISTER AX\n")
++ case "s390x":
++ ptrSize = 8
++ fmt.Fprintf(&buf, "#define REGISTER R10\n")
+ default:
+ fmt.Fprintf(&buf, "#define REGISTER AX\n")
+ }
diff -pruN 1.6.3-1/debian/patches/0002-no-pie-when-race.patch 1.6.3-1ubuntu1/debian/patches/0002-no-pie-when-race.patch
--- 1.6.3-1/debian/patches/0002-no-pie-when-race.patch 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/patches/0002-no-pie-when-race.patch 2016-07-20 09:45:01.000000000 +0000
@@ -0,0 +1,36 @@
+Description: Pass -no-pie to host linker when linking race-enabled binaries.
+Author: Michael Hudson-Doyle
+Origin: upstream
+Bug: https://github.com/golang/go/issues/15443
+Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/golang-1.6/+bug/1574916
+Applied-Upstream: https://go.googlesource.com/go/+/3a72d626a8bae104c658f361d97f992f609d91e7
+Last-Update: 2016-04-29
+---
+This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
+--- a/src/cmd/link/internal/ld/lib.go
++++ b/src/cmd/link/internal/ld/lib.go
+@@ -1173,6 +1173,24 @@
+
+ argv = append(argv, ldflag...)
+
++ if flag_race != 0 {
++ // On a system where the toolchain creates position independent
++ // executables by default, tsan initialization can fail. So we pass
++ // -no-pie here, but support for that flag is quite new and we test
++ // for its support first.
++ src := filepath.Join(tmpdir, "trivial.c")
++ if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
++ Ctxt.Diag("WriteFile trivial.c failed: %v", err)
++ }
++ cmd := exec.Command(argv[0], "-c", "-no-pie", "trivial.c")
++ cmd.Dir = tmpdir
++ out, err := cmd.CombinedOutput()
++ supported := err == nil && !bytes.Contains(out, []byte("unrecognized"))
++ if supported {
++ argv = append(argv, "-no-pie")
++ }
++ }
++
+ for _, p := range strings.Fields(extldflags) {
+ argv = append(argv, p)
+
diff -pruN 1.6.3-1/debian/patches/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch 1.6.3-1ubuntu1/debian/patches/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch
--- 1.6.3-1/debian/patches/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/patches/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,59 @@
+From 8a39b89a699f6f046cca97ff29f566e3e5789ecb Mon Sep 17 00:00:00 2001
+From: Michael Hudson-Doyle
+Date: Fri, 27 May 2016 15:41:55 +1200
+Subject: [PATCH] cmd/compile: do not generate tail calls when dynamic linking
+ on ppc64le
+
+When a wrapper method calls the real implementation, it's not possible to use a
+tail call when dynamic linking on ppc64le. The bad scenario is when a local
+call is made to the wrapper: the wrapper will call the implementation, which
+might be in a different module and so set the TOC to the appropriate value for
+that module. But if it returns directly to the wrapper's caller, nothing will
+reset it to the correct value for that function.
+
+Change-Id: Icebf24c9a2a0a9a7c2bce6bd6f1358657284fb10
+---
+ misc/cgo/testshared/src/depBase/dep.go | 3 +++
+ src/cmd/compile/internal/gc/subr.go | 2 +-
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/misc/cgo/testshared/src/dep/dep.go
++++ b/misc/cgo/testshared/src/dep/dep.go
+@@ -8,6 +8,13 @@
+ array [1024]*byte
+ }
+
++type Dep struct {
++ X int
++}
++
++func (d *Dep) Method() {
++}
++
+ func F() int {
+ return V
+ }
+--- a/src/cmd/compile/internal/gc/subr.go
++++ b/src/cmd/compile/internal/gc/subr.go
+@@ -2357,7 +2357,7 @@
+ dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+
+ // generate call
+- if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
++ if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) && !(Thearch.Thechar == '9' && Ctxt.Flag_dynlink) {
+ // generate tail call: adjust pointer receiver and jump to embedded method.
+ dot = dot.Left // skip final .M
+ if !Isptr[dotlist[0].field.Type.Etype] {
+--- a/misc/cgo/testshared/src/dep2/dep2.go
++++ b/misc/cgo/testshared/src/dep2/dep2.go
+@@ -6,6 +6,10 @@
+
+ var hasProg dep.HasProg
+
++type Dep2 struct {
++ dep.Dep
++}
++
+ func G() int {
+ return dep.F() + 1
+ }
diff -pruN 1.6.3-1/debian/patches/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch 1.6.3-1ubuntu1/debian/patches/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch
--- 1.6.3-1/debian/patches/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/patches/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,113 @@
+From fe6f1eb921dea4c1ee4d3c089fa188af2bd48276 Mon Sep 17 00:00:00 2001
+From: Michael Hudson-Doyle
+Date: Thu, 2 Jun 2016 11:07:55 +1200
+Subject: [PATCH] cmd/internal/obj, runtime: fixes for defer in 386 shared
+ libraries
+
+Any defer in a shared object crashed when GOARCH=386. This turns out to be two
+bugs:
+
+ 1) Calls to morestack were not processed to be PIC safe (must have been
+ possible to trigger this another way too)
+ 2) jmpdefer needs to rewind the return address of the deferred function past
+ the instructions that load the GOT pointer into BX, not just past the call
+
+Bug 2) requires re-introducing a way for .s files to know when they are
+being compiled for dynamic linking but I've tried to do that in as minimal
+a way as possible.
+
+Fixes #15916
+
+Change-Id: Ia0d09b69ec272a176934176b8eaef5f3bfcacf04
+---
+ misc/cgo/testshared/src/dep/dep.go | 1 +
+ src/cmd/go/build.go | 11 +++++++++--
+ src/cmd/internal/obj/x86/obj6.go | 12 +++++++++++-
+ src/runtime/asm_386.s | 5 +++++
+ 4 files changed, 26 insertions(+), 3 deletions(-)
+
+--- a/misc/cgo/testshared/src/dep/dep.go
++++ b/misc/cgo/testshared/src/dep/dep.go
+@@ -16,5 +16,6 @@
+ }
+
+ func F() int {
++ defer func() {}()
+ return V
+ }
+--- a/src/cmd/go/build.go
++++ b/src/cmd/go/build.go
+@@ -410,7 +410,6 @@
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+- buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
+ default:
+ fatalf("-linkshared not supported on %s\n", platform)
+ }
+@@ -2315,7 +2314,15 @@
+ // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
+ inc := filepath.Join(goroot, "pkg", "include")
+ sfile = mkAbs(p.Dir, sfile)
+- args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile}
++ args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags}
++ if p.ImportPath == "runtime" && goarch == "386" {
++ for _, arg := range buildAsmflags {
++ if arg == "-dynlink" {
++ args = append(args, "-D=GOBUILDMODE_shared=1")
++ }
++ }
++ }
++ args = append(args, sfile)
+ if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
+ return err
+ }
+--- a/src/cmd/internal/obj/x86/obj6.go
++++ b/src/cmd/internal/obj/x86/obj6.go
+@@ -1089,6 +1089,7 @@
+ call.Mode = ctxt.Cursym.Text.Mode
+ call.As = obj.ACALL
+ call.To.Type = obj.TYPE_BRANCH
++ call.To.Name = obj.NAME_EXTERN
+ morestack := "runtime.morestack"
+ switch {
+ case ctxt.Cursym.Cfunc != 0:
+@@ -1097,8 +1098,17 @@
+ morestack = "runtime.morestack_noctxt"
+ }
+ call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
++ // When compiling 386 code for dynamic linking, the call needs to be adjusted
++ // to follow PIC rules. This in turn can insert more instructions, so we need
++ // to keep track of the start of the call (where the jump will be to) and the
++ // end (which following instructions are appended to).
++ callend := call
++ progedit(ctxt, callend)
++ for ; callend.Link != nil; callend = callend.Link {
++ progedit(ctxt, callend.Link)
++ }
+
+- jmp := obj.Appendp(ctxt, call)
++ jmp := obj.Appendp(ctxt, callend)
+ jmp.As = obj.AJMP
+ jmp.To.Type = obj.TYPE_BRANCH
+ jmp.Pcond = ctxt.Cursym.Text.Link
+--- a/src/runtime/asm_386.s
++++ b/src/runtime/asm_386.s
+@@ -530,12 +530,17 @@
+ // called from deferreturn.
+ // 1. pop the caller
+ // 2. sub 5 bytes from the callers return
++// (when building for shared libraries, subtract 16 to cover load of GOT pointer into BX)
+ // 3. jmp to the argument
+ TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
+ MOVL fv+0(FP), DX // fn
+ MOVL argp+4(FP), BX // caller sp
+ LEAL -4(BX), SP // caller sp after CALL
++#ifdef GOBUILDMODE_shared
++ SUBL $16, (SP) // return to CALL again
++#else
+ SUBL $5, (SP) // return to CALL again
++#endif
+ MOVL 0(DX), BX
+ JMP BX // but first run the deferred function
+
diff -pruN 1.6.3-1/debian/patches/series 1.6.3-1ubuntu1/debian/patches/series
--- 1.6.3-1/debian/patches/series 2016-07-19 02:52:34.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/patches/series 2016-07-20 09:45:02.000000000 +0000
@@ -0,0 +1,4 @@
+0001-s390x-port.patch
+0002-no-pie-when-race.patch
+0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch
+0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch
diff -pruN 1.6.3-1/debian/rules 1.6.3-1ubuntu1/debian/rules
--- 1.6.3-1/debian/rules 2016-07-19 02:57:21.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/rules 2016-07-20 09:45:02.000000000 +0000
@@ -7,15 +7,19 @@ export GOVER := $(shell perl -w -mDpkg::
export GOROOT := $(CURDIR)
export GOROOT_FINAL := /usr/lib/go-$(GOVER)
-DEB_HOST_ARCH_CPU := $(shell dpkg-architecture -qDEB_HOST_ARCH_CPU 2>/dev/null)
+DEB_HOST_ARCH := $(shell dpkg-architecture -qDEB_HOST_ARCH 2>/dev/null)
RUN_TESTS := true
-ifeq (ppc64, $(DEB_HOST_ARCH_CPU))
+ifeq (ppc64, $(DEB_HOST_ARCH))
RUN_TESTS := false
endif
ifneq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS)))
RUN_TESTS := false
endif
+shlib_archs = $(shell GOVER=$(GOVER) perl debian/helpers/getshlibarches.pl)
+
+multiarch := $(shell dpkg-architecture -qDEB_HOST_MULTIARCH)
+
%:
+dh --parallel $(opt_no_act) $@
@@ -53,7 +57,7 @@ override_dh_auto_clean: gencontrol
override_dh_prep:
dh_prep
- @set -e; cd debian; for x in golang-X.Y-*; do \
+ @set -e; cd debian; for x in *golang-X.Y-*; do \
sed -e 's/X.Y/$(GOVER)/g' $$x > golang-$(GOVER)-$${x##golang-X.Y-}; \
done
@@ -86,7 +90,13 @@ override_dh_install-indep:
dh_install --fail-missing
override_dh_install-arch:
+ set -ex; \
+ export PATH="$(GOROOT)/bin:$$PATH"; \
+ $(CURDIR)/debian/helpers/installshlib.sh
dh_install --fail-missing
+ # Remove .syso files of the race detector; they are binary files that
+ # are not built from source in the go source distribution.
+ find $(CURDIR)/debian/golang-$(GOVER)-src/usr/share/go-$(GOVER)/src/runtime/race -type f -name '*.syso' -delete
# Remove Plan9 rc(1) scripts
find debian/golang-$(GOVER)-src/usr/share/go-$(GOVER)/src -type f -name '*.rc' -delete
# Remove empty /usr/share/go-$(GOVER)/src from golang-$(GOVER)-go, it is provided by golang-$(GOVER)-src
@@ -101,12 +111,21 @@ override_dh_strip:
override_dh_shlibdeps:
dh_shlibdeps -Xtestdata -Xtest
+override_dh_makeshlibs:
+ LDIR=$(CURDIR)/debian/libgolang-$(GOVER)-std1/usr/lib/$(multiarch); \
+ HASH=$$(LD_LIBRARY_PATH=$$LDIR:$$LD_LIBRARY_PATH $(CURDIR)/bin/readabihash $$LDIR/libgolang-$(GOVER)-std.so.1); \
+ dh_makeshlibs -Vlibgolang-$(GOVER)-std1-$$HASH; \
+ echo "golang:Provides=libgolang-$(GOVER)-std1-$$HASH" >> $(CURDIR)/debian/libgolang-$(GOVER)-std1.substvars
+
override_dh_auto_build-arch:
[ -f VERSION ] || echo "debian snapshot +$$(dpkg-parsechangelog -SVersion)" > VERSION
export GOROOT_BOOTSTRAP=$$(env -i go env GOROOT) \
&& cd src \
&& $(CURDIR)/debian/helpers/goenv.sh \
bash ./make.bash --no-banner
+ $(CURDIR)/bin/go install -v -buildmode=shared \
+ -ldflags '-extldflags "-Wl,-soname=libgolang-$(GOVER)-std.so.1"' \
+ std
opt_no_act :=
ifneq (,$(findstring n,$(MAKEFLAGS)))
diff -pruN 1.6.3-1/debian/source/include-binaries 1.6.3-1ubuntu1/debian/source/include-binaries
--- 1.6.3-1/debian/source/include-binaries 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/source/include-binaries 2016-07-20 09:44:24.000000000 +0000
@@ -0,0 +1 @@
+src/debug/elf/testdata/go-relocation-test-gcc531-s390x.obj
diff -pruN 1.6.3-1/debian/source/lintian-overrides 1.6.3-1ubuntu1/debian/source/lintian-overrides
--- 1.6.3-1/debian/source/lintian-overrides 2016-07-19 02:52:34.000000000 +0000
+++ 1.6.3-1ubuntu1/debian/source/lintian-overrides 2016-07-20 09:45:01.000000000 +0000
@@ -8,3 +8,25 @@ golang source: source-contains-prebuilt-
golang source: source-contains-prebuilt-binary src/debug/elf/testdata/go-relocation-test-gcc441-x86-64.obj
golang source: source-contains-prebuilt-windows-binary src/debug/pe/testdata/gcc-386-mingw-obj
golang source: source-contains-prebuilt-binary src/runtime/race/race_linux_amd64.syso
+golang source: source-is-missing src/runtime/race/race_freebsd_amd64.syso
+golang source: source-is-missing src/runtime/race/race_linux_amd64.syso
+golang source: source-is-missing src/debug/dwarf/testdata/line-clang.elf
+golang source: source-is-missing src/debug/dwarf/testdata/line-gcc.elf
+golang source: source-is-missing src/debug/dwarf/testdata/typedef.elf4
+golang source: source-is-missing src/debug/elf/testdata/gcc-386-freebsd-exec
+golang source: source-is-missing src/debug/elf/testdata/gcc-amd64-linux-exec
+golang source: source-is-missing src/debug/elf/testdata/gcc-amd64-openbsd-debug-with-rela.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-clang-arm.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-clang-x86.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc424-x86-64.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc441-x86-64.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc441-x86.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc482-aarch64.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc482-ppc64le.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc492-arm.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc5-ppc.obj
+golang source: source-is-missing src/debug/elf/testdata/compressed-32.obj
+golang source: source-is-missing src/debug/elf/testdata/compressed-64.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc492-mips64.obj
+golang source: source-is-missing src/debug/elf/testdata/go-relocation-test-gcc493-mips64le.obj
+golang source: source-is-missing src/debug/elf/testdata/zdebug-test-gcc484-x86-64.obj
diff -pruN 1.6.3-1/doc/devel/release.html 1.6.3-1ubuntu1/doc/devel/release.html
--- 1.6.3-1/doc/devel/release.html 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/doc/devel/release.html 2016-07-21 13:36:09.000000000 +0000
@@ -61,6 +61,13 @@ See the go1.6 (released 2016/02/17)
+
+
+Go 1.6 is a major release of Go.
+Read the Go 1.6 Release Notes for more information.
+
+
go1.5 (released 2015/08/19)
diff -pruN 1.6.3-1/misc/cgo/test/issue9400/asm_s390x.s 1.6.3-1ubuntu1/misc/cgo/test/issue9400/asm_s390x.s
--- 1.6.3-1/misc/cgo/test/issue9400/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/misc/cgo/test/issue9400/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,26 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !gccgo
+
+#include "textflag.h"
+
+TEXT ·RewindAndSetgid(SB),NOSPLIT,$0-0
+ // Rewind stack pointer so anything that happens on the stack
+ // will clobber the test pattern created by the caller
+ ADD $(1024 * 8), R15
+
+ // Ask signaller to setgid
+ MOVD $·Baton(SB), R5
+ MOVW $1, 0(R5)
+
+ // Wait for setgid completion
+loop:
+ SYNC
+ MOVW ·Baton(SB), R3
+ CMPBNE R3, $0, loop
+
+ // Restore stack
+ SUB $(1024 * 8), R15
+ RET
diff -pruN 1.6.3-1/misc/cgo/testshared/src/dep/dep.go 1.6.3-1ubuntu1/misc/cgo/testshared/src/dep/dep.go
--- 1.6.3-1/misc/cgo/testshared/src/dep/dep.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/misc/cgo/testshared/src/dep/dep.go 2016-07-21 13:36:09.000000000 +0000
@@ -8,6 +8,14 @@ type HasProg struct {
array [1024]*byte
}
+type Dep struct {
+ X int
+}
+
+func (d *Dep) Method() {
+}
+
func F() int {
+ defer func() {}()
return V
}
diff -pruN 1.6.3-1/misc/cgo/testshared/src/dep2/dep2.go 1.6.3-1ubuntu1/misc/cgo/testshared/src/dep2/dep2.go
--- 1.6.3-1/misc/cgo/testshared/src/dep2/dep2.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/misc/cgo/testshared/src/dep2/dep2.go 2016-07-21 13:36:09.000000000 +0000
@@ -6,6 +6,10 @@ var W int = 1
var hasProg dep.HasProg
+type Dep2 struct {
+ dep.Dep
+}
+
func G() int {
return dep.F() + 1
}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/api/go1.6.txt 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/api/go1.6.txt
--- 1.6.3-1/.pc/0001-s390x-port.patch/api/go1.6.txt 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/api/go1.6.txt 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,275 @@
+pkg archive/zip, method (*ReadCloser) RegisterDecompressor(uint16, Decompressor)
+pkg archive/zip, method (*Reader) RegisterDecompressor(uint16, Decompressor)
+pkg archive/zip, method (*Writer) RegisterCompressor(uint16, Compressor)
+pkg bufio, method (*Scanner) Buffer([]uint8, int)
+pkg bufio, var ErrFinalToken error
+pkg crypto/tls, const TLS_RSA_WITH_AES_128_GCM_SHA256 = 156
+pkg crypto/tls, const TLS_RSA_WITH_AES_128_GCM_SHA256 uint16
+pkg crypto/tls, const TLS_RSA_WITH_AES_256_GCM_SHA384 = 157
+pkg crypto/tls, const TLS_RSA_WITH_AES_256_GCM_SHA384 uint16
+pkg crypto/tls, method (RecordHeaderError) Error() string
+pkg crypto/tls, type RecordHeaderError struct
+pkg crypto/tls, type RecordHeaderError struct, Msg string
+pkg crypto/tls, type RecordHeaderError struct, RecordHeader [5]uint8
+pkg crypto/x509, method (InsecureAlgorithmError) Error() string
+pkg crypto/x509, method (SignatureAlgorithm) String() string
+pkg crypto/x509, type InsecureAlgorithmError int
+pkg database/sql, method (*DB) SetConnMaxLifetime(time.Duration)
+pkg debug/dwarf, const ClassUnknown = 0
+pkg debug/dwarf, const ClassUnknown Class
+pkg debug/elf, const COMPRESS_HIOS = 1879048191
+pkg debug/elf, const COMPRESS_HIOS CompressionType
+pkg debug/elf, const COMPRESS_HIPROC = 2147483647
+pkg debug/elf, const COMPRESS_HIPROC CompressionType
+pkg debug/elf, const COMPRESS_LOOS = 1610612736
+pkg debug/elf, const COMPRESS_LOOS CompressionType
+pkg debug/elf, const COMPRESS_LOPROC = 1879048192
+pkg debug/elf, const COMPRESS_LOPROC CompressionType
+pkg debug/elf, const COMPRESS_ZLIB = 1
+pkg debug/elf, const COMPRESS_ZLIB CompressionType
+pkg debug/elf, const R_MIPS_16 = 1
+pkg debug/elf, const R_MIPS_16 R_MIPS
+pkg debug/elf, const R_MIPS_26 = 4
+pkg debug/elf, const R_MIPS_26 R_MIPS
+pkg debug/elf, const R_MIPS_32 = 2
+pkg debug/elf, const R_MIPS_32 R_MIPS
+pkg debug/elf, const R_MIPS_64 = 18
+pkg debug/elf, const R_MIPS_64 R_MIPS
+pkg debug/elf, const R_MIPS_ADD_IMMEDIATE = 34
+pkg debug/elf, const R_MIPS_ADD_IMMEDIATE R_MIPS
+pkg debug/elf, const R_MIPS_CALL16 = 11
+pkg debug/elf, const R_MIPS_CALL16 R_MIPS
+pkg debug/elf, const R_MIPS_CALL_HI16 = 30
+pkg debug/elf, const R_MIPS_CALL_HI16 R_MIPS
+pkg debug/elf, const R_MIPS_CALL_LO16 = 31
+pkg debug/elf, const R_MIPS_CALL_LO16 R_MIPS
+pkg debug/elf, const R_MIPS_DELETE = 27
+pkg debug/elf, const R_MIPS_DELETE R_MIPS
+pkg debug/elf, const R_MIPS_GOT16 = 9
+pkg debug/elf, const R_MIPS_GOT16 R_MIPS
+pkg debug/elf, const R_MIPS_GOT_DISP = 19
+pkg debug/elf, const R_MIPS_GOT_DISP R_MIPS
+pkg debug/elf, const R_MIPS_GOT_HI16 = 22
+pkg debug/elf, const R_MIPS_GOT_HI16 R_MIPS
+pkg debug/elf, const R_MIPS_GOT_LO16 = 23
+pkg debug/elf, const R_MIPS_GOT_LO16 R_MIPS
+pkg debug/elf, const R_MIPS_GOT_OFST = 21
+pkg debug/elf, const R_MIPS_GOT_OFST R_MIPS
+pkg debug/elf, const R_MIPS_GOT_PAGE = 20
+pkg debug/elf, const R_MIPS_GOT_PAGE R_MIPS
+pkg debug/elf, const R_MIPS_GPREL16 = 7
+pkg debug/elf, const R_MIPS_GPREL16 R_MIPS
+pkg debug/elf, const R_MIPS_GPREL32 = 12
+pkg debug/elf, const R_MIPS_GPREL32 R_MIPS
+pkg debug/elf, const R_MIPS_HI16 = 5
+pkg debug/elf, const R_MIPS_HI16 R_MIPS
+pkg debug/elf, const R_MIPS_HIGHER = 28
+pkg debug/elf, const R_MIPS_HIGHER R_MIPS
+pkg debug/elf, const R_MIPS_HIGHEST = 29
+pkg debug/elf, const R_MIPS_HIGHEST R_MIPS
+pkg debug/elf, const R_MIPS_INSERT_A = 25
+pkg debug/elf, const R_MIPS_INSERT_A R_MIPS
+pkg debug/elf, const R_MIPS_INSERT_B = 26
+pkg debug/elf, const R_MIPS_INSERT_B R_MIPS
+pkg debug/elf, const R_MIPS_JALR = 37
+pkg debug/elf, const R_MIPS_JALR R_MIPS
+pkg debug/elf, const R_MIPS_LITERAL = 8
+pkg debug/elf, const R_MIPS_LITERAL R_MIPS
+pkg debug/elf, const R_MIPS_LO16 = 6
+pkg debug/elf, const R_MIPS_LO16 R_MIPS
+pkg debug/elf, const R_MIPS_NONE = 0
+pkg debug/elf, const R_MIPS_NONE R_MIPS
+pkg debug/elf, const R_MIPS_PC16 = 10
+pkg debug/elf, const R_MIPS_PC16 R_MIPS
+pkg debug/elf, const R_MIPS_PJUMP = 35
+pkg debug/elf, const R_MIPS_PJUMP R_MIPS
+pkg debug/elf, const R_MIPS_REL16 = 33
+pkg debug/elf, const R_MIPS_REL16 R_MIPS
+pkg debug/elf, const R_MIPS_REL32 = 3
+pkg debug/elf, const R_MIPS_REL32 R_MIPS
+pkg debug/elf, const R_MIPS_RELGOT = 36
+pkg debug/elf, const R_MIPS_RELGOT R_MIPS
+pkg debug/elf, const R_MIPS_SCN_DISP = 32
+pkg debug/elf, const R_MIPS_SCN_DISP R_MIPS
+pkg debug/elf, const R_MIPS_SHIFT5 = 16
+pkg debug/elf, const R_MIPS_SHIFT5 R_MIPS
+pkg debug/elf, const R_MIPS_SHIFT6 = 17
+pkg debug/elf, const R_MIPS_SHIFT6 R_MIPS
+pkg debug/elf, const R_MIPS_SUB = 24
+pkg debug/elf, const R_MIPS_SUB R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPMOD32 = 38
+pkg debug/elf, const R_MIPS_TLS_DTPMOD32 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPMOD64 = 40
+pkg debug/elf, const R_MIPS_TLS_DTPMOD64 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPREL32 = 39
+pkg debug/elf, const R_MIPS_TLS_DTPREL32 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPREL64 = 41
+pkg debug/elf, const R_MIPS_TLS_DTPREL64 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPREL_HI16 = 44
+pkg debug/elf, const R_MIPS_TLS_DTPREL_HI16 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_DTPREL_LO16 = 45
+pkg debug/elf, const R_MIPS_TLS_DTPREL_LO16 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_GD = 42
+pkg debug/elf, const R_MIPS_TLS_GD R_MIPS
+pkg debug/elf, const R_MIPS_TLS_GOTTPREL = 46
+pkg debug/elf, const R_MIPS_TLS_GOTTPREL R_MIPS
+pkg debug/elf, const R_MIPS_TLS_LDM = 43
+pkg debug/elf, const R_MIPS_TLS_LDM R_MIPS
+pkg debug/elf, const R_MIPS_TLS_TPREL32 = 47
+pkg debug/elf, const R_MIPS_TLS_TPREL32 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_TPREL64 = 48
+pkg debug/elf, const R_MIPS_TLS_TPREL64 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_TPREL_HI16 = 49
+pkg debug/elf, const R_MIPS_TLS_TPREL_HI16 R_MIPS
+pkg debug/elf, const R_MIPS_TLS_TPREL_LO16 = 50
+pkg debug/elf, const R_MIPS_TLS_TPREL_LO16 R_MIPS
+pkg debug/elf, const SHF_COMPRESSED = 2048
+pkg debug/elf, const SHF_COMPRESSED SectionFlag
+pkg debug/elf, method (CompressionType) GoString() string
+pkg debug/elf, method (CompressionType) String() string
+pkg debug/elf, method (R_MIPS) GoString() string
+pkg debug/elf, method (R_MIPS) String() string
+pkg debug/elf, type Chdr32 struct
+pkg debug/elf, type Chdr32 struct, Addralign uint32
+pkg debug/elf, type Chdr32 struct, Size uint32
+pkg debug/elf, type Chdr32 struct, Type uint32
+pkg debug/elf, type Chdr64 struct
+pkg debug/elf, type Chdr64 struct, Addralign uint64
+pkg debug/elf, type Chdr64 struct, Size uint64
+pkg debug/elf, type Chdr64 struct, Type uint32
+pkg debug/elf, type CompressionType int
+pkg debug/elf, type R_MIPS int
+pkg debug/elf, type SectionHeader struct, FileSize uint64
+pkg encoding/asn1, const ClassApplication = 1
+pkg encoding/asn1, const ClassApplication ideal-int
+pkg encoding/asn1, const ClassContextSpecific = 2
+pkg encoding/asn1, const ClassContextSpecific ideal-int
+pkg encoding/asn1, const ClassPrivate = 3
+pkg encoding/asn1, const ClassPrivate ideal-int
+pkg encoding/asn1, const ClassUniversal = 0
+pkg encoding/asn1, const ClassUniversal ideal-int
+pkg encoding/asn1, const TagBitString = 3
+pkg encoding/asn1, const TagBitString ideal-int
+pkg encoding/asn1, const TagBoolean = 1
+pkg encoding/asn1, const TagBoolean ideal-int
+pkg encoding/asn1, const TagEnum = 10
+pkg encoding/asn1, const TagEnum ideal-int
+pkg encoding/asn1, const TagGeneralString = 27
+pkg encoding/asn1, const TagGeneralString ideal-int
+pkg encoding/asn1, const TagGeneralizedTime = 24
+pkg encoding/asn1, const TagGeneralizedTime ideal-int
+pkg encoding/asn1, const TagIA5String = 22
+pkg encoding/asn1, const TagIA5String ideal-int
+pkg encoding/asn1, const TagInteger = 2
+pkg encoding/asn1, const TagInteger ideal-int
+pkg encoding/asn1, const TagOID = 6
+pkg encoding/asn1, const TagOID ideal-int
+pkg encoding/asn1, const TagOctetString = 4
+pkg encoding/asn1, const TagOctetString ideal-int
+pkg encoding/asn1, const TagPrintableString = 19
+pkg encoding/asn1, const TagPrintableString ideal-int
+pkg encoding/asn1, const TagSequence = 16
+pkg encoding/asn1, const TagSequence ideal-int
+pkg encoding/asn1, const TagSet = 17
+pkg encoding/asn1, const TagSet ideal-int
+pkg encoding/asn1, const TagT61String = 20
+pkg encoding/asn1, const TagT61String ideal-int
+pkg encoding/asn1, const TagUTCTime = 23
+pkg encoding/asn1, const TagUTCTime ideal-int
+pkg encoding/asn1, const TagUTF8String = 12
+pkg encoding/asn1, const TagUTF8String ideal-int
+pkg go/build, const IgnoreVendor = 8
+pkg go/build, const IgnoreVendor ImportMode
+pkg go/build, type Package struct, InvalidGoFiles []string
+pkg go/constant, func ToComplex(Value) Value
+pkg go/constant, func ToFloat(Value) Value
+pkg go/constant, func ToInt(Value) Value
+pkg go/constant, type Value interface, ExactString() string
+pkg go/types, method (*Package) SetName(string)
+pkg go/types, type ImportMode int
+pkg go/types, type ImporterFrom interface { Import, ImportFrom }
+pkg go/types, type ImporterFrom interface, Import(string) (*Package, error)
+pkg go/types, type ImporterFrom interface, ImportFrom(string, string, ImportMode) (*Package, error)
+pkg html/template, func IsTrue(interface{}) (bool, bool)
+pkg html/template, method (*Template) DefinedTemplates() string
+pkg image, func NewNYCbCrA(Rectangle, YCbCrSubsampleRatio) *NYCbCrA
+pkg image, method (*NYCbCrA) AOffset(int, int) int
+pkg image, method (*NYCbCrA) At(int, int) color.Color
+pkg image, method (*NYCbCrA) Bounds() Rectangle
+pkg image, method (*NYCbCrA) COffset(int, int) int
+pkg image, method (*NYCbCrA) ColorModel() color.Model
+pkg image, method (*NYCbCrA) NYCbCrAAt(int, int) color.NYCbCrA
+pkg image, method (*NYCbCrA) Opaque() bool
+pkg image, method (*NYCbCrA) SubImage(Rectangle) Image
+pkg image, method (*NYCbCrA) YCbCrAt(int, int) color.YCbCr
+pkg image, method (*NYCbCrA) YOffset(int, int) int
+pkg image, type NYCbCrA struct
+pkg image, type NYCbCrA struct, A []uint8
+pkg image, type NYCbCrA struct, AStride int
+pkg image, type NYCbCrA struct, embedded YCbCr
+pkg image/color, method (NYCbCrA) RGBA() (uint32, uint32, uint32, uint32)
+pkg image/color, type NYCbCrA struct
+pkg image/color, type NYCbCrA struct, A uint8
+pkg image/color, type NYCbCrA struct, embedded YCbCr
+pkg image/color, var NYCbCrAModel Model
+pkg math/big, method (*Float) MarshalText() ([]uint8, error)
+pkg math/big, method (*Float) UnmarshalText([]uint8) error
+pkg math/big, method (*Int) Append([]uint8, int) []uint8
+pkg math/big, method (*Int) Text(int) string
+pkg math/rand, func Read([]uint8) (int, error)
+pkg math/rand, method (*Rand) Read([]uint8) (int, error)
+pkg net, type DNSError struct, IsTemporary bool
+pkg net, type Dialer struct, Cancel <-chan struct
+pkg net/http, const MethodConnect = "CONNECT"
+pkg net/http, const MethodConnect ideal-string
+pkg net/http, const MethodDelete = "DELETE"
+pkg net/http, const MethodDelete ideal-string
+pkg net/http, const MethodGet = "GET"
+pkg net/http, const MethodGet ideal-string
+pkg net/http, const MethodHead = "HEAD"
+pkg net/http, const MethodHead ideal-string
+pkg net/http, const MethodOptions = "OPTIONS"
+pkg net/http, const MethodOptions ideal-string
+pkg net/http, const MethodPatch = "PATCH"
+pkg net/http, const MethodPatch ideal-string
+pkg net/http, const MethodPost = "POST"
+pkg net/http, const MethodPost ideal-string
+pkg net/http, const MethodPut = "PUT"
+pkg net/http, const MethodPut ideal-string
+pkg net/http, const MethodTrace = "TRACE"
+pkg net/http, const MethodTrace ideal-string
+pkg net/http, const StatusNetworkAuthenticationRequired = 511
+pkg net/http, const StatusNetworkAuthenticationRequired ideal-int
+pkg net/http, const StatusPreconditionRequired = 428
+pkg net/http, const StatusPreconditionRequired ideal-int
+pkg net/http, const StatusRequestHeaderFieldsTooLarge = 431
+pkg net/http, const StatusRequestHeaderFieldsTooLarge ideal-int
+pkg net/http, const StatusTooManyRequests = 429
+pkg net/http, const StatusTooManyRequests ideal-int
+pkg net/http, const StatusUnavailableForLegalReasons = 451
+pkg net/http, const StatusUnavailableForLegalReasons ideal-int
+pkg net/http, type Transport struct, ExpectContinueTimeout time.Duration
+pkg net/http, type Transport struct, TLSNextProto map[string]func(string, *tls.Conn) RoundTripper
+pkg net/http, var ErrSkipAltProtocol error
+pkg net/http/httptest, method (*ResponseRecorder) WriteString(string) (int, error)
+pkg net/http/httputil, type BufferPool interface { Get, Put }
+pkg net/http/httputil, type BufferPool interface, Get() []uint8
+pkg net/http/httputil, type BufferPool interface, Put([]uint8)
+pkg net/http/httputil, type ReverseProxy struct, BufferPool BufferPool
+pkg net/url, method (*Error) Temporary() bool
+pkg net/url, method (*Error) Timeout() bool
+pkg net/url, method (InvalidHostError) Error() string
+pkg net/url, type InvalidHostError string
+pkg os/exec, type ExitError struct, Stderr []uint8
+pkg regexp, method (*Regexp) Copy() *Regexp
+pkg runtime/debug, func SetTraceback(string)
+pkg strconv, func AppendQuoteRuneToGraphic([]uint8, int32) []uint8
+pkg strconv, func AppendQuoteToGraphic([]uint8, string) []uint8
+pkg strconv, func IsGraphic(int32) bool
+pkg strconv, func QuoteRuneToGraphic(int32) string
+pkg strconv, func QuoteToGraphic(string) string
+pkg text/template, func IsTrue(interface{}) (bool, bool)
+pkg text/template, method (ExecError) Error() string
+pkg text/template, type ExecError struct
+pkg text/template, type ExecError struct, Err error
+pkg text/template, type ExecError struct, Name string
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/doc/devel/release.html 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/doc/devel/release.html
--- 1.6.3-1/.pc/0001-s390x-port.patch/doc/devel/release.html 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/doc/devel/release.html 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,247 @@
+
+
+
This page summarizes the changes between official stable releases of Go.
+The change log has the full details.
+
+To update to a specific release, use:
+
+
+git pull
+git checkout release-branch
+
+
+Release Policy
+
+
+Each major Go release obsoletes and ends support for the previous one.
+For example, if Go 1.5 has been released, then it is the current release
+and Go 1.4 and earlier are no longer supported.
+We fix critical problems in the current release as needed by issuing minor revisions
+(for example, Go 1.5.1, Go 1.5.2, and so on).
+
+
+
+As a special case, we issue minor revisions for critical security problems
+in both the current release and the previous release.
+For example, if Go 1.5 is the current release then we will issue minor revisions
+to fix critical security problems in both Go 1.4 and Go 1.5 as they arise.
+See the security policy for more details.
+
+
+go1.6 (released 2016/02/17)
+
+
+Go 1.6 is a major release of Go.
+Read the Go 1.6 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.6.1 (released 2016/04/12) includes two security fixes.
+See the Go
+1.6.1 milestone on our issue tracker for details.
+
+
+
+go1.6.2 (released 2016/04/20) includes fixes to the compiler, runtime, tools,
+documentation, and the mime/multipart
, net/http
, and
+sort
packages.
+See the Go
+1.6.2 milestone on our issue tracker for details.
+
+
+
+go1.6.3 (released 2016/07/17) includes security fixes to the
+net/http/cgi
package and net/http
package when used in
+a CGI environment. This release also adds support for macOS Sierra.
+See the Go
+1.6.3 milestone on our issue tracker for details.
+
+
+go1.5 (released 2015/08/19)
+
+
+Go 1.5 is a major release of Go.
+Read the Go 1.5 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.5.1 (released 2015/09/08) includes bug fixes to the compiler, assembler, and
+the fmt
, net/textproto
, net/http
, and
+runtime
packages.
+See the Go
+1.5.1 milestone on our issue tracker for details.
+
+
+
+go1.5.2 (released 2015/12/02) includes bug fixes to the compiler, linker, and
+the mime/multipart
, net
, and runtime
+packages.
+See the Go
+1.5.2 milestone on our issue tracker for details.
+
+
+
+go1.5.3 (released 2016/01/13) includes a security fix to the math/big
package
+affecting the crypto/tls
package.
+See the Go 1.5.3 milestone on our issue tracker
+and the release announcement for details.
+
+
+
+go1.5.4 (released 2016/04/12) includes two security fixes.
+It contains the same fixes as Go 1.6.1 and was released at the same time.
+See the Go
+1.6.1 milestone on our issue tracker for details.
+
+
+go1.4 (released 2014/12/10)
+
+
+Go 1.4 is a major release of Go.
+Read the Go 1.4 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.4.1 (released 2015/01/15) includes bug fixes to the linker and the log
, syscall
, and runtime
packages.
+See the Go 1.4.1 milestone on our issue tracker for details.
+
+
+
+go1.4.2 (released 2015/02/17) includes bug fixes to the go
command, the compiler and linker, and the runtime
, syscall
, reflect
, and math/big
packages.
+See the Go 1.4.2 milestone on our issue tracker for details.
+
+
+
+go1.4.3 (released 2015/09/22) includes security fixes to the net/http
package and bug fixes to the runtime
package.
+See the Go 1.4.3 milestone on our issue tracker for details.
+
+
+go1.3 (released 2014/06/18)
+
+
+Go 1.3 is a major release of Go.
+Read the Go 1.3 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.3.1 (released 2014/08/13) includes bug fixes to the compiler and the runtime
, net
, and crypto/rsa
packages.
+See the change history for details.
+
+
+
+go1.3.2 (released 2014/09/25) includes bug fixes to cgo and the crypto/tls packages.
+See the change history for details.
+
+
+
+go1.3.3 (released 2014/09/30) includes further bug fixes to cgo, the runtime package, and the nacl port.
+See the change history for details.
+
+
+go1.2 (released 2013/12/01)
+
+
+Go 1.2 is a major release of Go.
+Read the Go 1.2 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.2.1 (released 2014/03/02) includes bug fixes to the runtime
, net
, and database/sql
packages.
+See the change history for details.
+
+
+
+go1.2.2 (released 2014/05/05) includes a
+security fix
+that affects the tour binary included in the binary distributions (thanks to Guillaume T).
+
+
+go1.1 (released 2013/05/13)
+
+
+Go 1.1 is a major release of Go.
+Read the Go 1.1 Release Notes for more information.
+
+
+Minor revisions
+
+
+go1.1.1 (released 2013/06/13) includes several compiler and runtime bug fixes.
+See the change history for details.
+
+
+
+go1.1.2 (released 2013/08/13) includes fixes to the gc
compiler
+and cgo
, and the bufio
, runtime
,
+syscall
, and time
packages.
+See the change history for details.
+If you use package syscall's Getrlimit
and Setrlimit
+functions under Linux on the ARM or 386 architectures, please note change
+55ac276af5a7
+that fixes issue 5949.
+
+
+go1 (released 2012/03/28)
+
+
+Go 1 is a major release of Go that will be stable in the long term.
+Read the Go 1 Release Notes for more information.
+
+
+
+It is intended that programs written for Go 1 will continue to compile and run
+correctly, unchanged, under future versions of Go 1.
+Read the Go 1 compatibility document for more
+about the future of Go 1.
+
+
+
+The go1 release corresponds to
+weekly.2012-03-27
.
+
+
+Minor revisions
+
+
+go1.0.1 (released 2012/04/25) was issued to
+fix an
+escape analysis bug
+that can lead to memory corruption.
+It also includes several minor code and documentation fixes.
+
+
+
+go1.0.2 (released 2012/06/13) was issued to fix two bugs in the implementation
+of maps using struct or array keys:
+issue 3695 and
+issue 3573.
+It also includes many minor code and documentation fixes.
+
+
+
+go1.0.3 (released 2012/09/21) includes minor code and documentation fixes.
+
+
+
+See the go1 release branch history for the complete list of changes.
+
+
+Older releases
+
+
+See the Pre-Go 1 Release History page for notes
+on earlier releases.
+
+
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/arch/arch.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/arch/arch.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/arch/arch.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/arch/arch.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,428 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arch
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/mips"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/x86"
+ "fmt"
+ "strings"
+)
+
+// Pseudo-registers whose names are the constant name without the leading R.
+const (
+ RFP = -(iota + 1)
+ RSB
+ RSP
+ RPC
+)
+
+// Arch wraps the link architecture object with more architecture-specific information.
+type Arch struct {
+ *obj.LinkArch
+ // Map of instruction names to enumeration.
+ Instructions map[string]int
+ // Map of register names to enumeration.
+ Register map[string]int16
+ // Table of register prefix names. These are things like R for R(0) and SPR for SPR(268).
+ RegisterPrefix map[string]bool
+ // RegisterNumber converts R(10) into arm.REG_R10.
+ RegisterNumber func(string, int16) (int16, bool)
+ // Instruction is a jump.
+ IsJump func(word string) bool
+}
+
+// nilRegisterNumber is the register number function for architectures
+// that do not accept the R(N) notation. It always returns failure.
+func nilRegisterNumber(name string, n int16) (int16, bool) {
+ return 0, false
+}
+
+var Pseudos = map[string]int{
+ "DATA": obj.ADATA,
+ "FUNCDATA": obj.AFUNCDATA,
+ "GLOBL": obj.AGLOBL,
+ "PCDATA": obj.APCDATA,
+ "TEXT": obj.ATEXT,
+}
+
+// Set configures the architecture specified by GOARCH and returns its representation.
+// It returns nil if GOARCH is not recognized.
+func Set(GOARCH string) *Arch {
+ switch GOARCH {
+ case "386":
+ return archX86(&x86.Link386)
+ case "amd64":
+ return archX86(&x86.Linkamd64)
+ case "amd64p32":
+ return archX86(&x86.Linkamd64p32)
+ case "arm":
+ return archArm()
+ case "arm64":
+ return archArm64()
+ case "mips64":
+ a := archMips64()
+ a.LinkArch = &mips.Linkmips64
+ return a
+ case "mips64le":
+ a := archMips64()
+ a.LinkArch = &mips.Linkmips64le
+ return a
+ case "ppc64":
+ a := archPPC64()
+ a.LinkArch = &ppc64.Linkppc64
+ return a
+ case "ppc64le":
+ a := archPPC64()
+ a.LinkArch = &ppc64.Linkppc64le
+ return a
+ }
+ return nil
+}
+
+func jumpX86(word string) bool {
+ return word[0] == 'J' || word == "CALL" || strings.HasPrefix(word, "LOOP") || word == "XBEGIN"
+}
+
+func archX86(linkArch *obj.LinkArch) *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ for i, s := range x86.Register {
+ register[s] = int16(i + x86.REG_AL)
+ }
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Register prefix not used on this architecture.
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range x86.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABaseAMD64
+ }
+ }
+ // Annoying aliases.
+ instructions["JA"] = x86.AJHI /* alternate */
+ instructions["JAE"] = x86.AJCC /* alternate */
+ instructions["JB"] = x86.AJCS /* alternate */
+ instructions["JBE"] = x86.AJLS /* alternate */
+ instructions["JC"] = x86.AJCS /* alternate */
+ instructions["JCC"] = x86.AJCC /* carry clear (CF = 0) */
+ instructions["JCS"] = x86.AJCS /* carry set (CF = 1) */
+ instructions["JE"] = x86.AJEQ /* alternate */
+ instructions["JEQ"] = x86.AJEQ /* equal (ZF = 1) */
+ instructions["JG"] = x86.AJGT /* alternate */
+ instructions["JGE"] = x86.AJGE /* greater than or equal (signed) (SF = OF) */
+ instructions["JGT"] = x86.AJGT /* greater than (signed) (ZF = 0 && SF = OF) */
+ instructions["JHI"] = x86.AJHI /* higher (unsigned) (CF = 0 && ZF = 0) */
+ instructions["JHS"] = x86.AJCC /* alternate */
+ instructions["JL"] = x86.AJLT /* alternate */
+ instructions["JLE"] = x86.AJLE /* less than or equal (signed) (ZF = 1 || SF != OF) */
+ instructions["JLO"] = x86.AJCS /* alternate */
+ instructions["JLS"] = x86.AJLS /* lower or same (unsigned) (CF = 1 || ZF = 1) */
+ instructions["JLT"] = x86.AJLT /* less than (signed) (SF != OF) */
+ instructions["JMI"] = x86.AJMI /* negative (minus) (SF = 1) */
+ instructions["JNA"] = x86.AJLS /* alternate */
+ instructions["JNAE"] = x86.AJCS /* alternate */
+ instructions["JNB"] = x86.AJCC /* alternate */
+ instructions["JNBE"] = x86.AJHI /* alternate */
+ instructions["JNC"] = x86.AJCC /* alternate */
+ instructions["JNE"] = x86.AJNE /* not equal (ZF = 0) */
+ instructions["JNG"] = x86.AJLE /* alternate */
+ instructions["JNGE"] = x86.AJLT /* alternate */
+ instructions["JNL"] = x86.AJGE /* alternate */
+ instructions["JNLE"] = x86.AJGT /* alternate */
+ instructions["JNO"] = x86.AJOC /* alternate */
+ instructions["JNP"] = x86.AJPC /* alternate */
+ instructions["JNS"] = x86.AJPL /* alternate */
+ instructions["JNZ"] = x86.AJNE /* alternate */
+ instructions["JO"] = x86.AJOS /* alternate */
+ instructions["JOC"] = x86.AJOC /* overflow clear (OF = 0) */
+ instructions["JOS"] = x86.AJOS /* overflow set (OF = 1) */
+ instructions["JP"] = x86.AJPS /* alternate */
+ instructions["JPC"] = x86.AJPC /* parity clear (PF = 0) */
+ instructions["JPE"] = x86.AJPS /* alternate */
+ instructions["JPL"] = x86.AJPL /* non-negative (plus) (SF = 0) */
+ instructions["JPO"] = x86.AJPC /* alternate */
+ instructions["JPS"] = x86.AJPS /* parity set (PF = 1) */
+ instructions["JS"] = x86.AJMI /* alternate */
+ instructions["JZ"] = x86.AJEQ /* alternate */
+ instructions["MASKMOVDQU"] = x86.AMASKMOVOU
+ instructions["MOVD"] = x86.AMOVQ
+ instructions["MOVDQ2Q"] = x86.AMOVQ
+ instructions["MOVNTDQ"] = x86.AMOVNTO
+ instructions["MOVOA"] = x86.AMOVO
+ instructions["PF2ID"] = x86.APF2IL
+ instructions["PI2FD"] = x86.API2FL
+ instructions["PSLLDQ"] = x86.APSLLO
+ instructions["PSRLDQ"] = x86.APSRLO
+ instructions["PADDD"] = x86.APADDL
+
+ return &Arch{
+ LinkArch: linkArch,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: nil,
+ RegisterNumber: nilRegisterNumber,
+ IsJump: jumpX86,
+ }
+}
+
+func archArm() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // Note that there is no list of names as there is for x86.
+ for i := arm.REG_R0; i < arm.REG_SPSR; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ // Avoid unintentionally clobbering g using R10.
+ delete(register, "R10")
+ register["g"] = arm.REG_R10
+ for i := 0; i < 16; i++ {
+ register[fmt.Sprintf("C%d", i)] = int16(i)
+ }
+
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ register["SP"] = RSP
+ registerPrefix := map[string]bool{
+ "F": true,
+ "R": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range arm.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABaseARM
+ }
+ }
+ // Annoying aliases.
+ instructions["B"] = obj.AJMP
+ instructions["BL"] = obj.ACALL
+ // MCR differs from MRC by the way fields of the word are encoded.
+ // (Details in arm.go). Here we add the instruction so parse will find
+ // it, but give it an opcode number known only to us.
+ instructions["MCR"] = aMCR
+
+ return &Arch{
+ LinkArch: &arm.Linkarm,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: armRegisterNumber,
+ IsJump: jumpArm,
+ }
+}
+
+func archArm64() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // Note that there is no list of names as there is for 386 and amd64.
+ register[arm64.Rconv(arm64.REGSP)] = int16(arm64.REGSP)
+ for i := arm64.REG_R0; i <= arm64.REG_R31; i++ {
+ register[arm64.Rconv(i)] = int16(i)
+ }
+ for i := arm64.REG_F0; i <= arm64.REG_F31; i++ {
+ register[arm64.Rconv(i)] = int16(i)
+ }
+ for i := arm64.REG_V0; i <= arm64.REG_V31; i++ {
+ register[arm64.Rconv(i)] = int16(i)
+ }
+ register["LR"] = arm64.REGLINK
+ register["DAIF"] = arm64.REG_DAIF
+ register["NZCV"] = arm64.REG_NZCV
+ register["FPSR"] = arm64.REG_FPSR
+ register["FPCR"] = arm64.REG_FPCR
+ register["SPSR_EL1"] = arm64.REG_SPSR_EL1
+ register["ELR_EL1"] = arm64.REG_ELR_EL1
+ register["SPSR_EL2"] = arm64.REG_SPSR_EL2
+ register["ELR_EL2"] = arm64.REG_ELR_EL2
+ register["CurrentEL"] = arm64.REG_CurrentEL
+ register["SP_EL0"] = arm64.REG_SP_EL0
+ register["SPSel"] = arm64.REG_SPSel
+ register["DAIFSet"] = arm64.REG_DAIFSet
+ register["DAIFClr"] = arm64.REG_DAIFClr
+ // Conditional operators, like EQ, NE, etc.
+ register["EQ"] = arm64.COND_EQ
+ register["NE"] = arm64.COND_NE
+ register["HS"] = arm64.COND_HS
+ register["CS"] = arm64.COND_HS
+ register["LO"] = arm64.COND_LO
+ register["CC"] = arm64.COND_LO
+ register["MI"] = arm64.COND_MI
+ register["PL"] = arm64.COND_PL
+ register["VS"] = arm64.COND_VS
+ register["VC"] = arm64.COND_VC
+ register["HI"] = arm64.COND_HI
+ register["LS"] = arm64.COND_LS
+ register["GE"] = arm64.COND_GE
+ register["LT"] = arm64.COND_LT
+ register["GT"] = arm64.COND_GT
+ register["LE"] = arm64.COND_LE
+ register["AL"] = arm64.COND_AL
+ register["NV"] = arm64.COND_NV
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ register["SP"] = RSP
+ // Avoid unintentionally clobbering g using R28.
+ delete(register, "R28")
+ register["g"] = arm64.REG_R28
+ registerPrefix := map[string]bool{
+ "F": true,
+ "R": true,
+ "V": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range arm64.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABaseARM64
+ }
+ }
+ // Annoying aliases.
+ instructions["B"] = arm64.AB
+ instructions["BL"] = arm64.ABL
+
+ return &Arch{
+ LinkArch: &arm64.Linkarm64,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: arm64RegisterNumber,
+ IsJump: jumpArm64,
+ }
+
+}
+
+func archPPC64() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // Note that there is no list of names as there is for x86.
+ for i := ppc64.REG_R0; i <= ppc64.REG_R31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := ppc64.REG_F0; i <= ppc64.REG_F31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := ppc64.REG_CR0; i <= ppc64.REG_CR7; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := ppc64.REG_MSR; i <= ppc64.REG_CR; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ register["CR"] = ppc64.REG_CR
+ register["XER"] = ppc64.REG_XER
+ register["LR"] = ppc64.REG_LR
+ register["CTR"] = ppc64.REG_CTR
+ register["FPSCR"] = ppc64.REG_FPSCR
+ register["MSR"] = ppc64.REG_MSR
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Avoid unintentionally clobbering g using R30.
+ delete(register, "R30")
+ register["g"] = ppc64.REG_R30
+ registerPrefix := map[string]bool{
+ "CR": true,
+ "F": true,
+ "R": true,
+ "SPR": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range ppc64.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABasePPC64
+ }
+ }
+ // Annoying aliases.
+ instructions["BR"] = ppc64.ABR
+ instructions["BL"] = ppc64.ABL
+
+ return &Arch{
+ LinkArch: &ppc64.Linkppc64,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: ppc64RegisterNumber,
+ IsJump: jumpPPC64,
+ }
+}
+
+func archMips64() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // Note that there is no list of names as there is for x86.
+ for i := mips.REG_R0; i <= mips.REG_R31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := mips.REG_F0; i <= mips.REG_F31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := mips.REG_M0; i <= mips.REG_M31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := mips.REG_FCR0; i <= mips.REG_FCR31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ register["HI"] = mips.REG_HI
+ register["LO"] = mips.REG_LO
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Avoid unintentionally clobbering g using R30.
+ delete(register, "R30")
+ register["g"] = mips.REG_R30
+ registerPrefix := map[string]bool{
+ "F": true,
+ "FCR": true,
+ "M": true,
+ "R": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range mips.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABaseMIPS64
+ }
+ }
+ // Annoying alias.
+ instructions["JAL"] = mips.AJAL
+
+ return &Arch{
+ LinkArch: &mips.Linkmips64,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: mipsRegisterNumber,
+ IsJump: jumpMIPS64,
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/asm.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/asm.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/asm.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/asm.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,731 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "bytes"
+ "fmt"
+ "text/scanner"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/flags"
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+)
+
+// TODO: configure the architecture
+
+var testOut *bytes.Buffer // Gathers output when testing.
+
+// append adds the Prog to the end of the program-thus-far.
+// If doLabel is set, it also defines the labels collected for this Prog.
+func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) {
+ if cond != "" {
+ switch p.arch.Thechar {
+ case '5':
+ if !arch.ARMConditionCodes(prog, cond) {
+ p.errorf("unrecognized condition code .%q", cond)
+ return
+ }
+
+ case '7':
+ if !arch.ARM64Suffix(prog, cond) {
+ p.errorf("unrecognized suffix .%q", cond)
+ return
+ }
+
+ default:
+ p.errorf("unrecognized suffix .%q", cond)
+ return
+ }
+ }
+ if p.firstProg == nil {
+ p.firstProg = prog
+ } else {
+ p.lastProg.Link = prog
+ }
+ p.lastProg = prog
+ if doLabel {
+ p.pc++
+ for _, label := range p.pendingLabels {
+ if p.labels[label] != nil {
+ p.errorf("label %q multiply defined", label)
+ return
+ }
+ p.labels[label] = prog
+ }
+ p.pendingLabels = p.pendingLabels[0:0]
+ }
+ prog.Pc = int64(p.pc)
+ if *flags.Debug {
+ fmt.Println(p.histLineNum, prog)
+ }
+ if testOut != nil {
+ fmt.Fprintln(testOut, prog)
+ }
+}
+
+// validSymbol checks that addr represents a valid name for a pseudo-op.
+func (p *Parser) validSymbol(pseudo string, addr *obj.Addr, offsetOk bool) bool {
+ if addr.Name != obj.NAME_EXTERN && addr.Name != obj.NAME_STATIC || addr.Scale != 0 || addr.Reg != 0 {
+ p.errorf("%s symbol %q must be a symbol(SB)", pseudo, symbolName(addr))
+ return false
+ }
+ if !offsetOk && addr.Offset != 0 {
+ p.errorf("%s symbol %q must not be offset from SB", pseudo, symbolName(addr))
+ return false
+ }
+ return true
+}
+
+// evalInteger evaluates an integer constant for a pseudo-op.
+func (p *Parser) evalInteger(pseudo string, operands []lex.Token) int64 {
+ addr := p.address(operands)
+ return p.getConstantPseudo(pseudo, &addr)
+}
+
+// validImmediate checks that addr represents an immediate constant.
+func (p *Parser) validImmediate(pseudo string, addr *obj.Addr) bool {
+ if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
+ p.errorf("%s: expected immediate constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
+ return false
+ }
+ return true
+}
+
+// asmText assembles a TEXT pseudo-op.
+// TEXT runtime·sigtramp(SB),4,$0-0
+func (p *Parser) asmText(word string, operands [][]lex.Token) {
+ if len(operands) != 2 && len(operands) != 3 {
+ p.errorf("expect two or three operands for TEXT")
+ return
+ }
+
+ // Labels are function scoped. Patch existing labels and
+ // create a new label space for this TEXT.
+ p.patch()
+ p.labels = make(map[string]*obj.Prog)
+
+ // Operand 0 is the symbol name in the form foo(SB).
+ // That means symbol plus indirect on SB and no offset.
+ nameAddr := p.address(operands[0])
+ if !p.validSymbol("TEXT", &nameAddr, false) {
+ return
+ }
+ name := symbolName(&nameAddr)
+ next := 1
+
+ // Next operand is the optional text flag, a literal integer.
+ var flag = int64(0)
+ if len(operands) == 3 {
+ flag = p.evalInteger("TEXT", operands[1])
+ next++
+ }
+
+ // Next operand is the frame and arg size.
+ // Bizarre syntax: $frameSize-argSize is two words, not subtraction.
+ // Both frameSize and argSize must be simple integers; only frameSize
+ // can be negative.
+ // The "-argSize" may be missing; if so, set it to obj.ArgsSizeUnknown.
+ // Parse left to right.
+ op := operands[next]
+ if len(op) < 2 || op[0].ScanToken != '$' {
+ p.errorf("TEXT %s: frame size must be an immediate constant", name)
+ return
+ }
+ op = op[1:]
+ negative := false
+ if op[0].ScanToken == '-' {
+ negative = true
+ op = op[1:]
+ }
+ if len(op) == 0 || op[0].ScanToken != scanner.Int {
+ p.errorf("TEXT %s: frame size must be an immediate constant", name)
+ return
+ }
+ frameSize := p.positiveAtoi(op[0].String())
+ if negative {
+ frameSize = -frameSize
+ }
+ op = op[1:]
+ argSize := int64(obj.ArgsSizeUnknown)
+ if len(op) > 0 {
+ // There is an argument size. It must be a minus sign followed by a non-negative integer literal.
+ if len(op) != 2 || op[0].ScanToken != '-' || op[1].ScanToken != scanner.Int {
+ p.errorf("TEXT %s: argument size must be of form -integer", name)
+ return
+ }
+ argSize = p.positiveAtoi(op[1].String())
+ }
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ As: obj.ATEXT,
+ Lineno: p.histLineNum,
+ From: nameAddr,
+ From3: &obj.Addr{
+ Type: obj.TYPE_CONST,
+ Offset: flag,
+ },
+ To: obj.Addr{
+ Type: obj.TYPE_TEXTSIZE,
+ Offset: frameSize,
+ // Argsize set below.
+ },
+ }
+ prog.To.Val = int32(argSize)
+
+ p.append(prog, "", true)
+}
+
+// asmData assembles a DATA pseudo-op.
+// DATA masks<>+0x00(SB)/4, $0x00000000
+func (p *Parser) asmData(word string, operands [][]lex.Token) {
+ if len(operands) != 2 {
+ p.errorf("expect two operands for DATA")
+ return
+ }
+
+ // Operand 0 has the general form foo<>+0x04(SB)/4.
+ op := operands[0]
+ n := len(op)
+ if n < 3 || op[n-2].ScanToken != '/' || op[n-1].ScanToken != scanner.Int {
+ p.errorf("expect /size for DATA argument")
+ return
+ }
+ scale := p.parseScale(op[n-1].String())
+ op = op[:n-2]
+ nameAddr := p.address(op)
+ if !p.validSymbol("DATA", &nameAddr, true) {
+ return
+ }
+ name := symbolName(&nameAddr)
+
+ // Operand 1 is an immediate constant or address.
+ valueAddr := p.address(operands[1])
+ switch valueAddr.Type {
+ case obj.TYPE_CONST, obj.TYPE_FCONST, obj.TYPE_SCONST, obj.TYPE_ADDR:
+ // OK
+ default:
+ p.errorf("DATA value must be an immediate constant or address")
+ return
+ }
+
+ // The addresses must not overlap. Easiest test: require monotonicity.
+ if lastAddr, ok := p.dataAddr[name]; ok && nameAddr.Offset < lastAddr {
+ p.errorf("overlapping DATA entry for %s", name)
+ return
+ }
+ p.dataAddr[name] = nameAddr.Offset + int64(scale)
+
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ As: obj.ADATA,
+ Lineno: p.histLineNum,
+ From: nameAddr,
+ From3: &obj.Addr{
+ Offset: int64(scale),
+ },
+ To: valueAddr,
+ }
+
+ p.append(prog, "", false)
+}
+
+// asmGlobl assembles a GLOBL pseudo-op.
+// GLOBL shifts<>(SB),8,$256
+// GLOBL shifts<>(SB),$256
+func (p *Parser) asmGlobl(word string, operands [][]lex.Token) {
+ if len(operands) != 2 && len(operands) != 3 {
+ p.errorf("expect two or three operands for GLOBL")
+ return
+ }
+
+ // Operand 0 has the general form foo<>+0x04(SB).
+ nameAddr := p.address(operands[0])
+ if !p.validSymbol("GLOBL", &nameAddr, false) {
+ return
+ }
+ next := 1
+
+ // Next operand is the optional flag, a literal integer.
+ var flag = int64(0)
+ if len(operands) == 3 {
+ flag = p.evalInteger("GLOBL", operands[1])
+ next++
+ }
+
+ // Final operand is an immediate constant.
+ addr := p.address(operands[next])
+ if !p.validImmediate("GLOBL", &addr) {
+ return
+ }
+
+ // log.Printf("GLOBL %s %d, $%d", name, flag, size)
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ As: obj.AGLOBL,
+ Lineno: p.histLineNum,
+ From: nameAddr,
+ From3: &obj.Addr{
+ Offset: flag,
+ },
+ To: addr,
+ }
+ p.append(prog, "", false)
+}
+
+// asmPCData assembles a PCDATA pseudo-op.
+// PCDATA $2, $705
+func (p *Parser) asmPCData(word string, operands [][]lex.Token) {
+ if len(operands) != 2 {
+ p.errorf("expect two operands for PCDATA")
+ return
+ }
+
+ // Operand 0 must be an immediate constant.
+ key := p.address(operands[0])
+ if !p.validImmediate("PCDATA", &key) {
+ return
+ }
+
+ // Operand 1 must be an immediate constant.
+ value := p.address(operands[1])
+ if !p.validImmediate("PCDATA", &value) {
+ return
+ }
+
+ // log.Printf("PCDATA $%d, $%d", key.Offset, value.Offset)
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ As: obj.APCDATA,
+ Lineno: p.histLineNum,
+ From: key,
+ To: value,
+ }
+ p.append(prog, "", true)
+}
+
+// asmFuncData assembles a FUNCDATA pseudo-op.
+// FUNCDATA $1, funcdata<>+4(SB)
+func (p *Parser) asmFuncData(word string, operands [][]lex.Token) {
+ if len(operands) != 2 {
+ p.errorf("expect two operands for FUNCDATA")
+ return
+ }
+
+ // Operand 0 must be an immediate constant.
+ valueAddr := p.address(operands[0])
+ if !p.validImmediate("FUNCDATA", &valueAddr) {
+ return
+ }
+
+ // Operand 1 is a symbol name in the form foo(SB).
+ nameAddr := p.address(operands[1])
+ if !p.validSymbol("FUNCDATA", &nameAddr, true) {
+ return
+ }
+
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ As: obj.AFUNCDATA,
+ Lineno: p.histLineNum,
+ From: valueAddr,
+ To: nameAddr,
+ }
+ p.append(prog, "", true)
+}
+
+// asmJump assembles a jump instruction.
+// JMP R1
+// JMP exit
+// JMP 3(PC)
+func (p *Parser) asmJump(op int, cond string, a []obj.Addr) {
+ var target *obj.Addr
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ Lineno: p.histLineNum,
+ As: int16(op),
+ }
+ switch len(a) {
+ case 1:
+ target = &a[0]
+ case 2:
+ // Special 2-operand jumps.
+ target = &a[1]
+ prog.From = a[0]
+ case 3:
+ if p.arch.Thechar == '9' {
+ // Special 3-operand jumps.
+ // First two must be constants; a[1] is a register number.
+ target = &a[2]
+ prog.From = obj.Addr{
+ Type: obj.TYPE_CONST,
+ Offset: p.getConstant(prog, op, &a[0]),
+ }
+ reg := int16(p.getConstant(prog, op, &a[1]))
+ reg, ok := p.arch.RegisterNumber("R", int16(reg))
+ if !ok {
+ p.errorf("bad register number %d", reg)
+ return
+ }
+ prog.Reg = reg
+ break
+ }
+ if p.arch.Thechar == '0' {
+ // 3-operand jumps.
+ // First two must be registers
+ target = &a[2]
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ }
+ fallthrough
+ default:
+ p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
+ return
+ }
+ switch {
+ case target.Type == obj.TYPE_BRANCH:
+ // JMP 4(PC)
+ prog.To = obj.Addr{
+ Type: obj.TYPE_BRANCH,
+ Offset: p.pc + 1 + target.Offset, // +1 because p.pc is incremented in append, below.
+ }
+ case target.Type == obj.TYPE_REG:
+ // JMP R1
+ prog.To = *target
+ case target.Type == obj.TYPE_MEM && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
+ // JMP main·morestack(SB)
+ prog.To = *target
+ case target.Type == obj.TYPE_INDIR && (target.Name == obj.NAME_EXTERN || target.Name == obj.NAME_STATIC):
+ // JMP *main·morestack(SB)
+ prog.To = *target
+ prog.To.Type = obj.TYPE_INDIR
+ case target.Type == obj.TYPE_MEM && target.Reg == 0 && target.Offset == 0:
+ // JMP exit
+ if target.Sym == nil {
+ // Parse error left name unset.
+ return
+ }
+ targetProg := p.labels[target.Sym.Name]
+ if targetProg == nil {
+ p.toPatch = append(p.toPatch, Patch{prog, target.Sym.Name})
+ } else {
+ p.branch(prog, targetProg)
+ }
+ case target.Type == obj.TYPE_MEM && target.Name == obj.NAME_NONE:
+ // JMP 4(R0)
+ prog.To = *target
+ // On the ppc64, 9a encodes BR (CTR) as BR CTR. We do the same.
+ if p.arch.Thechar == '9' && target.Offset == 0 {
+ prog.To.Type = obj.TYPE_REG
+ }
+ case target.Type == obj.TYPE_CONST:
+ // JMP $4
+ prog.To = a[0]
+ default:
+ p.errorf("cannot assemble jump %+v", target)
+ return
+ }
+
+ p.append(prog, cond, true)
+}
+
+func (p *Parser) patch() {
+ for _, patch := range p.toPatch {
+ targetProg := p.labels[patch.label]
+ if targetProg == nil {
+ p.errorf("undefined label %s", patch.label)
+ return
+ }
+ p.branch(patch.prog, targetProg)
+ }
+ p.toPatch = p.toPatch[:0]
+}
+
+func (p *Parser) branch(jmp, target *obj.Prog) {
+ jmp.To = obj.Addr{
+ Type: obj.TYPE_BRANCH,
+ Index: 0,
+ }
+ jmp.To.Val = target
+}
+
+// asmInstruction assembles an instruction.
+// MOVW R9, (R10)
+func (p *Parser) asmInstruction(op int, cond string, a []obj.Addr) {
+ // fmt.Printf("%s %+v\n", obj.Aconv(op), a)
+ prog := &obj.Prog{
+ Ctxt: p.ctxt,
+ Lineno: p.histLineNum,
+ As: int16(op),
+ }
+ switch len(a) {
+ case 0:
+ // Nothing to do.
+ case 1:
+ if p.arch.UnaryDst[op] {
+ // prog.From is no address.
+ prog.To = a[0]
+ } else {
+ prog.From = a[0]
+ // prog.To is no address.
+ }
+ if p.arch.Thechar == '9' && arch.IsPPC64NEG(op) {
+ // NEG: From and To are both a[0].
+ prog.To = a[0]
+ prog.From = a[0]
+ break
+ }
+ case 2:
+ if p.arch.Thechar == '5' {
+ if arch.IsARMCMP(op) {
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ }
+ // Strange special cases.
+ if arch.IsARMSTREX(op) {
+ /*
+ STREX x, (y)
+ from=(y) reg=x to=x
+ STREX (x), y
+ from=(x) reg=y to=y
+ */
+ if a[0].Type == obj.TYPE_REG && a[1].Type != obj.TYPE_REG {
+ prog.From = a[1]
+ prog.Reg = a[0].Reg
+ prog.To = a[0]
+ break
+ } else if a[0].Type != obj.TYPE_REG && a[1].Type == obj.TYPE_REG {
+ prog.From = a[0]
+ prog.Reg = a[1].Reg
+ prog.To = a[1]
+ break
+ }
+ p.errorf("unrecognized addressing for %s", obj.Aconv(op))
+ return
+ }
+ if arch.IsARMFloatCmp(op) {
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ }
+ } else if p.arch.Thechar == '7' && arch.IsARM64CMP(op) {
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ } else if p.arch.Thechar == '0' {
+ if arch.IsMIPS64CMP(op) || arch.IsMIPS64MUL(op) {
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ break
+ }
+ }
+ prog.From = a[0]
+ prog.To = a[1]
+ case 3:
+ switch p.arch.Thechar {
+ case '0':
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.To = a[2]
+ case '5':
+ // Special cases.
+ if arch.IsARMSTREX(op) {
+ /*
+ STREX x, (y), z
+ from=(y) reg=x to=z
+ */
+ prog.From = a[1]
+ prog.Reg = p.getRegister(prog, op, &a[0])
+ prog.To = a[2]
+ break
+ }
+ // Otherwise the 2nd operand (a[1]) must be a register.
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.To = a[2]
+ case '7':
+ // ARM64 instructions with one input and two outputs.
+ if arch.IsARM64STLXR(op) {
+ prog.From = a[0]
+ prog.To = a[1]
+ if a[2].Type != obj.TYPE_REG {
+ p.errorf("invalid addressing modes for third operand to %s instruction, must be register", obj.Aconv(op))
+ return
+ }
+ prog.RegTo2 = a[2].Reg
+ break
+ }
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.To = a[2]
+ case '6', '8':
+ prog.From = a[0]
+ prog.From3 = newAddr(a[1])
+ prog.To = a[2]
+ case '9':
+ if arch.IsPPC64CMP(op) {
+ // CMPW etc.; third argument is a CR register that goes into prog.Reg.
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[2])
+ prog.To = a[1]
+ break
+ }
+ // Arithmetic. Choices are:
+ // reg reg reg
+ // imm reg reg
+ // reg imm reg
+ // If the immediate is the middle argument, use From3.
+ switch a[1].Type {
+ case obj.TYPE_REG:
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.To = a[2]
+ case obj.TYPE_CONST:
+ prog.From = a[0]
+ prog.From3 = newAddr(a[1])
+ prog.To = a[2]
+ default:
+ p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
+ return
+ }
+ default:
+ p.errorf("TODO: implement three-operand instructions for this architecture")
+ return
+ }
+ case 4:
+ if p.arch.Thechar == '5' && arch.IsARMMULA(op) {
+ // All must be registers.
+ p.getRegister(prog, op, &a[0])
+ r1 := p.getRegister(prog, op, &a[1])
+ p.getRegister(prog, op, &a[2])
+ r3 := p.getRegister(prog, op, &a[3])
+ prog.From = a[0]
+ prog.To = a[2]
+ prog.To.Type = obj.TYPE_REGREG2
+ prog.To.Offset = int64(r3)
+ prog.Reg = r1
+ break
+ }
+ if p.arch.Thechar == '7' {
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.From3 = newAddr(a[2])
+ prog.To = a[3]
+ break
+ }
+ if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
+ // 2nd operand must always be a register.
+ // TODO: Do we need to guard this with the instruction type?
+ // That is, are there 4-operand instructions without this property?
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.From3 = newAddr(a[2])
+ prog.To = a[3]
+ break
+ }
+ p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
+ return
+ case 5:
+ if p.arch.Thechar == '9' && arch.IsPPC64RLD(op) {
+ // Always reg, reg, con, con, reg. (con, con is a 'mask').
+ prog.From = a[0]
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ mask1 := p.getConstant(prog, op, &a[2])
+ mask2 := p.getConstant(prog, op, &a[3])
+ var mask uint32
+ if mask1 < mask2 {
+ mask = (^uint32(0) >> uint(mask1)) & (^uint32(0) << uint(31-mask2))
+ } else {
+ mask = (^uint32(0) >> uint(mask2+1)) & (^uint32(0) << uint(31-(mask1-1)))
+ }
+ prog.From3 = &obj.Addr{
+ Type: obj.TYPE_CONST,
+ Offset: int64(mask),
+ }
+ prog.To = a[4]
+ break
+ }
+ p.errorf("can't handle %s instruction with 5 operands", obj.Aconv(op))
+ return
+ case 6:
+ if p.arch.Thechar == '5' && arch.IsARMMRC(op) {
+ // Strange special case: MCR, MRC.
+ prog.To.Type = obj.TYPE_CONST
+ x0 := p.getConstant(prog, op, &a[0])
+ x1 := p.getConstant(prog, op, &a[1])
+ x2 := int64(p.getRegister(prog, op, &a[2]))
+ x3 := int64(p.getRegister(prog, op, &a[3]))
+ x4 := int64(p.getRegister(prog, op, &a[4]))
+ x5 := p.getConstant(prog, op, &a[5])
+ // Cond is handled specially for this instruction.
+ offset, MRC, ok := arch.ARMMRCOffset(op, cond, x0, x1, x2, x3, x4, x5)
+ if !ok {
+ p.errorf("unrecognized condition code .%q", cond)
+ }
+ prog.To.Offset = offset
+ cond = ""
+ prog.As = MRC // Both instructions are coded as MRC.
+ break
+ }
+ fallthrough
+ default:
+ p.errorf("can't handle %s instruction with %d operands", obj.Aconv(op), len(a))
+ return
+ }
+
+ p.append(prog, cond, true)
+}
+
+// newAddr returns a new(Addr) initialized to x.
+func newAddr(x obj.Addr) *obj.Addr {
+ p := new(obj.Addr)
+ *p = x
+ return p
+}
+
+// symbolName returns the symbol name, or an error string if none is available.
+func symbolName(addr *obj.Addr) string {
+ if addr.Sym != nil {
+ return addr.Sym.Name
+ }
+ return ""
+}
+
+var emptyProg obj.Prog
+
+// getConstantPseudo checks that addr represents a plain constant and returns its value.
+func (p *Parser) getConstantPseudo(pseudo string, addr *obj.Addr) int64 {
+ if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
+ p.errorf("%s: expected integer constant; found %s", pseudo, obj.Dconv(&emptyProg, addr))
+ }
+ return addr.Offset
+}
+
+// getConstant checks that addr represents a plain constant and returns its value.
+func (p *Parser) getConstant(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+ if addr.Type != obj.TYPE_MEM || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
+ p.errorf("%s: expected integer constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
+ }
+ return addr.Offset
+}
+
+// getImmediate checks that addr represents an immediate constant and returns its value.
+func (p *Parser) getImmediate(prog *obj.Prog, op int, addr *obj.Addr) int64 {
+ if addr.Type != obj.TYPE_CONST || addr.Name != 0 || addr.Reg != 0 || addr.Index != 0 {
+ p.errorf("%s: expected immediate constant; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
+ }
+ return addr.Offset
+}
+
+// getRegister checks that addr represents a register and returns its value.
+func (p *Parser) getRegister(prog *obj.Prog, op int, addr *obj.Addr) int16 {
+ if addr.Type != obj.TYPE_REG || addr.Offset != 0 || addr.Name != 0 || addr.Index != 0 {
+ p.errorf("%s: expected register; found %s", obj.Aconv(op), obj.Dconv(prog, addr))
+ }
+ return addr.Reg
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/endtoend_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/endtoend_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/endtoend_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/endtoend_test.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,391 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+)
+
+// An end-to-end test for the assembler: Do we print what we parse?
+// Output is generated by, in effect, turning on -S and comparing the
+// result against a golden file.
+
+func testEndToEnd(t *testing.T, goarch, file string) {
+ lex.InitHist()
+ input := filepath.Join("testdata", file+".s")
+ architecture, ctxt := setArch(goarch)
+ lexer := lex.NewLexer(input, ctxt)
+ parser := NewParser(ctxt, architecture, lexer)
+ pList := obj.Linknewplist(ctxt)
+ var ok bool
+ testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
+ ctxt.Bso = obj.Binitw(os.Stdout)
+ defer ctxt.Bso.Flush()
+ failed := false
+ ctxt.DiagFunc = func(format string, args ...interface{}) {
+ failed = true
+ t.Errorf(format, args...)
+ }
+ pList.Firstpc, ok = parser.Parse()
+ if !ok || failed {
+ t.Errorf("asm: %s assembly failed", goarch)
+ return
+ }
+ output := strings.Split(testOut.String(), "\n")
+
+ // Reconstruct expected output by independently "parsing" the input.
+ data, err := ioutil.ReadFile(input)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ lineno := 0
+ seq := 0
+ hexByLine := map[string]string{}
+ lines := strings.SplitAfter(string(data), "\n")
+Diff:
+ for _, line := range lines {
+ lineno++
+
+ // The general form of a test input line is:
+ // // comment
+ // INST args [// printed form] [// hex encoding]
+ parts := strings.Split(line, "//")
+ printed := strings.TrimSpace(parts[0])
+ if printed == "" || strings.HasSuffix(printed, ":") { // empty or label
+ continue
+ }
+ seq++
+
+ var hexes string
+ switch len(parts) {
+ default:
+ t.Errorf("%s:%d: unable to understand comments: %s", input, lineno, line)
+ case 1:
+ // no comment
+ case 2:
+ // might be printed form or hex
+ note := strings.TrimSpace(parts[1])
+ if isHexes(note) {
+ hexes = note
+ } else {
+ printed = note
+ }
+ case 3:
+ // printed form, then hex
+ printed = strings.TrimSpace(parts[1])
+ hexes = strings.TrimSpace(parts[2])
+ if !isHexes(hexes) {
+ t.Errorf("%s:%d: malformed hex instruction encoding: %s", input, lineno, line)
+ }
+ }
+
+ if hexes != "" {
+ hexByLine[fmt.Sprintf("%s:%d", input, lineno)] = hexes
+ }
+
+ // Canonicalize spacing in printed form.
+ // First field is opcode, then tab, then arguments separated by spaces.
+ // Canonicalize spaces after commas first.
+ // Comma to separate argument gets a space; comma within does not.
+ var buf []byte
+ nest := 0
+ for i := 0; i < len(printed); i++ {
+ c := printed[i]
+ switch c {
+ case '{', '[':
+ nest++
+ case '}', ']':
+ nest--
+ case ',':
+ buf = append(buf, ',')
+ if nest == 0 {
+ buf = append(buf, ' ')
+ }
+ for i+1 < len(printed) && (printed[i+1] == ' ' || printed[i+1] == '\t') {
+ i++
+ }
+ continue
+ }
+ buf = append(buf, c)
+ }
+
+ f := strings.Fields(string(buf))
+
+ // Turn relative (PC) into absolute (PC) automatically,
+ // so that most branch instructions don't need comments
+ // giving the absolute form.
+ if len(f) > 0 && strings.HasSuffix(printed, "(PC)") {
+ last := f[len(f)-1]
+ n, err := strconv.Atoi(last[:len(last)-len("(PC)")])
+ if err == nil {
+ f[len(f)-1] = fmt.Sprintf("%d(PC)", seq+n)
+ }
+ }
+
+ if len(f) == 1 {
+ printed = f[0]
+ } else {
+ printed = f[0] + "\t" + strings.Join(f[1:], " ")
+ }
+
+ want := fmt.Sprintf("%05d (%s:%d)\t%s", seq, input, lineno, printed)
+ for len(output) > 0 && (output[0] < want || output[0] != want && len(output[0]) >= 5 && output[0][:5] == want[:5]) {
+ if len(output[0]) >= 5 && output[0][:5] == want[:5] {
+ t.Errorf("mismatched output:\nhave %s\nwant %s", output[0], want)
+ output = output[1:]
+ continue Diff
+ }
+ t.Errorf("unexpected output: %q", output[0])
+ output = output[1:]
+ }
+ if len(output) > 0 && output[0] == want {
+ output = output[1:]
+ } else {
+ t.Errorf("missing output: %q", want)
+ }
+ }
+ for len(output) > 0 {
+ if output[0] == "" {
+ // spurious blank caused by Split on "\n"
+ output = output[1:]
+ continue
+ }
+ t.Errorf("unexpected output: %q", output[0])
+ output = output[1:]
+ }
+
+ // Checked printing.
+ // Now check machine code layout.
+
+ top := pList.Firstpc
+ var text *obj.LSym
+ ok = true
+ ctxt.DiagFunc = func(format string, args ...interface{}) {
+ t.Errorf(format, args...)
+ ok = false
+ }
+ obj.Flushplist(ctxt)
+
+ for p := top; p != nil; p = p.Link {
+ if p.As == obj.ATEXT {
+ text = p.From.Sym
+ }
+ hexes := hexByLine[p.Line()]
+ if hexes == "" {
+ continue
+ }
+ delete(hexByLine, p.Line())
+ if text == nil {
+ t.Errorf("%s: instruction outside TEXT", p)
+ }
+ size := int64(len(text.P)) - p.Pc
+ if p.Link != nil {
+ size = p.Link.Pc - p.Pc
+ } else if p.Isize != 0 {
+ size = int64(p.Isize)
+ }
+ var code []byte
+ if p.Pc < int64(len(text.P)) {
+ code = text.P[p.Pc:]
+ if size < int64(len(code)) {
+ code = code[:size]
+ }
+ }
+ codeHex := fmt.Sprintf("%x", code)
+ if codeHex == "" {
+ codeHex = "empty"
+ }
+ ok := false
+ for _, hex := range strings.Split(hexes, " or ") {
+ if codeHex == hex {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ t.Errorf("%s: have encoding %s, want %s", p, codeHex, hexes)
+ }
+ }
+
+ if len(hexByLine) > 0 {
+ var missing []string
+ for key := range hexByLine {
+ missing = append(missing, key)
+ }
+ sort.Strings(missing)
+ for _, line := range missing {
+ t.Errorf("%s: did not find instruction encoding", line)
+ }
+ }
+
+}
+
+func isHexes(s string) bool {
+ if s == "" {
+ return false
+ }
+ if s == "empty" {
+ return true
+ }
+ for _, f := range strings.Split(s, " or ") {
+ if f == "" || len(f)%2 != 0 || strings.TrimLeft(f, "0123456789abcdef") != "" {
+ return false
+ }
+ }
+ return true
+}
+
+// It would be nice if the error messages began with
+// the standard file:line: prefix,
+// but that's not where we are today.
+// It might be at the beginning but it might be in the middle of the printed instruction.
+var fileLineRE = regexp.MustCompile(`(?:^|\()(testdata[/\\][0-9a-z]+\.s:[0-9]+)(?:$|\))`)
+
+// Same as in test/run.go
+var (
+ errRE = regexp.MustCompile(`// ERROR ?(.*)`)
+ errQuotesRE = regexp.MustCompile(`"([^"]*)"`)
+)
+
+func testErrors(t *testing.T, goarch, file string) {
+ lex.InitHist()
+ input := filepath.Join("testdata", file+".s")
+ architecture, ctxt := setArch(goarch)
+ lexer := lex.NewLexer(input, ctxt)
+ parser := NewParser(ctxt, architecture, lexer)
+ pList := obj.Linknewplist(ctxt)
+ var ok bool
+ testOut = new(bytes.Buffer) // The assembler writes test output to this buffer.
+ ctxt.Bso = obj.Binitw(os.Stdout)
+ defer ctxt.Bso.Flush()
+ failed := false
+ var errBuf bytes.Buffer
+ ctxt.DiagFunc = func(format string, args ...interface{}) {
+ failed = true
+ s := fmt.Sprintf(format, args...)
+ if !strings.HasSuffix(s, "\n") {
+ s += "\n"
+ }
+ errBuf.WriteString(s)
+ }
+ pList.Firstpc, ok = parser.Parse()
+ obj.Flushplist(ctxt)
+ if ok && !failed {
+ t.Errorf("asm: %s had no errors", goarch)
+ }
+
+ errors := map[string]string{}
+ for _, line := range strings.Split(errBuf.String(), "\n") {
+ if line == "" || strings.HasPrefix(line, "\t") {
+ continue
+ }
+ m := fileLineRE.FindStringSubmatch(line)
+ if m == nil {
+ t.Errorf("unexpected error: %v", line)
+ continue
+ }
+ fileline := m[1]
+ if errors[fileline] != "" {
+ t.Errorf("multiple errors on %s:\n\t%s\n\t%s", fileline, errors[fileline], line)
+ continue
+ }
+ errors[fileline] = line
+ }
+
+ // Reconstruct expected errors by independently "parsing" the input.
+ data, err := ioutil.ReadFile(input)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ lineno := 0
+ lines := strings.Split(string(data), "\n")
+ for _, line := range lines {
+ lineno++
+
+ fileline := fmt.Sprintf("%s:%d", input, lineno)
+ if m := errRE.FindStringSubmatch(line); m != nil {
+ all := m[1]
+ mm := errQuotesRE.FindAllStringSubmatch(all, -1)
+ if len(mm) != 1 {
+ t.Errorf("%s: invalid errorcheck line:\n%s", fileline, line)
+ } else if err := errors[fileline]; err == "" {
+ t.Errorf("%s: missing error, want %s", fileline, all)
+ } else if !strings.Contains(err, mm[0][1]) {
+ t.Errorf("%s: wrong error for %s:\n%s", fileline, all, err)
+ }
+ } else {
+ if errors[fileline] != "" {
+ t.Errorf("unexpected error on %s: %v", fileline, errors[fileline])
+ }
+ }
+ delete(errors, fileline)
+ }
+ var extra []string
+ for key := range errors {
+ extra = append(extra, key)
+ }
+ sort.Strings(extra)
+ for _, fileline := range extra {
+ t.Errorf("unexpected error on %s: %v", fileline, errors[fileline])
+ }
+}
+
+func Test386EndToEnd(t *testing.T) {
+ defer os.Setenv("GO386", os.Getenv("GO386"))
+
+ for _, go386 := range []string{"387", "sse"} {
+ os.Setenv("GO386", go386)
+ t.Logf("GO386=%v", os.Getenv("GO386"))
+ testEndToEnd(t, "386", "386")
+ }
+}
+
+func TestARMEndToEnd(t *testing.T) {
+ defer os.Setenv("GOARM", os.Getenv("GOARM"))
+
+ for _, goarm := range []string{"5", "6", "7"} {
+ os.Setenv("GOARM", goarm)
+ t.Logf("GOARM=%v", os.Getenv("GOARM"))
+ testEndToEnd(t, "arm", "arm")
+ }
+}
+
+func TestARM64EndToEnd(t *testing.T) {
+ testEndToEnd(t, "arm64", "arm64")
+}
+
+func TestAMD64EndToEnd(t *testing.T) {
+ testEndToEnd(t, "amd64", "amd64")
+}
+
+func TestAMD64Encoder(t *testing.T) {
+ testEndToEnd(t, "amd64", "amd64enc")
+}
+
+func TestAMD64Errors(t *testing.T) {
+ testErrors(t, "amd64", "amd64error")
+}
+
+func TestMIPS64EndToEnd(t *testing.T) {
+ testEndToEnd(t, "mips64", "mips64")
+}
+
+func TestPPC64EndToEnd(t *testing.T) {
+ testEndToEnd(t, "ppc64", "ppc64")
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/operand_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/operand_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/operand_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/asm/internal/asm/operand_test.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package asm
+
+import (
+ "os"
+ "testing"
+
+ "cmd/asm/internal/arch"
+ "cmd/asm/internal/lex"
+ "cmd/internal/obj"
+)
+
+// A simple in-out test: Do we print what we parse?
+
+func setArch(goarch string) (*arch.Arch, *obj.Link) {
+ os.Setenv("GOOS", "linux") // obj can handle this OS for all architectures.
+ architecture := arch.Set(goarch)
+ if architecture == nil {
+ panic("asm: unrecognized architecture " + goarch)
+ }
+ return architecture, obj.Linknew(architecture.LinkArch)
+}
+
+func newParser(goarch string) *Parser {
+ architecture, ctxt := setArch(goarch)
+ return NewParser(ctxt, architecture, nil)
+}
+
+func testOperandParser(t *testing.T, parser *Parser, tests []operandTest) {
+ for _, test := range tests {
+ parser.start(lex.Tokenize(test.input))
+ addr := obj.Addr{}
+ parser.operand(&addr)
+ result := obj.Dconv(&emptyProg, &addr)
+ if result != test.output {
+ t.Errorf("fail at %s: got %s; expected %s\n", test.input, result, test.output)
+ }
+ }
+}
+
+func TestAMD64OperandParser(t *testing.T) {
+ parser := newParser("amd64")
+ testOperandParser(t, parser, amd64OperandTests)
+}
+
+func Test386OperandParser(t *testing.T) {
+ parser := newParser("386")
+ testOperandParser(t, parser, x86OperandTests)
+}
+
+func TestARMOperandParser(t *testing.T) {
+ parser := newParser("arm")
+ testOperandParser(t, parser, armOperandTests)
+}
+func TestARM64OperandParser(t *testing.T) {
+ parser := newParser("arm64")
+ testOperandParser(t, parser, arm64OperandTests)
+}
+
+func TestPPC64OperandParser(t *testing.T) {
+ parser := newParser("ppc64")
+ testOperandParser(t, parser, ppc64OperandTests)
+}
+
+func TestMIPS64OperandParser(t *testing.T) {
+ parser := newParser("mips64")
+ testOperandParser(t, parser, mips64OperandTests)
+}
+
+type operandTest struct {
+ input, output string
+}
+
+// Examples collected by scanning all the assembly in the standard repo.
+
+var amd64OperandTests = []operandTest{
+ {"$(-1.0)", "$(-1.0)"},
+ {"$(0.0)", "$(0.0)"},
+ {"$(0x2000000+116)", "$33554548"},
+ {"$(0x3F<<7)", "$8064"},
+ {"$(112+8)", "$120"},
+ {"$(1<<63)", "$-9223372036854775808"},
+ {"$-1", "$-1"},
+ {"$0", "$0"},
+ {"$0-0", "$0"},
+ {"$0-16", "$-16"},
+ {"$0x000FFFFFFFFFFFFF", "$4503599627370495"},
+ {"$0x01", "$1"},
+ {"$0x02", "$2"},
+ {"$0x04", "$4"},
+ {"$0x3FE", "$1022"},
+ {"$0x7fffffe00000", "$140737486258176"},
+ {"$0xfffffffffffff001", "$-4095"},
+ {"$1", "$1"},
+ {"$1.0", "$(1.0)"},
+ {"$10", "$10"},
+ {"$1000", "$1000"},
+ {"$1000000", "$1000000"},
+ {"$1000000000", "$1000000000"},
+ {"$__tsan_func_enter(SB)", "$__tsan_func_enter(SB)"},
+ {"$main(SB)", "$main(SB)"},
+ {"$masks<>(SB)", "$masks<>(SB)"},
+ {"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"},
+ {"$shifts<>(SB)", "$shifts<>(SB)"},
+ {"$~(1<<63)", "$9223372036854775807"},
+ {"$~0x3F", "$-64"},
+ {"$~15", "$-16"},
+ {"(((8)&0xf)*4)(SP)", "32(SP)"},
+ {"(((8-14)&0xf)*4)(SP)", "40(SP)"},
+ {"(6+8)(AX)", "14(AX)"},
+ {"(8*4)(BP)", "32(BP)"},
+ {"(AX)", "(AX)"},
+ {"(AX)(CX*8)", "(AX)(CX*8)"},
+ {"(BP)(CX*4)", "(BP)(CX*4)"},
+ {"(BP)(DX*4)", "(BP)(DX*4)"},
+ {"(BP)(R8*4)", "(BP)(R8*4)"},
+ {"(BX)", "(BX)"},
+ {"(DI)", "(DI)"},
+ {"(DI)(BX*1)", "(DI)(BX*1)"},
+ {"(DX)", "(DX)"},
+ {"(R9)", "(R9)"},
+ {"(R9)(BX*8)", "(R9)(BX*8)"},
+ {"(SI)", "(SI)"},
+ {"(SI)(BX*1)", "(SI)(BX*1)"},
+ {"(SI)(DX*1)", "(SI)(DX*1)"},
+ {"(SP)", "(SP)"},
+ {"+3(PC)", "3(PC)"},
+ {"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
+ {"-3(PC)", "-3(PC)"},
+ {"-64(SI)(BX*1)", "-64(SI)(BX*1)"},
+ {"-96(SI)(BX*1)", "-96(SI)(BX*1)"},
+ {"AL", "AL"},
+ {"AX", "AX"},
+ {"BP", "BP"},
+ {"BX", "BX"},
+ {"CX", "CX"},
+ {"DI", "DI"},
+ {"DX", "DX"},
+ {"R10", "R10"},
+ {"R10", "R10"},
+ {"R11", "R11"},
+ {"R12", "R12"},
+ {"R13", "R13"},
+ {"R14", "R14"},
+ {"R15", "R15"},
+ {"R8", "R8"},
+ {"R9", "R9"},
+ {"SI", "SI"},
+ {"SP", "SP"},
+ {"X0", "X0"},
+ {"X1", "X1"},
+ {"X10", "X10"},
+ {"X11", "X11"},
+ {"X12", "X12"},
+ {"X13", "X13"},
+ {"X14", "X14"},
+ {"X15", "X15"},
+ {"X2", "X2"},
+ {"X3", "X3"},
+ {"X4", "X4"},
+ {"X5", "X5"},
+ {"X6", "X6"},
+ {"X7", "X7"},
+ {"X8", "X8"},
+ {"X9", "X9"},
+ {"_expand_key_128<>(SB)", "_expand_key_128<>(SB)"},
+ {"_seek<>(SB)", "_seek<>(SB)"},
+ {"a2+16(FP)", "a2+16(FP)"},
+ {"addr2+24(FP)", "addr2+24(FP)"},
+ {"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
+ {"b+24(FP)", "b+24(FP)"},
+ {"b_len+32(FP)", "b_len+32(FP)"},
+ {"racecall<>(SB)", "racecall<>(SB)"},
+ {"rcv_name+20(FP)", "rcv_name+20(FP)"},
+ {"retoffset+28(FP)", "retoffset+28(FP)"},
+ {"runtime·_GetStdHandle(SB)", "runtime._GetStdHandle(SB)"},
+ {"sync\u2215atomic·AddInt64(SB)", "sync/atomic.AddInt64(SB)"},
+ {"timeout+20(FP)", "timeout+20(FP)"},
+ {"ts+16(FP)", "ts+16(FP)"},
+ {"x+24(FP)", "x+24(FP)"},
+ {"x·y(SB)", "x.y(SB)"},
+ {"x·y(SP)", "x.y(SP)"},
+ {"x·y+8(SB)", "x.y+8(SB)"},
+ {"x·y+8(SP)", "x.y+8(SP)"},
+ {"y+56(FP)", "y+56(FP)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·callReflect(SB)", "\"\".callReflect(SB)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
+
+var x86OperandTests = []operandTest{
+ {"$(2.928932188134524e-01)", "$(0.29289321881345243)"},
+ {"$-1", "$-1"},
+ {"$0", "$0"},
+ {"$0x00000000", "$0"},
+ {"$runtime·badmcall(SB)", "$runtime.badmcall(SB)"},
+ {"$setg_gcc<>(SB)", "$setg_gcc<>(SB)"},
+ {"$~15", "$-16"},
+ {"(-64*1024+104)(SP)", "-65432(SP)"},
+ {"(0*4)(BP)", "(BP)"},
+ {"(1*4)(DI)", "4(DI)"},
+ {"(4*4)(BP)", "16(BP)"},
+ {"(AX)", "(AX)"},
+ {"(BP)(CX*4)", "(BP)(CX*4)"},
+ {"(BP*8)", "0(BP*8)"},
+ {"(BX)", "(BX)"},
+ {"(SP)", "(SP)"},
+ {"*AX", "AX"}, // TODO: Should make * illegal here; a simple alias for JMP AX.
+ {"*runtime·_GetStdHandle(SB)", "*runtime._GetStdHandle(SB)"},
+ {"-(4+12)(DI)", "-16(DI)"},
+ {"-1(DI)(BX*1)", "-1(DI)(BX*1)"},
+ {"-96(DI)(BX*1)", "-96(DI)(BX*1)"},
+ {"0(AX)", "(AX)"},
+ {"0(BP)", "(BP)"},
+ {"0(BX)", "(BX)"},
+ {"4(AX)", "4(AX)"},
+ {"AL", "AL"},
+ {"AX", "AX"},
+ {"BP", "BP"},
+ {"BX", "BX"},
+ {"CX", "CX"},
+ {"DI", "DI"},
+ {"DX", "DX"},
+ {"F0", "F0"},
+ {"GS", "GS"},
+ {"SI", "SI"},
+ {"SP", "SP"},
+ {"X0", "X0"},
+ {"X1", "X1"},
+ {"X2", "X2"},
+ {"X3", "X3"},
+ {"X4", "X4"},
+ {"X5", "X5"},
+ {"X6", "X6"},
+ {"X7", "X7"},
+ {"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
+ {"ax+4(FP)", "ax+4(FP)"},
+ {"ptime-12(SP)", "ptime-12(SP)"},
+ {"runtime·_NtWaitForSingleObject(SB)", "runtime._NtWaitForSingleObject(SB)"},
+ {"s(FP)", "s(FP)"},
+ {"sec+4(FP)", "sec+4(FP)"},
+ {"shifts<>(SB)(CX*8)", "shifts<>(SB)(CX*8)"},
+ {"x+4(FP)", "x+4(FP)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·reflectcall(SB)", "\"\".reflectcall(SB)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
+
+var armOperandTests = []operandTest{
+ {"$0", "$0"},
+ {"$256", "$256"},
+ {"(R0)", "(R0)"},
+ {"(R11)", "(R11)"},
+ {"(g)", "(g)"},
+ {"-12(R4)", "-12(R4)"},
+ {"0(PC)", "0(PC)"},
+ {"1024", "1024"},
+ {"12(R(1))", "12(R1)"},
+ {"12(R13)", "12(R13)"},
+ {"R0", "R0"},
+ {"R0->(32-1)", "R0->31"},
+ {"R0<>R(1)", "R0>>R1"},
+ {"R0@>(32-1)", "R0@>31"},
+ {"R1", "R1"},
+ {"R11", "R11"},
+ {"R12", "R12"},
+ {"R13", "R13"},
+ {"R14", "R14"},
+ {"R15", "R15"},
+ {"R1<<2(R3)", "R1<<2(R3)"},
+ {"R(1)<<2(R(3))", "R1<<2(R3)"},
+ {"R2", "R2"},
+ {"R3", "R3"},
+ {"R4", "R4"},
+ {"R(4)", "R4"},
+ {"R5", "R5"},
+ {"R6", "R6"},
+ {"R7", "R7"},
+ {"R8", "R8"},
+ {"[R0,R1,g,R15]", "[R0,R1,g,R15]"},
+ {"[R0-R7]", "[R0,R1,R2,R3,R4,R5,R6,R7]"},
+ {"[R(0)-R(7)]", "[R0,R1,R2,R3,R4,R5,R6,R7]"},
+ {"[R0]", "[R0]"},
+ {"[R1-R12]", "[R1,R2,R3,R4,R5,R6,R7,R8,R9,g,R11,R12]"},
+ {"armCAS64(SB)", "armCAS64(SB)"},
+ {"asmcgocall<>(SB)", "asmcgocall<>(SB)"},
+ {"c+28(FP)", "c+28(FP)"},
+ {"g", "g"},
+ {"gosave<>(SB)", "gosave<>(SB)"},
+ {"retlo+12(FP)", "retlo+12(FP)"},
+ {"runtime·_sfloat2(SB)", "runtime._sfloat2(SB)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"(R1, R3)", "(R1, R3)"},
+ {"[R0,R1,g,R15", ""}, // Issue 11764 - asm hung parsing ']' missing register lists.
+ {"[):[o-FP", ""}, // Issue 12469 - there was no infinite loop for ARM; these are just sanity checks.
+ {"[):[R0-FP", ""},
+ {"(", ""}, // Issue 12466 - backed up before beginning of line.
+}
+
+var ppc64OperandTests = []operandTest{
+ {"$((1<<63)-1)", "$9223372036854775807"},
+ {"$(-64*1024)", "$-65536"},
+ {"$(1024 * 8)", "$8192"},
+ {"$-1", "$-1"},
+ {"$-24(R4)", "$-24(R4)"},
+ {"$0", "$0"},
+ {"$0(R1)", "$(R1)"},
+ {"$0.5", "$(0.5)"},
+ {"$0x7000", "$28672"},
+ {"$0x88888eef", "$2290650863"},
+ {"$1", "$1"},
+ {"$_main<>(SB)", "$_main<>(SB)"},
+ {"$argframe(FP)", "$argframe(FP)"},
+ {"$runtime·tlsg(SB)", "$runtime.tlsg(SB)"},
+ {"$~3", "$-4"},
+ {"(-288-3*8)(R1)", "-312(R1)"},
+ {"(16)(R7)", "16(R7)"},
+ {"(8)(g)", "8(g)"},
+ {"(CTR)", "(CTR)"},
+ {"(R0)", "(R0)"},
+ {"(R3)", "(R3)"},
+ {"(R4)", "(R4)"},
+ {"(R5)", "(R5)"},
+ {"(R5)(R6*1)", "(R5)(R6*1)"},
+ {"(R5+R6)", "(R5)(R6*1)"}, // Old syntax.
+ {"-1(R4)", "-1(R4)"},
+ {"-1(R5)", "-1(R5)"},
+ {"6(PC)", "6(PC)"},
+ {"CR7", "CR7"},
+ {"CTR", "CTR"},
+ {"F14", "F14"},
+ {"F15", "F15"},
+ {"F16", "F16"},
+ {"F17", "F17"},
+ {"F18", "F18"},
+ {"F19", "F19"},
+ {"F20", "F20"},
+ {"F21", "F21"},
+ {"F22", "F22"},
+ {"F23", "F23"},
+ {"F24", "F24"},
+ {"F25", "F25"},
+ {"F26", "F26"},
+ {"F27", "F27"},
+ {"F28", "F28"},
+ {"F29", "F29"},
+ {"F30", "F30"},
+ {"F31", "F31"},
+ {"LR", "LR"},
+ {"R0", "R0"},
+ {"R1", "R1"},
+ {"R11", "R11"},
+ {"R12", "R12"},
+ {"R13", "R13"},
+ {"R14", "R14"},
+ {"R15", "R15"},
+ {"R16", "R16"},
+ {"R17", "R17"},
+ {"R18", "R18"},
+ {"R19", "R19"},
+ {"R2", "R2"},
+ {"R20", "R20"},
+ {"R21", "R21"},
+ {"R22", "R22"},
+ {"R23", "R23"},
+ {"R24", "R24"},
+ {"R25", "R25"},
+ {"R26", "R26"},
+ {"R27", "R27"},
+ {"R28", "R28"},
+ {"R29", "R29"},
+ {"R3", "R3"},
+ {"R31", "R31"},
+ {"R4", "R4"},
+ {"R5", "R5"},
+ {"R6", "R6"},
+ {"R7", "R7"},
+ {"R8", "R8"},
+ {"R9", "R9"},
+ {"SPR(269)", "SPR(269)"},
+ {"a(FP)", "a(FP)"},
+ {"g", "g"},
+ {"ret+8(FP)", "ret+8(FP)"},
+ {"runtime·abort(SB)", "runtime.abort(SB)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·trunc(SB)", "\"\".trunc(SB)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
+
+var arm64OperandTests = []operandTest{
+ {"$0", "$0"},
+ {"$0.5", "$(0.5)"},
+ {"0(R26)", "(R26)"},
+ {"0(RSP)", "(RSP)"},
+ {"$1", "$1"},
+ {"$-1", "$-1"},
+ {"$1000", "$1000"},
+ {"$1000000000", "$1000000000"},
+ {"$0x7fff3c000", "$34358935552"},
+ {"$1234", "$1234"},
+ {"$~15", "$-16"},
+ {"$16", "$16"},
+ {"-16(RSP)", "-16(RSP)"},
+ {"16(RSP)", "16(RSP)"},
+ {"1(R1)", "1(R1)"},
+ {"-1(R4)", "-1(R4)"},
+ {"18740(R5)", "18740(R5)"},
+ {"$2", "$2"},
+ {"$-24(R4)", "$-24(R4)"},
+ {"-24(RSP)", "-24(RSP)"},
+ {"$24(RSP)", "$24(RSP)"},
+ {"-32(RSP)", "-32(RSP)"},
+ {"$48", "$48"},
+ {"$(-64*1024)(R7)", "$-65536(R7)"},
+ {"$(8-1)", "$7"},
+ {"a+0(FP)", "a(FP)"},
+ {"a1+8(FP)", "a1+8(FP)"},
+ {"·AddInt32(SB)", `"".AddInt32(SB)`},
+ {"runtime·divWVW(SB)", "runtime.divWVW(SB)"},
+ {"$argframe+0(FP)", "$argframe(FP)"},
+ {"$asmcgocall<>(SB)", "$asmcgocall<>(SB)"},
+ {"EQ", "EQ"},
+ {"F29", "F29"},
+ {"F3", "F3"},
+ {"F30", "F30"},
+ {"g", "g"},
+ {"LR", "R30"},
+ {"(LR)", "(R30)"},
+ {"R0", "R0"},
+ {"R10", "R10"},
+ {"R11", "R11"},
+ {"$4503601774854144.0", "$(4503601774854144.0)"},
+ {"$runtime·badsystemstack(SB)", "$runtime.badsystemstack(SB)"},
+ {"ZR", "ZR"},
+ {"(ZR)", "(ZR)"},
+ {"(R29, RSP)", "(R29, RSP)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
+
+var mips64OperandTests = []operandTest{
+ {"$((1<<63)-1)", "$9223372036854775807"},
+ {"$(-64*1024)", "$-65536"},
+ {"$(1024 * 8)", "$8192"},
+ {"$-1", "$-1"},
+ {"$-24(R4)", "$-24(R4)"},
+ {"$0", "$0"},
+ {"$0(R1)", "$(R1)"},
+ {"$0.5", "$(0.5)"},
+ {"$0x7000", "$28672"},
+ {"$0x88888eef", "$2290650863"},
+ {"$1", "$1"},
+ {"$_main<>(SB)", "$_main<>(SB)"},
+ {"$argframe(FP)", "$argframe(FP)"},
+ {"$~3", "$-4"},
+ {"(-288-3*8)(R1)", "-312(R1)"},
+ {"(16)(R7)", "16(R7)"},
+ {"(8)(g)", "8(g)"},
+ {"(R0)", "(R0)"},
+ {"(R3)", "(R3)"},
+ {"(R4)", "(R4)"},
+ {"(R5)", "(R5)"},
+ {"-1(R4)", "-1(R4)"},
+ {"-1(R5)", "-1(R5)"},
+ {"6(PC)", "6(PC)"},
+ {"F14", "F14"},
+ {"F15", "F15"},
+ {"F16", "F16"},
+ {"F17", "F17"},
+ {"F18", "F18"},
+ {"F19", "F19"},
+ {"F20", "F20"},
+ {"F21", "F21"},
+ {"F22", "F22"},
+ {"F23", "F23"},
+ {"F24", "F24"},
+ {"F25", "F25"},
+ {"F26", "F26"},
+ {"F27", "F27"},
+ {"F28", "F28"},
+ {"F29", "F29"},
+ {"F30", "F30"},
+ {"F31", "F31"},
+ {"R0", "R0"},
+ {"R1", "R1"},
+ {"R11", "R11"},
+ {"R12", "R12"},
+ {"R13", "R13"},
+ {"R14", "R14"},
+ {"R15", "R15"},
+ {"R16", "R16"},
+ {"R17", "R17"},
+ {"R18", "R18"},
+ {"R19", "R19"},
+ {"R2", "R2"},
+ {"R20", "R20"},
+ {"R21", "R21"},
+ {"R22", "R22"},
+ {"R23", "R23"},
+ {"R24", "R24"},
+ {"R25", "R25"},
+ {"R26", "R26"},
+ {"R27", "R27"},
+ {"R28", "R28"},
+ {"R29", "R29"},
+ {"R3", "R3"},
+ {"R31", "R31"},
+ {"R4", "R4"},
+ {"R5", "R5"},
+ {"R6", "R6"},
+ {"R7", "R7"},
+ {"R8", "R8"},
+ {"R9", "R9"},
+ {"LO", "LO"},
+ {"a(FP)", "a(FP)"},
+ {"g", "g"},
+ {"ret+8(FP)", "ret+8(FP)"},
+ {"runtime·abort(SB)", "runtime.abort(SB)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·trunc(SB)", "\"\".trunc(SB)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/cgo/main.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/cgo/main.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/cgo/main.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/cgo/main.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,364 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Cgo; see gmp.go for an overview.
+
+// TODO(rsc):
+// Emit correct line number annotations.
+// Make gc understand the annotations.
+
+package main
+
+import (
+ "crypto/md5"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/printer"
+ "go/token"
+ "io"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "sort"
+ "strings"
+)
+
+// A Package collects information about the package we're going to write.
+type Package struct {
+ PackageName string // name of package
+ PackagePath string
+ PtrSize int64
+ IntSize int64
+ GccOptions []string
+ GccIsClang bool
+ CgoFlags map[string][]string // #cgo flags (CFLAGS, LDFLAGS)
+ Written map[string]bool
+ Name map[string]*Name // accumulated Name from Files
+ ExpFunc []*ExpFunc // accumulated ExpFunc from Files
+ Decl []ast.Decl
+ GoFiles []string // list of Go files
+ GccFiles []string // list of gcc output files
+ Preamble string // collected preamble for _cgo_export.h
+ CgoChecks []string // see unsafeCheckPointerName
+}
+
+// A File collects information about a single Go input file.
+type File struct {
+ AST *ast.File // parsed AST
+ Comments []*ast.CommentGroup // comments from file
+ Package string // Package name
+ Preamble string // C preamble (doc comment on import "C")
+ Ref []*Ref // all references to C.xxx in AST
+ Calls []*ast.CallExpr // all calls to C.xxx in AST
+ ExpFunc []*ExpFunc // exported functions for this file
+ Name map[string]*Name // map from Go name to Name
+}
+
+func nameKeys(m map[string]*Name) []string {
+ var ks []string
+ for k := range m {
+ ks = append(ks, k)
+ }
+ sort.Strings(ks)
+ return ks
+}
+
+// A Ref refers to an expression of the form C.xxx in the AST.
+type Ref struct {
+ Name *Name
+ Expr *ast.Expr
+ Context string // "type", "expr", "call", or "call2"
+}
+
+func (r *Ref) Pos() token.Pos {
+ return (*r.Expr).Pos()
+}
+
+// A Name collects information about C.xxx.
+type Name struct {
+ Go string // name used in Go referring to package C
+ Mangle string // name used in generated Go
+ C string // name used in C
+ Define string // #define expansion
+ Kind string // "const", "type", "var", "fpvar", "func", "not-type"
+ Type *Type // the type of xxx
+ FuncType *FuncType
+ AddError bool
+ Const string // constant definition
+}
+
+// IsVar reports whether Kind is either "var" or "fpvar"
+func (n *Name) IsVar() bool {
+ return n.Kind == "var" || n.Kind == "fpvar"
+}
+
+// A ExpFunc is an exported function, callable from C.
+// Such functions are identified in the Go input file
+// by doc comments containing the line //export ExpName
+type ExpFunc struct {
+ Func *ast.FuncDecl
+ ExpName string // name to use from C
+ Doc string
+}
+
+// A TypeRepr contains the string representation of a type.
+type TypeRepr struct {
+ Repr string
+ FormatArgs []interface{}
+}
+
+// A Type collects information about a type in both the C and Go worlds.
+type Type struct {
+ Size int64
+ Align int64
+ C *TypeRepr
+ Go ast.Expr
+ EnumValues map[string]int64
+ Typedef string
+}
+
+// A FuncType collects information about a function type in both the C and Go worlds.
+type FuncType struct {
+ Params []*Type
+ Result *Type
+ Go *ast.FuncType
+}
+
+func usage() {
+ fmt.Fprint(os.Stderr, "usage: cgo -- [compiler options] file.go ...\n")
+ flag.PrintDefaults()
+ os.Exit(2)
+}
+
+var ptrSizeMap = map[string]int64{
+ "386": 4,
+ "amd64": 8,
+ "arm": 4,
+ "arm64": 8,
+ "mips64": 8,
+ "mips64le": 8,
+ "ppc64": 8,
+ "ppc64le": 8,
+ "s390": 4,
+ "s390x": 8,
+}
+
+var intSizeMap = map[string]int64{
+ "386": 4,
+ "amd64": 8,
+ "arm": 4,
+ "arm64": 8,
+ "mips64": 8,
+ "mips64le": 8,
+ "ppc64": 8,
+ "ppc64le": 8,
+ "s390": 4,
+ "s390x": 4,
+}
+
+var cPrefix string
+
+var fset = token.NewFileSet()
+
+var dynobj = flag.String("dynimport", "", "if non-empty, print dynamic import data for that file")
+var dynout = flag.String("dynout", "", "write -dynimport output to this file")
+var dynpackage = flag.String("dynpackage", "main", "set Go package for -dynimport output")
+var dynlinker = flag.Bool("dynlinker", false, "record dynamic linker information in -dynimport mode")
+
+// This flag is for bootstrapping a new Go implementation,
+// to generate Go types that match the data layout and
+// constant values used in the host's C libraries and system calls.
+var godefs = flag.Bool("godefs", false, "for bootstrap: write Go definitions for C file to standard output")
+
+var objDir = flag.String("objdir", "", "object directory")
+var importPath = flag.String("importpath", "", "import path of package being built (for comments in generated files)")
+var exportHeader = flag.String("exportheader", "", "where to write export header if any exported functions")
+
+var gccgo = flag.Bool("gccgo", false, "generate files for use with gccgo")
+var gccgoprefix = flag.String("gccgoprefix", "", "-fgo-prefix option used with gccgo")
+var gccgopkgpath = flag.String("gccgopkgpath", "", "-fgo-pkgpath option used with gccgo")
+var importRuntimeCgo = flag.Bool("import_runtime_cgo", true, "import runtime/cgo in generated code")
+var importSyscall = flag.Bool("import_syscall", true, "import syscall in generated code")
+var goarch, goos string
+
+func main() {
+ flag.Usage = usage
+ flag.Parse()
+
+ if *dynobj != "" {
+ // cgo -dynimport is essentially a separate helper command
+ // built into the cgo binary. It scans a gcc-produced executable
+ // and dumps information about the imported symbols and the
+ // imported libraries. The 'go build' rules for cgo prepare an
+ // appropriate executable and then use its import information
+ // instead of needing to make the linkers duplicate all the
+ // specialized knowledge gcc has about where to look for imported
+ // symbols and which ones to use.
+ dynimport(*dynobj)
+ return
+ }
+
+ if *godefs {
+ // Generating definitions pulled from header files,
+ // to be checked into Go repositories.
+ // Line numbers are just noise.
+ conf.Mode &^= printer.SourcePos
+ }
+
+ args := flag.Args()
+ if len(args) < 1 {
+ usage()
+ }
+
+ // Find first arg that looks like a go file and assume everything before
+ // that are options to pass to gcc.
+ var i int
+ for i = len(args); i > 0; i-- {
+ if !strings.HasSuffix(args[i-1], ".go") {
+ break
+ }
+ }
+ if i == len(args) {
+ usage()
+ }
+
+ goFiles := args[i:]
+
+ p := newPackage(args[:i])
+
+ // Record CGO_LDFLAGS from the environment for external linking.
+ if ldflags := os.Getenv("CGO_LDFLAGS"); ldflags != "" {
+ args, err := splitQuoted(ldflags)
+ if err != nil {
+ fatalf("bad CGO_LDFLAGS: %q (%s)", ldflags, err)
+ }
+ p.addToFlag("LDFLAGS", args)
+ }
+
+ // Need a unique prefix for the global C symbols that
+ // we use to coordinate between gcc and ourselves.
+ // We already put _cgo_ at the beginning, so the main
+ // concern is other cgo wrappers for the same functions.
+ // Use the beginning of the md5 of the input to disambiguate.
+ h := md5.New()
+ for _, input := range goFiles {
+ f, err := os.Open(input)
+ if err != nil {
+ fatalf("%s", err)
+ }
+ io.Copy(h, f)
+ f.Close()
+ }
+ cPrefix = fmt.Sprintf("_%x", h.Sum(nil)[0:6])
+
+ fs := make([]*File, len(goFiles))
+ for i, input := range goFiles {
+ f := new(File)
+ f.ReadGo(input)
+ f.DiscardCgoDirectives()
+ fs[i] = f
+ }
+
+ if *objDir == "" {
+ // make sure that _obj directory exists, so that we can write
+ // all the output files there.
+ os.Mkdir("_obj", 0777)
+ *objDir = "_obj"
+ }
+ *objDir += string(filepath.Separator)
+
+ for i, input := range goFiles {
+ f := fs[i]
+ p.Translate(f)
+ for _, cref := range f.Ref {
+ switch cref.Context {
+ case "call", "call2":
+ if cref.Name.Kind != "type" {
+ break
+ }
+ *cref.Expr = cref.Name.Type.Go
+ }
+ }
+ if nerrors > 0 {
+ os.Exit(2)
+ }
+ p.PackagePath = f.Package
+ p.Record(f)
+ if *godefs {
+ os.Stdout.WriteString(p.godefs(f, input))
+ } else {
+ p.writeOutput(f, input)
+ }
+ }
+
+ if !*godefs {
+ p.writeDefs()
+ }
+ if nerrors > 0 {
+ os.Exit(2)
+ }
+}
+
+// newPackage returns a new Package that will invoke
+// gcc with the additional arguments specified in args.
+func newPackage(args []string) *Package {
+ goarch = runtime.GOARCH
+ if s := os.Getenv("GOARCH"); s != "" {
+ goarch = s
+ }
+ goos = runtime.GOOS
+ if s := os.Getenv("GOOS"); s != "" {
+ goos = s
+ }
+ ptrSize := ptrSizeMap[goarch]
+ if ptrSize == 0 {
+ fatalf("unknown ptrSize for $GOARCH %q", goarch)
+ }
+ intSize := intSizeMap[goarch]
+ if intSize == 0 {
+ fatalf("unknown intSize for $GOARCH %q", goarch)
+ }
+
+ // Reset locale variables so gcc emits English errors [sic].
+ os.Setenv("LANG", "en_US.UTF-8")
+ os.Setenv("LC_ALL", "C")
+
+ p := &Package{
+ PtrSize: ptrSize,
+ IntSize: intSize,
+ CgoFlags: make(map[string][]string),
+ Written: make(map[string]bool),
+ }
+ p.addToFlag("CFLAGS", args)
+ return p
+}
+
+// Record what needs to be recorded about f.
+func (p *Package) Record(f *File) {
+ if p.PackageName == "" {
+ p.PackageName = f.Package
+ } else if p.PackageName != f.Package {
+ error_(token.NoPos, "inconsistent package names: %s, %s", p.PackageName, f.Package)
+ }
+
+ if p.Name == nil {
+ p.Name = f.Name
+ } else {
+ for k, v := range f.Name {
+ if p.Name[k] == nil {
+ p.Name[k] = v
+ } else if !reflect.DeepEqual(p.Name[k], v) {
+ error_(token.NoPos, "inconsistent definitions for C.%s", fixGo(k))
+ }
+ }
+ }
+
+ if f.ExpFunc != nil {
+ p.ExpFunc = append(p.ExpFunc, f.ExpFunc...)
+ p.Preamble += "\n" + f.Preamble
+ }
+ p.Decl = append(p.Decl, f.AST.Decls...)
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/cgen.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/cgen.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/cgen.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/cgen.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,3555 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "fmt"
+)
+
+// generate:
+// res = n;
+// simplifies and calls Thearch.Gmove.
+// if wb is true, need to emit write barriers.
+func Cgen(n, res *Node) {
+ cgen_wb(n, res, false)
+}
+
+func cgen_wb(n, res *Node, wb bool) {
+ if Debug['g'] != 0 {
+ op := "cgen"
+ if wb {
+ op = "cgen_wb"
+ }
+ Dump("\n"+op+"-n", n)
+ Dump(op+"-res", res)
+ }
+
+ if n == nil || n.Type == nil {
+ return
+ }
+
+ if res == nil || res.Type == nil {
+ Fatalf("cgen: res nil")
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ }
+
+ switch n.Op {
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ cgen_slice(n, res, wb)
+ return
+
+ case OEFACE:
+ if res.Op != ONAME || !res.Addable || wb {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen_eface(n, &n1)
+ cgen_wb(&n1, res, wb)
+ } else {
+ Cgen_eface(n, res)
+ }
+ return
+
+ case ODOTTYPE:
+ cgen_dottype(n, res, nil, wb)
+ return
+
+ case OAPPEND:
+ cgen_append(n, res)
+ return
+ }
+
+ if n.Ullman >= UINF {
+ if n.Op == OINDREG {
+ Fatalf("cgen: this is going to miscompile")
+ }
+ if res.Ullman >= UINF {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ cgen_wb(&n1, res, wb)
+ return
+ }
+ }
+
+ if Isfat(n.Type) {
+ if n.Type.Width < 0 {
+ Fatalf("forgot to compute width for %v", n.Type)
+ }
+ sgen_wb(n, res, n.Type.Width, wb)
+ return
+ }
+
+ if !res.Addable {
+ if n.Ullman > res.Ullman {
+ if Ctxt.Arch.Regsize == 4 && Is64(n.Type) {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ cgen_wb(&n1, res, wb)
+ return
+ }
+
+ var n1 Node
+ Regalloc(&n1, n.Type, res)
+ Cgen(n, &n1)
+ if n1.Ullman > res.Ullman {
+ Dump("n1", &n1)
+ Dump("res", res)
+ Fatalf("loop in cgen")
+ }
+
+ cgen_wb(&n1, res, wb)
+ Regfree(&n1)
+ return
+ }
+
+ var f int
+ if res.Ullman < UINF {
+ if Complexop(n, res) {
+ Complexgen(n, res)
+ return
+ }
+
+ f = 1 // gen thru register
+ switch n.Op {
+ case OLITERAL:
+ if Smallintconst(n) {
+ f = 0
+ }
+
+ case OREGISTER:
+ f = 0
+ }
+
+ if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 && !wb {
+ a := Thearch.Optoas(OAS, res.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, res, &addr) {
+ var p1 *obj.Prog
+ if f != 0 {
+ var n2 Node
+ Regalloc(&n2, res.Type, nil)
+ Cgen(n, &n2)
+ p1 = Thearch.Gins(a, &n2, nil)
+ Regfree(&n2)
+ } else {
+ p1 = Thearch.Gins(a, n, nil)
+ }
+ p1.To = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ Thearch.Sudoclean()
+ return
+ }
+ }
+ }
+
+ if Ctxt.Arch.Thechar == '8' {
+ // no registers to speak of
+ var n1, n2 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Igen(res, &n2, nil)
+ cgen_wb(&n1, &n2, wb)
+ Regfree(&n2)
+ return
+ }
+
+ var n1 Node
+ Igen(res, &n1, nil)
+ cgen_wb(n, &n1, wb)
+ Regfree(&n1)
+ return
+ }
+
+ // update addressability for string, slice
+ // can't do in walk because n->left->addable
+ // changes if n->left is an escaping local variable.
+ switch n.Op {
+ case OSPTR, OLEN:
+ if Isslice(n.Left.Type) || Istype(n.Left.Type, TSTRING) {
+ n.Addable = n.Left.Addable
+ }
+
+ case OCAP:
+ if Isslice(n.Left.Type) {
+ n.Addable = n.Left.Addable
+ }
+
+ case OITAB:
+ n.Addable = n.Left.Addable
+ }
+
+ if wb {
+ if Simtype[res.Type.Etype] != Tptr {
+ Fatalf("cgen_wb of type %v", res.Type)
+ }
+ if n.Ullman >= UINF {
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ n = &n1
+ }
+ cgen_wbptr(n, res)
+ return
+ }
+
+ // Write barrier now handled. Code below this line can ignore wb.
+
+ if Ctxt.Arch.Thechar == '5' { // TODO(rsc): Maybe more often?
+ // if both are addressable, move
+ if n.Addable && res.Addable {
+ if Is64(n.Type) || Is64(res.Type) || n.Op == OREGISTER || res.Op == OREGISTER || Iscomplex[n.Type.Etype] || Iscomplex[res.Type.Etype] {
+ Thearch.Gmove(n, res)
+ } else {
+ var n1 Node
+ Regalloc(&n1, n.Type, nil)
+ Thearch.Gmove(n, &n1)
+ Cgen(&n1, res)
+ Regfree(&n1)
+ }
+
+ return
+ }
+
+ // if both are not addressable, use a temporary.
+ if !n.Addable && !res.Addable {
+ // could use regalloc here sometimes,
+ // but have to check for ullman >= UINF.
+ var n1 Node
+ Tempname(&n1, n.Type)
+ Cgen(n, &n1)
+ Cgen(&n1, res)
+ return
+ }
+
+ // if result is not addressable directly but n is,
+ // compute its address and then store via the address.
+ if !res.Addable {
+ var n1 Node
+ Igen(res, &n1, nil)
+ Cgen(n, &n1)
+ Regfree(&n1)
+ return
+ }
+ }
+
+ if Complexop(n, res) {
+ Complexgen(n, res)
+ return
+ }
+
+ if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable {
+ Thearch.Gmove(n, res)
+ return
+ }
+
+ if Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ // if both are addressable, move
+ if n.Addable {
+ if n.Op == OREGISTER || res.Op == OREGISTER {
+ Thearch.Gmove(n, res)
+ } else {
+ var n1 Node
+ Regalloc(&n1, n.Type, nil)
+ Thearch.Gmove(n, &n1)
+ Cgen(&n1, res)
+ Regfree(&n1)
+ }
+ return
+ }
+ }
+
+ // if n is sudoaddable generate addr and move
+ if Ctxt.Arch.Thechar == '5' && !Is64(n.Type) && !Is64(res.Type) && !Iscomplex[n.Type.Etype] && !Iscomplex[res.Type.Etype] {
+ a := Thearch.Optoas(OAS, n.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, n, &addr) {
+ if res.Op != OREGISTER {
+ var n2 Node
+ Regalloc(&n2, res.Type, nil)
+ p1 := Thearch.Gins(a, nil, &n2)
+ p1.From = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ } else {
+ p1 := Thearch.Gins(a, nil, res)
+ p1.From = addr
+ if Debug['g'] != 0 {
+ fmt.Printf("%v [ignore previous line]\n", p1)
+ }
+ }
+ Thearch.Sudoclean()
+ return
+ }
+ }
+
+ nl := n.Left
+ nr := n.Right
+
+ if nl != nil && nl.Ullman >= UINF {
+ if nr != nil && nr.Ullman >= UINF {
+ var n1 Node
+ Tempname(&n1, nl.Type)
+ Cgen(nl, &n1)
+ n2 := *n
+ n2.Left = &n1
+ Cgen(&n2, res)
+ return
+ }
+ }
+
+ // 64-bit ops are hard on 32-bit machine.
+ if Ctxt.Arch.Regsize == 4 && (Is64(n.Type) || Is64(res.Type) || n.Left != nil && Is64(n.Left.Type)) {
+ switch n.Op {
+ // math goes to cgen64.
+ case OMINUS,
+ OCOM,
+ OADD,
+ OSUB,
+ OMUL,
+ OLROT,
+ OLSH,
+ ORSH,
+ OAND,
+ OOR,
+ OXOR:
+ Thearch.Cgen64(n, res)
+ return
+ }
+ }
+
+ if Thearch.Cgen_float != nil && nl != nil && Isfloat[n.Type.Etype] && Isfloat[nl.Type.Etype] {
+ Thearch.Cgen_float(n, res)
+ return
+ }
+
+ if !Iscomplex[n.Type.Etype] && Ctxt.Arch.Regsize == 8 {
+ a := Thearch.Optoas(OAS, n.Type)
+ var addr obj.Addr
+ if Thearch.Sudoaddable(a, n, &addr) {
+ if res.Op == OREGISTER {
+ p1 := Thearch.Gins(a, nil, res)
+ p1.From = addr
+ } else {
+ var n2 Node
+ Regalloc(&n2, n.Type, nil)
+ p1 := Thearch.Gins(a, nil, &n2)
+ p1.From = addr
+ Thearch.Gins(a, &n2, res)
+ Regfree(&n2)
+ }
+
+ Thearch.Sudoclean()
+ return
+ }
+ }
+
+ var a int
+ switch n.Op {
+ default:
+ Dump("cgen", n)
+ Dump("cgen-res", res)
+ Fatalf("cgen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OOROR, OANDAND,
+ OEQ, ONE,
+ OLT, OLE,
+ OGE, OGT,
+ ONOT:
+ Bvgen(n, res, true)
+ return
+
+ case OPLUS:
+ Cgen(nl, res)
+ return
+
+ // unary
+ case OCOM:
+ a := Thearch.Optoas(OXOR, nl.Type)
+
+ var n1 Node
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ var n2 Node
+ Nodconst(&n2, nl.Type, -1)
+ Thearch.Gins(a, &n2, &n1)
+ cgen_norm(n, &n1, res)
+ return
+
+ case OMINUS:
+ if Isfloat[nl.Type.Etype] {
+ nr = Nodintconst(-1)
+ Convlit(&nr, n.Type)
+ a = Thearch.Optoas(OMUL, nl.Type)
+ goto sbop
+ }
+
+ a := Thearch.Optoas(n.Op, nl.Type)
+ // unary
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+
+ Cgen(nl, &n1)
+ if Ctxt.Arch.Thechar == '5' {
+ var n2 Node
+ Nodconst(&n2, nl.Type, 0)
+ Thearch.Gins(a, &n2, &n1)
+ } else if Ctxt.Arch.Thechar == '7' {
+ Thearch.Gins(a, &n1, &n1)
+ } else {
+ Thearch.Gins(a, nil, &n1)
+ }
+ cgen_norm(n, &n1, res)
+ return
+
+ case OSQRT:
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+ Cgen(n.Left, &n1)
+ Thearch.Gins(Thearch.Optoas(OSQRT, nl.Type), &n1, &n1)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ return
+
+ case OGETG:
+ Thearch.Getg(res)
+ return
+
+ // symmetric binary
+ case OAND,
+ OOR,
+ OXOR,
+ OADD,
+ OMUL:
+ if n.Op == OMUL && Thearch.Cgen_bmul != nil && Thearch.Cgen_bmul(n.Op, nl, nr, res) {
+ break
+ }
+ a = Thearch.Optoas(n.Op, nl.Type)
+ goto sbop
+
+ // asymmetric binary
+ case OSUB:
+ a = Thearch.Optoas(n.Op, nl.Type)
+ goto abop
+
+ case OHMUL:
+ Thearch.Cgen_hmul(nl, nr, res)
+
+ case OCONV:
+ if Eqtype(n.Type, nl.Type) || Noconv(n.Type, nl.Type) {
+ Cgen(nl, res)
+ return
+ }
+
+ if Ctxt.Arch.Thechar == '8' {
+ var n1 Node
+ var n2 Node
+ Tempname(&n2, n.Type)
+ Mgen(nl, &n1, res)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ Mfree(&n1)
+ break
+ }
+
+ var n1 Node
+ var n2 Node
+ if Ctxt.Arch.Thechar == '5' {
+ if nl.Addable && !Is64(nl.Type) {
+ Regalloc(&n1, nl.Type, res)
+ Thearch.Gmove(nl, &n1)
+ } else {
+ if n.Type.Width > int64(Widthptr) || Is64(nl.Type) || Isfloat[nl.Type.Etype] {
+ Tempname(&n1, nl.Type)
+ } else {
+ Regalloc(&n1, nl.Type, res)
+ }
+ Cgen(nl, &n1)
+ }
+ if n.Type.Width > int64(Widthptr) || Is64(n.Type) || Isfloat[n.Type.Etype] {
+ Tempname(&n2, n.Type)
+ } else {
+ Regalloc(&n2, n.Type, nil)
+ }
+ } else {
+ if n.Type.Width > nl.Type.Width {
+ // If loading from memory, do conversion during load,
+ // so as to avoid use of 8-bit register in, say, int(*byteptr).
+ switch nl.Op {
+ case ODOT, ODOTPTR, OINDEX, OIND, ONAME:
+ Igen(nl, &n1, res)
+ Regalloc(&n2, n.Type, res)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ Regfree(&n1)
+ return
+ }
+ }
+ Regalloc(&n1, nl.Type, res)
+ Regalloc(&n2, n.Type, &n1)
+ Cgen(nl, &n1)
+ }
+
+ // if we do the conversion n1 -> n2 here
+ // reusing the register, then gmove won't
+ // have to allocate its own register.
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gmove(&n2, res)
+ if n2.Op == OREGISTER {
+ Regfree(&n2)
+ }
+ if n1.Op == OREGISTER {
+ Regfree(&n1)
+ }
+
+ case ODOT,
+ ODOTPTR,
+ OINDEX,
+ OIND,
+ ONAME: // PHEAP or PPARAMREF var
+ var n1 Node
+ Igen(n, &n1, res)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ // interface table is first word of interface value
+ case OITAB:
+ var n1 Node
+ Igen(nl, &n1, res)
+
+ n1.Type = n.Type
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ case OSPTR:
+ // pointer is the first word of string or slice.
+ if Isconst(nl, CTSTR) {
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+ p1 := Thearch.Gins(Thearch.Optoas(OAS, n1.Type), nil, &n1)
+ Datastring(nl.Val().U.(string), &p1.From)
+ p1.From.Type = obj.TYPE_ADDR
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ var n1 Node
+ Igen(nl, &n1, res)
+ n1.Type = n.Type
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+
+ case OLEN:
+ if Istype(nl.Type, TMAP) || Istype(nl.Type, TCHAN) {
+ // map and chan have len in the first int-sized word.
+ // a zero pointer means zero length
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+
+ Cgen(nl, &n1)
+
+ var n2 Node
+ Nodconst(&n2, Types[Tptr], 0)
+ p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
+
+ n2 = n1
+ n2.Op = OINDREG
+ n2.Type = Types[Simtype[TINT]]
+ Thearch.Gmove(&n2, &n1)
+
+ Patch(p1, Pc)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ if Istype(nl.Type, TSTRING) || Isslice(nl.Type) {
+ // both slice and string have len one pointer into the struct.
+ // a zero pointer means zero length
+ var n1 Node
+ Igen(nl, &n1, res)
+
+ n1.Type = Types[Simtype[TUINT]]
+ n1.Xoffset += int64(Array_nel)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ Fatalf("cgen: OLEN: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+ case OCAP:
+ if Istype(nl.Type, TCHAN) {
+ // chan has cap in the second int-sized word.
+ // a zero pointer means zero length
+ var n1 Node
+ Regalloc(&n1, Types[Tptr], res)
+
+ Cgen(nl, &n1)
+
+ var n2 Node
+ Nodconst(&n2, Types[Tptr], 0)
+ p1 := Thearch.Ginscmp(OEQ, Types[Tptr], &n1, &n2, 0)
+
+ n2 = n1
+ n2.Op = OINDREG
+ n2.Xoffset = int64(Widthint)
+ n2.Type = Types[Simtype[TINT]]
+ Thearch.Gmove(&n2, &n1)
+
+ Patch(p1, Pc)
+
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ if Isslice(nl.Type) {
+ var n1 Node
+ Igen(nl, &n1, res)
+ n1.Type = Types[Simtype[TUINT]]
+ n1.Xoffset += int64(Array_cap)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ break
+ }
+
+ Fatalf("cgen: OCAP: unknown type %v", Tconv(nl.Type, obj.FmtLong))
+
+ case OADDR:
+ if n.Bounded { // let race detector avoid nil checks
+ Disable_checknil++
+ }
+ Agen(nl, res)
+ if n.Bounded {
+ Disable_checknil--
+ }
+
+ case OCALLMETH:
+ cgen_callmeth(n, 0)
+ cgen_callret(n, res)
+
+ case OCALLINTER:
+ cgen_callinter(n, res, 0)
+ cgen_callret(n, res)
+
+ case OCALLFUNC:
+ cgen_call(n, 0)
+ cgen_callret(n, res)
+
+ case OMOD, ODIV:
+ if Isfloat[n.Type.Etype] || Thearch.Dodiv == nil {
+ a = Thearch.Optoas(n.Op, nl.Type)
+ goto abop
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+ Cgen(nl, &n1)
+ cgen_div(n.Op, &n1, nr, res)
+ Regfree(&n1)
+ } else {
+ var n2 Node
+ if !Smallintconst(nr) {
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ } else {
+ n2 = *nr
+ }
+
+ cgen_div(n.Op, nl, &n2, res)
+ if n2.Op != OLITERAL {
+ Regfree(&n2)
+ }
+ }
+
+ case OLSH, ORSH, OLROT:
+ Thearch.Cgen_shift(n.Op, n.Bounded, nl, nr, res)
+ }
+
+ return
+
+ // put simplest on right - we'll generate into left
+ // and then adjust it using the computation of right.
+ // constants and variables have the same ullman
+ // count, so look for constants specially.
+ //
+ // an integer constant we can use as an immediate
+ // is simpler than a variable - we can use the immediate
+ // in the adjustment instruction directly - so it goes
+ // on the right.
+ //
+ // other constants, like big integers or floating point
+ // constants, require a mov into a register, so those
+ // might as well go on the left, so we can reuse that
+ // register for the computation.
+sbop: // symmetric binary
+ if nl.Ullman < nr.Ullman || (nl.Ullman == nr.Ullman && (Smallintconst(nl) || (nr.Op == OLITERAL && !Smallintconst(nr)))) {
+ nl, nr = nr, nl
+ }
+
+abop: // asymmetric binary
+ var n1 Node
+ var n2 Node
+ if Ctxt.Arch.Thechar == '8' {
+ // no registers, sigh
+ if Smallintconst(nr) {
+ var n1 Node
+ Mgen(nl, &n1, res)
+ var n2 Node
+ Regalloc(&n2, nl.Type, &n1)
+ Thearch.Gmove(&n1, &n2)
+ Thearch.Gins(a, nr, &n2)
+ Thearch.Gmove(&n2, res)
+ Regfree(&n2)
+ Mfree(&n1)
+ } else if nl.Ullman >= nr.Ullman {
+ var nt Node
+ Tempname(&nt, nl.Type)
+ Cgen(nl, &nt)
+ var n2 Node
+ Mgen(nr, &n2, nil)
+ var n1 Node
+ Regalloc(&n1, nl.Type, res)
+ Thearch.Gmove(&nt, &n1)
+ Thearch.Gins(a, &n2, &n1)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ Mfree(&n2)
+ } else {
+ var n2 Node
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ var n1 Node
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ Thearch.Gins(a, &n2, &n1)
+ Regfree(&n2)
+ Thearch.Gmove(&n1, res)
+ Regfree(&n1)
+ }
+ return
+ }
+
+ if nl.Ullman >= nr.Ullman {
+ Regalloc(&n1, nl.Type, res)
+ Cgen(nl, &n1)
+
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ n2 = *nr
+ } else {
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+ }
+ } else {
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' { // TODO(rsc): Check opcode for arm
+ n2 = *nr
+ } else {
+ Regalloc(&n2, nr.Type, res)
+ Cgen(nr, &n2)
+ }
+
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ }
+
+ Thearch.Gins(a, &n2, &n1)
+ if n2.Op != OLITERAL {
+ Regfree(&n2)
+ }
+ cgen_norm(n, &n1, res)
+}
+
+// sys_wbptr caches the Node for runtime.writebarrierptr; it is looked up
+// lazily on first use by cgen_wbptr.
+var sys_wbptr *Node
+
+// cgen_wbptr emits "*res = n" for a single pointer-sized value with a
+// write barrier. The generated code tests runtime.writeBarrier.enabled:
+// when the barrier is off it stores directly; otherwise it spills the
+// destination address and the value to the outgoing argument area and
+// calls runtime.writebarrierptr(&dst, src).
+func cgen_wbptr(n, res *Node) {
+	if Curfn != nil {
+		if Curfn.Func.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		// Record where the first write barrier in this function was emitted.
+		if Curfn.Func.WBLineno == 0 {
+			Curfn.Func.WBLineno = lineno
+		}
+	}
+	if Debug_wb > 0 {
+		Warn("write barrier")
+	}
+
+	var dst, src Node
+	Igen(res, &dst, nil)
+	if n.Op == OREGISTER {
+		// Value already in a register: just bump its refcount.
+		src = *n
+		Regrealloc(&src)
+	} else {
+		Cgenr(n, &src, nil)
+	}
+
+	// wbEnabled = runtime.writeBarrier.enabled (first field of the struct).
+	wbVar := syslook("writeBarrier", 0)
+	wbEnabled := Nod(ODOT, wbVar, newname(wbVar.Type.Type.Sym))
+	wbEnabled = typecheck(&wbEnabled, Erv)
+	pbr := Thearch.Ginscmp(ONE, Types[TUINT8], wbEnabled, Nodintconst(0), -1)
+	// Fast path: plain store when the barrier is disabled.
+	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, &dst)
+	pjmp := Gbranch(obj.AJMP, nil, 0)
+	Patch(pbr, Pc)
+	// Slow path: pass &dst and src as the two call arguments on the stack.
+	var adst Node
+	Agenr(&dst, &adst, &dst)
+	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &adst, nil)
+	a := &p.To
+	a.Type = obj.TYPE_MEM
+	a.Reg = int16(Thearch.REGSP)
+	a.Offset = Ctxt.FixedFrameSize()
+	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+	p2.To = p.To
+	p2.To.Offset += int64(Widthptr)
+	Regfree(&adst)
+	if sys_wbptr == nil {
+		sys_wbptr = writebarrierfn("writebarrierptr", Types[Tptr], Types[Tptr])
+	}
+	Ginscall(sys_wbptr, 0)
+	Patch(pjmp, Pc)
+
+	Regfree(&dst)
+	Regfree(&src)
+}
+
+// cgen_wbfat emits "*res = *n" for a multiword value that contains
+// pointers, by calling runtime.typedmemmove(typ, &dst, &src).
+// NOTE(review): in this version needType is always true and funcName is
+// always "typedmemmove"; the variables look like remnants of a variant
+// that also supported the writebarrierfat family.
+func cgen_wbfat(n, res *Node) {
+	if Curfn != nil {
+		if Curfn.Func.Nowritebarrier {
+			Yyerror("write barrier prohibited")
+		}
+		// Record where the first write barrier in this function was emitted.
+		if Curfn.Func.WBLineno == 0 {
+			Curfn.Func.WBLineno = lineno
+		}
+	}
+	if Debug_wb > 0 {
+		Warn("write barrier")
+	}
+	needType := true
+	funcName := "typedmemmove"
+	var dst, src Node
+	// Generate the more complex side first to minimize register pressure.
+	if n.Ullman >= res.Ullman {
+		Agenr(n, &src, nil)
+		Agenr(res, &dst, nil)
+	} else {
+		Agenr(res, &dst, nil)
+		Agenr(n, &src, nil)
+	}
+	// Store &dst into the argument area; when a type pointer is passed it
+	// occupies the first slot, so &dst is shifted by one word.
+	p := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &dst, nil)
+	a := &p.To
+	a.Type = obj.TYPE_MEM
+	a.Reg = int16(Thearch.REGSP)
+	a.Offset = Ctxt.FixedFrameSize()
+	if needType {
+		a.Offset += int64(Widthptr)
+	}
+	p2 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+	p2.To = p.To
+	p2.To.Offset += int64(Widthptr)
+	Regfree(&dst)
+	if needType {
+		// Reuse the src register to store the type descriptor into slot 0.
+		src.Type = Types[Tptr]
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), typename(n.Type), &src)
+		p3 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &src, nil)
+		p3.To = p2.To
+		p3.To.Offset -= 2 * int64(Widthptr)
+	}
+	Regfree(&src)
+	Ginscall(writebarrierfn(funcName, Types[Tptr], Types[Tptr]), 0)
+}
+
+// cgen_norm moves n1 to res, truncating to expected type if necessary.
+// n1 is a register, and cgen_norm frees it.
+// On architectures that do not use sized arithmetic (anything other than
+// amd64/386 here), the result of n.Op may carry stale high bits, so a
+// same-type move is emitted first to normalize the register.
+func cgen_norm(n, n1, res *Node) {
+	switch Ctxt.Arch.Thechar {
+	case '6', '8':
+		// We use sized math, so the result is already truncated.
+	default:
+		switch n.Op {
+		case OADD, OSUB, OMUL, ODIV, OCOM, OMINUS:
+			// TODO(rsc): What about left shift?
+			Thearch.Gins(Thearch.Optoas(OAS, n.Type), n1, n1)
+		}
+	}
+
+	Thearch.Gmove(n1, res)
+	Regfree(n1)
+}
+
+// Mgen generates n into a location recorded in n1, preferring a register
+// (reusing rg if possible) when the value fits in one. Addressable nodes
+// are used in place, with the register refcount bumped for register-based
+// operands so the later free balances. The caller must call Mfree(n1).
+func Mgen(n *Node, n1 *Node, rg *Node) {
+	n1.Op = OEMPTY
+
+	if n.Addable {
+		*n1 = *n
+		if n1.Op == OREGISTER || n1.Op == OINDREG {
+			reg[n.Reg-int16(Thearch.REGMIN)]++
+		}
+		return
+	}
+
+	// Materialize into a stack temporary, then promote to a register when
+	// the value is small enough (or a float) to live in one.
+	Tempname(n1, n.Type)
+	Cgen(n, n1)
+	if n.Type.Width <= int64(Widthptr) || Isfloat[n.Type.Etype] {
+		n2 := *n1
+		Regalloc(n1, n.Type, rg)
+		Thearch.Gmove(&n2, n1)
+	}
+}
+
+// Mfree releases a location obtained from Mgen. Only register results
+// need explicit freeing; stack temporaries need no action here.
+func Mfree(n *Node) {
+	if n.Op == OREGISTER {
+		Regfree(n)
+	}
+}
+
+// Cgenr allocates a register (reusing res if possible) and generates
+//	a = n
+// The caller must call Regfree(a).
+func Cgenr(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("cgenr-n", n)
+	}
+
+	// Multiword values do not fit in a single register.
+	if Isfat(n.Type) {
+		Fatalf("cgenr on fat node")
+	}
+
+	if n.Addable {
+		Regalloc(a, n.Type, res)
+		Thearch.Gmove(n, a)
+		return
+	}
+
+	switch n.Op {
+	case ONAME,
+		ODOT,
+		ODOTPTR,
+		OINDEX,
+		OCALLFUNC,
+		OCALLMETH,
+		OCALLINTER:
+		// Compute an addressable reference first, then load through it.
+		var n1 Node
+		Igen(n, &n1, res)
+		Regalloc(a, Types[Tptr], &n1)
+		Thearch.Gmove(&n1, a)
+		Regfree(&n1)
+
+	default:
+		Regalloc(a, n.Type, res)
+		Cgen(n, a)
+	}
+}
+
+// Agenr allocates a register (reusing res if possible) and generates
+//	a = &n
+// The caller must call Regfree(a).
+// The generated code checks that the result is not nil.
+// OINDEX is handled by three separate code paths: a 32-bit ARM path
+// ('5'), a register-starved 386 path ('8'), and a generic path for the
+// remaining back ends.
+func Agenr(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nagenr-n", n)
+	}
+
+	nl := n.Left
+	nr := n.Right
+
+	switch n.Op {
+	case ODOT, ODOTPTR, OCALLFUNC, OCALLMETH, OCALLINTER:
+		var n1 Node
+		Igen(n, &n1, res)
+		Regalloc(a, Types[Tptr], &n1)
+		Agen(&n1, a)
+		Regfree(&n1)
+
+	case OIND:
+		Cgenr(n.Left, a, res)
+		Cgen_checknil(a)
+
+	case OINDEX:
+		if Ctxt.Arch.Thechar == '5' {
+			var p2 *obj.Prog // to be patched to panicindex.
+			w := uint32(n.Type.Width)
+			bounded := Debug['B'] != 0 || n.Bounded
+			var n1 Node
+			var n3 Node
+			// Evaluate base and index in an order that respects
+			// addressability, so neither clobbers the other.
+			if nr.Addable {
+				var tmp Node
+				if !Isconst(nr, CTINT) {
+					Tempname(&tmp, Types[TINT32])
+				}
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+			} else if nl.Addable {
+				if !Isconst(nr, CTINT) {
+					var tmp Node
+					Tempname(&tmp, Types[TINT32])
+					p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+			} else {
+				var tmp Node
+				Tempname(&tmp, Types[TINT32])
+				p2 = Thearch.Cgenindex(nr, &tmp, bounded)
+				nr = &tmp
+				if !Isconst(nl, CTSTR) {
+					Agenr(nl, &n3, res)
+				}
+				Regalloc(&n1, tmp.Type, nil)
+				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+			}
+
+			// &a is in &n3 (allocated in res)
+			// i is in &n1 (if not constant)
+			// w is width
+
+			// constant index
+			if Isconst(nr, CTINT) {
+				if Isconst(nl, CTSTR) {
+					Fatalf("constant string constant index")
+				}
+				v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+				var n2 Node
+				if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					if Debug['B'] == 0 && !n.Bounded {
+						// Compare the constant index against the length word.
+						n1 = n3
+						n1.Op = OINDREG
+						n1.Type = Types[Tptr]
+						n1.Xoffset = int64(Array_nel)
+						Nodconst(&n2, Types[TUINT32], int64(v))
+						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &n1, &n2, +1)
+						Ginscall(Panicindex, -1)
+						Patch(p1, Pc)
+					}
+
+					// Replace the header address with the data pointer.
+					n1 = n3
+					n1.Op = OINDREG
+					n1.Type = Types[Tptr]
+					n1.Xoffset = int64(Array_array)
+					Thearch.Gmove(&n1, &n3)
+				}
+
+				Nodconst(&n2, Types[Tptr], int64(v*uint64(w)))
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+				*a = n3
+				break
+			}
+
+			var n2 Node
+			Regalloc(&n2, Types[TINT32], &n1) // i
+			Thearch.Gmove(&n1, &n2)
+			Regfree(&n1)
+
+			var n4 Node
+			if Debug['B'] == 0 && !n.Bounded {
+				// check bounds
+				if Isconst(nl, CTSTR) {
+					Nodconst(&n4, Types[TUINT32], int64(len(nl.Val().U.(string))))
+				} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					n1 = n3
+					n1.Op = OINDREG
+					n1.Type = Types[Tptr]
+					n1.Xoffset = int64(Array_nel)
+					Regalloc(&n4, Types[TUINT32], nil)
+					Thearch.Gmove(&n1, &n4)
+				} else {
+					Nodconst(&n4, Types[TUINT32], nl.Type.Bound)
+				}
+				p1 := Thearch.Ginscmp(OLT, Types[TUINT32], &n2, &n4, +1)
+				if n4.Op == OREGISTER {
+					Regfree(&n4)
+				}
+				if p2 != nil {
+					Patch(p2, Pc)
+				}
+				Ginscall(Panicindex, -1)
+				Patch(p1, Pc)
+			}
+
+			if Isconst(nl, CTSTR) {
+				// Load the address of the string data.
+				Regalloc(&n3, Types[Tptr], res)
+				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+				Datastring(nl.Val().U.(string), &p1.From)
+				p1.From.Type = obj.TYPE_ADDR
+			} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				n1 = n3
+				n1.Op = OINDREG
+				n1.Type = Types[Tptr]
+				n1.Xoffset = int64(Array_array)
+				Thearch.Gmove(&n1, &n3)
+			}
+
+			// n3 += n2 * w, scaling the index by the element width.
+			if w == 0 {
+				// nothing to do
+			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+				// done by back end
+			} else if w == 1 {
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			} else {
+				if w&(w-1) == 0 {
+					// Power of 2. Use shift.
+					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
+				} else {
+					// Not a power of 2. Use multiply.
+					Regalloc(&n4, Types[TUINT32], nil)
+					Nodconst(&n1, Types[TUINT32], int64(w))
+					Thearch.Gmove(&n1, &n4)
+					Thearch.Gins(Thearch.Optoas(OMUL, Types[TUINT32]), &n4, &n2)
+					Regfree(&n4)
+				}
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			}
+			*a = n3
+			Regfree(&n2)
+			break
+		}
+		if Ctxt.Arch.Thechar == '8' {
+			var p2 *obj.Prog // to be patched to panicindex.
+			w := uint32(n.Type.Width)
+			bounded := Debug['B'] != 0 || n.Bounded
+			var n3 Node
+			var tmp Node
+			var n1 Node
+			if nr.Addable {
+				// Generate &nl first, and move nr into register.
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Igenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+			} else if nl.Addable {
+				// Generate nr first, and move &nl into register.
+				if !Isconst(nr, CTINT) {
+					p2 = Thearch.Igenindex(nr, &tmp, bounded)
+					Regalloc(&n1, tmp.Type, nil)
+					Thearch.Gmove(&tmp, &n1)
+				}
+
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+			} else {
+				p2 = Thearch.Igenindex(nr, &tmp, bounded)
+				nr = &tmp
+				if !Isconst(nl, CTSTR) {
+					Igen(nl, &n3, res)
+				}
+				Regalloc(&n1, tmp.Type, nil)
+				Thearch.Gins(Thearch.Optoas(OAS, tmp.Type), &tmp, &n1)
+			}
+
+			// For fixed array we really want the pointer in n3.
+			var n2 Node
+			if Isfixedarray(nl.Type) {
+				Regalloc(&n2, Types[Tptr], &n3)
+				Agen(&n3, &n2)
+				Regfree(&n3)
+				n3 = n2
+			}
+
+			// &a[0] is in n3 (allocated in res)
+			// i is in n1 (if not constant)
+			// len(a) is in nlen (if needed)
+			// w is width
+
+			// constant index
+			if Isconst(nr, CTINT) {
+				if Isconst(nl, CTSTR) {
+					Fatalf("constant string constant index") // front end should handle
+				}
+				v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+				if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					if Debug['B'] == 0 && !n.Bounded {
+						nlen := n3
+						nlen.Type = Types[TUINT32]
+						nlen.Xoffset += int64(Array_nel)
+						Nodconst(&n2, Types[TUINT32], int64(v))
+						p1 := Thearch.Ginscmp(OGT, Types[TUINT32], &nlen, &n2, +1)
+						Ginscall(Panicindex, -1)
+						Patch(p1, Pc)
+					}
+				}
+
+				// Load base pointer in n2 = n3.
+				Regalloc(&n2, Types[Tptr], &n3)
+
+				n3.Type = Types[Tptr]
+				n3.Xoffset += int64(Array_array)
+				Thearch.Gmove(&n3, &n2)
+				Regfree(&n3)
+				if v*uint64(w) != 0 {
+					Nodconst(&n1, Types[Tptr], int64(v*uint64(w)))
+					Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n1, &n2)
+				}
+				*a = n2
+				break
+			}
+
+			// i is in register n1, extend to 32 bits.
+			t := Types[TUINT32]
+
+			if Issigned[n1.Type.Etype] {
+				t = Types[TINT32]
+			}
+
+			Regalloc(&n2, t, &n1) // i
+			Thearch.Gmove(&n1, &n2)
+			Regfree(&n1)
+
+			if Debug['B'] == 0 && !n.Bounded {
+				// check bounds
+				t := Types[TUINT32]
+
+				var nlen Node
+				if Isconst(nl, CTSTR) {
+					Nodconst(&nlen, t, int64(len(nl.Val().U.(string))))
+				} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+					nlen = n3
+					nlen.Type = t
+					nlen.Xoffset += int64(Array_nel)
+				} else {
+					Nodconst(&nlen, t, nl.Type.Bound)
+				}
+
+				p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
+				if p2 != nil {
+					Patch(p2, Pc)
+				}
+				Ginscall(Panicindex, -1)
+				Patch(p1, Pc)
+			}
+
+			if Isconst(nl, CTSTR) {
+				// Load the address of the string data, then add the index.
+				Regalloc(&n3, Types[Tptr], res)
+				p1 := Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), nil, &n3)
+				Datastring(nl.Val().U.(string), &p1.From)
+				p1.From.Type = obj.TYPE_ADDR
+				Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+				goto indexdone1
+			}
+
+			// Load base pointer in n3.
+			Regalloc(&tmp, Types[Tptr], &n3)
+
+			if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				n3.Type = Types[Tptr]
+				n3.Xoffset += int64(Array_array)
+				Thearch.Gmove(&n3, &tmp)
+			}
+
+			Regfree(&n3)
+			n3 = tmp
+
+			// n3 += n2 * w, scaling the index by the element width.
+			if w == 0 {
+				// nothing to do
+			} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+				// done by back end
+			} else if w == 1 {
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			} else {
+				if w&(w-1) == 0 {
+					// Power of 2. Use shift.
+					Thearch.Ginscon(Thearch.Optoas(OLSH, Types[TUINT32]), int64(log2(uint64(w))), &n2)
+				} else {
+					// Not a power of 2. Use multiply.
+					Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT32]), int64(w), &n2)
+				}
+				Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+			}
+
+		indexdone1:
+			*a = n3
+			Regfree(&n2)
+			break
+		}
+
+		// Generic path for the remaining architectures.
+		freelen := 0
+		w := uint64(n.Type.Width)
+
+		// Generate the non-addressable child first.
+		var n3 Node
+		var nlen Node
+		var tmp Node
+		var n1 Node
+		if nr.Addable {
+			goto irad
+		}
+		if nl.Addable {
+			Cgenr(nr, &n1, nil)
+			if !Isconst(nl, CTSTR) {
+				if Isfixedarray(nl.Type) {
+					Agenr(nl, &n3, res)
+				} else {
+					// Slice/string: keep the header reference in nlen
+					// (retargeted to the length word below) and copy the
+					// data pointer into n3.
+					Igen(nl, &nlen, res)
+					freelen = 1
+					nlen.Type = Types[Tptr]
+					nlen.Xoffset += int64(Array_array)
+					Regalloc(&n3, Types[Tptr], res)
+					Thearch.Gmove(&nlen, &n3)
+					nlen.Type = Types[Simtype[TUINT]]
+					nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+				}
+			}
+
+			goto index
+		}
+
+		// Neither side addressable: spill nr to a temporary first.
+		Tempname(&tmp, nr.Type)
+		Cgen(nr, &tmp)
+		nr = &tmp
+
+	irad:
+		if !Isconst(nl, CTSTR) {
+			if Isfixedarray(nl.Type) {
+				Agenr(nl, &n3, res)
+			} else {
+				if !nl.Addable {
+					if res != nil && res.Op == OREGISTER { // give up res, which we don't need yet.
+						Regfree(res)
+					}
+
+					// igen will need an addressable node.
+					var tmp2 Node
+					Tempname(&tmp2, nl.Type)
+					Cgen(nl, &tmp2)
+					nl = &tmp2
+
+					if res != nil && res.Op == OREGISTER { // reacquire res
+						Regrealloc(res)
+					}
+				}
+
+				Igen(nl, &nlen, res)
+				freelen = 1
+				nlen.Type = Types[Tptr]
+				nlen.Xoffset += int64(Array_array)
+				Regalloc(&n3, Types[Tptr], res)
+				Thearch.Gmove(&nlen, &n3)
+				nlen.Type = Types[Simtype[TUINT]]
+				nlen.Xoffset += int64(Array_nel) - int64(Array_array)
+			}
+		}
+
+		if !Isconst(nr, CTINT) {
+			Cgenr(nr, &n1, nil)
+		}
+
+		goto index
+
+		// &a is in &n3 (allocated in res)
+		// i is in &n1 (if not constant)
+		// len(a) is in nlen (if needed)
+		// w is width
+
+		// constant index
+	index:
+		if Isconst(nr, CTINT) {
+			if Isconst(nl, CTSTR) {
+				Fatalf("constant string constant index") // front end should handle
+			}
+			v := uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+			if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				if Debug['B'] == 0 && !n.Bounded {
+					p1 := Thearch.Ginscmp(OGT, Types[Simtype[TUINT]], &nlen, Nodintconst(int64(v)), +1)
+					Ginscall(Panicindex, -1)
+					Patch(p1, Pc)
+				}
+
+				Regfree(&nlen)
+			}
+
+			if v*w != 0 {
+				Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), int64(v*w), &n3)
+			}
+			*a = n3
+			break
+		}
+
+		// type of the index
+		t := Types[TUINT64]
+
+		if Issigned[n1.Type.Etype] {
+			t = Types[TINT64]
+		}
+
+		var n2 Node
+		Regalloc(&n2, t, &n1) // i
+		Thearch.Gmove(&n1, &n2)
+		Regfree(&n1)
+
+		if Debug['B'] == 0 && !n.Bounded {
+			// check bounds
+			t = Types[Simtype[TUINT]]
+
+			if Is64(nr.Type) {
+				t = Types[TUINT64]
+			}
+			if Isconst(nl, CTSTR) {
+				Nodconst(&nlen, t, int64(len(nl.Val().U.(string))))
+			} else if Isslice(nl.Type) || nl.Type.Etype == TSTRING {
+				// nlen already initialized
+			} else {
+				Nodconst(&nlen, t, nl.Type.Bound)
+			}
+
+			p1 := Thearch.Ginscmp(OLT, t, &n2, &nlen, +1)
+			Ginscall(Panicindex, -1)
+			Patch(p1, Pc)
+		}
+
+		if Isconst(nl, CTSTR) {
+			Regalloc(&n3, Types[Tptr], res)
+			p1 := Thearch.Gins(Thearch.Optoas(OAS, n3.Type), nil, &n3) // XXX was LEAQ!
+			Datastring(nl.Val().U.(string), &p1.From)
+			p1.From.Type = obj.TYPE_ADDR
+			Thearch.Gins(Thearch.Optoas(OADD, n3.Type), &n2, &n3)
+			goto indexdone
+		}
+
+		// n3 += n2 * w, scaling the index by the element width.
+		if w == 0 {
+			// nothing to do
+		} else if Thearch.AddIndex != nil && Thearch.AddIndex(&n2, int64(w), &n3) {
+			// done by back end
+		} else if w == 1 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+		} else {
+			if w&(w-1) == 0 {
+				// Power of 2. Use shift.
+				Thearch.Ginscon(Thearch.Optoas(OLSH, t), int64(log2(w)), &n2)
+			} else {
+				// Not a power of 2. Use multiply.
+				Thearch.Ginscon(Thearch.Optoas(OMUL, t), int64(w), &n2)
+			}
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n3)
+		}
+
+	indexdone:
+		*a = n3
+		Regfree(&n2)
+		if freelen != 0 {
+			Regfree(&nlen)
+		}
+
+	default:
+		Regalloc(a, Types[Tptr], res)
+		Agen(n, a)
+	}
+}
+
+// log2 returns the logarithm base 2 of n. n must be a power of 2.
+// NOTE: if n is not a power of 2 (or is 0) the loop never terminates for
+// n == 0 and returns the floor of log2 otherwise; callers guarantee the
+// precondition with a w&(w-1) == 0 check.
+func log2(n uint64) int {
+	x := 0
+	for n>>uint(x) != 1 {
+		x++
+	}
+	return x
+}
+
+// generate:
+//	res = &n;
+// The generated code checks that the result is not nil.
+func Agen(n *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nagen-res", res)
+		Dump("agen-r", n)
+	}
+
+	if n == nil || n.Type == nil {
+		return
+	}
+
+	// Strip no-op conversions so we take the address of the real value.
+	for n.Op == OCONVNOP {
+		n = n.Left
+	}
+
+	if Isconst(n, CTNIL) && n.Type.Width > int64(Widthptr) {
+		// Use of a nil interface or nil slice.
+		// Create a temporary we can take the address of and read.
+		// The generated code is just going to panic, so it need not
+		// be terribly efficient. See issue 3670.
+		var n1 Node
+		Tempname(&n1, n.Type)
+
+		Gvardef(&n1)
+		Thearch.Clearfat(&n1)
+		var n2 Node
+		Regalloc(&n2, Types[Tptr], res)
+		var n3 Node
+		n3.Op = OADDR
+		n3.Left = &n1
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n3, &n2)
+		Thearch.Gmove(&n2, res)
+		Regfree(&n2)
+		return
+	}
+
+	if n.Op == OINDREG && n.Xoffset == 0 {
+		// Generate MOVW R0, R1 instead of MOVW $0(R0), R1.
+		// This allows better move propagation in the back ends
+		// (and maybe it helps the processor).
+		n1 := *n
+		n1.Op = OREGISTER
+		n1.Type = res.Type
+		Thearch.Gmove(&n1, res)
+		return
+	}
+
+	if n.Addable {
+		if n.Op == OREGISTER {
+			Fatalf("agen OREGISTER")
+		}
+		// Build an OADDR node around n and emit a load-address.
+		var n1 Node
+		n1.Op = OADDR
+		n1.Left = n
+		var n2 Node
+		Regalloc(&n2, Types[Tptr], res)
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &n1, &n2)
+		Thearch.Gmove(&n2, res)
+		Regfree(&n2)
+		return
+	}
+
+	nl := n.Left
+
+	switch n.Op {
+	default:
+		Fatalf("agen: unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+	case OCALLMETH:
+		cgen_callmeth(n, 0)
+		cgen_aret(n, res)
+
+	case OCALLINTER:
+		cgen_callinter(n, res, 0)
+		cgen_aret(n, res)
+
+	case OCALLFUNC:
+		cgen_call(n, 0)
+		cgen_aret(n, res)
+
+	case OEFACE, ODOTTYPE, OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+		// Materialize the value into a temporary, then take its address.
+		var n1 Node
+		Tempname(&n1, n.Type)
+		Cgen(n, &n1)
+		Agen(&n1, res)
+
+	case OINDEX:
+		var n1 Node
+		Agenr(n, &n1, res)
+		Thearch.Gmove(&n1, res)
+		Regfree(&n1)
+
+	case ONAME:
+		// should only get here with names in this func.
+		if n.Name.Funcdepth > 0 && n.Name.Funcdepth != Funcdepth {
+			Dump("bad agen", n)
+			Fatalf("agen: bad ONAME funcdepth %d != %d", n.Name.Funcdepth, Funcdepth)
+		}
+
+		// should only get here for heap vars or paramref
+		if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+			Dump("bad agen", n)
+			Fatalf("agen: bad ONAME class %#x", n.Class)
+		}
+
+		Cgen(n.Name.Heapaddr, res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+
+	case OIND:
+		Cgen(nl, res)
+		Cgen_checknil(res)
+
+	case ODOT:
+		Agen(nl, res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+
+	case ODOTPTR:
+		Cgen(nl, res)
+		Cgen_checknil(res)
+		if n.Xoffset != 0 {
+			addOffset(res, n.Xoffset)
+		}
+	}
+}
+
+// addOffset emits res += offset for a pointer-typed res. On x86
+// architectures the constant can be an immediate; elsewhere the value and
+// the constant are moved into registers first.
+func addOffset(res *Node, offset int64) {
+	if Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' {
+		Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), Nodintconst(offset), res)
+		return
+	}
+
+	var n1, n2 Node
+	Regalloc(&n1, Types[Tptr], nil)
+	Thearch.Gmove(res, &n1)
+	Regalloc(&n2, Types[Tptr], nil)
+	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), Nodintconst(offset), &n2)
+	Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &n2, &n1)
+	Thearch.Gmove(&n1, res)
+	Regfree(&n1)
+	Regfree(&n2)
+}
+
+// Igen computes the address &n, stores it in a register r,
+// and rewrites a to refer to *r. The chosen r may be the
+// stack pointer, it may be borrowed from res, or it may
+// be a newly allocated register. The caller must call Regfree(a)
+// to free r when the address is no longer needed.
+// The generated code ensures that &n is not nil.
+func Igen(n *Node, a *Node, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("\nigen-n", n)
+	}
+
+	switch n.Op {
+	case ONAME:
+		// Heap vars and parameter refs need the general path below.
+		if (n.Class&PHEAP != 0) || n.Class == PPARAMREF {
+			break
+		}
+		*a = *n
+		return
+
+	case OINDREG:
+		// Increase the refcount of the register so that igen's caller
+		// has to call Regfree.
+		if n.Reg != int16(Thearch.REGSP) {
+			reg[n.Reg-int16(Thearch.REGMIN)]++
+		}
+		*a = *n
+		return
+
+	case ODOT:
+		// Field access: fold the field offset into the address.
+		Igen(n.Left, a, res)
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		Fixlargeoffset(a)
+		return
+
+	case ODOTPTR:
+		// Load the pointer, nil-check it, then address the field.
+		Cgenr(n.Left, a, res)
+		Cgen_checknil(a)
+		a.Op = OINDREG
+		a.Xoffset += n.Xoffset
+		a.Type = n.Type
+		Fixlargeoffset(a)
+		return
+
+	case OCALLFUNC, OCALLMETH, OCALLINTER:
+		switch n.Op {
+		case OCALLFUNC:
+			cgen_call(n, 0)
+
+		case OCALLMETH:
+			cgen_callmeth(n, 0)
+
+		case OCALLINTER:
+			cgen_callinter(n, nil, 0)
+		}
+
+		// Refer to the result slot directly in the callee's out-args area.
+		var flist Iter
+		fp := Structfirst(&flist, Getoutarg(n.Left.Type))
+		*a = Node{}
+		a.Op = OINDREG
+		a.Reg = int16(Thearch.REGSP)
+		a.Addable = true
+		a.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+		a.Type = n.Type
+		return
+
+	// Index of fixed-size array by constant can
+	// put the offset in the addressing.
+	// Could do the same for slice except that we need
+	// to use the real index for the bounds checking.
+	case OINDEX:
+		if Isfixedarray(n.Left.Type) || (Isptr[n.Left.Type.Etype] && Isfixedarray(n.Left.Left.Type)) {
+			if Isconst(n.Right, CTINT) {
+				// Compute &a.
+				if !Isptr[n.Left.Type.Etype] {
+					Igen(n.Left, a, res)
+				} else {
+					var n1 Node
+					Igen(n.Left, &n1, res)
+					Cgen_checknil(&n1)
+					Regalloc(a, Types[Tptr], res)
+					Thearch.Gmove(&n1, a)
+					Regfree(&n1)
+					a.Op = OINDREG
+				}
+
+				// Compute &a[i] as &a + i*width.
+				a.Type = n.Type
+
+				a.Xoffset += Mpgetfix(n.Right.Val().U.(*Mpint)) * n.Type.Width
+				Fixlargeoffset(a)
+				return
+			}
+		}
+	}
+
+	// General case: compute the address into a register.
+	Agenr(n, a, res)
+	a.Op = OINDREG
+	a.Type = n.Type
+}
+
+// Bgen generates code for branches:
+//
+//	if n == wantTrue {
+//		goto to
+//	}
+//
+// likely is a branch-prediction hint; see bgenx.
+func Bgen(n *Node, wantTrue bool, likely int, to *obj.Prog) {
+	bgenx(n, nil, wantTrue, likely, to)
+}
+
+// Bvgen generates code for calculating boolean values:
+//	res = n == wantTrue
+// Architectures without a direct boolean-value instruction fall back to
+// the jump-based bvgenjump.
+func Bvgen(n, res *Node, wantTrue bool) {
+	if Thearch.Ginsboolval == nil {
+		// Direct value generation not implemented for this architecture.
+		// Implement using jumps.
+		bvgenjump(n, res, wantTrue, true)
+		return
+	}
+	bgenx(n, res, wantTrue, 0, nil)
+}
+
+// bvgenjump implements boolean value generation using jumps:
+//	if n == wantTrue {
+//		res = 1
+//	} else {
+//		res = 0
+//	}
+// geninit controls whether n's Ninit is generated.
+func bvgenjump(n, res *Node, wantTrue, geninit bool) {
+	init := n.Ninit
+	if !geninit {
+		// Temporarily detach Ninit so Bgen below does not emit it;
+		// restored before returning.
+		n.Ninit = nil
+	}
+	// Layout: jump over the "res = true" block, branch back into it from
+	// the condition, and fall through to "res = false" otherwise.
+	p1 := Gbranch(obj.AJMP, nil, 0)
+	p2 := Pc
+	Thearch.Gmove(Nodbool(true), res)
+	p3 := Gbranch(obj.AJMP, nil, 0)
+	Patch(p1, Pc)
+	Bgen(n, wantTrue, 0, p2)
+	Thearch.Gmove(Nodbool(false), res)
+	Patch(p3, Pc)
+	n.Ninit = init
+}
+
+// bgenx is the backend for Bgen and Bvgen.
+// If res is nil, it generates a branch.
+// Otherwise, it generates a boolean value.
+func bgenx(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+ if Debug['g'] != 0 {
+ fmt.Printf("\nbgenx wantTrue=%t likely=%d to=%v\n", wantTrue, likely, to)
+ Dump("n", n)
+ Dump("res", res)
+ }
+
+ genval := res != nil
+
+ if n == nil {
+ n = Nodbool(true)
+ }
+
+ Genlist(n.Ninit)
+
+ if n.Type == nil {
+ Convlit(&n, Types[TBOOL])
+ if n.Type == nil {
+ return
+ }
+ }
+
+ if n.Type.Etype != TBOOL {
+ Fatalf("bgen: bad type %v for %v", n.Type, Oconv(int(n.Op), 0))
+ }
+
+ for n.Op == OCONVNOP {
+ n = n.Left
+ Genlist(n.Ninit)
+ }
+
+ if Thearch.Bgen_float != nil && n.Left != nil && Isfloat[n.Left.Type.Etype] {
+ if genval {
+ bvgenjump(n, res, wantTrue, false)
+ return
+ }
+ Thearch.Bgen_float(n, wantTrue, likely, to)
+ return
+ }
+
+ switch n.Op {
+ default:
+ if genval {
+ Cgen(n, res)
+ if !wantTrue {
+ Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
+ }
+ return
+ }
+
+ var tmp Node
+ Regalloc(&tmp, n.Type, nil)
+ Cgen(n, &tmp)
+ bgenNonZero(&tmp, nil, wantTrue, likely, to)
+ Regfree(&tmp)
+ return
+
+ case ONAME:
+ if genval {
+ // 5g, 7g, and 9g might need a temporary or other help here,
+ // but they don't support direct generation of a bool value yet.
+ // We can fix that as we go.
+ switch Ctxt.Arch.Thechar {
+ case '0', '5', '7', '9':
+ Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
+ }
+ Cgen(n, res)
+ if !wantTrue {
+ Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res)
+ }
+ return
+ }
+
+ if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
+ // no need for a temporary
+ bgenNonZero(n, nil, wantTrue, likely, to)
+ return
+ }
+ var tmp Node
+ Regalloc(&tmp, n.Type, nil)
+ Cgen(n, &tmp)
+ bgenNonZero(&tmp, nil, wantTrue, likely, to)
+ Regfree(&tmp)
+ return
+
+ case OLITERAL:
+ // n is a constant.
+ if !Isconst(n, CTBOOL) {
+ Fatalf("bgen: non-bool const %v\n", Nconv(n, obj.FmtLong))
+ }
+ if genval {
+ Cgen(Nodbool(wantTrue == n.Val().U.(bool)), res)
+ return
+ }
+ // If n == wantTrue, jump; otherwise do nothing.
+ if wantTrue == n.Val().U.(bool) {
+ Patch(Gbranch(obj.AJMP, nil, likely), to)
+ }
+ return
+
+ case OANDAND, OOROR:
+ and := (n.Op == OANDAND) == wantTrue
+ if genval {
+ p1 := Gbranch(obj.AJMP, nil, 0)
+ p2 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p2, Pc)
+ Cgen(Nodbool(!and), res)
+ p3 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ Bgen(n.Left, wantTrue != and, 0, p2)
+ Bvgen(n.Right, res, wantTrue)
+ Patch(p3, Pc)
+ return
+ }
+
+ if and {
+ p1 := Gbranch(obj.AJMP, nil, 0)
+ p2 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ Bgen(n.Left, !wantTrue, -likely, p2)
+ Bgen(n.Right, !wantTrue, -likely, p2)
+ p1 = Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, to)
+ Patch(p2, Pc)
+ } else {
+ Bgen(n.Left, wantTrue, likely, to)
+ Bgen(n.Right, wantTrue, likely, to)
+ }
+ return
+
+ case ONOT: // unary
+ if n.Left == nil || n.Left.Type == nil {
+ return
+ }
+ bgenx(n.Left, res, !wantTrue, likely, to)
+ return
+
+ case OEQ, ONE, OLT, OGT, OLE, OGE:
+ if n.Left == nil || n.Left.Type == nil || n.Right == nil || n.Right.Type == nil {
+ return
+ }
+ }
+
+ // n.Op is one of OEQ, ONE, OLT, OGT, OLE, OGE
+ nl := n.Left
+ nr := n.Right
+ op := n.Op
+
+ if !wantTrue {
+ if Isfloat[nr.Type.Etype] {
+ // Brcom is not valid on floats when NaN is involved.
+ ll := n.Ninit // avoid re-genning Ninit
+ n.Ninit = nil
+ if genval {
+ bgenx(n, res, true, likely, to)
+ Thearch.Gins(Thearch.Optoas(OXOR, Types[TUINT8]), Nodintconst(1), res) // res = !res
+ n.Ninit = ll
+ return
+ }
+ p1 := Gbranch(obj.AJMP, nil, 0)
+ p2 := Gbranch(obj.AJMP, nil, 0)
+ Patch(p1, Pc)
+ bgenx(n, res, true, -likely, p2)
+ Patch(Gbranch(obj.AJMP, nil, 0), to)
+ Patch(p2, Pc)
+ n.Ninit = ll
+ return
+ }
+
+ op = Brcom(op)
+ }
+ wantTrue = true
+
+ // make simplest on right
+ if nl.Op == OLITERAL || (nl.Ullman < nr.Ullman && nl.Ullman < UINF) {
+ op = Brrev(op)
+ nl, nr = nr, nl
+ }
+
+ if Isslice(nl.Type) || Isinter(nl.Type) {
+ // front end should only leave cmp to literal nil
+ if (op != OEQ && op != ONE) || nr.Op != OLITERAL {
+ if Isslice(nl.Type) {
+ Yyerror("illegal slice comparison")
+ } else {
+ Yyerror("illegal interface comparison")
+ }
+ return
+ }
+
+ var ptr Node
+ Igen(nl, &ptr, nil)
+ if Isslice(nl.Type) {
+ ptr.Xoffset += int64(Array_array)
+ }
+ ptr.Type = Types[Tptr]
+ var tmp Node
+ Regalloc(&tmp, ptr.Type, &ptr)
+ Cgen(&ptr, &tmp)
+ Regfree(&ptr)
+ bgenNonZero(&tmp, res, op == OEQ != wantTrue, likely, to)
+ Regfree(&tmp)
+ return
+ }
+
+ if Iscomplex[nl.Type.Etype] {
+ complexbool(op, nl, nr, res, wantTrue, likely, to)
+ return
+ }
+
+ if Ctxt.Arch.Regsize == 4 && Is64(nr.Type) {
+ if genval {
+ // TODO: Teach Cmp64 to generate boolean values and remove this.
+ bvgenjump(n, res, wantTrue, false)
+ return
+ }
+ if !nl.Addable || Isconst(nl, CTINT) {
+ nl = CgenTemp(nl)
+ }
+ if !nr.Addable {
+ nr = CgenTemp(nr)
+ }
+ Thearch.Cmp64(nl, nr, op, likely, to)
+ return
+ }
+
+ if nr.Ullman >= UINF {
+ var n1 Node
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(nl, &n1)
+ nl = &n1
+
+ var tmp Node
+ Tempname(&tmp, nl.Type)
+ Thearch.Gmove(&n1, &tmp)
+ Regfree(&n1)
+
+ var n2 Node
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+ nr = &n2
+
+ Regalloc(&n1, nl.Type, nil)
+ Cgen(&tmp, &n1)
+ Regfree(&n1)
+ Regfree(&n2)
+ } else {
+ var n1 Node
+ if !nl.Addable && Ctxt.Arch.Thechar == '8' {
+ Tempname(&n1, nl.Type)
+ } else {
+ Regalloc(&n1, nl.Type, nil)
+ defer Regfree(&n1)
+ }
+ Cgen(nl, &n1)
+ nl = &n1
+
+ if Smallintconst(nr) && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '9' {
+ Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), nl, nr)
+ bins(nr.Type, res, op, likely, to)
+ return
+ }
+
+ if !nr.Addable && Ctxt.Arch.Thechar == '8' {
+ nr = CgenTemp(nr)
+ }
+
+ var n2 Node
+ Regalloc(&n2, nr.Type, nil)
+ Cgen(nr, &n2)
+ nr = &n2
+ Regfree(&n2)
+ }
+
+ l, r := nl, nr
+
+ // On x86, only < and <= work right with NaN; reverse if needed
+ if Ctxt.Arch.Thechar == '6' && Isfloat[nl.Type.Etype] && (op == OGT || op == OGE) {
+ l, r = r, l
+ op = Brrev(op)
+ }
+
+ // MIPS does not have CMP instruction
+ if Ctxt.Arch.Thechar == '0' {
+ p := Thearch.Ginscmp(op, nr.Type, l, r, likely)
+ Patch(p, to)
+ return
+ }
+
+ // Do the comparison.
+ Thearch.Gins(Thearch.Optoas(OCMP, nr.Type), l, r)
+
+ // Handle floating point special cases.
+ // Note that 8g has Bgen_float and is handled above.
+ if Isfloat[nl.Type.Etype] {
+ switch Ctxt.Arch.Thechar {
+ case '5':
+ if genval {
+ Fatalf("genval 5g Isfloat special cases not implemented")
+ }
+ switch n.Op {
+ case ONE:
+ Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
+ default:
+ p := Gbranch(Thearch.Optoas(OPS, nr.Type), nr.Type, -likely)
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
+ Patch(p, Pc)
+ }
+ return
+ case '6':
+ switch n.Op {
+ case OEQ:
+ // neither NE nor P
+ if genval {
+ var reg Node
+ Regalloc(®, Types[TBOOL], nil)
+ Thearch.Ginsboolval(Thearch.Optoas(OEQ, nr.Type), ®)
+ Thearch.Ginsboolval(Thearch.Optoas(OPC, nr.Type), res)
+ Thearch.Gins(Thearch.Optoas(OAND, Types[TBOOL]), ®, res)
+ Regfree(®)
+ } else {
+ p1 := Gbranch(Thearch.Optoas(ONE, nr.Type), nil, -likely)
+ p2 := Gbranch(Thearch.Optoas(OPS, nr.Type), nil, -likely)
+ Patch(Gbranch(obj.AJMP, nil, 0), to)
+ Patch(p1, Pc)
+ Patch(p2, Pc)
+ }
+ return
+ case ONE:
+ // either NE or P
+ if genval {
+ var reg Node
+ Regalloc(®, Types[TBOOL], nil)
+ Thearch.Ginsboolval(Thearch.Optoas(ONE, nr.Type), ®)
+ Thearch.Ginsboolval(Thearch.Optoas(OPS, nr.Type), res)
+ Thearch.Gins(Thearch.Optoas(OOR, Types[TBOOL]), ®, res)
+ Regfree(®)
+ } else {
+ Patch(Gbranch(Thearch.Optoas(ONE, nr.Type), nil, likely), to)
+ Patch(Gbranch(Thearch.Optoas(OPS, nr.Type), nil, likely), to)
+ }
+ return
+ }
+ case '7', '9':
+ if genval {
+ Fatalf("genval 7g, 9g Isfloat special cases not implemented")
+ }
+ switch n.Op {
+ // On arm64 and ppc64, <= and >= mishandle NaN. Must decompose into < or > and =.
+ // TODO(josh): Convert a <= b to b > a instead?
+ case OLE, OGE:
+ if op == OLE {
+ op = OLT
+ } else {
+ op = OGT
+ }
+ Patch(Gbranch(Thearch.Optoas(op, nr.Type), nr.Type, likely), to)
+ Patch(Gbranch(Thearch.Optoas(OEQ, nr.Type), nr.Type, likely), to)
+ return
+ }
+ }
+ }
+
+ // Not a special case. Insert the conditional jump or value gen.
+ bins(nr.Type, res, op, likely, to)
+}
+
+// bgenNonZero compares n against zero and then, via bins, either
+// branches to to or materializes the boolean result into res.
+// wantTrue selects whether "n != 0" or "n == 0" is the jump/true case.
+func bgenNonZero(n, res *Node, wantTrue bool, likely int, to *obj.Prog) {
+	// TODO: Optimize on systems that can compare to zero easily.
+	op := Op(OEQ)
+	if wantTrue {
+		op = ONE
+	}
+
+	// MIPS has no CMP instruction: branch on the operand directly.
+	if Thearch.Thechar == '0' {
+		br := Gbranch(Thearch.Optoas(op, n.Type), n.Type, likely)
+		Naddr(&br.From, n)
+		Patch(br, to)
+		return
+	}
+
+	// Everywhere else: explicit compare against a zero constant,
+	// then branch or generate the value.
+	var zconst Node
+	Nodconst(&zconst, n.Type, 0)
+	Thearch.Gins(Thearch.Optoas(OCMP, n.Type), n, &zconst)
+	bins(n.Type, res, op, likely, to)
+}
+
+// bins inserts an instruction to handle the result of a compare.
+// If res is non-nil, it inserts appropriate value generation instructions.
+// If res is nil, it inserts a branch to to.
+func bins(typ *Type, res *Node, op Op, likely int, to *obj.Prog) {
+	asm := Thearch.Optoas(op, typ)
+	if res == nil {
+		// No destination register: emit a conditional jump.
+		Patch(Gbranch(asm, typ, likely), to)
+		return
+	}
+	// Materialize the condition as a boolean value in res.
+	Thearch.Ginsboolval(asm, res)
+}
+
+// stkof returns n's offset from SP if n is on the stack
+// (either a local variable or the return value from a function call
+// or the arguments to a function call).
+// If n is not on the stack, stkof returns -1000.
+// If n is on the stack but in an unknown location
+// (due to array index arithmetic), stkof returns +1000.
+//
+// NOTE(rsc): It is possible that the ODOT and OINDEX cases
+// are not relevant here, since it shouldn't be possible for them
+// to be involved in an overlapping copy. Only function results
+// from one call and the arguments to the next can overlap in
+// any non-trivial way. If they can be dropped, then this function
+// becomes much simpler and also more trustworthy.
+// The fact that it works at all today is probably due to the fact
+// that ODOT and OINDEX are irrelevant.
+func stkof(n *Node) int64 {
+	switch n.Op {
+	case OINDREG:
+		// Direct indirection through a register: on the stack only
+		// when that register is SP.
+		if n.Reg == int16(Thearch.REGSP) {
+			return n.Xoffset
+		}
+		return -1000 // not on stack
+
+	case ODOT:
+		// A field is on the stack iff its struct is, unless the
+		// selection goes through a pointer.
+		if !Isptr[n.Left.Type.Etype] {
+			off := stkof(n.Left)
+			if off == -1000 || off == +1000 {
+				return off
+			}
+			return off + n.Xoffset
+		}
+
+	case OINDEX:
+		t := n.Left.Type
+		if Isfixedarray(t) {
+			off := stkof(n.Left)
+			if off == -1000 || off == +1000 {
+				return off
+			}
+			if Isconst(n.Right, CTINT) {
+				// Constant index: exact element offset.
+				return off + t.Type.Width*Mpgetfix(n.Right.Val().U.(*Mpint))
+			}
+			return +1000 // on stack but not sure exactly where
+		}
+
+	case OCALLMETH, OCALLINTER, OCALLFUNC:
+		t := n.Left.Type
+		if Isptr[t.Etype] {
+			t = t.Type
+		}
+
+		// Results live in the outgoing-argument area; the first
+		// result's Width is its offset there.
+		var flist Iter
+		if first := Structfirst(&flist, Getoutarg(t)); first != nil {
+			return first.Width + Ctxt.FixedFrameSize()
+		}
+	}
+
+	// botch - probably failing to recognize address
+	// arithmetic on the above. eg INDEX and DOT
+	return -1000 // not on stack
+}
+
+// sgen_wb generates a block copy:
+//	memmove(&ns, &n, w)
+// If wb is true, the destination needs a write barrier.
+// It handles overlapping stack-to-stack copies by staging through a
+// temporary when the relative order of source and destination is unknown.
+func sgen_wb(n *Node, ns *Node, w int64, wb bool) {
+	if Debug['g'] != 0 {
+		op := "sgen"
+		if wb {
+			op = "sgen-wb"
+		}
+		fmt.Printf("\n%s w=%d\n", op, w)
+		Dump("r", n)
+		Dump("res", ns)
+	}
+
+	// Two sides that both involve function calls cannot be ordered
+	// safely here; the front end should have prevented this.
+	if n.Ullman >= UINF && ns.Ullman >= UINF {
+		Fatalf("sgen UINF")
+	}
+
+	if w < 0 {
+		Fatalf("sgen copy %d", w)
+	}
+
+	// If copying .args, that's all the results, so record definition sites
+	// for them for the liveness analysis.
+	if ns.Op == ONAME && ns.Sym.Name == ".args" {
+		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+			if l.N.Class == PPARAMOUT {
+				Gvardef(l.N)
+			}
+		}
+	}
+
+	// Avoid taking the address for simple enough types.
+	if componentgen_wb(n, ns, wb) {
+		return
+	}
+
+	if w == 0 {
+		// evaluate side effects only:
+		// compute both addresses but copy nothing.
+		var nodr Node
+		Regalloc(&nodr, Types[Tptr], nil)
+		Agen(ns, &nodr)
+		Agen(n, &nodr)
+		Regfree(&nodr)
+		return
+	}
+
+	// offset on the stack; stkof reports -1000 for "not on stack"
+	// and +1000 for "on stack at an unknown offset".
+	osrc := stkof(n)
+	odst := stkof(ns)
+
+	if odst != -1000 {
+		// on stack, write barrier not needed after all
+		wb = false
+	}
+
+	if osrc != -1000 && odst != -1000 && (osrc == 1000 || odst == 1000) || wb && osrc != -1000 {
+		// osrc and odst both on stack, and at least one is in
+		// an unknown position. Could generate code to test
+		// for forward/backward copy, but instead just copy
+		// to a temporary location first.
+		//
+		// OR: write barrier needed and source is on stack.
+		// Invoking the write barrier will use the stack to prepare its call.
+		// Copy to temporary.
+		var tmp Node
+		Tempname(&tmp, n.Type)
+		sgen_wb(n, &tmp, w, false)
+		sgen_wb(&tmp, ns, w, wb)
+		return
+	}
+
+	if wb {
+		// Fat (multiword) write-barriered store.
+		cgen_wbfat(n, ns)
+		return
+	}
+
+	// Plain copy: let the back end pick the best block-copy sequence.
+	Thearch.Blockcopy(n, ns, osrc, odst, w)
+}
+
+// Ginscall generates a call to f:
+//	call f
+// proc selects the flavor of call:
+//	proc=-1	normal call but no return
+//	proc=0	normal call
+//	proc=1	goroutine run in new proc (go f())
+//	proc=2	deferred call (defer f())
+//	proc=3	normal call to C pointer (not Go func value)
+func Ginscall(f *Node, proc int) {
+	if f.Type != nil {
+		// go/defer pass two extra words (argsize and fn) on the stack.
+		extra := int32(0)
+		if proc == 1 || proc == 2 {
+			extra = 2 * int32(Widthptr)
+		}
+		Setmaxarg(f.Type, extra)
+	}
+
+	switch proc {
+	default:
+		Fatalf("Ginscall: bad proc %d", proc)
+
+	case 0, // normal call
+		-1: // normal call but no return
+		if f.Op == ONAME && f.Class == PFUNC {
+			if f == Deferreturn {
+				// Deferred calls will appear to be returning to the CALL
+				// deferreturn(SB) that we are about to emit. However, the
+				// stack scanning code will think that the instruction
+				// before the CALL is executing. To avoid the scanning
+				// code making bad assumptions (both cosmetic such as
+				// showing the wrong line number and fatal, such as being
+				// confused over whether a stack slot contains a pointer
+				// or a scalar) insert an actual hardware NOP that will
+				// have the right line number. This is different from
+				// obj.ANOP, which is a virtual no-op that doesn't make it
+				// into the instruction stream.
+				Thearch.Ginsnop()
+
+				if Thearch.Thechar == '9' {
+					// On ppc64, when compiling Go into position
+					// independent code on ppc64le we insert an
+					// instruction to reload the TOC pointer from the
+					// stack as well. See the long comment near
+					// jmpdefer in runtime/asm_ppc64.s for why.
+					// If the MOVD is not needed, insert a hardware NOP
+					// so that the same number of instructions are used
+					// on ppc64 in both shared and non-shared modes.
+					if Ctxt.Flag_shared != 0 {
+						p := Thearch.Gins(ppc64.AMOVD, nil, nil)
+						p.From.Type = obj.TYPE_MEM
+						p.From.Offset = 24
+						p.From.Reg = ppc64.REGSP
+						p.To.Type = obj.TYPE_REG
+						p.To.Reg = ppc64.REG_R2
+					} else {
+						Thearch.Ginsnop()
+					}
+				}
+			}
+
+			// Direct call to a known function symbol.
+			p := Thearch.Gins(obj.ACALL, nil, f)
+			Afunclit(&p.To, f)
+			if proc == -1 || Noreturn(p) {
+				// Control cannot continue past this call; trap if it does.
+				Thearch.Gins(obj.AUNDEF, nil, nil)
+			}
+			break
+		}
+
+		// Indirect call through a Go func value: move the func value
+		// into the context register, load the code pointer from its
+		// first word, and call through the register.
+		var reg Node
+		Nodreg(&reg, Types[Tptr], Thearch.REGCTXT)
+		var r1 Node
+		Nodreg(&r1, Types[Tptr], Thearch.REGCALLX)
+		Thearch.Gmove(f, &reg)
+		reg.Op = OINDREG
+		Thearch.Gmove(&reg, &r1)
+		reg.Op = OREGISTER
+		Thearch.Gins(obj.ACALL, &reg, &r1)
+
+	case 3: // normal call of c function pointer
+		Thearch.Gins(obj.ACALL, nil, f)
+
+	case 1, // call in new proc (go)
+		2: // deferred call (defer)
+		var stk Node
+
+		// size of arguments at 0(SP)
+		stk.Op = OINDREG
+		stk.Reg = int16(Thearch.REGSP)
+		stk.Xoffset = Ctxt.FixedFrameSize()
+		Thearch.Ginscon(Thearch.Optoas(OAS, Types[TINT32]), int64(Argsize(f.Type)), &stk)
+
+		// FuncVal* at 8(SP)
+		stk.Xoffset = int64(Widthptr) + Ctxt.FixedFrameSize()
+
+		var reg Node
+		Nodreg(&reg, Types[Tptr], Thearch.REGCALLX2)
+		Thearch.Gmove(f, &reg)
+		Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &reg, &stk)
+
+		if proc == 1 {
+			Ginscall(Newproc, 0)
+		} else {
+			if !hasdefer {
+				Fatalf("hasdefer=0 but has defer")
+			}
+			Ginscall(Deferproc, 0)
+		}
+
+		if proc == 2 {
+			// A nonzero result from deferproc means the frame must
+			// return immediately (used by the runtime for recover —
+			// see runtime.deferproc). Branch over the generated
+			// return when the result is zero.
+			Nodreg(&reg, Types[TINT32], Thearch.REGRETURN)
+			p := Thearch.Ginscmp(OEQ, Types[TINT32], &reg, Nodintconst(0), +1)
+			cgen_ret(nil)
+			Patch(p, Pc)
+		}
+	}
+}
+
+// cgen_callinter generates res = n, where n is a call to an interface
+// method. proc is interpreted as in Ginscall. It loads the receiver's
+// data word into the argument area, fetches the method's entry from the
+// itab, and emits the call.
+func cgen_callinter(n *Node, res *Node, proc int) {
+	i := n.Left
+	if i.Op != ODOTINTER {
+		Fatalf("cgen_callinter: not ODOTINTER %v", Oconv(int(i.Op), 0))
+	}
+
+	f := i.Right // field
+	if f.Op != ONAME {
+		Fatalf("cgen_callinter: not ONAME %v", Oconv(int(f.Op), 0))
+	}
+
+	i = i.Left // interface
+
+	// Spill the interface value to a temporary if needed so that its
+	// address can be taken below.
+	if !i.Addable {
+		var tmpi Node
+		Tempname(&tmpi, i.Type)
+		Cgen(i, &tmpi)
+		i = &tmpi
+	}
+
+	Genlist(n.List) // assign the args
+
+	// i is now addable, prepare an indirected
+	// register to hold its address.
+	var nodi Node
+	Igen(i, &nodi, res) // REG = &inter
+
+	// Store the interface's data word (the receiver) into the outgoing
+	// argument area; for go/defer it sits past the size & fn words.
+	var nodsp Node
+	Nodindreg(&nodsp, Types[Tptr], Thearch.REGSP)
+	nodsp.Xoffset = Ctxt.FixedFrameSize()
+	if proc != 0 {
+		nodsp.Xoffset += 2 * int64(Widthptr) // leave room for size & fn
+	}
+	nodi.Type = Types[Tptr]
+	nodi.Xoffset += int64(Widthptr)
+	Cgen(&nodi, &nodsp) // {0, 8(nacl), or 16}(SP) = 8(REG) -- i.data
+
+	var nodo Node
+	Regalloc(&nodo, Types[Tptr], res)
+
+	nodi.Type = Types[Tptr]
+	nodi.Xoffset -= int64(Widthptr)
+	Cgen(&nodi, &nodo) // REG = 0(REG) -- i.tab
+	Regfree(&nodi)
+
+	var nodr Node
+	Regalloc(&nodr, Types[Tptr], &nodo)
+	if n.Left.Xoffset == BADWIDTH {
+		Fatalf("cgen_callinter: badwidth")
+	}
+	Cgen_checknil(&nodo) // in case offset is huge
+	nodo.Op = OINDREG
+	// Method entry within the itab; the per-method offset was computed
+	// by the front end into n.Left.Xoffset.
+	nodo.Xoffset = n.Left.Xoffset + 3*int64(Widthptr) + 8
+	if proc == 0 {
+		// plain call: use direct c function pointer - more efficient
+		Cgen(&nodo, &nodr) // REG = 32+offset(REG) -- i.tab->fun[f]
+		proc = 3
+	} else {
+		// go/defer. generate go func value.
+		Agen(&nodo, &nodr) // REG = &(32+offset(REG)) -- i.tab->fun[f]
+	}
+
+	nodr.Type = n.Left.Type
+	Ginscall(&nodr, proc)
+
+	Regfree(&nodr)
+	Regfree(&nodo)
+}
+
+// cgen_call generates a function call for n.
+//	proc=0	normal call
+//	proc=1	goroutine run in new proc
+//	proc=2	defer call save away stack
+func cgen_call(n *Node, proc int) {
+	if n == nil {
+		return
+	}
+
+	var afun Node
+	if n.Left.Ullman >= UINF {
+		// The callee expression itself involves a function call:
+		// precompute its address before evaluating the arguments.
+		Tempname(&afun, Types[Tptr])
+		Cgen(n.Left, &afun)
+	}
+
+	Genlist(n.List) // assign the args
+	t := n.Left.Type
+
+	// Decide whether this is an indirect call, and through what.
+	var callee *Node
+	switch {
+	case n.Left.Ullman >= UINF:
+		// call through the precomputed temporary
+		callee = &afun
+	case n.Left.Op != ONAME || n.Left.Class != PFUNC:
+		// call through a func-valued expression
+		callee = n.Left
+	}
+	if callee != nil {
+		var fnreg Node
+		Regalloc(&fnreg, Types[Tptr], nil)
+		Cgen_as(&fnreg, callee)
+		fnreg.Type = t
+		Ginscall(&fnreg, proc)
+		Regfree(&fnreg)
+		return
+	}
+
+	// Direct call to a known function.
+	n.Left.Name.Method = true
+	Ginscall(n.Left, proc)
+}
+
+// cgen_callret generates
+//	res = return value from call
+// after the call to n has already been generated. The first result is
+// addressed at its fixed offset in the outgoing argument area.
+func cgen_callret(n *Node, res *Node) {
+	t := n.Left.Type
+	// Use Isptr for consistency with cgen_aret; it is true exactly for
+	// TPTR32 and TPTR64, which is what this test previously spelled out.
+	if Isptr[t.Etype] {
+		t = t.Type
+	}
+
+	var flist Iter
+	fp := Structfirst(&flist, Getoutarg(t))
+	if fp == nil {
+		Fatalf("cgen_callret: nil")
+	}
+
+	// Describe the result slot as an SP-relative indirect operand,
+	// then assign it to res.
+	var nod Node
+	nod.Op = OINDREG
+	nod.Reg = int16(Thearch.REGSP)
+	nod.Addable = true
+
+	nod.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+	nod.Type = fp.Type
+	Cgen_as(res, &nod)
+}
+
+// cgen_aret generates
+//	res = &return value from call
+// after the call to n has already been generated.
+func cgen_aret(n *Node, res *Node) {
+	t := n.Left.Type
+	if Isptr[t.Etype] {
+		t = t.Type
+	}
+
+	var flist Iter
+	fp := Structfirst(&flist, Getoutarg(t))
+	if fp == nil {
+		Fatalf("cgen_aret: nil")
+	}
+
+	// Describe the result slot as an SP-relative indirect operand.
+	var slot Node
+	slot.Op = OINDREG
+	slot.Reg = int16(Thearch.REGSP)
+	slot.Addable = true
+	slot.Xoffset = fp.Width + Ctxt.FixedFrameSize()
+	slot.Type = fp.Type
+
+	if res.Op == OREGISTER {
+		// res is already a register: take the address directly.
+		Agen(&slot, res)
+		return
+	}
+	// Otherwise compute the address into a scratch register and
+	// store it out to res.
+	var addr Node
+	Regalloc(&addr, Types[Tptr], res)
+	Agen(&slot, &addr)
+	Thearch.Gins(Thearch.Optoas(OAS, Types[Tptr]), &addr, res)
+	Regfree(&addr)
+}
+
+// cgen_ret generates a return.
+// n.List holds assignments to the return values; n may be nil.
+func cgen_ret(n *Node) {
+	// Copy computed values into the declared result slots.
+	if n != nil {
+		Genlist(n.List)
+	}
+	// Run deferred calls, if any, before leaving the frame.
+	if hasdefer {
+		Ginscall(Deferreturn, 0)
+	}
+	Genlist(Curfn.Func.Exit)
+	retp := Thearch.Gins(obj.ARET, nil, nil)
+	if n == nil || n.Op != ORETJMP {
+		return
+	}
+	// ORETJMP: tail-jump to another function instead of returning.
+	retp.To.Type = obj.TYPE_MEM
+	retp.To.Name = obj.NAME_EXTERN
+	retp.To.Sym = Linksym(n.Left.Sym)
+}
+
+// cgen_div generates division according to op, one of:
+//	res = nl / nr
+//	res = nl % nr
+// When nr is a constant, it tries to replace the divide with a
+// multiply by a magic constant (Hacker's Delight, chapter 10);
+// otherwise it falls back to the hardware divide instruction.
+func cgen_div(op Op, nl *Node, nr *Node, res *Node) {
+	// w is declared up front so the gotos below do not jump over
+	// a variable declaration.
+	var w int
+
+	// TODO(rsc): arm64 needs to support the relevant instructions
+	// in peep and optoas in order to enable this.
+	// TODO(rsc): ppc64 needs to support the relevant instructions
+	// in peep and optoas in order to enable this.
+	if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+		goto longdiv
+	}
+	w = int(nl.Type.Width * 8)
+
+	// Front end handled 32-bit division. We only need to handle 64-bit.
+	// try to do division by multiply by (2^w)/d
+	// see hacker's delight chapter 10
+	switch Simtype[nl.Type.Etype] {
+	default:
+		goto longdiv
+
+	case TUINT64:
+		// Unsigned magic-number division.
+		var m Magic
+		m.W = w
+		m.Ud = uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+		Umagic(&m)
+		if m.Bad != 0 {
+			// No usable magic constant for this divisor.
+			break
+		}
+		if op == OMOD {
+			goto longmod
+		}
+
+		var n1 Node
+		Cgenr(nl, &n1, nil)
+		var n2 Node
+		Nodconst(&n2, nl.Type, int64(m.Um))
+		var n3 Node
+		Regalloc(&n3, nl.Type, res)
+		// n3 = high word of n1 * magic
+		Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+		if m.Ua != 0 {
+			// need to add numerator accounting for overflow
+			Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+
+			Nodconst(&n2, nl.Type, 1)
+			Thearch.Gins(Thearch.Optoas(ORROTC, nl.Type), &n2, &n3)
+			Nodconst(&n2, nl.Type, int64(m.S)-1)
+			Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3)
+		} else {
+			Nodconst(&n2, nl.Type, int64(m.S))
+			Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift dx
+		}
+
+		Thearch.Gmove(&n3, res)
+		Regfree(&n1)
+		Regfree(&n3)
+		return
+
+	case TINT64:
+		// Signed magic-number division.
+		var m Magic
+		m.W = w
+		m.Sd = Mpgetfix(nr.Val().U.(*Mpint))
+		Smagic(&m)
+		if m.Bad != 0 {
+			// No usable magic constant for this divisor.
+			break
+		}
+		if op == OMOD {
+			goto longmod
+		}
+
+		var n1 Node
+		Cgenr(nl, &n1, res)
+		var n2 Node
+		Nodconst(&n2, nl.Type, m.Sm)
+		var n3 Node
+		Regalloc(&n3, nl.Type, nil)
+		// n3 = high word of n1 * magic
+		Thearch.Cgen_hmul(&n1, &n2, &n3)
+
+		if m.Sm < 0 {
+			// need to add numerator
+			Thearch.Gins(Thearch.Optoas(OADD, nl.Type), &n1, &n3)
+		}
+
+		Nodconst(&n2, nl.Type, int64(m.S))
+		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n3) // shift n3
+
+		Nodconst(&n2, nl.Type, int64(w)-1)
+
+		Thearch.Gins(Thearch.Optoas(ORSH, nl.Type), &n2, &n1) // -1 iff num is neg
+		Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n1, &n3) // added
+
+		if m.Sd < 0 {
+			// this could probably be removed
+			// by factoring it into the multiplier
+			Thearch.Gins(Thearch.Optoas(OMINUS, nl.Type), nil, &n3)
+		}
+
+		Thearch.Gmove(&n3, res)
+		Regfree(&n1)
+		Regfree(&n3)
+		return
+	}
+
+	goto longdiv
+
+	// division and mod using (slow) hardware instruction
+longdiv:
+	Thearch.Dodiv(op, nl, nr, res)
+
+	return
+
+	// mod using formula A%B = A-(A/B*B) but
+	// we know that there is a fast algorithm for A/B
+longmod:
+	var n1 Node
+	Regalloc(&n1, nl.Type, res)
+
+	Cgen(nl, &n1)
+	var n2 Node
+	Regalloc(&n2, nl.Type, nil)
+	// n2 = nl / nr via the fast constant-divisor path above.
+	cgen_div(ODIV, &n1, nr, &n2)
+	a := Thearch.Optoas(OMUL, nl.Type)
+	if w == 8 {
+		// use 2-operand 16-bit multiply
+		// because there is no 2-operand 8-bit multiply
+		a = Thearch.Optoas(OMUL, Types[TINT16]) // XXX was IMULW
+	}
+
+	if !Smallintconst(nr) {
+		var n3 Node
+		Regalloc(&n3, nl.Type, nil)
+		Cgen(nr, &n3)
+		Thearch.Gins(a, &n3, &n2)
+		Regfree(&n3)
+	} else {
+		Thearch.Gins(a, nr, &n2)
+	}
+	// n1 = nl - (nl/nr)*nr, i.e. the remainder.
+	Thearch.Gins(Thearch.Optoas(OSUB, nl.Type), &n2, &n1)
+	Thearch.Gmove(&n1, res)
+	Regfree(&n1)
+	Regfree(&n2)
+}
+
+// Fixlargeoffset rewrites an OINDREG node whose offset does not fit in
+// 32 bits: the offset is added into the register and the node's offset
+// is reset to zero. SP-relative nodes are left alone, since stack
+// offsets cannot be large.
+func Fixlargeoffset(n *Node) {
+	if n == nil || n.Op != OINDREG {
+		return
+	}
+	if n.Reg == int16(Thearch.REGSP) { // stack offset cannot be large
+		return
+	}
+	if n.Xoffset == int64(int32(n.Xoffset)) {
+		return // offset already fits; nothing to do
+	}
+	// Offset too large: fold it into the base register instead.
+	base := *n
+	base.Op = OREGISTER
+	base.Type = Types[Tptr]
+	base.Xoffset = 0
+	Cgen_checknil(&base)
+	Thearch.Ginscon(Thearch.Optoas(OADD, Types[Tptr]), n.Xoffset, &base)
+	n.Xoffset = 0
+}
+
+// cgen_append generates code for res = append(...), where n is the
+// append call: n.List.N is the source slice and the remaining list
+// entries are the values to append. It grows the slice via
+// runtime.growslice only when len+argc exceeds cap, and when res aliases
+// the source it updates only the length on the fast path.
+func cgen_append(n, res *Node) {
+	if Debug['g'] != 0 {
+		Dump("cgen_append-n", n)
+		Dump("cgen_append-res", res)
+	}
+	// The front end must have lowered append into a form where res is
+	// a name or aliases the source slice.
+	if res.Op != ONAME && !samesafeexpr(res, n.List.N) {
+		Dump("cgen_append-n", n)
+		Dump("cgen_append-res", res)
+		Fatalf("append not lowered")
+	}
+	// Arguments with function calls would clobber the argument area
+	// used below; the front end is expected to have pulled them out.
+	for l := n.List; l != nil; l = l.Next {
+		if l.N.Ullman >= UINF {
+			Fatalf("append with function call arguments")
+		}
+	}
+
+	// res = append(src, x, y, z)
+	//
+	// If res and src are the same, we can avoid writing to base and cap
+	// unless we grow the underlying array.
+	needFullUpdate := !samesafeexpr(res, n.List.N)
+
+	// Copy src triple into base, len, cap.
+	base := temp(Types[Tptr])
+	len := temp(Types[TUINT])
+	cap := temp(Types[TUINT])
+
+	var src Node
+	Igen(n.List.N, &src, nil)
+	src.Type = Types[Tptr]
+	Thearch.Gmove(&src, base)
+	src.Type = Types[TUINT]
+	src.Xoffset += int64(Widthptr)
+	Thearch.Gmove(&src, len)
+	src.Xoffset += int64(Widthptr)
+	Thearch.Gmove(&src, cap)
+
+	// if len+argc <= cap goto L1
+	var rlen Node
+	Regalloc(&rlen, Types[TUINT], nil)
+	Thearch.Gmove(len, &rlen)
+	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &rlen)
+	p := Thearch.Ginscmp(OLE, Types[TUINT], &rlen, cap, +1)
+	// Note: rlen and src are Regrealloc'ed below at the target of the
+	// branch we just emitted; do not reuse these Go variables for
+	// other purposes. They need to still describe the same things
+	// below that they describe right here.
+	Regfree(&src)
+
+	// base, len, cap = growslice(type, base, len, cap, newlen)
+	// Marshal the five arguments at consecutive SP-relative offsets.
+	var arg Node
+	arg.Op = OINDREG
+	arg.Reg = int16(Thearch.REGSP)
+	arg.Addable = true
+	arg.Xoffset = Ctxt.FixedFrameSize()
+	arg.Type = Ptrto(Types[TUINT8])
+	Cgen(typename(res.Type), &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[Tptr]
+	Cgen(base, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(len, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(cap, &arg)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&rlen, &arg)
+	arg.Xoffset += int64(Widthptr)
+	Regfree(&rlen)
+
+	fn := syslook("growslice", 1)
+	substArgTypes(fn, res.Type.Type, res.Type.Type)
+	Ginscall(fn, 0)
+
+	// Skip alignment padding before the results (amd64p32 layout).
+	if Widthptr == 4 && Widthreg == 8 {
+		arg.Xoffset += 4
+	}
+
+	// Read the returned triple back into base, len, cap.
+	arg.Type = Types[Tptr]
+	Cgen(&arg, base)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&arg, len)
+	arg.Xoffset += int64(Widthptr)
+
+	arg.Type = Types[TUINT]
+	Cgen(&arg, cap)
+
+	// Update res with base, len+argc, cap.
+	if needFullUpdate {
+		if Debug_append > 0 {
+			Warn("append: full update")
+		}
+		// The no-grow fast path joins here: all three words of res
+		// are rewritten either way.
+		Patch(p, Pc)
+	}
+	if res.Op == ONAME {
+		Gvardef(res)
+	}
+	var dst, r1 Node
+	Igen(res, &dst, nil)
+	// Write len first (at offset Widthptr), then cap, then base.
+	dst.Type = Types[TUINT]
+	dst.Xoffset += int64(Widthptr)
+	Regalloc(&r1, Types[TUINT], nil)
+	Thearch.Gmove(len, &r1)
+	Thearch.Ginscon(Thearch.Optoas(OADD, Types[TUINT]), int64(count(n.List)-1), &r1)
+	Thearch.Gmove(&r1, &dst)
+	Regfree(&r1)
+	dst.Xoffset += int64(Widthptr)
+	Thearch.Gmove(cap, &dst)
+	dst.Type = Types[Tptr]
+	dst.Xoffset -= 2 * int64(Widthptr)
+	// The base pointer may need a write barrier.
+	cgen_wb(base, &dst, needwritebarrier(&dst, base))
+	Regfree(&dst)
+
+	if !needFullUpdate {
+		if Debug_append > 0 {
+			Warn("append: len-only update")
+		}
+		// goto L2;
+		// L1:
+		//	update len only
+		// L2:
+		q := Gbranch(obj.AJMP, nil, 0)
+		Patch(p, Pc)
+		// At the goto above, src refers to cap and rlen holds the new len
+		if src.Op == OREGISTER || src.Op == OINDREG {
+			Regrealloc(&src)
+		}
+		Regrealloc(&rlen)
+		src.Xoffset -= int64(Widthptr)
+		Thearch.Gmove(&rlen, &src)
+		Regfree(&src)
+		Regfree(&rlen)
+		Patch(q, Pc)
+	}
+
+	// Copy data into place.
+	// Could do write barrier check around entire copy instead of each element.
+	// Could avoid reloading registers on each iteration if we know the cgen_wb
+	// is not going to use a write barrier.
+	i := 0
+	var r2 Node
+	for l := n.List.Next; l != nil; l = l.Next {
+		// r1 = &base[len+i], computed as base + (len+i)*elemsize.
+		Regalloc(&r1, Types[Tptr], nil)
+		Thearch.Gmove(base, &r1)
+		Regalloc(&r2, Types[TUINT], nil)
+		Thearch.Gmove(len, &r2)
+		if i > 0 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[TUINT]), Nodintconst(int64(i)), &r2)
+		}
+		w := res.Type.Type.Width
+		if Thearch.AddIndex != nil && Thearch.AddIndex(&r2, w, &r1) {
+			// r1 updated by back end
+		} else if w == 1 {
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
+		} else {
+			Thearch.Ginscon(Thearch.Optoas(OMUL, Types[TUINT]), int64(w), &r2)
+			Thearch.Gins(Thearch.Optoas(OADD, Types[Tptr]), &r2, &r1)
+		}
+		Regfree(&r2)
+
+		// Store the element through r1, with a write barrier if needed.
+		r1.Op = OINDREG
+		r1.Type = res.Type.Type
+		cgen_wb(l.N, &r1, needwritebarrier(&r1, l.N))
+		Regfree(&r1)
+		i++
+	}
+}
+
+// Generate res = n, where n is x[i:j] or x[i:j:k].
+// If wb is true, need write barrier updating res's base pointer.
+// On systems with 32-bit ints, i, j, k are guaranteed to be 32-bit values.
+func cgen_slice(n, res *Node, wb bool) {
+ if Debug['g'] != 0 {
+ Dump("cgen_slice-n", n)
+ Dump("cgen_slice-res", res)
+ }
+
+ needFullUpdate := !samesafeexpr(n.Left, res)
+
+ // orderexpr has made sure that x is safe (but possibly expensive)
+ // and i, j, k are cheap. On a system with registers (anything but 386)
+ // we can evaluate x first and then know we have enough registers
+ // for i, j, k as well.
+ var x, xbase, xlen, xcap, i, j, k Node
+ if n.Op != OSLICEARR && n.Op != OSLICE3ARR {
+ Igen(n.Left, &x, nil)
+ }
+
+ indexRegType := Types[TUINT]
+ if Widthreg > Widthptr { // amd64p32
+ indexRegType = Types[TUINT64]
+ }
+
+ // On most systems, we use registers.
+ // The 386 has basically no registers, so substitute functions
+ // that can work with temporaries instead.
+ regalloc := Regalloc
+ ginscon := Thearch.Ginscon
+ gins := Thearch.Gins
+ if Thearch.Thechar == '8' {
+ regalloc = func(n *Node, t *Type, reuse *Node) {
+ Tempname(n, t)
+ }
+ ginscon = func(as int, c int64, n *Node) {
+ var n1 Node
+ Regalloc(&n1, n.Type, n)
+ Thearch.Gmove(n, &n1)
+ Thearch.Ginscon(as, c, &n1)
+ Thearch.Gmove(&n1, n)
+ Regfree(&n1)
+ }
+ gins = func(as int, f, t *Node) *obj.Prog {
+ var n1 Node
+ Regalloc(&n1, t.Type, t)
+ Thearch.Gmove(t, &n1)
+ Thearch.Gins(as, f, &n1)
+ Thearch.Gmove(&n1, t)
+ Regfree(&n1)
+ return nil
+ }
+ }
+
+ panics := make([]*obj.Prog, 0, 6) // 3 loads + 3 checks
+
+ loadlen := func() {
+ if xlen.Op != 0 {
+ return
+ }
+ if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+ Nodconst(&xlen, indexRegType, n.Left.Type.Type.Bound)
+ return
+ }
+ if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
+ Nodconst(&xlen, indexRegType, int64(len(n.Left.Val().U.(string))))
+ return
+ }
+ regalloc(&xlen, indexRegType, nil)
+ x.Xoffset += int64(Widthptr)
+ x.Type = Types[TUINT]
+ Thearch.Gmove(&x, &xlen)
+ x.Xoffset -= int64(Widthptr)
+ }
+
+ loadcap := func() {
+ if xcap.Op != 0 {
+ return
+ }
+ if n.Op == OSLICEARR || n.Op == OSLICE3ARR || n.Op == OSLICESTR {
+ loadlen()
+ xcap = xlen
+ if xcap.Op == OREGISTER {
+ Regrealloc(&xcap)
+ }
+ return
+ }
+ regalloc(&xcap, indexRegType, nil)
+ x.Xoffset += 2 * int64(Widthptr)
+ x.Type = Types[TUINT]
+ Thearch.Gmove(&x, &xcap)
+ x.Xoffset -= 2 * int64(Widthptr)
+ }
+
+ var x1, x2, x3 *Node // unevaluated index arguments
+ x1 = n.Right.Left
+ switch n.Op {
+ default:
+ x2 = n.Right.Right
+ case OSLICE3, OSLICE3ARR:
+ x2 = n.Right.Right.Left
+ x3 = n.Right.Right.Right
+ }
+
+ // load computes src into targ, but if src refers to the len or cap of n.Left,
+ // load copies those from xlen, xcap, loading xlen if needed.
+ // If targ.Op == OREGISTER on return, it must be Regfreed,
+ // but it should not be modified without first checking whether it is
+ // xlen or xcap's register.
+ load := func(src, targ *Node) {
+ if src == nil {
+ return
+ }
+ switch src.Op {
+ case OLITERAL:
+ *targ = *src
+ return
+ case OLEN:
+ // NOTE(rsc): This doesn't actually trigger, because order.go
+ // has pulled all the len and cap calls into separate assignments
+ // to temporaries. There are tests in test/sliceopt.go that could
+ // be enabled if this is fixed.
+ if samesafeexpr(n.Left, src.Left) {
+ if Debug_slice > 0 {
+ Warn("slice: reuse len")
+ }
+ loadlen()
+ *targ = xlen
+ if targ.Op == OREGISTER {
+ Regrealloc(targ)
+ }
+ return
+ }
+ case OCAP:
+ // NOTE(rsc): This doesn't actually trigger; see note in case OLEN above.
+ if samesafeexpr(n.Left, src.Left) {
+ if Debug_slice > 0 {
+ Warn("slice: reuse cap")
+ }
+ loadcap()
+ *targ = xcap
+ if targ.Op == OREGISTER {
+ Regrealloc(targ)
+ }
+ return
+ }
+ }
+ if i.Op != 0 && samesafeexpr(x1, src) {
+ if Debug_slice > 0 {
+ Warn("slice: reuse 1st index")
+ }
+ *targ = i
+ if targ.Op == OREGISTER {
+ Regrealloc(targ)
+ }
+ return
+ }
+ if j.Op != 0 && samesafeexpr(x2, src) {
+ if Debug_slice > 0 {
+ Warn("slice: reuse 2nd index")
+ }
+ *targ = j
+ if targ.Op == OREGISTER {
+ Regrealloc(targ)
+ }
+ return
+ }
+ if Thearch.Cgenindex != nil {
+ regalloc(targ, indexRegType, nil)
+ p := Thearch.Cgenindex(src, targ, false)
+ if p != nil {
+ panics = append(panics, p)
+ }
+ } else if Thearch.Igenindex != nil {
+ p := Thearch.Igenindex(src, targ, false)
+ if p != nil {
+ panics = append(panics, p)
+ }
+ } else {
+ regalloc(targ, indexRegType, nil)
+ var tmp Node
+ Cgenr(src, &tmp, targ)
+ Thearch.Gmove(&tmp, targ)
+ Regfree(&tmp)
+ }
+ }
+
+ load(x1, &i)
+ load(x2, &j)
+ load(x3, &k)
+
+ // i defaults to 0.
+ if i.Op == 0 {
+ Nodconst(&i, indexRegType, 0)
+ }
+
+ // j defaults to len(x)
+ if j.Op == 0 {
+ loadlen()
+ j = xlen
+ if j.Op == OREGISTER {
+ Regrealloc(&j)
+ }
+ }
+
+ // k defaults to cap(x)
+ // Only need to load it if we're recalculating cap or doing a full update.
+ if k.Op == 0 && n.Op != OSLICESTR && (!iszero(&i) || needFullUpdate) {
+ loadcap()
+ k = xcap
+ if k.Op == OREGISTER {
+ Regrealloc(&k)
+ }
+ }
+
+ // Check constant indexes for negative values, and against constant length if known.
+ // The func obvious below checks for out-of-order constant indexes.
+ var bound int64 = -1
+ if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+ bound = n.Left.Type.Type.Bound
+ } else if n.Op == OSLICESTR && Isconst(n.Left, CTSTR) {
+ bound = int64(len(n.Left.Val().U.(string)))
+ }
+ if Isconst(&i, CTINT) {
+ if mpcmpfixc(i.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(i.Val().U.(*Mpint), bound) > 0 {
+ Yyerror("slice index out of bounds")
+ }
+ }
+ if Isconst(&j, CTINT) {
+ if mpcmpfixc(j.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(j.Val().U.(*Mpint), bound) > 0 {
+ Yyerror("slice index out of bounds")
+ }
+ }
+ if Isconst(&k, CTINT) {
+ if mpcmpfixc(k.Val().U.(*Mpint), 0) < 0 || bound >= 0 && mpcmpfixc(k.Val().U.(*Mpint), bound) > 0 {
+ Yyerror("slice index out of bounds")
+ }
+ }
+
+ // same reports whether n1 and n2 are the same register or constant.
+ same := func(n1, n2 *Node) bool {
+ return n1.Op == OREGISTER && n2.Op == OREGISTER && n1.Reg == n2.Reg ||
+ n1.Op == ONAME && n2.Op == ONAME && n1.Orig == n2.Orig && n1.Type == n2.Type && n1.Xoffset == n2.Xoffset ||
+ n1.Op == OLITERAL && n2.Op == OLITERAL && Mpcmpfixfix(n1.Val().U.(*Mpint), n2.Val().U.(*Mpint)) == 0
+ }
+
+ // obvious reports whether n1 <= n2 is obviously true,
+ // and it calls Yyerror if n1 <= n2 is obviously false.
+ obvious := func(n1, n2 *Node) bool {
+ if Debug['B'] != 0 { // -B disables bounds checks
+ return true
+ }
+ if same(n1, n2) {
+ return true // n1 == n2
+ }
+ if iszero(n1) {
+ return true // using unsigned compare, so 0 <= n2 always true
+ }
+ if xlen.Op != 0 && same(n1, &xlen) && xcap.Op != 0 && same(n2, &xcap) {
+ return true // len(x) <= cap(x) always true
+ }
+ if Isconst(n1, CTINT) && Isconst(n2, CTINT) {
+ if Mpcmpfixfix(n1.Val().U.(*Mpint), n2.Val().U.(*Mpint)) <= 0 {
+ return true // n1, n2 constants such that n1 <= n2
+ }
+ Yyerror("slice index out of bounds")
+ return true
+ }
+ return false
+ }
+
+ compare := func(n1, n2 *Node) {
+ // n1 might be a 64-bit constant, even on 32-bit architectures,
+ // but it will be represented in 32 bits.
+ if Ctxt.Arch.Regsize == 4 && Is64(n1.Type) {
+ if mpcmpfixc(n1.Val().U.(*Mpint), 1<<31) >= 0 {
+ Fatalf("missed slice out of bounds check")
+ }
+ var tmp Node
+ Nodconst(&tmp, indexRegType, Mpgetfix(n1.Val().U.(*Mpint)))
+ n1 = &tmp
+ }
+ p := Thearch.Ginscmp(OGT, indexRegType, n1, n2, -1)
+ panics = append(panics, p)
+ }
+
+ loadcap()
+ max := &xcap
+ if k.Op != 0 && (n.Op == OSLICE3 || n.Op == OSLICE3ARR) {
+ if obvious(&k, max) {
+ if Debug_slice > 0 {
+ Warn("slice: omit check for 3rd index")
+ }
+ } else {
+ compare(&k, max)
+ }
+ max = &k
+ }
+ if j.Op != 0 {
+ if obvious(&j, max) {
+ if Debug_slice > 0 {
+ Warn("slice: omit check for 2nd index")
+ }
+ } else {
+ compare(&j, max)
+ }
+ max = &j
+ }
+ if i.Op != 0 {
+ if obvious(&i, max) {
+ if Debug_slice > 0 {
+ Warn("slice: omit check for 1st index")
+ }
+ } else {
+ compare(&i, max)
+ }
+ max = &i
+ }
+ if k.Op != 0 && i.Op != 0 {
+ obvious(&i, &k) // emit compile-time error for x[3:n:2]
+ }
+
+ if len(panics) > 0 {
+ p := Gbranch(obj.AJMP, nil, 0)
+ for _, q := range panics {
+ Patch(q, Pc)
+ }
+ Ginscall(panicslice, -1)
+ Patch(p, Pc)
+ }
+
+ // Checks are done.
+ // Compute new len as j-i, cap as k-i.
+ // If i and j are same register, len is constant 0.
+ // If i and k are same register, cap is constant 0.
+ // If j and k are same register, len and cap are same.
+
+ // Done with xlen and xcap.
+ // Now safe to modify j and k even if they alias xlen, xcap.
+ if xlen.Op == OREGISTER {
+ Regfree(&xlen)
+ }
+ if xcap.Op == OREGISTER {
+ Regfree(&xcap)
+ }
+
+ // are j and k the same value?
+ sameJK := same(&j, &k)
+
+ if i.Op != 0 {
+ // j -= i
+ if same(&i, &j) {
+ if Debug_slice > 0 {
+ Warn("slice: result len == 0")
+ }
+ if j.Op == OREGISTER {
+ Regfree(&j)
+ }
+ Nodconst(&j, indexRegType, 0)
+ } else {
+ switch j.Op {
+ case OLITERAL:
+ if Isconst(&i, CTINT) {
+ Nodconst(&j, indexRegType, Mpgetfix(j.Val().U.(*Mpint))-Mpgetfix(i.Val().U.(*Mpint)))
+ if Debug_slice > 0 {
+ Warn("slice: result len == %d", Mpgetfix(j.Val().U.(*Mpint)))
+ }
+ break
+ }
+ fallthrough
+ case ONAME:
+ if !istemp(&j) {
+ var r Node
+ regalloc(&r, indexRegType, nil)
+ Thearch.Gmove(&j, &r)
+ j = r
+ }
+ fallthrough
+ case OREGISTER:
+ if i.Op == OLITERAL {
+ v := Mpgetfix(i.Val().U.(*Mpint))
+ if v != 0 {
+ ginscon(Thearch.Optoas(OSUB, indexRegType), v, &j)
+ }
+ } else {
+ gins(Thearch.Optoas(OSUB, indexRegType), &i, &j)
+ }
+ }
+ }
+
+	// k -= i if k is different from j and cap is needed.
+ // (The modifications to j above cannot affect i: if j and i were aliased,
+ // we replace j with a constant 0 instead of doing a subtraction,
+ // leaving i unmodified.)
+ if k.Op == 0 {
+ if Debug_slice > 0 && n.Op != OSLICESTR {
+ Warn("slice: result cap not computed")
+ }
+ // no need
+ } else if same(&i, &k) {
+ if k.Op == OREGISTER {
+ Regfree(&k)
+ }
+ Nodconst(&k, indexRegType, 0)
+ if Debug_slice > 0 {
+ Warn("slice: result cap == 0")
+ }
+ } else if sameJK {
+ if Debug_slice > 0 {
+ Warn("slice: result cap == result len")
+ }
+ // k and j were the same value; make k-i the same as j-i.
+ if k.Op == OREGISTER {
+ Regfree(&k)
+ }
+ k = j
+ if k.Op == OREGISTER {
+ Regrealloc(&k)
+ }
+ } else {
+ switch k.Op {
+ case OLITERAL:
+ if Isconst(&i, CTINT) {
+ Nodconst(&k, indexRegType, Mpgetfix(k.Val().U.(*Mpint))-Mpgetfix(i.Val().U.(*Mpint)))
+ if Debug_slice > 0 {
+ Warn("slice: result cap == %d", Mpgetfix(k.Val().U.(*Mpint)))
+ }
+ break
+ }
+ fallthrough
+ case ONAME:
+ if !istemp(&k) {
+ var r Node
+ regalloc(&r, indexRegType, nil)
+ Thearch.Gmove(&k, &r)
+ k = r
+ }
+ fallthrough
+ case OREGISTER:
+ if same(&i, &k) {
+ Regfree(&k)
+ Nodconst(&k, indexRegType, 0)
+ if Debug_slice > 0 {
+ Warn("slice: result cap == 0")
+ }
+ } else if i.Op == OLITERAL {
+ v := Mpgetfix(i.Val().U.(*Mpint))
+ if v != 0 {
+ ginscon(Thearch.Optoas(OSUB, indexRegType), v, &k)
+ }
+ } else {
+ gins(Thearch.Optoas(OSUB, indexRegType), &i, &k)
+ }
+ }
+ }
+ }
+
+ adjustBase := true
+ if i.Op == 0 || iszero(&i) {
+ if Debug_slice > 0 {
+ Warn("slice: skip base adjustment for 1st index 0")
+ }
+ adjustBase = false
+ } else if k.Op != 0 && iszero(&k) || k.Op == 0 && iszero(&j) {
+ if Debug_slice > 0 {
+ if n.Op == OSLICESTR {
+ Warn("slice: skip base adjustment for string len == 0")
+ } else {
+ Warn("slice: skip base adjustment for cap == 0")
+ }
+ }
+ adjustBase = false
+ }
+
+ if !adjustBase && !needFullUpdate {
+ if Debug_slice > 0 {
+ if k.Op != 0 {
+ Warn("slice: len/cap-only update")
+ } else {
+ Warn("slice: len-only update")
+ }
+ }
+ if i.Op == OREGISTER {
+ Regfree(&i)
+ }
+ // Write len (and cap if needed) back to x.
+ x.Xoffset += int64(Widthptr)
+ x.Type = Types[TUINT]
+ Thearch.Gmove(&j, &x)
+ x.Xoffset -= int64(Widthptr)
+ if k.Op != 0 {
+ x.Xoffset += 2 * int64(Widthptr)
+ x.Type = Types[TUINT]
+ Thearch.Gmove(&k, &x)
+ x.Xoffset -= 2 * int64(Widthptr)
+ }
+ Regfree(&x)
+ } else {
+ // Compute new base. May smash i.
+ if n.Op == OSLICEARR || n.Op == OSLICE3ARR {
+ Cgenr(n.Left, &xbase, nil)
+ Cgen_checknil(&xbase)
+ } else {
+ regalloc(&xbase, Ptrto(res.Type.Type), nil)
+ x.Type = xbase.Type
+ Thearch.Gmove(&x, &xbase)
+ Regfree(&x)
+ }
+ if i.Op != 0 && adjustBase {
+ // Branch around the base adjustment if the resulting cap will be 0.
+ var p *obj.Prog
+ size := &k
+ if k.Op == 0 {
+ size = &j
+ }
+ if Isconst(size, CTINT) {
+ // zero was checked above, must be non-zero.
+ } else {
+ var tmp Node
+ Nodconst(&tmp, indexRegType, 0)
+ p = Thearch.Ginscmp(OEQ, indexRegType, size, &tmp, -1)
+ }
+ var w int64
+ if n.Op == OSLICESTR {
+ w = 1 // res is string, elem size is 1 (byte)
+ } else {
+ w = res.Type.Type.Width // res is []T, elem size is T.width
+ }
+ if Isconst(&i, CTINT) {
+ ginscon(Thearch.Optoas(OADD, xbase.Type), Mpgetfix(i.Val().U.(*Mpint))*w, &xbase)
+ } else if Thearch.AddIndex != nil && Thearch.AddIndex(&i, w, &xbase) {
+ // done by back end
+ } else if w == 1 {
+ gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
+ } else {
+ if i.Op == ONAME && !istemp(&i) {
+ var tmp Node
+ Tempname(&tmp, i.Type)
+ Thearch.Gmove(&i, &tmp)
+ i = tmp
+ }
+ ginscon(Thearch.Optoas(OMUL, i.Type), w, &i)
+ gins(Thearch.Optoas(OADD, xbase.Type), &i, &xbase)
+ }
+ if p != nil {
+ Patch(p, Pc)
+ }
+ }
+ if i.Op == OREGISTER {
+ Regfree(&i)
+ }
+
+ // Write len, cap, base to result.
+ if res.Op == ONAME {
+ Gvardef(res)
+ }
+ Igen(res, &x, nil)
+ x.Xoffset += int64(Widthptr)
+ x.Type = Types[TUINT]
+ Thearch.Gmove(&j, &x)
+ x.Xoffset -= int64(Widthptr)
+ if k.Op != 0 {
+ x.Xoffset += 2 * int64(Widthptr)
+ Thearch.Gmove(&k, &x)
+ x.Xoffset -= 2 * int64(Widthptr)
+ }
+ x.Type = xbase.Type
+ cgen_wb(&xbase, &x, wb)
+ Regfree(&xbase)
+ Regfree(&x)
+ }
+
+ if j.Op == OREGISTER {
+ Regfree(&j)
+ }
+ if k.Op == OREGISTER {
+ Regfree(&k)
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/gsubr.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/gsubr.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/gsubr.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/gsubr.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,843 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "runtime"
+ "strings"
+)
+
+var ddumped int
+
+var dfirst *obj.Prog
+
+var dpc *obj.Prog
+
+// Is this node a memory operand?
+func Ismem(n *Node) bool {
+ switch n.Op {
+ case OITAB,
+ OSPTR,
+ OLEN,
+ OCAP,
+ OINDREG,
+ ONAME,
+ OPARAM,
+ OCLOSUREVAR:
+ return true
+
+ case OADDR:
+ return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+ }
+
+ return false
+}
+
+func Samereg(a *Node, b *Node) bool {
+ if a == nil || b == nil {
+ return false
+ }
+ if a.Op != OREGISTER {
+ return false
+ }
+ if b.Op != OREGISTER {
+ return false
+ }
+ if a.Reg != b.Reg {
+ return false
+ }
+ return true
+}
+
+func Gbranch(as int, t *Type, likely int) *obj.Prog {
+ p := Prog(as)
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Val = nil
+ if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
+ p.From.Type = obj.TYPE_CONST
+ if likely > 0 {
+ p.From.Offset = 1
+ }
+ }
+
+ if Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ return p
+}
+
+func Prog(as int) *obj.Prog {
+ var p *obj.Prog
+
+ if as == obj.ADATA || as == obj.AGLOBL {
+ if ddumped != 0 {
+ Fatalf("already dumped data")
+ }
+ if dpc == nil {
+ dpc = Ctxt.NewProg()
+ dfirst = dpc
+ }
+
+ p = dpc
+ dpc = Ctxt.NewProg()
+ p.Link = dpc
+ } else {
+ p = Pc
+ Pc = Ctxt.NewProg()
+ Clearp(Pc)
+ p.Link = Pc
+ }
+
+ if lineno == 0 {
+ if Debug['K'] != 0 {
+ Warn("prog: line 0")
+ }
+ }
+
+ p.As = int16(as)
+ p.Lineno = lineno
+ return p
+}
+
+func Nodreg(n *Node, t *Type, r int) {
+ if t == nil {
+ Fatalf("nodreg: t nil")
+ }
+
+ *n = Node{}
+ n.Op = OREGISTER
+ n.Addable = true
+ ullmancalc(n)
+ n.Reg = int16(r)
+ n.Type = t
+}
+
+func Nodindreg(n *Node, t *Type, r int) {
+ Nodreg(n, t, r)
+ n.Op = OINDREG
+}
+
+func Afunclit(a *obj.Addr, n *Node) {
+ if a.Type == obj.TYPE_ADDR && a.Name == obj.NAME_EXTERN {
+ a.Type = obj.TYPE_MEM
+ a.Sym = Linksym(n.Sym)
+ }
+}
+
+func Clearp(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = int64(pcloc)
+ pcloc++
+}
+
+func dumpdata() {
+ ddumped = 1
+ if dfirst == nil {
+ return
+ }
+ newplist()
+ *Pc = *dfirst
+ Pc = dpc
+ Clearp(Pc)
+}
+
+// Fixup instructions after allocauto (formerly compactframe) has moved all autos around.
+func fixautoused(p *obj.Prog) {
+ for lp := &p; ; {
+ p = *lp
+ if p == nil {
+ break
+ }
+ if p.As == obj.ATYPE && p.From.Node != nil && p.From.Name == obj.NAME_AUTO && !((p.From.Node).(*Node)).Used {
+ *lp = p.Link
+ continue
+ }
+
+ if (p.As == obj.AVARDEF || p.As == obj.AVARKILL || p.As == obj.AVARLIVE) && p.To.Node != nil && !((p.To.Node).(*Node)).Used {
+ // Cannot remove VARDEF instruction, because - unlike TYPE handled above -
+ // VARDEFs are interspersed with other code, and a jump might be using the
+ // VARDEF as a target. Replace with a no-op instead. A later pass will remove
+ // the no-ops.
+ obj.Nopout(p)
+
+ continue
+ }
+
+ if p.From.Name == obj.NAME_AUTO && p.From.Node != nil {
+ p.From.Offset += stkdelta[p.From.Node.(*Node)]
+ }
+
+ if p.To.Name == obj.NAME_AUTO && p.To.Node != nil {
+ p.To.Offset += stkdelta[p.To.Node.(*Node)]
+ }
+
+ lp = &p.Link
+ }
+}
+
+func ggloblnod(nam *Node) {
+ p := Thearch.Gins(obj.AGLOBL, nam, nil)
+ p.Lineno = nam.Lineno
+ p.From.Sym.Gotype = Linksym(ngotype(nam))
+ p.To.Sym = nil
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = nam.Type.Width
+ p.From3 = new(obj.Addr)
+ if nam.Name.Readonly {
+ p.From3.Offset = obj.RODATA
+ }
+ if nam.Type != nil && !haspointers(nam.Type) {
+ p.From3.Offset |= obj.NOPTR
+ }
+}
+
+func ggloblsym(s *Sym, width int32, flags int16) {
+ p := Thearch.Gins(obj.AGLOBL, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+ if flags&obj.LOCAL != 0 {
+ p.From.Sym.Local = true
+ flags &= ^obj.LOCAL
+ }
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(width)
+ p.From3 = new(obj.Addr)
+ p.From3.Offset = int64(flags)
+}
+
+func gjmp(to *obj.Prog) *obj.Prog {
+ p := Gbranch(obj.AJMP, nil, 0)
+ if to != nil {
+ Patch(p, to)
+ }
+ return p
+}
+
+func gtrack(s *Sym) {
+ p := Thearch.Gins(obj.AUSEFIELD, nil, nil)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = Linksym(s)
+}
+
+func gused(n *Node) {
+ Thearch.Gins(obj.ANOP, n, nil) // used
+}
+
+func Isfat(t *Type) bool {
+ if t != nil {
+ switch t.Etype {
+ case TSTRUCT, TARRAY, TSTRING,
+ TINTER: // maybe remove later
+ return true
+ }
+ }
+
+ return false
+}
+
+// Sweep the prog list to mark any used nodes.
+func markautoused(p *obj.Prog) {
+ for ; p != nil; p = p.Link {
+ if p.As == obj.ATYPE || p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+
+ if p.From.Node != nil {
+ ((p.From.Node).(*Node)).Used = true
+ }
+
+ if p.To.Node != nil {
+ ((p.To.Node).(*Node)).Used = true
+ }
+ }
+}
+
+// Naddr rewrites a to refer to n.
+// It assumes that a is zeroed on entry.
+func Naddr(a *obj.Addr, n *Node) {
+ if n == nil {
+ return
+ }
+
+ if n.Type != nil && n.Type.Etype != TIDEAL {
+ // TODO(rsc): This is undone by the selective clearing of width below,
+ // to match architectures that were not as aggressive in setting width
+ // during naddr. Those widths must be cleared to avoid triggering
+ // failures in gins when it detects real but heretofore latent (and one
+ // hopes innocuous) type mismatches.
+ // The type mismatches should be fixed and the clearing below removed.
+ dowidth(n.Type)
+
+ a.Width = n.Type.Width
+ }
+
+ switch n.Op {
+ default:
+ a := a // copy to let escape into Ctxt.Dconv
+ Debug['h'] = 1
+ Dump("naddr", n)
+ Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+
+ case OREGISTER:
+ a.Type = obj.TYPE_REG
+ a.Reg = n.Reg
+ a.Sym = nil
+ if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+ a.Width = 0
+ }
+
+ case OINDREG:
+ a.Type = obj.TYPE_MEM
+ a.Reg = n.Reg
+ a.Sym = Linksym(n.Sym)
+ a.Offset = n.Xoffset
+ if a.Offset != int64(int32(a.Offset)) {
+ Yyerror("offset %d too large for OINDREG", a.Offset)
+ }
+ if Thearch.Thechar == '8' { // TODO(rsc): Never clear a->width.
+ a.Width = 0
+ }
+
+ // n->left is PHEAP ONAME for stack parameter.
+ // compute address of actual parameter on stack.
+ case OPARAM:
+ a.Etype = uint8(Simtype[n.Left.Type.Etype])
+
+ a.Width = n.Left.Type.Width
+ a.Offset = n.Xoffset
+ a.Sym = Linksym(n.Left.Sym)
+ a.Type = obj.TYPE_MEM
+ a.Name = obj.NAME_PARAM
+ a.Node = n.Left.Orig
+
+ case OCLOSUREVAR:
+ if !Curfn.Func.Needctxt {
+ Fatalf("closurevar without needctxt")
+ }
+ a.Type = obj.TYPE_MEM
+ a.Reg = int16(Thearch.REGCTXT)
+ a.Sym = nil
+ a.Offset = n.Xoffset
+
+ case OCFUNC:
+ Naddr(a, n.Left)
+ a.Sym = Linksym(n.Left.Sym)
+
+ case ONAME:
+ a.Etype = 0
+ if n.Type != nil {
+ a.Etype = uint8(Simtype[n.Type.Etype])
+ }
+ a.Offset = n.Xoffset
+ s := n.Sym
+ a.Node = n.Orig
+
+ //if(a->node >= (Node*)&n)
+ // fatal("stack node");
+ if s == nil {
+ s = Lookup(".noname")
+ }
+ if n.Name.Method {
+ if n.Type != nil {
+ if n.Type.Sym != nil {
+ if n.Type.Sym.Pkg != nil {
+ s = Pkglookup(s.Name, n.Type.Sym.Pkg)
+ }
+ }
+ }
+ }
+
+ a.Type = obj.TYPE_MEM
+ switch n.Class {
+ default:
+ Fatalf("naddr: ONAME class %v %d\n", n.Sym, n.Class)
+
+ case PEXTERN:
+ a.Name = obj.NAME_EXTERN
+
+ case PAUTO:
+ a.Name = obj.NAME_AUTO
+
+ case PPARAM, PPARAMOUT:
+ a.Name = obj.NAME_PARAM
+
+ case PFUNC:
+ a.Name = obj.NAME_EXTERN
+ a.Type = obj.TYPE_ADDR
+ a.Width = int64(Widthptr)
+ s = funcsym(s)
+ }
+
+ a.Sym = Linksym(s)
+
+ case ODOT:
+ // A special case to make write barriers more efficient.
+ // Taking the address of the first field of a named struct
+ // is the same as taking the address of the struct.
+ if n.Left.Type.Etype != TSTRUCT || n.Left.Type.Type.Sym != n.Right.Sym {
+ Debug['h'] = 1
+ Dump("naddr", n)
+ Fatalf("naddr: bad %v %v", Oconv(int(n.Op), 0), Ctxt.Dconv(a))
+ }
+ Naddr(a, n.Left)
+
+ case OLITERAL:
+ if Thearch.Thechar == '8' {
+ a.Width = 0
+ }
+ switch n.Val().Ctype() {
+ default:
+ Fatalf("naddr: const %v", Tconv(n.Type, obj.FmtLong))
+
+ case CTFLT:
+ a.Type = obj.TYPE_FCONST
+ a.Val = mpgetflt(n.Val().U.(*Mpflt))
+
+ case CTINT, CTRUNE:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = Mpgetfix(n.Val().U.(*Mpint))
+
+ case CTSTR:
+ datagostring(n.Val().U.(string), a)
+
+ case CTBOOL:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = int64(obj.Bool2int(n.Val().U.(bool)))
+
+ case CTNIL:
+ a.Sym = nil
+ a.Type = obj.TYPE_CONST
+ a.Offset = 0
+ }
+
+ case OADDR:
+ Naddr(a, n.Left)
+ a.Etype = uint8(Tptr)
+ if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+ a.Width = int64(Widthptr)
+ }
+ if a.Type != obj.TYPE_MEM {
+ a := a // copy to let escape into Ctxt.Dconv
+ Fatalf("naddr: OADDR %v (from %v)", Ctxt.Dconv(a), Oconv(int(n.Left.Op), 0))
+ }
+ a.Type = obj.TYPE_ADDR
+
+ // itable of interface value
+ case OITAB:
+ Naddr(a, n.Left)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // itab(nil)
+ }
+ a.Etype = uint8(Tptr)
+ a.Width = int64(Widthptr)
+
+ // pointer in a string or slice
+ case OSPTR:
+ Naddr(a, n.Left)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // ptr(nil)
+ }
+ a.Etype = uint8(Simtype[Tptr])
+ a.Offset += int64(Array_array)
+ a.Width = int64(Widthptr)
+
+ // len of string or slice
+ case OLEN:
+ Naddr(a, n.Left)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // len(nil)
+ }
+ a.Etype = uint8(Simtype[TUINT])
+ a.Offset += int64(Array_nel)
+ if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+ a.Width = int64(Widthint)
+ }
+
+ // cap of string or slice
+ case OCAP:
+ Naddr(a, n.Left)
+
+ if a.Type == obj.TYPE_CONST && a.Offset == 0 {
+ break // cap(nil)
+ }
+ a.Etype = uint8(Simtype[TUINT])
+ a.Offset += int64(Array_cap)
+ if Thearch.Thechar != '5' { // TODO(rsc): Do this even on arm.
+ a.Width = int64(Widthint)
+ }
+ }
+ return
+}
+
+func newplist() *obj.Plist {
+ pl := obj.Linknewplist(Ctxt)
+
+ Pc = Ctxt.NewProg()
+ Clearp(Pc)
+ pl.Firstpc = Pc
+
+ return pl
+}
+
+func nodarg(t *Type, fp int) *Node {
+ var n *Node
+
+ // entire argument struct, not just one arg
+ if t.Etype == TSTRUCT && t.Funarg {
+ n = Nod(ONAME, nil, nil)
+ n.Sym = Lookup(".args")
+ n.Type = t
+ var savet Iter
+ first := Structfirst(&savet, &t)
+ if first == nil {
+ Fatalf("nodarg: bad struct")
+ }
+ if first.Width == BADWIDTH {
+ Fatalf("nodarg: offset not computed for %v", t)
+ }
+ n.Xoffset = first.Width
+ n.Addable = true
+ goto fp
+ }
+
+ if t.Etype != TFIELD {
+ Fatalf("nodarg: not field %v", t)
+ }
+
+ if fp == 1 {
+ var n *Node
+ for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if (n.Class == PPARAM || n.Class == PPARAMOUT) && !isblanksym(t.Sym) && n.Sym == t.Sym {
+ return n
+ }
+ }
+ }
+
+ n = Nod(ONAME, nil, nil)
+ n.Type = t.Type
+ n.Sym = t.Sym
+
+ if t.Width == BADWIDTH {
+ Fatalf("nodarg: offset not computed for %v", t)
+ }
+ n.Xoffset = t.Width
+ n.Addable = true
+ n.Orig = t.Nname
+
+ // Rewrite argument named _ to __,
+ // or else the assignment to _ will be
+ // discarded during code generation.
+fp:
+ if isblank(n) {
+ n.Sym = Lookup("__")
+ }
+
+ switch fp {
+ case 0: // output arg
+ n.Op = OINDREG
+
+ n.Reg = int16(Thearch.REGSP)
+ n.Xoffset += Ctxt.FixedFrameSize()
+
+ case 1: // input arg
+ n.Class = PPARAM
+
+ case 2: // offset output arg
+ Fatalf("shouldn't be used")
+ }
+
+ n.Typecheck = 1
+ return n
+}
+
+func Patch(p *obj.Prog, to *obj.Prog) {
+ if p.To.Type != obj.TYPE_BRANCH {
+ Fatalf("patch: not a branch")
+ }
+ p.To.Val = to
+ p.To.Offset = to.Pc
+}
+
+func unpatch(p *obj.Prog) *obj.Prog {
+ if p.To.Type != obj.TYPE_BRANCH {
+ Fatalf("unpatch: not a branch")
+ }
+ q, _ := p.To.Val.(*obj.Prog)
+ p.To.Val = nil
+ p.To.Offset = 0
+ return q
+}
+
+var reg [100]int // count of references to reg
+var regstk [100][]byte // allocation sites, when -v is given
+
+func GetReg(r int) int {
+ return reg[r-Thearch.REGMIN]
+}
+func SetReg(r, v int) {
+ reg[r-Thearch.REGMIN] = v
+}
+
+func ginit() {
+ for r := range reg {
+ reg[r] = 1
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ reg[r-Thearch.REGMIN] = 0
+ }
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+ reg[r-Thearch.REGMIN] = 0
+ }
+
+ for _, r := range Thearch.ReservedRegs {
+ reg[r-Thearch.REGMIN] = 1
+ }
+}
+
+func gclean() {
+ for _, r := range Thearch.ReservedRegs {
+ reg[r-Thearch.REGMIN]--
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ n := reg[r-Thearch.REGMIN]
+ if n != 0 {
+ if Debug['v'] != 0 {
+ Regdump()
+ }
+ Yyerror("reg %v left allocated", obj.Rconv(r))
+ }
+ }
+
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+ n := reg[r-Thearch.REGMIN]
+ if n != 0 {
+ if Debug['v'] != 0 {
+ Regdump()
+ }
+ Yyerror("reg %v left allocated", obj.Rconv(r))
+ }
+ }
+}
+
+func Anyregalloc() bool {
+ n := 0
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ if reg[r-Thearch.REGMIN] == 0 {
+ n++
+ }
+ }
+ return n > len(Thearch.ReservedRegs)
+}
+
+// allocate register of type t, leave in n.
+// if o != N, o may be reusable register.
+// caller must Regfree(n).
+func Regalloc(n *Node, t *Type, o *Node) {
+ if t == nil {
+ Fatalf("regalloc: t nil")
+ }
+ et := Simtype[t.Etype]
+ if Ctxt.Arch.Regsize == 4 && (et == TINT64 || et == TUINT64) {
+ Fatalf("regalloc 64bit")
+ }
+
+ var i int
+Switch:
+ switch et {
+ default:
+ Fatalf("regalloc: unknown type %v", t)
+
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TPTR32, TPTR64, TBOOL:
+ if o != nil && o.Op == OREGISTER {
+ i = int(o.Reg)
+ if Thearch.REGMIN <= i && i <= Thearch.REGMAX {
+ break Switch
+ }
+ }
+ for i = Thearch.REGMIN; i <= Thearch.REGMAX; i++ {
+ if reg[i-Thearch.REGMIN] == 0 {
+ break Switch
+ }
+ }
+ Flusherrors()
+ Regdump()
+ Fatalf("out of fixed registers")
+
+ case TFLOAT32, TFLOAT64:
+ if Thearch.Use387 {
+ i = Thearch.FREGMIN // x86.REG_F0
+ break Switch
+ }
+ if o != nil && o.Op == OREGISTER {
+ i = int(o.Reg)
+ if Thearch.FREGMIN <= i && i <= Thearch.FREGMAX {
+ break Switch
+ }
+ }
+ for i = Thearch.FREGMIN; i <= Thearch.FREGMAX; i++ {
+ if reg[i-Thearch.REGMIN] == 0 { // note: REGMIN, not FREGMIN
+ break Switch
+ }
+ }
+ Flusherrors()
+ Regdump()
+ Fatalf("out of floating registers")
+
+ case TCOMPLEX64, TCOMPLEX128:
+ Tempname(n, t)
+ return
+ }
+
+ ix := i - Thearch.REGMIN
+ if reg[ix] == 0 && Debug['v'] > 0 {
+ if regstk[ix] == nil {
+ regstk[ix] = make([]byte, 4096)
+ }
+ stk := regstk[ix]
+ n := runtime.Stack(stk[:cap(stk)], false)
+ regstk[ix] = stk[:n]
+ }
+ reg[ix]++
+ Nodreg(n, t, i)
+}
+
+func Regfree(n *Node) {
+ if n.Op == ONAME {
+ return
+ }
+ if n.Op != OREGISTER && n.Op != OINDREG {
+ Fatalf("regfree: not a register")
+ }
+ i := int(n.Reg)
+ if i == Thearch.REGSP {
+ return
+ }
+ switch {
+ case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+ Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatalf("regfree: reg out of range")
+ }
+
+ i -= Thearch.REGMIN
+ if reg[i] <= 0 {
+ Fatalf("regfree: reg not allocated")
+ }
+ reg[i]--
+ if reg[i] == 0 {
+ regstk[i] = regstk[i][:0]
+ }
+}
+
+// Reginuse reports whether r is in use.
+func Reginuse(r int) bool {
+ switch {
+ case Thearch.REGMIN <= r && r <= Thearch.REGMAX,
+ Thearch.FREGMIN <= r && r <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatalf("reginuse: reg out of range")
+ }
+
+ return reg[r-Thearch.REGMIN] > 0
+}
+
+// Regrealloc(n) undoes the effect of Regfree(n),
+// so that a register can be given up but then reclaimed.
+func Regrealloc(n *Node) {
+ if n.Op != OREGISTER && n.Op != OINDREG {
+ Fatalf("regrealloc: not a register")
+ }
+ i := int(n.Reg)
+ if i == Thearch.REGSP {
+ return
+ }
+ switch {
+ case Thearch.REGMIN <= i && i <= Thearch.REGMAX,
+ Thearch.FREGMIN <= i && i <= Thearch.FREGMAX:
+ // ok
+ default:
+ Fatalf("regrealloc: reg out of range")
+ }
+
+ i -= Thearch.REGMIN
+ if reg[i] == 0 && Debug['v'] > 0 {
+ if regstk[i] == nil {
+ regstk[i] = make([]byte, 4096)
+ }
+ stk := regstk[i]
+ n := runtime.Stack(stk[:cap(stk)], false)
+ regstk[i] = stk[:n]
+ }
+ reg[i]++
+}
+
+func Regdump() {
+ if Debug['v'] == 0 {
+ fmt.Printf("run compiler with -v for register allocation sites\n")
+ return
+ }
+
+ dump := func(r int) {
+ stk := regstk[r-Thearch.REGMIN]
+ if len(stk) == 0 {
+ return
+ }
+ fmt.Printf("reg %v allocated at:\n", obj.Rconv(r))
+ fmt.Printf("\t%s\n", strings.Replace(strings.TrimSpace(string(stk)), "\n", "\n\t", -1))
+ }
+
+ for r := Thearch.REGMIN; r <= Thearch.REGMAX; r++ {
+ if reg[r-Thearch.REGMIN] != 0 {
+ dump(r)
+ }
+ }
+ for r := Thearch.FREGMIN; r <= Thearch.FREGMAX; r++ {
+ if reg[r-Thearch.REGMIN] == 0 {
+ dump(r)
+ }
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/lex.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/lex.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/lex.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/lex.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,2587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go runtime unsafe
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "flag"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+var imported_unsafe bool
+
+var (
+ goos string
+ goarch string
+ goroot string
+ buildid string
+)
+
+var (
+ Debug_append int
+ Debug_panic int
+ Debug_slice int
+ Debug_wb int
+)
+
+// Debug arguments.
+// These can be specified with the -d flag, as in "-d nil"
+// to set the debug_checknil variable. In general the list passed
+// to -d can be comma-separated.
+var debugtab = []struct {
+ name string
+ val *int
+}{
+ {"append", &Debug_append}, // print information about append compilation
+ {"disablenil", &Disable_checknil}, // disable nil checks
+ {"gcprog", &Debug_gcprog}, // print dump of GC programs
+ {"nil", &Debug_checknil}, // print information about nil checks
+ {"panic", &Debug_panic}, // do not hide any compiler panic
+ {"slice", &Debug_slice}, // print information about slice compilation
+ {"typeassert", &Debug_typeassert}, // print information about type assertion inlining
+ {"wb", &Debug_wb}, // print information about write barriers
+ {"export", &Debug_export}, // print export data
+}
+
+const (
+ EOF = -1
+)
+
+func usage() {
+ fmt.Printf("usage: compile [options] file.go...\n")
+ obj.Flagprint(1)
+ Exit(2)
+}
+
+func hidePanic() {
+ if Debug_panic == 0 && nsavederrors+nerrors > 0 {
+ // If we've already complained about things
+ // in the program, don't bother complaining
+ // about a panic too; let the user clean up
+ // the code and try again.
+ if err := recover(); err != nil {
+ errorexit()
+ }
+ }
+}
+
+func doversion() {
+ p := obj.Expstring()
+ if p == "X:none" {
+ p = ""
+ }
+ sep := ""
+ if p != "" {
+ sep = " "
+ }
+ fmt.Printf("compile version %s%s%s\n", obj.Getgoversion(), sep, p)
+ os.Exit(0)
+}
+
+func Main() {
+ defer hidePanic()
+
+ // Allow GOARCH=thearch.thestring or GOARCH=thearch.thestringsuffix,
+ // but not other values.
+ p := obj.Getgoarch()
+
+ if !strings.HasPrefix(p, Thearch.Thestring) {
+ log.Fatalf("cannot use %cg with GOARCH=%s", Thearch.Thechar, p)
+ }
+ goarch = p
+
+ Thearch.Linkarchinit()
+ Ctxt = obj.Linknew(Thearch.Thelinkarch)
+ Ctxt.DiagFunc = Yyerror
+ Ctxt.Bso = &bstdout
+ bstdout = *obj.Binitw(os.Stdout)
+
+ localpkg = mkpkg("")
+ localpkg.Prefix = "\"\""
+
+ // pseudo-package, for scoping
+ builtinpkg = mkpkg("go.builtin")
+
+ builtinpkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+ // pseudo-package, accessed by import "unsafe"
+ unsafepkg = mkpkg("unsafe")
+
+ unsafepkg.Name = "unsafe"
+
+ // real package, referred to by generated runtime calls
+ Runtimepkg = mkpkg("runtime")
+
+ Runtimepkg.Name = "runtime"
+
+ // pseudo-packages used in symbol tables
+ gostringpkg = mkpkg("go.string")
+
+ gostringpkg.Name = "go.string"
+ gostringpkg.Prefix = "go.string" // not go%2estring
+
+ itabpkg = mkpkg("go.itab")
+
+ itabpkg.Name = "go.itab"
+ itabpkg.Prefix = "go.itab" // not go%2eitab
+
+ weaktypepkg = mkpkg("go.weak.type")
+
+ weaktypepkg.Name = "go.weak.type"
+ weaktypepkg.Prefix = "go.weak.type" // not go%2eweak%2etype
+
+ typelinkpkg = mkpkg("go.typelink")
+ typelinkpkg.Name = "go.typelink"
+ typelinkpkg.Prefix = "go.typelink" // not go%2etypelink
+
+ trackpkg = mkpkg("go.track")
+
+ trackpkg.Name = "go.track"
+ trackpkg.Prefix = "go.track" // not go%2etrack
+
+ typepkg = mkpkg("type")
+
+ typepkg.Name = "type"
+
+ goroot = obj.Getgoroot()
+ goos = obj.Getgoos()
+
+ Nacl = goos == "nacl"
+ if Nacl {
+ flag_largemodel = 1
+ }
+
+ outfile = ""
+ obj.Flagcount("+", "compiling runtime", &compiling_runtime)
+ obj.Flagcount("%", "debug non-static initializers", &Debug['%'])
+ obj.Flagcount("A", "for bootstrapping, allow 'any' type", &Debug['A'])
+ obj.Flagcount("B", "disable bounds checking", &Debug['B'])
+ obj.Flagstr("D", "set relative `path` for local imports", &localimport)
+ obj.Flagcount("E", "debug symbol export", &Debug['E'])
+ obj.Flagfn1("I", "add `directory` to import search path", addidir)
+ obj.Flagcount("K", "debug missing line numbers", &Debug['K'])
+ obj.Flagcount("L", "use full (long) path in error messages", &Debug['L'])
+ obj.Flagcount("M", "debug move generation", &Debug['M'])
+ obj.Flagcount("N", "disable optimizations", &Debug['N'])
+ obj.Flagcount("P", "debug peephole optimizer", &Debug['P'])
+ obj.Flagcount("R", "debug register optimizer", &Debug['R'])
+ obj.Flagcount("S", "print assembly listing", &Debug['S'])
+ obj.Flagfn0("V", "print compiler version", doversion)
+ obj.Flagcount("W", "debug parse tree after type checking", &Debug['W'])
+ obj.Flagstr("asmhdr", "write assembly header to `file`", &asmhdr)
+ obj.Flagstr("buildid", "record `id` as the build id in the export metadata", &buildid)
+ obj.Flagcount("complete", "compiling complete package (no C or assembly)", &pure_go)
+ obj.Flagstr("d", "print debug information about items in `list`", &debugstr)
+ obj.Flagcount("e", "no limit on number of errors reported", &Debug['e'])
+ obj.Flagcount("f", "debug stack frames", &Debug['f'])
+ obj.Flagcount("g", "debug code generation", &Debug['g'])
+ obj.Flagcount("h", "halt on error", &Debug['h'])
+ obj.Flagcount("i", "debug line number stack", &Debug['i'])
+ obj.Flagfn1("importmap", "add `definition` of the form source=actual to import map", addImportMap)
+ obj.Flagstr("installsuffix", "set pkg directory `suffix`", &flag_installsuffix)
+ obj.Flagcount("j", "debug runtime-initialized variables", &Debug['j'])
+ obj.Flagcount("l", "disable inlining", &Debug['l'])
+ obj.Flagcount("live", "debug liveness analysis", &debuglive)
+ obj.Flagcount("m", "print optimization decisions", &Debug['m'])
+ obj.Flagcount("msan", "build code compatible with C/C++ memory sanitizer", &flag_msan)
+ obj.Flagcount("newexport", "use new export format", &newexport) // TODO(gri) remove eventually (issue 13241)
+ obj.Flagcount("nolocalimports", "reject local (relative) imports", &nolocalimports)
+ obj.Flagstr("o", "write output to `file`", &outfile)
+ obj.Flagstr("p", "set expected package import `path`", &myimportpath)
+ obj.Flagcount("pack", "write package file instead of object file", &writearchive)
+ obj.Flagcount("r", "debug generated wrappers", &Debug['r'])
+ obj.Flagcount("race", "enable race detector", &flag_race)
+ obj.Flagcount("s", "warn about composite literals that can be simplified", &Debug['s'])
+ obj.Flagstr("trimpath", "remove `prefix` from recorded source file paths", &Ctxt.LineHist.TrimPathPrefix)
+ obj.Flagcount("u", "reject unsafe code", &safemode)
+ obj.Flagcount("v", "increase debug verbosity", &Debug['v'])
+ obj.Flagcount("w", "debug type checking", &Debug['w'])
+ use_writebarrier = 1
+ obj.Flagcount("wb", "enable write barrier", &use_writebarrier)
+ obj.Flagcount("x", "debug lexer", &Debug['x'])
+ obj.Flagcount("y", "debug declarations in canned imports (with -d)", &Debug['y'])
+ var flag_shared int
+ var flag_dynlink bool
+ switch Thearch.Thechar {
+ case '5', '6', '7', '8', '9':
+ obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
+ }
+ if Thearch.Thechar == '6' {
+ obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
+ }
+ switch Thearch.Thechar {
+ case '5', '6', '7', '8', '9':
+ flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
+ }
+ obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
+ obj.Flagstr("memprofile", "write memory profile to `file`", &memprofile)
+ obj.Flagint64("memprofilerate", "set runtime.MemProfileRate to `rate`", &memprofilerate)
+ obj.Flagparse(usage)
+
+ if flag_dynlink {
+ flag_shared = 1
+ }
+ Ctxt.Flag_shared = int32(flag_shared)
+ Ctxt.Flag_dynlink = flag_dynlink
+
+ Ctxt.Debugasm = int32(Debug['S'])
+ Ctxt.Debugvlog = int32(Debug['v'])
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ startProfile()
+
+ if flag_race != 0 {
+ racepkg = mkpkg("runtime/race")
+ racepkg.Name = "race"
+ }
+ if flag_msan != 0 {
+ msanpkg = mkpkg("runtime/msan")
+ msanpkg.Name = "msan"
+ }
+ if flag_race != 0 && flag_msan != 0 {
+ log.Fatal("can not use both -race and -msan")
+ } else if flag_race != 0 || flag_msan != 0 {
+ instrumenting = true
+ }
+
+ // parse -d argument
+ if debugstr != "" {
+ Split:
+ for _, name := range strings.Split(debugstr, ",") {
+ if name == "" {
+ continue
+ }
+ val := 1
+ if i := strings.Index(name, "="); i >= 0 {
+ var err error
+ val, err = strconv.Atoi(name[i+1:])
+ if err != nil {
+ log.Fatalf("invalid debug value %v", name)
+ }
+ name = name[:i]
+ }
+ for _, t := range debugtab {
+ if t.name == name {
+ if t.val != nil {
+ *t.val = val
+ continue Split
+ }
+ }
+ }
+ log.Fatalf("unknown debug key -d %s\n", name)
+ }
+ }
+
+ // enable inlining. for now:
+ // default: inlining on. (debug['l'] == 1)
+ // -l: inlining off (debug['l'] == 0)
+ // -ll, -lll: inlining on again, with extra debugging (debug['l'] > 1)
+ if Debug['l'] <= 1 {
+ Debug['l'] = 1 - Debug['l']
+ }
+
+ Thearch.Betypeinit()
+ if Widthptr == 0 {
+ Fatalf("betypeinit failed")
+ }
+
+ lexinit()
+ typeinit()
+ lexinit1()
+
+ blockgen = 1
+ dclcontext = PEXTERN
+ nerrors = 0
+ lexlineno = 1
+ const BOM = 0xFEFF
+
+ for _, infile = range flag.Args() {
+ if trace && Debug['x'] != 0 {
+ fmt.Printf("--- %s ---\n", infile)
+ }
+
+ linehistpush(infile)
+
+ curio.infile = infile
+ var err error
+ curio.bin, err = obj.Bopenr(infile)
+ if err != nil {
+ fmt.Printf("open %s: %v\n", infile, err)
+ errorexit()
+ }
+
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.nlsemi = false
+ curio.eofnl = false
+ curio.last = 0
+
+ // Skip initial BOM if present.
+ if obj.Bgetrune(curio.bin) != BOM {
+ obj.Bungetrune(curio.bin)
+ }
+
+ block = 1
+ iota_ = -1000000
+
+ imported_unsafe = false
+
+ parse_file()
+ if nsyntaxerrors != 0 {
+ errorexit()
+ }
+
+ linehistpop()
+ if curio.bin != nil {
+ obj.Bterm(curio.bin)
+ }
+ }
+
+ testdclstack()
+ mkpackage(localpkg.Name) // final import not used checks
+ lexfini()
+
+ typecheckok = true
+ if Debug['f'] != 0 {
+ frame(1)
+ }
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ defercheckwidth()
+
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op != ODCL && l.N.Op != OAS && l.N.Op != OAS2 {
+ typecheck(&l.N, Etop)
+ }
+ }
+
+ // Phase 2: Variable assignments.
+ // To check interface assignments, depends on phase 1.
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCL || l.N.Op == OAS || l.N.Op == OAS2 {
+ typecheck(&l.N, Etop)
+ }
+ }
+ resumecheckwidth()
+
+ // Phase 3: Type check function bodies.
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC || l.N.Op == OCLOSURE {
+ Curfn = l.N
+ decldepth = 1
+ saveerrors()
+ typechecklist(l.N.Nbody, Etop)
+ checkreturn(l.N)
+ if nerrors != 0 {
+ l.N.Nbody = nil // type errors; do not compile
+ }
+ }
+ }
+
+ // Phase 4: Decide how to capture closed variables.
+ // This needs to run before escape analysis,
+ // because variables captured by value do not escape.
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC && l.N.Func.Closure != nil {
+ Curfn = l.N
+ capturevars(l.N)
+ }
+ }
+
+ Curfn = nil
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+
+ // Phase 5: Inlining
+ if Debug['l'] > 1 {
+ // Typecheck imported function bodies if debug['l'] > 1,
+ // otherwise lazily when used or re-exported.
+ for _, n := range importlist {
+ if n.Func.Inl != nil {
+ saveerrors()
+ typecheckinl(n)
+ }
+ }
+
+ if nsavederrors+nerrors != 0 {
+ errorexit()
+ }
+ }
+
+ if Debug['l'] != 0 {
+ // Find functions that can be inlined and clone them before walk expands them.
+ visitBottomUp(xtop, func(list []*Node, recursive bool) {
+ // TODO: use a range statement here if the order does not matter
+ for i := len(list) - 1; i >= 0; i-- {
+ n := list[i]
+ if n.Op == ODCLFUNC {
+ caninl(n)
+ inlcalls(n)
+ }
+ }
+ })
+ }
+
+ // Phase 6: Escape analysis.
+ // Required for moving heap allocations onto stack,
+ // which in turn is required by the closure implementation,
+ // which stores the addresses of stack variables into the closure.
+ // If the closure does not escape, it needs to be on the stack
+ // or else the stack copier will not update it.
+ // Large values are also moved off stack in escape analysis;
+ // because large values may contain pointers, it must happen early.
+ escapes(xtop)
+
+ // Phase 7: Transform closure bodies to properly reference captured variables.
+ // This needs to happen before walk, because closures must be transformed
+ // before walk reaches a call of a closure.
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC && l.N.Func.Closure != nil {
+ Curfn = l.N
+ transformclosure(l.N)
+ }
+ }
+
+ Curfn = nil
+
+ // Phase 8: Compile top level functions.
+ for l := xtop; l != nil; l = l.Next {
+ if l.N.Op == ODCLFUNC {
+ funccompile(l.N)
+ }
+ }
+
+ if nsavederrors+nerrors == 0 {
+ fninit(xtop)
+ }
+
+ if compiling_runtime != 0 {
+ checknowritebarrierrec()
+ }
+
+ // Phase 9: Check external declarations.
+ for i, n := range externdcl {
+ if n.Op == ONAME {
+ typecheck(&externdcl[i], Erv)
+ }
+ }
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ dumpobj()
+
+ if asmhdr != "" {
+ dumpasmhdr()
+ }
+
+ if nerrors+nsavederrors != 0 {
+ errorexit()
+ }
+
+ Flusherrors()
+}
+
+var importMap = map[string]string{}
+
+func addImportMap(s string) {
+ if strings.Count(s, "=") != 1 {
+ log.Fatal("-importmap argument must be of the form source=actual")
+ }
+ i := strings.Index(s, "=")
+ source, actual := s[:i], s[i+1:]
+ if source == "" || actual == "" {
+ log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+ }
+ importMap[source] = actual
+}
+
+func saveerrors() {
+ nsavederrors += nerrors
+ nerrors = 0
+}
+
+func arsize(b *obj.Biobuf, name string) int {
+ var buf [ArhdrSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
+
+func skiptopkgdef(b *obj.Biobuf) bool {
+ // archive header
+ p := obj.Brdline(b, '\n')
+ if p == "" {
+ return false
+ }
+ if obj.Blinelen(b) != 8 {
+ return false
+ }
+ if p != "!\n" {
+ return false
+ }
+
+ // symbol table may be first; skip it
+ sz := arsize(b, "__.GOSYMDEF")
+
+ if sz >= 0 {
+ obj.Bseek(b, int64(sz), 1)
+ } else {
+ obj.Bseek(b, 8, 0)
+ }
+
+ // package export block is next
+ sz = arsize(b, "__.PKGDEF")
+
+ if sz <= 0 {
+ return false
+ }
+ return true
+}
+
+func addidir(dir string) {
+ if dir == "" {
+ return
+ }
+
+ var pp **Idir
+ for pp = &idirs; *pp != nil; pp = &(*pp).link {
+ }
+ *pp = new(Idir)
+ (*pp).link = nil
+ (*pp).dir = dir
+}
+
+// is this path a local name? begins with ./ or ../ or /
+func islocalname(name string) bool {
+ return strings.HasPrefix(name, "/") ||
+ Ctxt.Windows != 0 && len(name) >= 3 && isAlpha(int(name[0])) && name[1] == ':' && name[2] == '/' ||
+ strings.HasPrefix(name, "./") || name == "." ||
+ strings.HasPrefix(name, "../") || name == ".."
+}
+
+func findpkg(name string) (file string, ok bool) {
+ if islocalname(name) {
+ if safemode != 0 || nolocalimports != 0 {
+ return "", false
+ }
+
+ // try .a before .6. important for building libraries:
+ // if there is an array.6 in the array.a library,
+ // want to find all of array.a, not just array.6.
+ file = fmt.Sprintf("%s.a", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s.o", name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ return "", false
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+ if q := path.Clean(name); q != name {
+ Yyerror("non-canonical import path %q (should be %q)", name, q)
+ return "", false
+ }
+
+ for p := idirs; p != nil; p = p.link {
+ file = fmt.Sprintf("%s/%s.a", p.dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/%s.o", p.dir, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ if goroot != "" {
+ suffix := ""
+ suffixsep := ""
+ if flag_installsuffix != "" {
+ suffixsep = "_"
+ suffix = flag_installsuffix
+ } else if flag_race != 0 {
+ suffixsep = "_"
+ suffix = "race"
+ } else if flag_msan != 0 {
+ suffixsep = "_"
+ suffix = "msan"
+ }
+
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.a", goroot, goos, goarch, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ file = fmt.Sprintf("%s/pkg/%s_%s%s%s/%s.o", goroot, goos, goarch, suffixsep, suffix, name)
+ if _, err := os.Stat(file); err == nil {
+ return file, true
+ }
+ }
+
+ return "", false
+}
+
+func fakeimport() {
+ importpkg = mkpkg("fake")
+ cannedimports("fake.o", "$$\n")
+}
+
+// TODO(gri) line argument doesn't appear to be used
+func importfile(f *Val, line int) {
+ if _, ok := f.U.(string); !ok {
+ Yyerror("import statement not a string")
+ fakeimport()
+ return
+ }
+
+ if len(f.U.(string)) == 0 {
+ Yyerror("import path is empty")
+ fakeimport()
+ return
+ }
+
+ if isbadimport(f.U.(string)) {
+ fakeimport()
+ return
+ }
+
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if f.U.(string) == "main" {
+ Yyerror("cannot import \"main\"")
+ errorexit()
+ }
+
+ if myimportpath != "" && f.U.(string) == myimportpath {
+ Yyerror("import %q while compiling that package (import cycle)", f.U.(string))
+ errorexit()
+ }
+
+ path_ := f.U.(string)
+
+ if mapped, ok := importMap[path_]; ok {
+ path_ = mapped
+ }
+
+ if path_ == "unsafe" {
+ if safemode != 0 {
+ Yyerror("cannot import package unsafe")
+ errorexit()
+ }
+
+ importpkg = mkpkg(f.U.(string))
+ cannedimports("unsafe.o", unsafeimport)
+ imported_unsafe = true
+ return
+ }
+
+ if islocalname(path_) {
+ if path_[0] == '/' {
+ Yyerror("import path cannot be absolute path")
+ fakeimport()
+ return
+ }
+
+ prefix := Ctxt.Pathname
+ if localimport != "" {
+ prefix = localimport
+ }
+ cleanbuf := prefix
+ cleanbuf += "/"
+ cleanbuf += path_
+ cleanbuf = path.Clean(cleanbuf)
+ path_ = cleanbuf
+
+ if isbadimport(path_) {
+ fakeimport()
+ return
+ }
+ }
+
+ file, found := findpkg(path_)
+ if !found {
+ Yyerror("can't find import: %q", f.U.(string))
+ errorexit()
+ }
+
+ importpkg = mkpkg(path_)
+
+ // If we already saw that package, feed a dummy statement
+ // to the lexer to avoid parsing export data twice.
+ if importpkg.Imported {
+ tag := ""
+ if importpkg.Safe {
+ tag = "safe"
+ }
+
+ p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+ cannedimports(file, p)
+ return
+ }
+
+ importpkg.Imported = true
+
+ var err error
+ var imp *obj.Biobuf
+ imp, err = obj.Bopenr(file)
+ if err != nil {
+ Yyerror("can't open import: %q: %v", f.U.(string), err)
+ errorexit()
+ }
+
+ if strings.HasSuffix(file, ".a") {
+ if !skiptopkgdef(imp) {
+ Yyerror("import %s: not a package file", file)
+ errorexit()
+ }
+ }
+
+ // check object header
+ p := obj.Brdstr(imp, '\n', 1)
+
+ if p != "empty archive" {
+ if !strings.HasPrefix(p, "go object ") {
+ Yyerror("import %s: not a go object file", file)
+ errorexit()
+ }
+
+ q := fmt.Sprintf("%s %s %s %s", obj.Getgoos(), obj.Getgoarch(), obj.Getgoversion(), obj.Expstring())
+ if p[10:] != q {
+ Yyerror("import %s: object is [%s] expected [%s]", file, p[10:], q)
+ errorexit()
+ }
+ }
+
+ // assume files move (get installed)
+ // so don't record the full path.
+ linehistpragma(file[len(file)-len(path_)-2:]) // acts as #pragma lib
+
+ // In the importfile, if we find:
+ // $$\n (old format): position the input right after $$\n and return
+ // $$B\n (new format): import directly, then feed the lexer a dummy statement
+
+ // look for $$
+ var c int
+ for {
+ c = obj.Bgetc(imp)
+ if c < 0 {
+ break
+ }
+ if c == '$' {
+ c = obj.Bgetc(imp)
+ if c == '$' || c < 0 {
+ break
+ }
+ }
+ }
+
+ // get character after $$
+ if c >= 0 {
+ c = obj.Bgetc(imp)
+ }
+
+ switch c {
+ case '\n':
+ // old export format
+ pushedio = curio
+
+ curio.bin = imp
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.infile = file
+ curio.nlsemi = false
+ typecheckok = true
+
+ push_parser()
+
+ case 'B':
+ // new export format
+ obj.Bgetc(imp) // skip \n after $$B
+ Import(imp)
+
+ // continue as if the package was imported before (see above)
+ tag := ""
+ if importpkg.Safe {
+ tag = "safe"
+ }
+ p := fmt.Sprintf("package %s %s\n$$\n", importpkg.Name, tag)
+ cannedimports(file, p)
+ // Reset incannedimport flag (we are not truly in a
+ // canned import) - this will cause importpkg.Direct to
+ // be set via parser.import_package (was issue #13977).
+ //
+ // TODO(gri) Remove this global variable and convoluted
+ // code in the process of streamlining the import code.
+ incannedimport = 0
+
+ default:
+ Yyerror("no import in %q", f.U.(string))
+ }
+}
+
+func unimportfile() {
+ pop_parser()
+
+ if curio.bin != nil {
+ obj.Bterm(curio.bin)
+ curio.bin = nil
+ } else {
+ lexlineno-- // re correct sys.6 line number
+ }
+
+ curio = pushedio
+
+ pushedio.bin = nil
+ incannedimport = 0
+ typecheckok = false
+}
+
+func cannedimports(file string, cp string) {
+ lexlineno++ // if sys.6 is included on line 1,
+
+ pushedio = curio
+
+ curio.bin = nil
+ curio.peekc = 0
+ curio.peekc1 = 0
+ curio.infile = file
+ curio.cp = cp
+ curio.nlsemi = false
+ curio.importsafe = false
+
+ typecheckok = true
+ incannedimport = 1
+
+ push_parser()
+}
+
+func isSpace(c int) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func isAlpha(c int) bool {
+ return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z'
+}
+
+func isDigit(c int) bool {
+ return '0' <= c && c <= '9'
+}
+func isAlnum(c int) bool {
+ return isAlpha(c) || isDigit(c)
+}
+
+func plan9quote(s string) string {
+ if s == "" {
+ return "''"
+ }
+ for _, c := range s {
+ if c <= ' ' || c == '\'' {
+ return "'" + strings.Replace(s, "'", "''", -1) + "'"
+ }
+ }
+ return s
+}
+
+func isfrog(c int) bool {
+ // complain about possibly invisible control characters
+ if c < ' ' {
+ return !isSpace(c) // exclude good white space
+ }
+
+ if 0x7f <= c && c <= 0xa0 { // DEL, unicode block including unbreakable space.
+ return true
+ }
+ return false
+}
+
+type yySymType struct {
+ sym *Sym
+ val Val
+ op Op
+}
+
+const (
+ LLITERAL = 57346 + iota
+ LASOP
+ LCOLAS
+ LBREAK
+ LCASE
+ LCHAN
+ LCONST
+ LCONTINUE
+ LDDD
+ LDEFAULT
+ LDEFER
+ LELSE
+ LFALL
+ LFOR
+ LFUNC
+ LGO
+ LGOTO
+ LIF
+ LIMPORT
+ LINTERFACE
+ LMAP
+ LNAME
+ LPACKAGE
+ LRANGE
+ LRETURN
+ LSELECT
+ LSTRUCT
+ LSWITCH
+ LTYPE
+ LVAR
+ LANDAND
+ LANDNOT
+ LCOMM
+ LDEC
+ LEQ
+ LGE
+ LGT
+ LIGNORE
+ LINC
+ LLE
+ LLSH
+ LLT
+ LNE
+ LOROR
+ LRSH
+)
+
+func _yylex(yylval *yySymType) int32 {
+ var c1 int
+ var op Op
+ var escflag int
+ var v int64
+ var cp *bytes.Buffer
+ var s *Sym
+ var str string
+
+ prevlineno = lineno
+
+l0:
+ c := getc()
+ if isSpace(c) {
+ if c == '\n' && curio.nlsemi {
+ ungetc(c)
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: implicit semi\n")
+ }
+ return ';'
+ }
+
+ goto l0
+ }
+
+ lineno = lexlineno // start of token
+
+ if c >= utf8.RuneSelf {
+ // all multibyte runes are alpha
+ cp = &lexbuf
+ cp.Reset()
+
+ goto talph
+ }
+
+ if isAlpha(c) {
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+ }
+
+ if isDigit(c) {
+ cp = &lexbuf
+ cp.Reset()
+ if c != '0' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if isDigit(c) {
+ continue
+ }
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' || c == 'p' || c == 'P' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ goto ncu
+ }
+ }
+
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == 'x' || c == 'X' {
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if isDigit(c) {
+ continue
+ }
+ if c >= 'a' && c <= 'f' {
+ continue
+ }
+ if c >= 'A' && c <= 'F' {
+ continue
+ }
+ if lexbuf.Len() == 2 {
+ Yyerror("malformed hex constant")
+ }
+ if c == 'p' {
+ goto caseep
+ }
+ goto ncu
+ }
+ }
+
+ if c == 'p' { // 0p begins floating point zero
+ goto caseep
+ }
+
+ c1 = 0
+ for {
+ if !isDigit(c) {
+ break
+ }
+ if c < '0' || c > '7' {
+ c1 = 1 // not octal
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == '.' {
+ goto casedot
+ }
+ if c == 'e' || c == 'E' {
+ goto caseep
+ }
+ if c == 'i' {
+ goto casei
+ }
+ if c1 != 0 {
+ Yyerror("malformed octal constant")
+ }
+ goto ncu
+ }
+
+ switch c {
+ case EOF:
+ lineno = prevlineno
+ ungetc(EOF)
+ return -1
+
+ case '_':
+ cp = &lexbuf
+ cp.Reset()
+ goto talph
+
+ case '.':
+ c1 = getc()
+ if isDigit(c1) {
+ cp = &lexbuf
+ cp.Reset()
+ cp.WriteByte(byte(c))
+ c = c1
+ goto casedot
+ }
+
+ if c1 == '.' {
+ c1 = getc()
+ if c1 == '.' {
+ c = LDDD
+ goto lx
+ }
+
+ ungetc(c1)
+ c1 = '.'
+ }
+
+ // "..."
+ case '"':
+ lexbuf.Reset()
+ lexbuf.WriteString(`""`)
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+ if escchar('"', &escflag, &v) {
+ break
+ }
+ if v < utf8.RuneSelf || escflag != 0 {
+ cp.WriteByte(byte(v))
+ } else {
+ cp.WriteRune(rune(v))
+ }
+ }
+
+ goto strlit
+
+ // `...`
+ case '`':
+ lexbuf.Reset()
+ lexbuf.WriteString("``")
+
+ cp = &strbuf
+ cp.Reset()
+
+ for {
+ c = int(getr())
+ if c == '\r' {
+ continue
+ }
+ if c == EOF {
+ Yyerror("eof in string")
+ break
+ }
+
+ if c == '`' {
+ break
+ }
+ cp.WriteRune(rune(c))
+ }
+
+ goto strlit
+
+ // '.'
+ case '\'':
+ if escchar('\'', &escflag, &v) {
+ Yyerror("empty character literal or unescaped ' in character literal")
+ v = '\''
+ }
+
+ if !escchar('\'', &escflag, &v) {
+ Yyerror("missing '")
+ ungetc(int(v))
+ }
+
+ x := new(Mpint)
+ yylval.val.U = x
+ Mpmovecfix(x, v)
+ x.Rune = true
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: codepoint literal\n")
+ }
+ litbuf = "string literal"
+ return LLITERAL
+
+ case '/':
+ c1 = getc()
+ if c1 == '*' {
+ nl := false
+ for {
+ c = int(getr())
+ if c == '\n' {
+ nl = true
+ }
+ for c == '*' {
+ c = int(getr())
+ if c == '/' {
+ if nl {
+ ungetc('\n')
+ }
+ goto l0
+ }
+
+ if c == '\n' {
+ nl = true
+ }
+ }
+
+ if c == EOF {
+ Yyerror("eof in comment")
+ errorexit()
+ }
+ }
+ }
+
+ if c1 == '/' {
+ c = getlinepragma()
+ for {
+ if c == '\n' || c == EOF {
+ ungetc(c)
+ goto l0
+ }
+
+ c = int(getr())
+ }
+ }
+
+ if c1 == '=' {
+ op = ODIV
+ goto asop
+ }
+
+ case ':':
+ c1 = getc()
+ if c1 == '=' {
+ c = int(LCOLAS)
+ goto lx
+ }
+
+ case '*':
+ c1 = getc()
+ if c1 == '=' {
+ op = OMUL
+ goto asop
+ }
+
+ case '%':
+ c1 = getc()
+ if c1 == '=' {
+ op = OMOD
+ goto asop
+ }
+
+ case '+':
+ c1 = getc()
+ if c1 == '+' {
+ c = int(LINC)
+ goto lx
+ }
+
+ if c1 == '=' {
+ op = OADD
+ goto asop
+ }
+
+ case '-':
+ c1 = getc()
+ if c1 == '-' {
+ c = int(LDEC)
+ goto lx
+ }
+
+ if c1 == '=' {
+ op = OSUB
+ goto asop
+ }
+
+ case '>':
+ c1 = getc()
+ if c1 == '>' {
+ c = int(LRSH)
+ c1 = getc()
+ if c1 == '=' {
+ op = ORSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = int(LGE)
+ goto lx
+ }
+
+ c = int(LGT)
+
+ case '<':
+ c1 = getc()
+ if c1 == '<' {
+ c = int(LLSH)
+ c1 = getc()
+ if c1 == '=' {
+ op = OLSH
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ c = int(LLE)
+ goto lx
+ }
+
+ if c1 == '-' {
+ c = int(LCOMM)
+ goto lx
+ }
+
+ c = int(LLT)
+
+ case '=':
+ c1 = getc()
+ if c1 == '=' {
+ c = int(LEQ)
+ goto lx
+ }
+
+ case '!':
+ c1 = getc()
+ if c1 == '=' {
+ c = int(LNE)
+ goto lx
+ }
+
+ case '&':
+ c1 = getc()
+ if c1 == '&' {
+ c = int(LANDAND)
+ goto lx
+ }
+
+ if c1 == '^' {
+ c = int(LANDNOT)
+ c1 = getc()
+ if c1 == '=' {
+ op = OANDNOT
+ goto asop
+ }
+
+ break
+ }
+
+ if c1 == '=' {
+ op = OAND
+ goto asop
+ }
+
+ case '|':
+ c1 = getc()
+ if c1 == '|' {
+ c = int(LOROR)
+ goto lx
+ }
+
+ if c1 == '=' {
+ op = OOR
+ goto asop
+ }
+
+ case '^':
+ c1 = getc()
+ if c1 == '=' {
+ op = OXOR
+ goto asop
+ }
+
+ default:
+ goto lx
+ }
+
+ ungetc(c1)
+
+lx:
+ if Debug['x'] != 0 {
+ if c > 0xff {
+ fmt.Printf("%v lex: TOKEN %s\n", Ctxt.Line(int(lexlineno)), lexname(c))
+ } else {
+ fmt.Printf("%v lex: TOKEN '%c'\n", Ctxt.Line(int(lexlineno)), c)
+ }
+ }
+ if isfrog(c) {
+ Yyerror("illegal character 0x%x", uint(c))
+ goto l0
+ }
+
+ if importpkg == nil && (c == '#' || c == '$' || c == '?' || c == '@' || c == '\\') {
+ Yyerror("%s: unexpected %c", "syntax error", c)
+ goto l0
+ }
+
+ return int32(c)
+
+asop:
+ yylval.op = op
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: TOKEN ASOP %s=\n", goopnames[op])
+ }
+ return LASOP
+
+ // cp is set to lexbuf and some
+ // prefix has been stored
+talph:
+ for {
+ if c >= utf8.RuneSelf {
+ ungetc(c)
+ r := rune(getr())
+
+ // 0xb7 · is used for internal names
+ if !unicode.IsLetter(r) && !unicode.IsDigit(r) && (importpkg == nil || r != 0xb7) {
+ Yyerror("invalid identifier character U+%04x", r)
+ }
+ if cp.Len() == 0 && unicode.IsDigit(r) {
+ Yyerror("identifier cannot begin with digit U+%04x", r)
+ }
+ cp.WriteRune(r)
+ } else if !isAlnum(c) && c != '_' {
+ break
+ } else {
+ cp.WriteByte(byte(c))
+ }
+ c = getc()
+ }
+
+ cp = nil
+ ungetc(c)
+
+ s = LookupBytes(lexbuf.Bytes())
+ if s.Lexical == LIGNORE {
+ goto l0
+ }
+
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: %s %s\n", s, lexname(int(s.Lexical)))
+ }
+ yylval.sym = s
+ return int32(s.Lexical)
+
+ncu:
+ cp = nil
+ ungetc(c)
+
+ str = lexbuf.String()
+ yylval.val.U = new(Mpint)
+ mpatofix(yylval.val.U.(*Mpint), str)
+ if yylval.val.U.(*Mpint).Ovf {
+ Yyerror("overflow in constant")
+ Mpmovecfix(yylval.val.U.(*Mpint), 0)
+ }
+
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: integer literal\n")
+ }
+ litbuf = "literal " + str
+ return LLITERAL
+
+casedot:
+ for {
+ cp.WriteByte(byte(c))
+ c = getc()
+ if !isDigit(c) {
+ break
+ }
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ if c != 'e' && c != 'E' {
+ goto caseout
+ }
+
+caseep:
+ if importpkg == nil && (c == 'p' || c == 'P') {
+ // p is allowed in .a/.o imports,
+ // but not in .go sources. See #9036.
+ Yyerror("malformed floating point constant")
+ }
+ cp.WriteByte(byte(c))
+ c = getc()
+ if c == '+' || c == '-' {
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if !isDigit(c) {
+ Yyerror("malformed floating point constant exponent")
+ }
+ for isDigit(c) {
+ cp.WriteByte(byte(c))
+ c = getc()
+ }
+
+ if c == 'i' {
+ goto casei
+ }
+ goto caseout
+
+ // imaginary constant
+casei:
+ cp = nil
+
+ str = lexbuf.String()
+ yylval.val.U = new(Mpcplx)
+ Mpmovecflt(&yylval.val.U.(*Mpcplx).Real, 0.0)
+ mpatoflt(&yylval.val.U.(*Mpcplx).Imag, str)
+ if yylval.val.U.(*Mpcplx).Imag.Val.IsInf() {
+ Yyerror("overflow in imaginary constant")
+ Mpmovecflt(&yylval.val.U.(*Mpcplx).Imag, 0.0)
+ }
+
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: imaginary literal\n")
+ }
+ litbuf = "literal " + str
+ return LLITERAL
+
+caseout:
+ cp = nil
+ ungetc(c)
+
+ str = lexbuf.String()
+ yylval.val.U = newMpflt()
+ mpatoflt(yylval.val.U.(*Mpflt), str)
+ if yylval.val.U.(*Mpflt).Val.IsInf() {
+ Yyerror("overflow in float constant")
+ Mpmovecflt(yylval.val.U.(*Mpflt), 0.0)
+ }
+
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: floating literal\n")
+ }
+ litbuf = "literal " + str
+ return LLITERAL
+
+strlit:
+ yylval.val.U = internString(cp.Bytes())
+ if Debug['x'] != 0 {
+ fmt.Printf("lex: string literal\n")
+ }
+ litbuf = "string literal"
+ return LLITERAL
+}
+
+var internedStrings = map[string]string{}
+
+func internString(b []byte) string {
+ s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+ if ok {
+ return s
+ }
+ s = string(b)
+ internedStrings[s] = s
+ return s
+}
+
+func more(pp *string) bool {
+ p := *pp
+ for p != "" && isSpace(int(p[0])) {
+ p = p[1:]
+ }
+ *pp = p
+ return p != ""
+}
+
+// read and interpret syntax that looks like
+// //line parse.y:15
+// as a discontinuity in sequential line numbers.
+// the next line of input comes from parse.y:15
+func getlinepragma() int {
+ var cmd, verb, name string
+
+ c := int(getr())
+ if c == 'g' {
+ cp := &lexbuf
+ cp.Reset()
+ cp.WriteByte('g') // already read
+ for {
+ c = int(getr())
+ if c == EOF || c >= utf8.RuneSelf {
+ return c
+ }
+ if c == '\n' {
+ break
+ }
+ cp.WriteByte(byte(c))
+ }
+ cp = nil
+
+ text := strings.TrimSuffix(lexbuf.String(), "\r")
+
+ if strings.HasPrefix(text, "go:cgo_") {
+ pragcgo(text)
+ }
+
+ cmd = text
+ verb = cmd
+ if i := strings.Index(verb, " "); i >= 0 {
+ verb = verb[:i]
+ }
+
+ if verb == "go:linkname" {
+ if !imported_unsafe {
+ Yyerror("//go:linkname only allowed in Go files that import \"unsafe\"")
+ }
+ f := strings.Fields(cmd)
+ if len(f) != 3 {
+ Yyerror("usage: //go:linkname localname linkname")
+ return c
+ }
+
+ Lookup(f[1]).Linkname = f[2]
+ return c
+ }
+
+ if verb == "go:nointerface" && obj.Fieldtrack_enabled != 0 {
+ nointerface = true
+ return c
+ }
+
+ if verb == "go:noescape" {
+ noescape = true
+ return c
+ }
+
+ if verb == "go:norace" {
+ norace = true
+ return c
+ }
+
+ if verb == "go:nosplit" {
+ nosplit = true
+ return c
+ }
+
+ if verb == "go:noinline" {
+ noinline = true
+ return c
+ }
+
+ if verb == "go:systemstack" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:systemstack only allowed in runtime")
+ }
+ systemstack = true
+ return c
+ }
+
+ if verb == "go:nowritebarrier" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrier only allowed in runtime")
+ }
+ nowritebarrier = true
+ return c
+ }
+
+ if verb == "go:nowritebarrierrec" {
+ if compiling_runtime == 0 {
+ Yyerror("//go:nowritebarrierrec only allowed in runtime")
+ }
+ nowritebarrierrec = true
+ nowritebarrier = true // Implies nowritebarrier
+ return c
+ }
+ return c
+ }
+ if c != 'l' {
+ return c
+ }
+ for i := 1; i < 5; i++ {
+ c = int(getr())
+ if c != int("line "[i]) {
+ return c
+ }
+ }
+
+ cp := &lexbuf
+ cp.Reset()
+ linep := 0
+ for {
+ c = int(getr())
+ if c == EOF {
+ return c
+ }
+ if c == '\n' {
+ break
+ }
+ if c == ' ' {
+ continue
+ }
+ if c == ':' {
+ linep = cp.Len() + 1
+ }
+ cp.WriteByte(byte(c))
+ }
+
+ cp = nil
+
+ if linep == 0 {
+ return c
+ }
+ text := strings.TrimSuffix(lexbuf.String(), "\r")
+ n := 0
+ for _, c := range text[linep:] {
+ if c < '0' || c > '9' {
+ goto out
+ }
+ n = n*10 + int(c) - '0'
+ if n > 1e8 {
+ Yyerror("line number out of range")
+ errorexit()
+ }
+ }
+
+ if n <= 0 {
+ return c
+ }
+
+ name = text[:linep-1]
+ linehistupdate(name, n)
+ return c
+
+out:
+ return c
+}
+
+func getimpsym(pp *string) string {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] == '"' {
+ return ""
+ }
+ i := 0
+ for i < len(p) && !isSpace(int(p[i])) && p[i] != '"' {
+ i++
+ }
+ sym := p[:i]
+ *pp = p[i:]
+ return sym
+}
+
+func getquoted(pp *string) (string, bool) {
+ more(pp) // skip spaces
+ p := *pp
+ if p == "" || p[0] != '"' {
+ return "", false
+ }
+ p = p[1:]
+ i := strings.Index(p, `"`)
+ if i < 0 {
+ return "", false
+ }
+ *pp = p[i+1:]
+ return p[:i], true
+}
+
+// Copied nearly verbatim from the C compiler's #pragma parser.
+// TODO: Rewrite more cleanly once the compiler is written in Go.
+func pragcgo(text string) {
+ var q string
+
+ if i := strings.Index(text, " "); i >= 0 {
+ text, q = text[:i], text[i:]
+ }
+
+ verb := text[3:] // skip "go:"
+
+ if verb == "cgo_dynamic_linker" || verb == "dynlinker" {
+ p, ok := getquoted(&q)
+ if !ok {
+ Yyerror("usage: //go:cgo_dynamic_linker \"path\"")
+ return
+ }
+ pragcgobuf += fmt.Sprintf("cgo_dynamic_linker %v\n", plan9quote(p))
+ return
+
+ }
+
+ if verb == "dynexport" {
+ verb = "cgo_export_dynamic"
+ }
+ if verb == "cgo_export_static" || verb == "cgo_export_dynamic" {
+ local := getimpsym(&q)
+ var remote string
+ if local == "" {
+ goto err2
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("%s %v\n", verb, plan9quote(local))
+ return
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err2
+ }
+ pragcgobuf += fmt.Sprintf("%s %v %v\n", verb, plan9quote(local), plan9quote(remote))
+ return
+
+ err2:
+ Yyerror("usage: //go:%s local [remote]", verb)
+ return
+ }
+
+ if verb == "cgo_import_dynamic" || verb == "dynimport" {
+ var ok bool
+ local := getimpsym(&q)
+ var p string
+ var remote string
+ if local == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v\n", plan9quote(local))
+ return
+ }
+
+ remote = getimpsym(&q)
+ if remote == "" {
+ goto err3
+ }
+ if !more(&q) {
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v\n", plan9quote(local), plan9quote(remote))
+ return
+ }
+
+ p, ok = getquoted(&q)
+ if !ok {
+ goto err3
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_dynamic %v %v %v\n", plan9quote(local), plan9quote(remote), plan9quote(p))
+ return
+
+ err3:
+ Yyerror("usage: //go:cgo_import_dynamic local [remote [\"library\"]]")
+ return
+ }
+
+ if verb == "cgo_import_static" {
+ local := getimpsym(&q)
+ if local == "" || more(&q) {
+ Yyerror("usage: //go:cgo_import_static local")
+ return
+ }
+ pragcgobuf += fmt.Sprintf("cgo_import_static %v\n", plan9quote(local))
+ return
+
+ }
+
+ if verb == "cgo_ldflag" {
+ p, ok := getquoted(&q)
+ if !ok {
+ Yyerror("usage: //go:cgo_ldflag \"arg\"")
+ return
+ }
+ pragcgobuf += fmt.Sprintf("cgo_ldflag %v\n", plan9quote(p))
+ return
+
+ }
+}
+
+func yylex(yylval *yySymType) int32 {
+ lx := _yylex(yylval)
+
+ if curio.nlsemi && lx == EOF {
+ // Treat EOF as "end of line" for the purposes
+ // of inserting a semicolon.
+ lx = ';'
+ }
+
+ switch lx {
+ case LNAME,
+ LLITERAL,
+ LBREAK,
+ LCONTINUE,
+ LFALL,
+ LRETURN,
+ LINC,
+ LDEC,
+ ')',
+ '}',
+ ']':
+ curio.nlsemi = true
+
+ default:
+ curio.nlsemi = false
+ }
+
+ return lx
+}
+
+func getc() int {
+ c := curio.peekc
+ if c != 0 {
+ curio.peekc = curio.peekc1
+ curio.peekc1 = 0
+ goto check
+ }
+
+ if curio.bin == nil {
+ if len(curio.cp) == 0 {
+ c = 0
+ } else {
+ c = int(curio.cp[0])
+ curio.cp = curio.cp[1:]
+ }
+ } else {
+ loop:
+ c = obj.Bgetc(curio.bin)
+ // recognize BOM (U+FEFF): UTF-8 encoding is 0xef 0xbb 0xbf
+ if c == 0xef {
+ buf, err := curio.bin.Peek(2)
+ if err != nil {
+ yyerrorl(int(lexlineno), "illegal UTF-8 sequence ef % x followed by read error (%v)", string(buf), err)
+ errorexit()
+ }
+ if buf[0] == 0xbb && buf[1] == 0xbf {
+ yyerrorl(int(lexlineno), "Unicode (UTF-8) BOM in middle of file")
+
+ // consume BOM bytes
+ obj.Bgetc(curio.bin)
+ obj.Bgetc(curio.bin)
+ goto loop
+ }
+ }
+ }
+
+check:
+ switch c {
+ case 0:
+ if curio.bin != nil {
+ Yyerror("illegal NUL byte")
+ break
+ }
+ fallthrough
+
+ // insert \n at EOF
+ case EOF:
+ if curio.eofnl || curio.last == '\n' {
+ return EOF
+ }
+ curio.eofnl = true
+ c = '\n'
+ fallthrough
+
+ case '\n':
+ if pushedio.bin == nil {
+ lexlineno++
+ }
+ }
+
+ curio.last = c
+ return c
+}
+
+func ungetc(c int) {
+ curio.peekc1 = curio.peekc
+ curio.peekc = c
+ if c == '\n' && pushedio.bin == nil {
+ lexlineno--
+ }
+}
+
+func getr() int32 {
+ var buf [utf8.UTFMax]byte
+
+ for i := 0; ; i++ {
+ c := getc()
+ if i == 0 && c < utf8.RuneSelf {
+ return int32(c)
+ }
+ buf[i] = byte(c)
+ if i+1 == len(buf) || utf8.FullRune(buf[:i+1]) {
+ r, w := utf8.DecodeRune(buf[:i+1])
+ if r == utf8.RuneError && w == 1 {
+ lineno = lexlineno
+ // The string conversion here makes a copy for passing
+ // to fmt.Printf, so that buf itself does not escape and can
+ // be allocated on the stack.
+ Yyerror("illegal UTF-8 sequence % x", string(buf[:i+1]))
+ }
+ return int32(r)
+ }
+ }
+}
+
+func escchar(e int, escflg *int, val *int64) bool {
+ *escflg = 0
+
+ c := int(getr())
+ switch c {
+ case EOF:
+ Yyerror("eof in string")
+ return true
+
+ case '\n':
+ Yyerror("newline in string")
+ return true
+
+ case '\\':
+ break
+
+ default:
+ if c == e {
+ return true
+ }
+ *val = int64(c)
+ return false
+ }
+
+ u := 0
+ c = int(getr())
+ var i int
+ switch c {
+ case 'x':
+ *escflg = 1 // it's a byte
+ i = 2
+ goto hex
+
+ case 'u':
+ i = 4
+ u = 1
+ goto hex
+
+ case 'U':
+ i = 8
+ u = 1
+ goto hex
+
+ case '0',
+ '1',
+ '2',
+ '3',
+ '4',
+ '5',
+ '6',
+ '7':
+ *escflg = 1 // it's a byte
+ l := int64(c) - '0'
+ for i := 2; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '7' {
+ l = l*8 + int64(c) - '0'
+ continue
+ }
+
+ Yyerror("non-octal character in escape sequence: %c", c)
+ ungetc(c)
+ }
+
+ if l > 255 {
+ Yyerror("octal escape value > 255: %d", l)
+ }
+
+ *val = l
+ return false
+
+ case 'a':
+ c = '\a'
+ case 'b':
+ c = '\b'
+ case 'f':
+ c = '\f'
+ case 'n':
+ c = '\n'
+ case 'r':
+ c = '\r'
+ case 't':
+ c = '\t'
+ case 'v':
+ c = '\v'
+ case '\\':
+ c = '\\'
+
+ default:
+ if c != e {
+ Yyerror("unknown escape sequence: %c", c)
+ }
+ }
+
+ *val = int64(c)
+ return false
+
+hex:
+ l := int64(0)
+ for ; i > 0; i-- {
+ c = getc()
+ if c >= '0' && c <= '9' {
+ l = l*16 + int64(c) - '0'
+ continue
+ }
+
+ if c >= 'a' && c <= 'f' {
+ l = l*16 + int64(c) - 'a' + 10
+ continue
+ }
+
+ if c >= 'A' && c <= 'F' {
+ l = l*16 + int64(c) - 'A' + 10
+ continue
+ }
+
+ Yyerror("non-hex character in escape sequence: %c", c)
+ ungetc(c)
+ break
+ }
+
+ if u != 0 && (l > utf8.MaxRune || (0xd800 <= l && l < 0xe000)) {
+ Yyerror("invalid Unicode code point in escape sequence: %#x", l)
+ l = utf8.RuneError
+ }
+
+ *val = l
+ return false
+}
+
+var syms = []struct {
+ name string
+ lexical int
+ etype EType
+ op Op
+}{
+ // basic types
+ {"int8", LNAME, TINT8, OXXX},
+ {"int16", LNAME, TINT16, OXXX},
+ {"int32", LNAME, TINT32, OXXX},
+ {"int64", LNAME, TINT64, OXXX},
+ {"uint8", LNAME, TUINT8, OXXX},
+ {"uint16", LNAME, TUINT16, OXXX},
+ {"uint32", LNAME, TUINT32, OXXX},
+ {"uint64", LNAME, TUINT64, OXXX},
+ {"float32", LNAME, TFLOAT32, OXXX},
+ {"float64", LNAME, TFLOAT64, OXXX},
+ {"complex64", LNAME, TCOMPLEX64, OXXX},
+ {"complex128", LNAME, TCOMPLEX128, OXXX},
+ {"bool", LNAME, TBOOL, OXXX},
+ {"string", LNAME, TSTRING, OXXX},
+ {"any", LNAME, TANY, OXXX},
+ {"break", LBREAK, Txxx, OXXX},
+ {"case", LCASE, Txxx, OXXX},
+ {"chan", LCHAN, Txxx, OXXX},
+ {"const", LCONST, Txxx, OXXX},
+ {"continue", LCONTINUE, Txxx, OXXX},
+ {"default", LDEFAULT, Txxx, OXXX},
+ {"else", LELSE, Txxx, OXXX},
+ {"defer", LDEFER, Txxx, OXXX},
+ {"fallthrough", LFALL, Txxx, OXXX},
+ {"for", LFOR, Txxx, OXXX},
+ {"func", LFUNC, Txxx, OXXX},
+ {"go", LGO, Txxx, OXXX},
+ {"goto", LGOTO, Txxx, OXXX},
+ {"if", LIF, Txxx, OXXX},
+ {"import", LIMPORT, Txxx, OXXX},
+ {"interface", LINTERFACE, Txxx, OXXX},
+ {"map", LMAP, Txxx, OXXX},
+ {"package", LPACKAGE, Txxx, OXXX},
+ {"range", LRANGE, Txxx, OXXX},
+ {"return", LRETURN, Txxx, OXXX},
+ {"select", LSELECT, Txxx, OXXX},
+ {"struct", LSTRUCT, Txxx, OXXX},
+ {"switch", LSWITCH, Txxx, OXXX},
+ {"type", LTYPE, Txxx, OXXX},
+ {"var", LVAR, Txxx, OXXX},
+ {"append", LNAME, Txxx, OAPPEND},
+ {"cap", LNAME, Txxx, OCAP},
+ {"close", LNAME, Txxx, OCLOSE},
+ {"complex", LNAME, Txxx, OCOMPLEX},
+ {"copy", LNAME, Txxx, OCOPY},
+ {"delete", LNAME, Txxx, ODELETE},
+ {"imag", LNAME, Txxx, OIMAG},
+ {"len", LNAME, Txxx, OLEN},
+ {"make", LNAME, Txxx, OMAKE},
+ {"new", LNAME, Txxx, ONEW},
+ {"panic", LNAME, Txxx, OPANIC},
+ {"print", LNAME, Txxx, OPRINT},
+ {"println", LNAME, Txxx, OPRINTN},
+ {"real", LNAME, Txxx, OREAL},
+ {"recover", LNAME, Txxx, ORECOVER},
+ {"notwithstanding", LIGNORE, Txxx, OXXX},
+ {"thetruthofthematter", LIGNORE, Txxx, OXXX},
+ {"despiteallobjections", LIGNORE, Txxx, OXXX},
+ {"whereas", LIGNORE, Txxx, OXXX},
+ {"insofaras", LIGNORE, Txxx, OXXX},
+}
+
+// lexinit initializes known symbols and the basic types.
+func lexinit() {
+ for _, s := range syms {
+ lex := s.lexical
+ s1 := Lookup(s.name)
+ s1.Lexical = uint16(lex)
+
+ if etype := s.etype; etype != Txxx {
+ if int(etype) >= len(Types) {
+ Fatalf("lexinit: %s bad etype", s.name)
+ }
+ s2 := Pkglookup(s.name, builtinpkg)
+ t := Types[etype]
+ if t == nil {
+ t = typ(etype)
+ t.Sym = s2
+
+ if etype != TANY && etype != TSTRING {
+ dowidth(t)
+ }
+ Types[etype] = t
+ }
+
+ s2.Lexical = LNAME
+ s2.Def = typenod(t)
+ s2.Def.Name = new(Name)
+ continue
+ }
+
+ // TODO(marvin): Fix Node.EType type union.
+ if etype := s.op; etype != OXXX {
+ s2 := Pkglookup(s.name, builtinpkg)
+ s2.Lexical = LNAME
+ s2.Def = Nod(ONAME, nil, nil)
+ s2.Def.Sym = s2
+ s2.Def.Etype = EType(etype)
+ }
+ }
+
+ // logically, the type of a string literal.
+ // types[TSTRING] is the named type string
+ // (the type of x in var x string or var x = "hello").
+ // this is the ideal form
+ // (the type of x in const x = "hello").
+ idealstring = typ(TSTRING)
+
+ idealbool = typ(TBOOL)
+
+ s := Pkglookup("true", builtinpkg)
+ s.Def = Nodbool(true)
+ s.Def.Sym = Lookup("true")
+ s.Def.Name = new(Name)
+ s.Def.Type = idealbool
+
+ s = Pkglookup("false", builtinpkg)
+ s.Def = Nodbool(false)
+ s.Def.Sym = Lookup("false")
+ s.Def.Name = new(Name)
+ s.Def.Type = idealbool
+
+ s = Lookup("_")
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+ nblank = s.Def
+
+ s = Pkglookup("_", builtinpkg)
+ s.Block = -100
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ Types[TBLANK] = typ(TBLANK)
+ s.Def.Type = Types[TBLANK]
+
+ Types[TNIL] = typ(TNIL)
+ s = Pkglookup("nil", builtinpkg)
+ var v Val
+ v.U = new(NilVal)
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+ s.Def.Name = new(Name)
+}
+
+func lexinit1() {
+ // t = interface { Error() string }
+ rcvr := typ(TSTRUCT)
+
+ rcvr.Type = typ(TFIELD)
+ rcvr.Type.Type = Ptrto(typ(TSTRUCT))
+ rcvr.Funarg = true
+ in := typ(TSTRUCT)
+ in.Funarg = true
+ out := typ(TSTRUCT)
+ out.Type = typ(TFIELD)
+ out.Type.Type = Types[TSTRING]
+ out.Funarg = true
+ f := typ(TFUNC)
+ *getthis(f) = rcvr
+ *Getoutarg(f) = out
+ *getinarg(f) = in
+ f.Thistuple = 1
+ f.Intuple = 0
+ f.Outnamed = false
+ f.Outtuple = 1
+ t := typ(TINTER)
+ t.Type = typ(TFIELD)
+ t.Type.Sym = Lookup("Error")
+ t.Type.Type = f
+
+ // error type
+ s := Lookup("error")
+
+ s.Lexical = LNAME
+ s1 := Pkglookup("error", builtinpkg)
+ errortype = t
+ errortype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(errortype)
+
+ // byte alias
+ s = Lookup("byte")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("byte", builtinpkg)
+ bytetype = typ(TUINT8)
+ bytetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(bytetype)
+ s1.Def.Name = new(Name)
+
+ // rune alias
+ s = Lookup("rune")
+
+ s.Lexical = LNAME
+ s1 = Pkglookup("rune", builtinpkg)
+ runetype = typ(TINT32)
+ runetype.Sym = s1
+ s1.Lexical = LNAME
+ s1.Def = typenod(runetype)
+ s1.Def.Name = new(Name)
+}
+
+func lexfini() {
+ for i := range syms {
+ lex := syms[i].lexical
+ if lex != LNAME {
+ continue
+ }
+ s := Lookup(syms[i].name)
+ s.Lexical = uint16(lex)
+
+ etype := syms[i].etype
+ if etype != Txxx && (etype != TANY || Debug['A'] != 0) && s.Def == nil {
+ s.Def = typenod(Types[etype])
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ // TODO(marvin): Fix Node.EType type union.
+ etype = EType(syms[i].op)
+ if etype != EType(OXXX) && s.Def == nil {
+ s.Def = Nod(ONAME, nil, nil)
+ s.Def.Sym = s
+ s.Def.Etype = etype
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // backend-specific builtin types (e.g. int).
+ for i := range Thearch.Typedefs {
+ s := Lookup(Thearch.Typedefs[i].Name)
+ if s.Def == nil {
+ s.Def = typenod(Types[Thearch.Typedefs[i].Etype])
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+ }
+
+ // there's only so much table-driven we can handle.
+ // these are special cases.
+ if s := Lookup("byte"); s.Def == nil {
+ s.Def = typenod(bytetype)
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("error"); s.Def == nil {
+ s.Def = typenod(errortype)
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("rune"); s.Def == nil {
+ s.Def = typenod(runetype)
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("nil"); s.Def == nil {
+ var v Val
+ v.U = new(NilVal)
+ s.Def = nodlit(v)
+ s.Def.Sym = s
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("iota"); s.Def == nil {
+ s.Def = Nod(OIOTA, nil, nil)
+ s.Def.Sym = s
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("true"); s.Def == nil {
+ s.Def = Nodbool(true)
+ s.Def.Sym = s
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ if s := Lookup("false"); s.Def == nil {
+ s.Def = Nodbool(false)
+ s.Def.Sym = s
+ s.Def.Name = new(Name)
+ s.Origpkg = builtinpkg
+ }
+
+ nodfp = Nod(ONAME, nil, nil)
+ nodfp.Type = Types[TINT32]
+ nodfp.Xoffset = 0
+ nodfp.Class = PPARAM
+ nodfp.Sym = Lookup(".fp")
+}
+
+var lexn = map[int]string{
+ LANDAND: "ANDAND",
+ LANDNOT: "ANDNOT",
+ LASOP: "ASOP",
+ LBREAK: "BREAK",
+ LCASE: "CASE",
+ LCHAN: "CHAN",
+ LCOLAS: "COLAS",
+ LCOMM: "<-",
+ LCONST: "CONST",
+ LCONTINUE: "CONTINUE",
+ LDDD: "...",
+ LDEC: "DEC",
+ LDEFAULT: "DEFAULT",
+ LDEFER: "DEFER",
+ LELSE: "ELSE",
+ LEQ: "EQ",
+ LFALL: "FALL",
+ LFOR: "FOR",
+ LFUNC: "FUNC",
+ LGE: "GE",
+ LGO: "GO",
+ LGOTO: "GOTO",
+ LGT: "GT",
+ LIF: "IF",
+ LIMPORT: "IMPORT",
+ LINC: "INC",
+ LINTERFACE: "INTERFACE",
+ LLE: "LE",
+ LLITERAL: "LITERAL",
+ LLSH: "LSH",
+ LLT: "LT",
+ LMAP: "MAP",
+ LNAME: "NAME",
+ LNE: "NE",
+ LOROR: "OROR",
+ LPACKAGE: "PACKAGE",
+ LRANGE: "RANGE",
+ LRETURN: "RETURN",
+ LRSH: "RSH",
+ LSELECT: "SELECT",
+ LSTRUCT: "STRUCT",
+ LSWITCH: "SWITCH",
+ LTYPE: "TYPE",
+ LVAR: "VAR",
+}
+
+func lexname(lex int) string {
+ if s, ok := lexn[lex]; ok {
+ return s
+ }
+ return fmt.Sprintf("LEX-%d", lex)
+}
+
+func pkgnotused(lineno int, path string, name string) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if name == "" || elem == name {
+ yyerrorl(int(lineno), "imported and not used: %q", path)
+ } else {
+ yyerrorl(int(lineno), "imported and not used: %q as %s", path, name)
+ }
+}
+
+func mkpackage(pkgname string) {
+ if localpkg.Name == "" {
+ if pkgname == "_" {
+ Yyerror("invalid package name _")
+ }
+ localpkg.Name = pkgname
+ } else {
+ if pkgname != localpkg.Name {
+ Yyerror("package %s; expected %s", pkgname, localpkg.Name)
+ }
+ for _, s := range localpkg.Syms {
+ if s.Def == nil {
+ continue
+ }
+ if s.Def.Op == OPACK {
+ // throw away top-level package name leftover
+ // from previous file.
+ // leave s->block set to cause redeclaration
+ // errors if a conflicting top-level name is
+ // introduced by a different file.
+ if !s.Def.Used && nsyntaxerrors == 0 {
+ pkgnotused(int(s.Def.Lineno), s.Def.Name.Pkg.Path, s.Name)
+ }
+ s.Def = nil
+ continue
+ }
+
+ if s.Def.Sym != s {
+ // throw away top-level name left over
+ // from previous import . "x"
+ if s.Def.Name != nil && s.Def.Name.Pack != nil && !s.Def.Name.Pack.Used && nsyntaxerrors == 0 {
+ pkgnotused(int(s.Def.Name.Pack.Lineno), s.Def.Name.Pack.Name.Pkg.Path, "")
+ s.Def.Name.Pack.Used = true
+ }
+
+ s.Def = nil
+ continue
+ }
+ }
+ }
+
+ if outfile == "" {
+ p := infile
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if Ctxt.Windows != 0 {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if writearchive > 0 {
+ suffix = ".a"
+ }
+ outfile = p + suffix
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/pgen.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/pgen.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/pgen.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/pgen.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,557 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "crypto/md5"
+ "fmt"
+ "strings"
+)
+
+// "Portable" code generation.
+
+var makefuncdatasym_nsym int32
+
+func makefuncdatasym(namefmt string, funcdatakind int64) *Sym {
+ var nod Node
+
+ sym := Lookupf(namefmt, makefuncdatasym_nsym)
+ makefuncdatasym_nsym++
+ pnod := newname(sym)
+ pnod.Class = PEXTERN
+ Nodconst(&nod, Types[TINT32], funcdatakind)
+ Thearch.Gins(obj.AFUNCDATA, &nod, pnod)
+ return sym
+}
+
+// gvardef inserts a VARDEF for n into the instruction stream.
+// VARDEF is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easy, gvardef is usually only called for multi-word
+// or 'fat' variables, those satisfying isfat(n->type).
+// However, gvardef is also called when a non-fat variable is initialized
+// via a block move; the only time this happens is when you have
+// return f()
+// for a function with multiple return values exactly matching the return
+// types of the current function.
+//
+// A 'VARDEF x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The VARDEF must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// VARDEF x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// VARDEF x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// VARDEF x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the VARDEF appears to have "overwritten" it.
+//
+// VARDEF is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// VARKILL is the opposite of VARDEF: it marks a value as no longer needed,
+// even if its address has been taken. That is, a VARKILL annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
+
+func gvardefx(n *Node, as int) {
+ if n == nil {
+ Fatalf("gvardef nil")
+ }
+ if n.Op != ONAME {
+ Yyerror("gvardef %v; %v", Oconv(int(n.Op), obj.FmtSharp), n)
+ return
+ }
+
+ switch n.Class {
+ case PAUTO, PPARAM, PPARAMOUT:
+ if as == obj.AVARLIVE {
+ Thearch.Gins(as, n, nil)
+ } else {
+ Thearch.Gins(as, nil, n)
+ }
+ }
+}
+
+func Gvardef(n *Node) {
+ gvardefx(n, obj.AVARDEF)
+}
+
+func gvarkill(n *Node) {
+ gvardefx(n, obj.AVARKILL)
+}
+
+func gvarlive(n *Node) {
+ gvardefx(n, obj.AVARLIVE)
+}
+
+func removevardef(firstp *obj.Prog) {
+ for p := firstp; p != nil; p = p.Link {
+ for p.Link != nil && (p.Link.As == obj.AVARDEF || p.Link.As == obj.AVARKILL || p.Link.As == obj.AVARLIVE) {
+ p.Link = p.Link.Link
+ }
+ if p.To.Type == obj.TYPE_BRANCH {
+ for p.To.Val.(*obj.Prog) != nil && (p.To.Val.(*obj.Prog).As == obj.AVARDEF || p.To.Val.(*obj.Prog).As == obj.AVARKILL || p.To.Val.(*obj.Prog).As == obj.AVARLIVE) {
+ p.To.Val = p.To.Val.(*obj.Prog).Link
+ }
+ }
+ }
+}
+
+func gcsymdup(s *Sym) {
+ ls := Linksym(s)
+ if len(ls.R) > 0 {
+ Fatalf("cannot rosymdup %s with relocations", ls.Name)
+ }
+ ls.Name = fmt.Sprintf("gclocals·%x", md5.Sum(ls.P))
+ ls.Dupok = 1
+}
+
+func emitptrargsmap() {
+ if Curfn.Func.Nname.Sym.Name == "_" {
+ return
+ }
+ sym := Lookup(fmt.Sprintf("%s.args_stackmap", Curfn.Func.Nname.Sym.Name))
+
+ nptr := int(Curfn.Type.Argwid / int64(Widthptr))
+ bv := bvalloc(int32(nptr) * 2)
+ nbitmap := 1
+ if Curfn.Type.Outtuple > 0 {
+ nbitmap = 2
+ }
+ off := duint32(sym, 0, uint32(nbitmap))
+ off = duint32(sym, off, uint32(bv.n))
+ var xoffset int64
+ if Curfn.Type.Thistuple > 0 {
+ xoffset = 0
+ onebitwalktype1(getthisx(Curfn.Type), &xoffset, bv)
+ }
+
+ if Curfn.Type.Intuple > 0 {
+ xoffset = 0
+ onebitwalktype1(getinargx(Curfn.Type), &xoffset, bv)
+ }
+
+ for j := 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ if Curfn.Type.Outtuple > 0 {
+ xoffset = 0
+ onebitwalktype1(getoutargx(Curfn.Type), &xoffset, bv)
+ for j := 0; int32(j) < bv.n; j += 32 {
+ off = duint32(sym, off, bv.b[j/32])
+ }
+ }
+
+ ggloblsym(sym, int32(off), obj.RODATA|obj.LOCAL)
+}
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+//
+// Sort the list of stack variables. Autos after anything else,
+// within autos, unused after used, within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out in decreasing addresses
+// on the stack, pointers first, zeroed things first and decreasing size
+// really means, in memory, things with pointers needing zeroing at
+// the top of the stack and increasing in size.
+// Non-autos sort on offset.
+func cmpstackvarlt(a, b *Node) bool {
+ if a.Class != b.Class {
+ if a.Class == PAUTO {
+ return false
+ }
+ return true
+ }
+
+ if a.Class != PAUTO {
+ if a.Xoffset < b.Xoffset {
+ return true
+ }
+ if a.Xoffset > b.Xoffset {
+ return false
+ }
+ return false
+ }
+
+ if a.Used != b.Used {
+ return a.Used
+ }
+
+ ap := haspointers(a.Type)
+ bp := haspointers(b.Type)
+ if ap != bp {
+ return ap
+ }
+
+ ap = a.Name.Needzero
+ bp = b.Name.Needzero
+ if ap != bp {
+ return ap
+ }
+
+ if a.Type.Width < b.Type.Width {
+ return false
+ }
+ if a.Type.Width > b.Type.Width {
+ return true
+ }
+
+ return a.Sym.Name < b.Sym.Name
+}
+
+// stkdelta records the stack offset delta for a node
+// during the compaction of the stack frame to remove
+// unused stack slots.
+var stkdelta = map[*Node]int64{}
+
+// TODO(lvd) find out where the PAUTO/OLITERAL nodes come from.
+func allocauto(ptxt *obj.Prog) {
+ Stksize = 0
+ stkptrsize = 0
+
+ if Curfn.Func.Dcl == nil {
+ return
+ }
+
+ // Mark the PAUTO's unused.
+ for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class == PAUTO {
+ ll.N.Used = false
+ }
+ }
+
+ markautoused(ptxt)
+
+ listsort(&Curfn.Func.Dcl, cmpstackvarlt)
+
+ // Unused autos are at the end, chop 'em off.
+ ll := Curfn.Func.Dcl
+
+ n := ll.N
+ if n.Class == PAUTO && n.Op == ONAME && !n.Used {
+ // No locals used at all
+ Curfn.Func.Dcl = nil
+
+ fixautoused(ptxt)
+ return
+ }
+
+ for ll := Curfn.Func.Dcl; ll.Next != nil; ll = ll.Next {
+ n = ll.Next.N
+ if n.Class == PAUTO && n.Op == ONAME && !n.Used {
+ ll.Next = nil
+ Curfn.Func.Dcl.End = ll
+ break
+ }
+ }
+
+ // Reassign stack offsets of the locals that are still there.
+ var w int64
+ for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+ n = ll.N
+ if n.Class != PAUTO || n.Op != ONAME {
+ continue
+ }
+
+ dowidth(n.Type)
+ w = n.Type.Width
+ if w >= Thearch.MAXWIDTH || w < 0 {
+ Fatalf("bad width")
+ }
+ Stksize += w
+ Stksize = Rnd(Stksize, int64(n.Type.Align))
+ if haspointers(n.Type) {
+ stkptrsize = Stksize
+ }
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ Stksize = Rnd(Stksize, int64(Widthptr))
+ }
+ if Stksize >= 1<<31 {
+ setlineno(Curfn)
+ Yyerror("stack frame too large (>2GB)")
+ }
+
+ stkdelta[n] = -Stksize - n.Xoffset
+ }
+
+ Stksize = Rnd(Stksize, int64(Widthreg))
+ stkptrsize = Rnd(stkptrsize, int64(Widthreg))
+
+ fixautoused(ptxt)
+
+ // The debug information needs accurate offsets on the symbols.
+ for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+ if ll.N.Class != PAUTO || ll.N.Op != ONAME {
+ continue
+ }
+ ll.N.Xoffset += stkdelta[ll.N]
+ delete(stkdelta, ll.N)
+ }
+}
+
+func Cgen_checknil(n *Node) {
+ if Disable_checknil != 0 {
+ return
+ }
+
+ // Ideally we wouldn't see any integer types here, but we do.
+ if n.Type == nil || (!Isptr[n.Type.Etype] && !Isint[n.Type.Etype] && n.Type.Etype != TUNSAFEPTR) {
+ Dump("checknil", n)
+ Fatalf("bad checknil")
+ }
+
+ if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+ var reg Node
+ Regalloc(®, Types[Tptr], n)
+ Cgen(n, ®)
+ Thearch.Gins(obj.ACHECKNIL, ®, nil)
+ Regfree(®)
+ return
+ }
+
+ Thearch.Gins(obj.ACHECKNIL, n, nil)
+}
+
+func compile(fn *Node) {
+ if Newproc == nil {
+ Newproc = Sysfunc("newproc")
+ Deferproc = Sysfunc("deferproc")
+ Deferreturn = Sysfunc("deferreturn")
+ Panicindex = Sysfunc("panicindex")
+ panicslice = Sysfunc("panicslice")
+ throwreturn = Sysfunc("throwreturn")
+ }
+
+ lno := setlineno(fn)
+
+ Curfn = fn
+ dowidth(Curfn.Type)
+
+ var oldstksize int64
+ var nod1 Node
+ var ptxt *obj.Prog
+ var pl *obj.Plist
+ var p *obj.Prog
+ var n *Node
+ var nam *Node
+ var gcargs *Sym
+ var gclocals *Sym
+ if fn.Nbody == nil {
+ if pure_go != 0 || strings.HasPrefix(fn.Func.Nname.Sym.Name, "init.") {
+ Yyerror("missing function body for %q", fn.Func.Nname.Sym.Name)
+ goto ret
+ }
+
+ if Debug['A'] != 0 {
+ goto ret
+ }
+ emitptrargsmap()
+ goto ret
+ }
+
+ saveerrors()
+
+ // set up domain for labels
+ clearlabels()
+
+ if Curfn.Type.Outnamed {
+ // add clearing of the output parameters
+ var save Iter
+ t := Structfirst(&save, Getoutarg(Curfn.Type))
+
+ for t != nil {
+ if t.Nname != nil {
+ n = Nod(OAS, t.Nname, nil)
+ typecheck(&n, Etop)
+ Curfn.Nbody = concat(list1(n), Curfn.Nbody)
+ }
+
+ t = structnext(&save)
+ }
+ }
+
+ order(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+
+ hasdefer = false
+ walk(Curfn)
+ if nerrors != 0 {
+ goto ret
+ }
+ if instrumenting {
+ instrument(Curfn)
+ }
+ if nerrors != 0 {
+ goto ret
+ }
+
+ continpc = nil
+ breakpc = nil
+
+ pl = newplist()
+ pl.Name = Linksym(Curfn.Func.Nname.Sym)
+
+ setlineno(Curfn)
+
+ Nodconst(&nod1, Types[TINT32], 0)
+ nam = Curfn.Func.Nname
+ if isblank(nam) {
+ nam = nil
+ }
+ ptxt = Thearch.Gins(obj.ATEXT, nam, &nod1)
+ Afunclit(&ptxt.From, Curfn.Func.Nname)
+ ptxt.From3 = new(obj.Addr)
+ if fn.Func.Dupok {
+ ptxt.From3.Offset |= obj.DUPOK
+ }
+ if fn.Func.Wrapper {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ if fn.Func.Needctxt {
+ ptxt.From3.Offset |= obj.NEEDCTXT
+ }
+ if fn.Func.Nosplit {
+ ptxt.From3.Offset |= obj.NOSPLIT
+ }
+ if fn.Func.Systemstack {
+ ptxt.From.Sym.Cfunc = 1
+ }
+
+ // Clumsy but important.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ if myimportpath != "" && myimportpath == "reflect" {
+ if Curfn.Func.Nname.Sym.Name == "callReflect" || Curfn.Func.Nname.Sym.Name == "callMethod" {
+ ptxt.From3.Offset |= obj.WRAPPER
+ }
+ }
+
+ ginit()
+
+ gcargs = makefuncdatasym("gcargs·%d", obj.FUNCDATA_ArgsPointerMaps)
+ gclocals = makefuncdatasym("gclocals·%d", obj.FUNCDATA_LocalsPointerMaps)
+
+ for _, t := range Curfn.Func.Fieldtrack {
+ gtrack(tracksym(t))
+ }
+
+ for l := fn.Func.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if n.Op != ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class {
+ case PAUTO, PPARAM, PPARAMOUT:
+ Nodconst(&nod1, Types[TUINTPTR], l.N.Type.Width)
+ p = Thearch.Gins(obj.ATYPE, l.N, &nod1)
+ p.From.Gotype = Linksym(ngotype(l.N))
+ }
+ }
+
+ Genlist(Curfn.Func.Enter)
+ Genlist(Curfn.Nbody)
+ gclean()
+ checklabels()
+ if nerrors != 0 {
+ goto ret
+ }
+ if Curfn.Func.Endlineno != 0 {
+ lineno = Curfn.Func.Endlineno
+ }
+
+ if Curfn.Type.Outtuple != 0 {
+ Ginscall(throwreturn, 0)
+ }
+
+ ginit()
+
+ // TODO: Determine when the final cgen_ret can be omitted. Perhaps always?
+ cgen_ret(nil)
+
+ if hasdefer {
+ // deferreturn pretends to have one uintptr argument.
+ // Reserve space for it so stack scanner is happy.
+ if Maxarg < int64(Widthptr) {
+ Maxarg = int64(Widthptr)
+ }
+ }
+
+ gclean()
+ if nerrors != 0 {
+ goto ret
+ }
+
+ Pc.As = obj.ARET // overwrite AEND
+ Pc.Lineno = lineno
+
+ fixjmp(ptxt)
+ if Debug['N'] == 0 || Debug['R'] != 0 || Debug['P'] != 0 {
+ regopt(ptxt)
+ nilopt(ptxt)
+ }
+
+ Thearch.Expandchecks(ptxt)
+
+ oldstksize = Stksize
+ allocauto(ptxt)
+
+ if false {
+ fmt.Printf("allocauto: %d to %d\n", oldstksize, int64(Stksize))
+ }
+
+ setlineno(Curfn)
+ if int64(Stksize)+Maxarg > 1<<31 {
+ Yyerror("stack frame too large (>2GB)")
+ goto ret
+ }
+
+ // Emit garbage collection symbols.
+ liveness(Curfn, ptxt, gcargs, gclocals)
+
+ gcsymdup(gcargs)
+ gcsymdup(gclocals)
+
+ Thearch.Defframe(ptxt)
+
+ if Debug['f'] != 0 {
+ frame(0)
+ }
+
+ // Remove leftover instrumentation from the instruction stream.
+ removevardef(ptxt)
+
+ret:
+ lineno = lno
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/reg.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/reg.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/reg.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/reg.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,1534 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// A Var represents a single variable that may be stored in a register.
+// That variable may itself correspond to a hardware register,
+// to represent the use of registers in the unoptimized instruction stream.
+type Var struct {
+ offset int64
+ node *Node
+ nextinnode *Var
+ width int
+ id int // index in vars
+ name int8
+ etype EType
+ addr int8
+}
+
+// Bits represents a set of Vars, stored as a bit set of var numbers
+// (the index in vars, or equivalently v.id).
+type Bits struct {
+ b [BITS]uint64
+}
+
+const (
+ BITS = 3
+ NVAR = BITS * 64
+)
+
+var (
+ vars [NVAR]Var // variables under consideration
+ nvar int // number of vars
+
+ regbits uint64 // bits for hardware registers
+
+ zbits Bits // zero
+ externs Bits // global variables
+ params Bits // function parameters and results
+ ivar Bits // function parameters (inputs)
+ ovar Bits // function results (outputs)
+ consts Bits // constant values
+ addrs Bits // variables with address taken
+)
+
+// A Reg is a wrapper around a single Prog (one instruction) that holds
+// register optimization information while the optimizer runs.
+// r->prog is the instruction.
+type Reg struct {
+ set Bits // regopt variables written by this instruction.
+ use1 Bits // regopt variables read by prog->from.
+ use2 Bits // regopt variables read by prog->to.
+
+ // refahead/refbehind are the regopt variables whose current
+ // value may be used in the following/preceding instructions
+ // up to a CALL (or the value is clobbered).
+ refbehind Bits
+ refahead Bits
+
+ // calahead/calbehind are similar, but for variables in
+ // instructions that are reachable after hitting at least one
+ // CALL.
+ calbehind Bits
+ calahead Bits
+
+ regdiff Bits
+ act Bits
+ regu uint64 // register used bitmap
+}
+
+// A Rgn represents a single regopt variable over a region of code
+// where a register could potentially be dedicated to that variable.
+// The code encompassed by a Rgn is defined by the flow graph,
+// starting at enter, flood-filling forward while varno is refahead
+// and backward while varno is refbehind, and following branches.
+// A single variable may be represented by multiple disjoint Rgns and
+// each Rgn may choose a different register for that variable.
+// Registers are allocated to regions greedily in order of descending
+// cost.
+type Rgn struct {
+ enter *Flow
+ cost int16
+ varno int16
+ regno int16
+}
+
+// The Plan 9 C compilers used a limit of 600 regions,
+// but the yacc-generated parser in y.go has 3100 regions.
+// We set MaxRgn large enough to handle that.
+// There's not a huge cost to having too many regions:
+// the main processing traces the live area for each variable,
+// which is limited by the number of variables times the area,
+// not the raw region count. If there are many regions, they
+// are almost certainly small and easy to trace.
+// The only operation that scales with region count is the
+// sorting by cost, which uses sort.Sort and is therefore
+// guaranteed n log n.
+const MaxRgn = 6000
+
+var (
+ region []Rgn
+ nregion int
+)
+
+type rcmp []Rgn
+
+func (x rcmp) Len() int {
+ return len(x)
+}
+
+func (x rcmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x rcmp) Less(i, j int) bool {
+ p1 := &x[i]
+ p2 := &x[j]
+ if p1.cost != p2.cost {
+ return int(p2.cost)-int(p1.cost) < 0
+ }
+ if p1.varno != p2.varno {
+ return int(p2.varno)-int(p1.varno) < 0
+ }
+ if p1.enter != p2.enter {
+ return int(p2.enter.Id-p1.enter.Id) < 0
+ }
+ return false
+}
+
+func setaddrs(bit Bits) {
+ var i int
+ var n int
+ var v *Var
+ var node *Node
+
+ for bany(&bit) {
+ // convert each bit to a variable
+ i = bnum(&bit)
+
+ node = vars[i].node
+ n = int(vars[i].name)
+ biclr(&bit, uint(i))
+
+ // disable all pieces of that variable
+ for i = 0; i < nvar; i++ {
+ v = &vars[i]
+ if v.node == node && int(v.name) == n {
+ v.addr = 2
+ }
+ }
+ }
+}
+
+var regnodes [64]*Node
+
+func walkvardef(n *Node, f *Flow, active int) {
+ var f1 *Flow
+ var bn int
+ var v *Var
+
+ for f1 = f; f1 != nil; f1 = f1.S1 {
+ if f1.Active == int32(active) {
+ break
+ }
+ f1.Active = int32(active)
+ if f1.Prog.As == obj.AVARKILL && f1.Prog.To.Node == n {
+ break
+ }
+ for v, _ = n.Opt().(*Var); v != nil; v = v.nextinnode {
+ bn = v.id
+ biset(&(f1.Data.(*Reg)).act, uint(bn))
+ }
+
+ if f1.Prog.As == obj.ACALL {
+ break
+ }
+ }
+
+ for f2 := f; f2 != f1; f2 = f2.S1 {
+ if f2.S2 != nil {
+ walkvardef(n, f2.S2, active)
+ }
+ }
+}
+
+// add mov b,rn
+// just after r
+func addmove(r *Flow, bn int, rn int, f int) {
+ p1 := Ctxt.NewProg()
+ Clearp(p1)
+ p1.Pc = 9999
+
+ p := r.Prog
+ p1.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+
+ v := &vars[bn]
+
+ a := &p1.To
+ a.Offset = v.offset
+ a.Etype = uint8(v.etype)
+ a.Type = obj.TYPE_MEM
+ a.Name = v.name
+ a.Node = v.node
+ a.Sym = Linksym(v.node.Sym)
+
+ /* NOTE(rsc): 9g did
+ if(a->etype == TARRAY)
+ a->type = TYPE_ADDR;
+ else if(a->sym == nil)
+ a->type = TYPE_CONST;
+ */
+ p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
+
+ // TODO(rsc): Remove special case here.
+ if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
+ p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
+ }
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = int16(rn)
+ p1.From.Name = obj.NAME_NONE
+ if f == 0 {
+ p1.From = *a
+ *a = obj.Addr{}
+ a.Type = obj.TYPE_REG
+ a.Reg = int16(rn)
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v ===add=== %v\n", p, p1)
+ }
+ Ostats.Nspill++
+}
+
+func overlap_reg(o1 int64, w1 int, o2 int64, w2 int) bool {
+ t1 := o1 + int64(w1)
+ t2 := o2 + int64(w2)
+
+ if t1 <= o2 || t2 <= o1 {
+ return false
+ }
+
+ return true
+}
+
+func mkvar(f *Flow, a *obj.Addr) Bits {
+ // mark registers used
+ if a.Type == obj.TYPE_NONE {
+ return zbits
+ }
+
+ r := f.Data.(*Reg)
+ r.use1.b[0] |= Thearch.Doregbits(int(a.Index)) // TODO: Use RtoB
+
+ var n int
+ switch a.Type {
+ default:
+ regu := Thearch.Doregbits(int(a.Reg)) | Thearch.RtoB(int(a.Reg)) // TODO: Use RtoB
+ if regu == 0 {
+ return zbits
+ }
+ bit := zbits
+ bit.b[0] = regu
+ return bit
+
+ // TODO(rsc): Remove special case here.
+ case obj.TYPE_ADDR:
+ var bit Bits
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ goto memcase
+ }
+ a.Type = obj.TYPE_MEM
+ bit = mkvar(f, a)
+ setaddrs(bit)
+ a.Type = obj.TYPE_ADDR
+ Ostats.Naddr++
+ return zbits
+
+ memcase:
+ fallthrough
+
+ case obj.TYPE_MEM:
+ if r != nil {
+ r.use1.b[0] |= Thearch.RtoB(int(a.Reg))
+ }
+
+ /* NOTE: 5g did
+ if(r->f.prog->scond & (C_PBIT|C_WBIT))
+ r->set.b[0] |= RtoB(a->reg);
+ */
+ switch a.Name {
+ default:
+ // Note: This case handles NAME_EXTERN and NAME_STATIC.
+ // We treat these as requiring eager writes to memory, due to
+ // the possibility of a fault handler looking at them, so there is
+ // not much point in registerizing the loads.
+ // If we later choose the set of candidate variables from a
+ // larger list, these cases could be deprioritized instead of
+ // removed entirely.
+ return zbits
+
+ case obj.NAME_PARAM,
+ obj.NAME_AUTO:
+ n = int(a.Name)
+ }
+ }
+
+ node, _ := a.Node.(*Node)
+ if node == nil || node.Op != ONAME || node.Orig == nil {
+ return zbits
+ }
+ node = node.Orig
+ if node.Orig != node {
+ Fatalf("%v: bad node", Ctxt.Dconv(a))
+ }
+ if node.Sym == nil || node.Sym.Name[0] == '.' {
+ return zbits
+ }
+ et := EType(a.Etype)
+ o := a.Offset
+ w := a.Width
+ if w < 0 {
+ Fatalf("bad width %d for %v", w, Ctxt.Dconv(a))
+ }
+
+ flag := 0
+ var v *Var
+ for i := 0; i < nvar; i++ {
+ v = &vars[i]
+ if v.node == node && int(v.name) == n {
+ if v.offset == o {
+ if v.etype == et {
+ if int64(v.width) == w {
+ // TODO(rsc): Remove special case for arm here.
+ if flag == 0 || Thearch.Thechar != '5' {
+ return blsh(uint(i))
+ }
+ }
+ }
+ }
+
+ // if they overlap, disable both
+ if overlap_reg(v.offset, v.width, o, int(w)) {
+ // print("disable overlap %s %d %d %d %d, %E != %E\n", s->name, v->offset, v->width, o, w, v->etype, et);
+ v.addr = 1
+
+ flag = 1
+ }
+ }
+ }
+
+ switch et {
+ case 0, TFUNC:
+ return zbits
+ }
+
+ if nvar >= NVAR {
+ if Debug['w'] > 1 && node != nil {
+ Fatalf("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+ }
+ if Debug['v'] > 0 {
+ Warn("variable not optimized: %v", Nconv(node, obj.FmtSharp))
+ }
+
+ // If we're not tracking a word in a variable, mark the rest as
+ // having its address taken, so that we keep the whole thing
+ // live at all calls. otherwise we might optimize away part of
+ // a variable but not all of it.
+ var v *Var
+ for i := 0; i < nvar; i++ {
+ v = &vars[i]
+ if v.node == node {
+ v.addr = 1
+ }
+ }
+
+ return zbits
+ }
+
+ i := nvar
+ nvar++
+ v = &vars[i]
+ v.id = i
+ v.offset = o
+ v.name = int8(n)
+ v.etype = et
+ v.width = int(w)
+ v.addr = int8(flag) // funny punning
+ v.node = node
+
+ // node->opt is the head of a linked list
+ // of Vars within the given Node, so that
+ // we can start at a Var and find all the other
+ // Vars in the same Go variable.
+ v.nextinnode, _ = node.Opt().(*Var)
+
+ node.SetOpt(v)
+
+ bit := blsh(uint(i))
+ if n == obj.NAME_EXTERN || n == obj.NAME_STATIC {
+ for z := 0; z < BITS; z++ {
+ externs.b[z] |= bit.b[z]
+ }
+ }
+ if n == obj.NAME_PARAM {
+ for z := 0; z < BITS; z++ {
+ params.b[z] |= bit.b[z]
+ }
+ }
+
+ if node.Class == PPARAM {
+ for z := 0; z < BITS; z++ {
+ ivar.b[z] |= bit.b[z]
+ }
+ }
+ if node.Class == PPARAMOUT {
+ for z := 0; z < BITS; z++ {
+ ovar.b[z] |= bit.b[z]
+ }
+ }
+
+ // Treat values with their address taken as live at calls,
+ // because the garbage collector's liveness analysis in plive.go does.
+ // These must be consistent or else we will elide stores and the garbage
+ // collector will see uninitialized data.
+ // The typical case where our own analysis is out of sync is when the
+ // node appears to have its address taken but that code doesn't actually
+ // get generated and therefore doesn't show up as an address being
+ // taken when we analyze the instruction stream.
+ // One instance of this case is when a closure uses the same name as
+ // an outer variable for one of its own variables declared with :=.
+ // The parser flags the outer variable as possibly shared, and therefore
+ // sets addrtaken, even though it ends up not being actually shared.
+ // If we were better about _ elision, _ = &x would suffice too.
+ // The broader := in a closure problem is mentioned in a comment in
+ // closure.go:/^typecheckclosure and dcl.go:/^oldname.
+ if node.Addrtaken {
+ v.addr = 1
+ }
+
+ // Disable registerization for globals, because:
+ // (1) we might panic at any time and we want the recovery code
+ // to see the latest values (issue 1304).
+ // (2) we don't know what pointers might point at them and we want
+ // loads via those pointers to see updated values and vice versa (issue 7995).
+ //
+ // Disable registerization for results if using defer, because the deferred func
+ // might recover and return, causing the current values to be used.
+ if node.Class == PEXTERN || (hasdefer && node.Class == PPARAMOUT) {
+ v.addr = 1
+ }
+
+ if Debug['R'] != 0 {
+ fmt.Printf("bit=%2d et=%v w=%d+%d %v %v flag=%d\n", i, Econv(et), o, w, Nconv(node, obj.FmtSharp), Ctxt.Dconv(a), v.addr)
+ }
+ Ostats.Nvar++
+
+ return bit
+}
+
+var change int
+
+func prop(f *Flow, ref Bits, cal Bits) {
+ var f1 *Flow
+ var r1 *Reg
+ var z int
+ var i int
+ var v *Var
+ var v1 *Var
+
+ for f1 = f; f1 != nil; f1 = f1.P1 {
+ r1 = f1.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ ref.b[z] |= r1.refahead.b[z]
+ if ref.b[z] != r1.refahead.b[z] {
+ r1.refahead.b[z] = ref.b[z]
+ change = 1
+ }
+
+ cal.b[z] |= r1.calahead.b[z]
+ if cal.b[z] != r1.calahead.b[z] {
+ r1.calahead.b[z] = cal.b[z]
+ change = 1
+ }
+ }
+
+ switch f1.Prog.As {
+ case obj.ACALL:
+ if Noreturn(f1.Prog) {
+ break
+ }
+
+ // Mark all input variables (ivar) as used, because that's what the
+ // liveness bitmaps say. The liveness bitmaps say that so that a
+ // panic will not show stale values in the parameter dump.
+ // Mark variables with a recent VARDEF (r1->act) as used,
+ // so that the optimizer flushes initializations to memory,
+ // so that if a garbage collection happens during this CALL,
+ // the collector will see initialized memory. Again this is to
+ // match what the liveness bitmaps say.
+ for z = 0; z < BITS; z++ {
+ cal.b[z] |= ref.b[z] | externs.b[z] | ivar.b[z] | r1.act.b[z]
+ ref.b[z] = 0
+ }
+
+ // cal.b is the current approximation of what's live across the call.
+ // Every bit in cal.b is a single stack word. For each such word,
+ // find all the other tracked stack words in the same Go variable
+ // (struct/slice/string/interface) and mark them live too.
+ // This is necessary because the liveness analysis for the garbage
+ // collector works at variable granularity, not at word granularity.
+ // It is fundamental for slice/string/interface: the garbage collector
+ // needs the whole value, not just some of the words, in order to
+ // interpret the other bits correctly. Specifically, slice needs a consistent
+ // ptr and cap, string needs a consistent ptr and len, and interface
+ // needs a consistent type word and data word.
+ for z = 0; z < BITS; z++ {
+ if cal.b[z] == 0 {
+ continue
+ }
+ for i = 0; i < 64; i++ {
+ if z*64+i >= nvar || (cal.b[z]>>uint(i))&1 == 0 {
+ continue
+ }
+ v = &vars[z*64+i]
+ if v.node.Opt() == nil { // v represents fixed register, not Go variable
+ continue
+ }
+
+ // v->node->opt is the head of a linked list of Vars
+ // corresponding to tracked words from the Go variable v->node.
+ // Walk the list and set all the bits.
+ // For a large struct this could end up being quadratic:
+ // after the first setting, the outer loop (for z, i) would see a 1 bit
+ // for all of the remaining words in the struct, and for each such
+ // word would go through and turn on all the bits again.
+ // To avoid the quadratic behavior, we only turn on the bits if
+ // v is the head of the list or if the head's bit is not yet turned on.
+ // This will set the bits at most twice, keeping the overall loop linear.
+ v1, _ = v.node.Opt().(*Var)
+
+ if v == v1 || !btest(&cal, uint(v1.id)) {
+ for ; v1 != nil; v1 = v1.nextinnode {
+ biset(&cal, uint(v1.id))
+ }
+ }
+ }
+ }
+
+ case obj.ATEXT:
+ for z = 0; z < BITS; z++ {
+ cal.b[z] = 0
+ ref.b[z] = 0
+ }
+
+ case obj.ARET:
+ for z = 0; z < BITS; z++ {
+ cal.b[z] = externs.b[z] | ovar.b[z]
+ ref.b[z] = 0
+ }
+ }
+
+ for z = 0; z < BITS; z++ {
+ ref.b[z] = ref.b[z]&^r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z]
+ cal.b[z] &^= (r1.set.b[z] | r1.use1.b[z] | r1.use2.b[z])
+ r1.refbehind.b[z] = ref.b[z]
+ r1.calbehind.b[z] = cal.b[z]
+ }
+
+ if f1.Active != 0 {
+ break
+ }
+ f1.Active = 1
+ }
+
+ var r *Reg
+ var f2 *Flow
+ for ; f != f1; f = f.P1 {
+ r = f.Data.(*Reg)
+ for f2 = f.P2; f2 != nil; f2 = f2.P2link {
+ prop(f2, r.refbehind, r.calbehind)
+ }
+ }
+}
+
+func synch(f *Flow, dif Bits) {
+ var r1 *Reg
+ var z int
+
+ for f1 := f; f1 != nil; f1 = f1.S1 {
+ r1 = f1.Data.(*Reg)
+ for z = 0; z < BITS; z++ {
+ dif.b[z] = dif.b[z]&^(^r1.refbehind.b[z]&r1.refahead.b[z]) | r1.set.b[z] | r1.regdiff.b[z]
+ if dif.b[z] != r1.regdiff.b[z] {
+ r1.regdiff.b[z] = dif.b[z]
+ change = 1
+ }
+ }
+
+ if f1.Active != 0 {
+ break
+ }
+ f1.Active = 1
+ for z = 0; z < BITS; z++ {
+ dif.b[z] &^= (^r1.calbehind.b[z] & r1.calahead.b[z])
+ }
+ if f1.S2 != nil {
+ synch(f1.S2, dif)
+ }
+ }
+}
+
+func allreg(b uint64, r *Rgn) uint64 {
+ v := &vars[r.varno]
+ r.regno = 0
+ switch v.etype {
+ default:
+ Fatalf("unknown etype %d/%v", Bitno(b), Econv(v.etype))
+
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TPTR32,
+ TPTR64:
+ i := Thearch.BtoR(^b)
+ if i != 0 && r.cost > 0 {
+ r.regno = int16(i)
+ return Thearch.RtoB(i)
+ }
+
+ case TFLOAT32, TFLOAT64:
+ i := Thearch.BtoF(^b)
+ if i != 0 && r.cost > 0 {
+ r.regno = int16(i)
+ return Thearch.FtoB(i)
+ }
+ }
+
+ return 0
+}
+
+func LOAD(r *Reg, z int) uint64 {
+ return ^r.refbehind.b[z] & r.refahead.b[z]
+}
+
+func STORE(r *Reg, z int) uint64 {
+ return ^r.calbehind.b[z] & r.calahead.b[z]
+}
+
+// Cost parameters
+const (
+ CLOAD = 5 // cost of load
+ CREF = 5 // cost of reference if not registerized
+ LOOP = 3 // loop execution count (applied in popt.go)
+)
+
+func paint1(f *Flow, bn int) {
+ z := bn / 64
+ bb := uint64(1 << uint(bn%64))
+ r := f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ return
+ }
+ var f1 *Flow
+ var r1 *Reg
+ for {
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if r1.refahead.b[z]&bb == 0 {
+ break
+ }
+ if r1.act.b[z]&bb != 0 {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+ change -= CLOAD * int(f.Loop)
+ }
+
+ for {
+ r.act.b[z] |= bb
+
+ if f.Prog.As != obj.ANOP { // don't give credit for NOPs
+ if r.use1.b[z]&bb != 0 {
+ change += CREF * int(f.Loop)
+ }
+ if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+ change += CREF * int(f.Loop)
+ }
+ }
+
+ if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+ change -= CLOAD * int(f.Loop)
+ }
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ paint1(f1, bn)
+ }
+ }
+ }
+
+ if r.refahead.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ paint1(f1, bn)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ break
+ }
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ }
+}
+
+func paint2(f *Flow, bn int, depth int) uint64 {
+ z := bn / 64
+ bb := uint64(1 << uint(bn%64))
+ vreg := regbits
+ r := f.Data.(*Reg)
+ if r.act.b[z]&bb == 0 {
+ return vreg
+ }
+ var r1 *Reg
+ var f1 *Flow
+ for {
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if r1.refahead.b[z]&bb == 0 {
+ break
+ }
+ if r1.act.b[z]&bb == 0 {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ for {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" paint2 %d %v\n", depth, f.Prog)
+ }
+
+ r.act.b[z] &^= bb
+
+ vreg |= r.regu
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ vreg |= paint2(f1, bn, depth+1)
+ }
+ }
+ }
+
+ if r.refahead.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ vreg |= paint2(f1, bn, depth+1)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb == 0 {
+ break
+ }
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ }
+
+ return vreg
+}
+
+func paint3(f *Flow, bn int, rb uint64, rn int) {
+ z := bn / 64
+ bb := uint64(1 << uint(bn%64))
+ r := f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ return
+ }
+ var r1 *Reg
+ var f1 *Flow
+ for {
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.P1
+ if f1 == nil {
+ break
+ }
+ r1 = f1.Data.(*Reg)
+ if r1.refahead.b[z]&bb == 0 {
+ break
+ }
+ if r1.act.b[z]&bb != 0 {
+ break
+ }
+ f = f1
+ r = r1
+ }
+
+ if LOAD(r, z)&^(r.set.b[z]&^(r.use1.b[z]|r.use2.b[z]))&bb != 0 {
+ addmove(f, bn, rn, 0)
+ }
+ var p *obj.Prog
+ for {
+ r.act.b[z] |= bb
+ p = f.Prog
+
+ if r.use1.b[z]&bb != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ addreg(&p.From, rn)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" ===change== %v\n", p)
+ }
+ }
+
+ if (r.use2.b[z]|r.set.b[z])&bb != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("%v", p)
+ }
+ addreg(&p.To, rn)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf(" ===change== %v\n", p)
+ }
+ }
+
+ if STORE(r, z)&r.regdiff.b[z]&bb != 0 {
+ addmove(f, bn, rn, 1)
+ }
+ r.regu |= rb
+
+ if r.refbehind.b[z]&bb != 0 {
+ for f1 = f.P2; f1 != nil; f1 = f1.P2link {
+ if (f1.Data.(*Reg)).refahead.b[z]&bb != 0 {
+ paint3(f1, bn, rb, rn)
+ }
+ }
+ }
+
+ if r.refahead.b[z]&bb == 0 {
+ break
+ }
+ f1 = f.S2
+ if f1 != nil {
+ if (f1.Data.(*Reg)).refbehind.b[z]&bb != 0 {
+ paint3(f1, bn, rb, rn)
+ }
+ }
+ f = f.S1
+ if f == nil {
+ break
+ }
+ r = f.Data.(*Reg)
+ if r.act.b[z]&bb != 0 {
+ break
+ }
+ if r.refbehind.b[z]&bb == 0 {
+ break
+ }
+ }
+}
+
+func addreg(a *obj.Addr, rn int) {
+ a.Sym = nil
+ a.Node = nil
+ a.Offset = 0
+ a.Type = obj.TYPE_REG
+ a.Reg = int16(rn)
+ a.Name = 0
+
+ Ostats.Ncvtreg++
+}
+
+func dumpone(f *Flow, isreg int) {
+ fmt.Printf("%d:%v", f.Loop, f.Prog)
+ if isreg != 0 {
+ r := f.Data.(*Reg)
+ var bit Bits
+ for z := 0; z < BITS; z++ {
+ bit.b[z] = r.set.b[z] | r.use1.b[z] | r.use2.b[z] | r.refbehind.b[z] | r.refahead.b[z] | r.calbehind.b[z] | r.calahead.b[z] | r.regdiff.b[z] | r.act.b[z] | 0
+ }
+ if bany(&bit) {
+ fmt.Printf("\t")
+ if bany(&r.set) {
+ fmt.Printf(" s:%v", &r.set)
+ }
+ if bany(&r.use1) {
+ fmt.Printf(" u1:%v", &r.use1)
+ }
+ if bany(&r.use2) {
+ fmt.Printf(" u2:%v", &r.use2)
+ }
+ if bany(&r.refbehind) {
+ fmt.Printf(" rb:%v ", &r.refbehind)
+ }
+ if bany(&r.refahead) {
+ fmt.Printf(" ra:%v ", &r.refahead)
+ }
+ if bany(&r.calbehind) {
+ fmt.Printf(" cb:%v ", &r.calbehind)
+ }
+ if bany(&r.calahead) {
+ fmt.Printf(" ca:%v ", &r.calahead)
+ }
+ if bany(&r.regdiff) {
+ fmt.Printf(" d:%v ", &r.regdiff)
+ }
+ if bany(&r.act) {
+ fmt.Printf(" a:%v ", &r.act)
+ }
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+func Dumpit(str string, r0 *Flow, isreg int) {
+ var r1 *Flow
+
+ fmt.Printf("\n%s\n", str)
+ for r := r0; r != nil; r = r.Link {
+ dumpone(r, isreg)
+ r1 = r.P2
+ if r1 != nil {
+ fmt.Printf("\tpred:")
+ for ; r1 != nil; r1 = r1.P2link {
+ fmt.Printf(" %.4d", uint(int(r1.Prog.Pc)))
+ }
+ if r.P1 != nil {
+ fmt.Printf(" (and %.4d)", uint(int(r.P1.Prog.Pc)))
+ } else {
+ fmt.Printf(" (only)")
+ }
+ fmt.Printf("\n")
+ }
+
+ // Print successors if it's not just the next one
+ if r.S1 != r.Link || r.S2 != nil {
+ fmt.Printf("\tsucc:")
+ if r.S1 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S1.Prog.Pc)))
+ }
+ if r.S2 != nil {
+ fmt.Printf(" %.4d", uint(int(r.S2.Prog.Pc)))
+ }
+ fmt.Printf("\n")
+ }
+ }
+}
+
+func regopt(firstp *obj.Prog) {
+ mergetemp(firstp)
+
+ // control flow is more complicated in generated go code
+ // than in generated c code. define pseudo-variables for
+ // registers, so we have complete register usage information.
+ var nreg int
+ regnames := Thearch.Regnames(&nreg)
+
+ nvar = nreg
+ for i := 0; i < nreg; i++ {
+ vars[i] = Var{}
+ }
+ for i := 0; i < nreg; i++ {
+ if regnodes[i] == nil {
+ regnodes[i] = newname(Lookup(regnames[i]))
+ }
+ vars[i].node = regnodes[i]
+ }
+
+ regbits = Thearch.Excludedregs()
+ externs = zbits
+ params = zbits
+ consts = zbits
+ addrs = zbits
+ ivar = zbits
+ ovar = zbits
+
+ // pass 1
+ // build aux data structure
+ // allocate pcs
+ // find use and set of variables
+ g := Flowstart(firstp, func() interface{} { return new(Reg) })
+ if g == nil {
+ for i := 0; i < nvar; i++ {
+ vars[i].node.SetOpt(nil)
+ }
+ return
+ }
+
+ firstf := g.Start
+
+ for f := firstf; f != nil; f = f.Link {
+ p := f.Prog
+ // AVARLIVE must be considered a use, do not skip it.
+ // Otherwise the variable will be optimized away,
+ // and the whole point of AVARLIVE is to keep it on the stack.
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+
+ // Avoid making variables for direct-called functions.
+ if p.As == obj.ACALL && p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_EXTERN {
+ continue
+ }
+
+ // from vs to doesn't matter for registers.
+ r := f.Data.(*Reg)
+ r.use1.b[0] |= p.Info.Reguse | p.Info.Regindex
+ r.set.b[0] |= p.Info.Regset
+
+ bit := mkvar(f, &p.From)
+ if bany(&bit) {
+ if p.Info.Flags&LeftAddr != 0 {
+ setaddrs(bit)
+ }
+ if p.Info.Flags&LeftRead != 0 {
+ for z := 0; z < BITS; z++ {
+ r.use1.b[z] |= bit.b[z]
+ }
+ }
+ if p.Info.Flags&LeftWrite != 0 {
+ for z := 0; z < BITS; z++ {
+ r.set.b[z] |= bit.b[z]
+ }
+ }
+ }
+
+ // Compute used register for reg
+ if p.Info.Flags&RegRead != 0 {
+ r.use1.b[0] |= Thearch.RtoB(int(p.Reg))
+ }
+
+ // Currently we never generate three register forms.
+ // If we do, this will need to change.
+ if p.From3Type() != obj.TYPE_NONE {
+ Fatalf("regopt not implemented for from3")
+ }
+
+ bit = mkvar(f, &p.To)
+ if bany(&bit) {
+ if p.Info.Flags&RightAddr != 0 {
+ setaddrs(bit)
+ }
+ if p.Info.Flags&RightRead != 0 {
+ for z := 0; z < BITS; z++ {
+ r.use2.b[z] |= bit.b[z]
+ }
+ }
+ if p.Info.Flags&RightWrite != 0 {
+ for z := 0; z < BITS; z++ {
+ r.set.b[z] |= bit.b[z]
+ }
+ }
+ }
+ }
+
+ for i := 0; i < nvar; i++ {
+ v := &vars[i]
+ if v.addr != 0 {
+ bit := blsh(uint(i))
+ for z := 0; z < BITS; z++ {
+ addrs.b[z] |= bit.b[z]
+ }
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("bit=%2d addr=%d et=%v w=%-2d s=%v + %d\n", i, v.addr, Econv(v.etype), v.width, v.node, v.offset)
+ }
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass1", firstf, 1)
+ }
+
+ // pass 2
+ // find looping structure
+ flowrpo(g)
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass2", firstf, 1)
+ }
+
+ // pass 2.5
+ // iterate propagating fat vardef covering forward
+ // r->act records vars with a VARDEF since the last CALL.
+ // (r->act will be reused in pass 5 for something else,
+ // but we'll be done with it by then.)
+ active := 0
+
+ for f := firstf; f != nil; f = f.Link {
+ f.Active = 0
+ r := f.Data.(*Reg)
+ r.act = zbits
+ }
+
+ for f := firstf; f != nil; f = f.Link {
+ p := f.Prog
+ if p.As == obj.AVARDEF && Isfat(((p.To.Node).(*Node)).Type) && ((p.To.Node).(*Node)).Opt() != nil {
+ active++
+ walkvardef(p.To.Node.(*Node), f, active)
+ }
+ }
+
+ // pass 3
+ // iterate propagating usage
+ // back until flow graph is complete
+ var f1 *Flow
+ var i int
+ var f *Flow
+loop1:
+ change = 0
+
+ for f = firstf; f != nil; f = f.Link {
+ f.Active = 0
+ }
+ for f = firstf; f != nil; f = f.Link {
+ if f.Prog.As == obj.ARET {
+ prop(f, zbits, zbits)
+ }
+ }
+
+ // pick up unreachable code
+loop11:
+ i = 0
+
+ for f = firstf; f != nil; f = f1 {
+ f1 = f.Link
+ if f1 != nil && f1.Active != 0 && f.Active == 0 {
+ prop(f, zbits, zbits)
+ i = 1
+ }
+ }
+
+ if i != 0 {
+ goto loop11
+ }
+ if change != 0 {
+ goto loop1
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass3", firstf, 1)
+ }
+
+ // pass 4
+ // iterate propagating register/variable synchrony
+ // forward until graph is complete
+loop2:
+ change = 0
+
+ for f = firstf; f != nil; f = f.Link {
+ f.Active = 0
+ }
+ synch(firstf, zbits)
+ if change != 0 {
+ goto loop2
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass4", firstf, 1)
+ }
+
+ // pass 4.5
+ // move register pseudo-variables into regu.
+ mask := uint64((1 << uint(nreg)) - 1)
+ for f := firstf; f != nil; f = f.Link {
+ r := f.Data.(*Reg)
+ r.regu = (r.refbehind.b[0] | r.set.b[0]) & mask
+ r.set.b[0] &^= mask
+ r.use1.b[0] &^= mask
+ r.use2.b[0] &^= mask
+ r.refbehind.b[0] &^= mask
+ r.refahead.b[0] &^= mask
+ r.calbehind.b[0] &^= mask
+ r.calahead.b[0] &^= mask
+ r.regdiff.b[0] &^= mask
+ r.act.b[0] &^= mask
+ }
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass4.5", firstf, 1)
+ }
+
+ // pass 5
+ // isolate regions
+ // calculate costs (paint1)
+ var bit Bits
+ if f := firstf; f != nil {
+ r := f.Data.(*Reg)
+ for z := 0; z < BITS; z++ {
+ bit.b[z] = (r.refahead.b[z] | r.calahead.b[z]) &^ (externs.b[z] | params.b[z] | addrs.b[z] | consts.b[z])
+ }
+ if bany(&bit) && !f.Refset {
+ // should never happen - all variables are preset
+ if Debug['w'] != 0 {
+ fmt.Printf("%v: used and not set: %v\n", f.Prog.Line(), &bit)
+ }
+ f.Refset = true
+ }
+ }
+
+ for f := firstf; f != nil; f = f.Link {
+ (f.Data.(*Reg)).act = zbits
+ }
+ nregion = 0
+ region = region[:0]
+ var rgp *Rgn
+ for f := firstf; f != nil; f = f.Link {
+ r := f.Data.(*Reg)
+ for z := 0; z < BITS; z++ {
+ bit.b[z] = r.set.b[z] &^ (r.refahead.b[z] | r.calahead.b[z] | addrs.b[z])
+ }
+ if bany(&bit) && !f.Refset {
+ if Debug['w'] != 0 {
+ fmt.Printf("%v: set and not used: %v\n", f.Prog.Line(), &bit)
+ }
+ f.Refset = true
+ Thearch.Excise(f)
+ }
+
+ for z := 0; z < BITS; z++ {
+ bit.b[z] = LOAD(r, z) &^ (r.act.b[z] | addrs.b[z])
+ }
+ for bany(&bit) {
+ i = bnum(&bit)
+ change = 0
+ paint1(f, i)
+ biclr(&bit, uint(i))
+ if change <= 0 {
+ continue
+ }
+ if nregion >= MaxRgn {
+ nregion++
+ continue
+ }
+
+ region = append(region, Rgn{
+ enter: f,
+ cost: int16(change),
+ varno: int16(i),
+ })
+ nregion++
+ }
+ }
+
+ if false && Debug['v'] != 0 && strings.Contains(Curfn.Func.Nname.Sym.Name, "Parse") {
+ Warn("regions: %d\n", nregion)
+ }
+ if nregion >= MaxRgn {
+ if Debug['v'] != 0 {
+ Warn("too many regions: %d\n", nregion)
+ }
+ nregion = MaxRgn
+ }
+
+ sort.Sort(rcmp(region[:nregion]))
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ Dumpit("pass5", firstf, 1)
+ }
+
+ // pass 6
+ // determine used registers (paint2)
+ // replace code (paint3)
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("\nregisterizing\n")
+ }
+ var usedreg uint64
+ var vreg uint64
+ for i := 0; i < nregion; i++ {
+ rgp = ®ion[i]
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ fmt.Printf("region %d: cost %d varno %d enter %d\n", i, rgp.cost, rgp.varno, rgp.enter.Prog.Pc)
+ }
+ bit = blsh(uint(rgp.varno))
+ usedreg = paint2(rgp.enter, int(rgp.varno), 0)
+ vreg = allreg(usedreg, rgp)
+ if rgp.regno != 0 {
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ v := &vars[rgp.varno]
+ fmt.Printf("registerize %v+%d (bit=%2d et=%v) in %v usedreg=%#x vreg=%#x\n", v.node, v.offset, rgp.varno, Econv(v.etype), obj.Rconv(int(rgp.regno)), usedreg, vreg)
+ }
+
+ paint3(rgp.enter, int(rgp.varno), vreg, int(rgp.regno))
+ }
+ }
+
+ // free aux structures. peep allocates new ones.
+ for i := 0; i < nvar; i++ {
+ vars[i].node.SetOpt(nil)
+ }
+ Flowend(g)
+ firstf = nil
+
+ if Debug['R'] != 0 && Debug['v'] != 0 {
+ // Rebuild flow graph, since we inserted instructions
+ g := Flowstart(firstp, nil)
+ firstf = g.Start
+ Dumpit("pass6", firstf, 0)
+ Flowend(g)
+ firstf = nil
+ }
+
+ // pass 7
+ // peep-hole on basic block
+ if Debug['R'] == 0 || Debug['P'] != 0 {
+ Thearch.Peep(firstp)
+ }
+
+ // eliminate nops
+ for p := firstp; p != nil; p = p.Link {
+ for p.Link != nil && p.Link.As == obj.ANOP {
+ p.Link = p.Link.Link
+ }
+ if p.To.Type == obj.TYPE_BRANCH {
+ for p.To.Val.(*obj.Prog) != nil && p.To.Val.(*obj.Prog).As == obj.ANOP {
+ p.To.Val = p.To.Val.(*obj.Prog).Link
+ }
+ }
+ }
+
+ if Debug['R'] != 0 {
+ if Ostats.Ncvtreg != 0 || Ostats.Nspill != 0 || Ostats.Nreload != 0 || Ostats.Ndelmov != 0 || Ostats.Nvar != 0 || Ostats.Naddr != 0 || false {
+ fmt.Printf("\nstats\n")
+ }
+
+ if Ostats.Ncvtreg != 0 {
+ fmt.Printf("\t%4d cvtreg\n", Ostats.Ncvtreg)
+ }
+ if Ostats.Nspill != 0 {
+ fmt.Printf("\t%4d spill\n", Ostats.Nspill)
+ }
+ if Ostats.Nreload != 0 {
+ fmt.Printf("\t%4d reload\n", Ostats.Nreload)
+ }
+ if Ostats.Ndelmov != 0 {
+ fmt.Printf("\t%4d delmov\n", Ostats.Ndelmov)
+ }
+ if Ostats.Nvar != 0 {
+ fmt.Printf("\t%4d var\n", Ostats.Nvar)
+ }
+ if Ostats.Naddr != 0 {
+ fmt.Printf("\t%4d addr\n", Ostats.Naddr)
+ }
+
+ Ostats = OptStats{}
+ }
+}
+
+// bany reports whether any bits in a are set.
+func bany(a *Bits) bool {
+ for _, x := range &a.b { // & to avoid making a copy of a.b
+ if x != 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// bnum reports the lowest index of a 1 bit in a.
+func bnum(a *Bits) int {
+ for i, x := range &a.b { // & to avoid making a copy of a.b
+ if x != 0 {
+ return 64*i + Bitno(x)
+ }
+ }
+
+ Fatalf("bad in bnum")
+ return 0
+}
+
+// blsh returns a Bits with 1 at index n, 0 elsewhere (1<<n).
+func blsh(n uint) Bits {
+	c := zbits
+	c.b[n/64] = 1 << (n % 64)
+	return c
+}
+
+// btest reports whether bit n is 1.
+func btest(a *Bits, n uint) bool {
+	return a.b[n/64]&(1<<(n%64)) != 0
+}
+
+// biset sets bit n to 1.
+func biset(a *Bits, n uint) {
+	a.b[n/64] |= 1 << (n % 64)
+}
+
+// biclr sets bit n to 0.
+func biclr(a *Bits, n uint) {
+	a.b[n/64] &^= (1 << (n % 64))
+}
+
+// Bitno reports the lowest index of a 1 bit in b.
+// It calls Fatalf if there is no 1 bit.
+func Bitno(b uint64) int {
+	if b == 0 {
+		Fatalf("bad in bitno")
+	}
+	n := 0
+	if b&(1<<32-1) == 0 {
+		n += 32
+		b >>= 32
+	}
+ }
+ if b&(1<<16-1) == 0 {
+ n += 16
+ b >>= 16
+ }
+ if b&(1<<8-1) == 0 {
+ n += 8
+ b >>= 8
+ }
+ if b&(1<<4-1) == 0 {
+ n += 4
+ b >>= 4
+ }
+ if b&(1<<2-1) == 0 {
+ n += 2
+ b >>= 2
+ }
+ if b&1 == 0 {
+ n++
+ }
+ return n
+}
+
+// String returns a space-separated list of the variables represented by bits.
+func (bits Bits) String() string {
+ // Note: This method takes a value receiver, both for convenience
+ // and to make it safe to modify the bits as we process them.
+ // Even so, most prints above use &bits, because then the value
+ // being stored in the interface{} is a pointer and does not require
+ // an allocation and copy to create the interface{}.
+ var buf bytes.Buffer
+ sep := ""
+ for bany(&bits) {
+ i := bnum(&bits)
+ buf.WriteString(sep)
+ sep = " "
+ v := &vars[i]
+ if v.node == nil || v.node.Sym == nil {
+ fmt.Fprintf(&buf, "$%d", i)
+ } else {
+ fmt.Fprintf(&buf, "%s(%d)", v.node.Sym.Name, i)
+ if v.offset != 0 {
+ fmt.Fprintf(&buf, "%+d", int64(v.offset))
+ }
+ }
+ biclr(&bits, uint(i))
+ }
+ return buf.String()
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/walk.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/walk.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/walk.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/compile/internal/gc/walk.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,3942 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+var mpzero Mpint
+
+// The constant is known to runtime.
+const (
+ tmpstringbufsize = 32
+)
+
+func walk(fn *Node) {
+ Curfn = fn
+
+ if Debug['W'] != 0 {
+ s := fmt.Sprintf("\nbefore %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Nbody)
+ }
+
+ lno := int(lineno)
+
+ // Final typecheck for any unused variables.
+ // It's hard to be on the heap when not-used, but best to be consistent about &~PHEAP here and below.
+ for l := fn.Func.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO {
+ typecheck(&l.N, Erv|Easgn)
+ }
+ }
+
+ // Propagate the used flag for typeswitch variables up to the NONAME in it's definition.
+ for l := fn.Func.Dcl; l != nil; l = l.Next {
+ if l.N.Op == ONAME && l.N.Class&^PHEAP == PAUTO && l.N.Name.Defn != nil && l.N.Name.Defn.Op == OTYPESW && l.N.Used {
+ l.N.Name.Defn.Left.Used = true
+ }
+ }
+
+ for l := fn.Func.Dcl; l != nil; l = l.Next {
+ if l.N.Op != ONAME || l.N.Class&^PHEAP != PAUTO || l.N.Sym.Name[0] == '&' || l.N.Used {
+ continue
+ }
+ if defn := l.N.Name.Defn; defn != nil && defn.Op == OTYPESW {
+ if defn.Left.Used {
+ continue
+ }
+ lineno = defn.Left.Lineno
+ Yyerror("%v declared and not used", l.N.Sym)
+ defn.Left.Used = true // suppress repeats
+ } else {
+ lineno = l.N.Lineno
+ Yyerror("%v declared and not used", l.N.Sym)
+ }
+ }
+
+ lineno = int32(lno)
+ if nerrors != 0 {
+ return
+ }
+ walkstmtlist(Curfn.Nbody)
+ if Debug['W'] != 0 {
+ s := fmt.Sprintf("after walk %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Nbody)
+ }
+
+ heapmoves()
+ if Debug['W'] != 0 && Curfn.Func.Enter != nil {
+ s := fmt.Sprintf("enter %v", Curfn.Func.Nname.Sym)
+ dumplist(s, Curfn.Func.Enter)
+ }
+}
+
+func walkstmtlist(l *NodeList) {
+ for ; l != nil; l = l.Next {
+ walkstmt(&l.N)
+ }
+}
+
+func samelist(a *NodeList, b *NodeList) bool {
+ for ; a != nil && b != nil; a, b = a.Next, b.Next {
+ if a.N != b.N {
+ return false
+ }
+ }
+ return a == b
+}
+
+func paramoutheap(fn *Node) bool {
+ for l := fn.Func.Dcl; l != nil; l = l.Next {
+ switch l.N.Class {
+ case PPARAMOUT,
+ PPARAMOUT | PHEAP:
+ return l.N.Addrtaken
+
+ // stop early - parameters are over
+ case PAUTO,
+ PAUTO | PHEAP:
+ return false
+ }
+ }
+
+ return false
+}
+
+// adds "adjust" to all the argument locations for the call n.
+// n must be a defer or go node that has already been walked.
+func adjustargs(n *Node, adjust int) {
+ var arg *Node
+ var lhs *Node
+
+ callfunc := n.Left
+ for args := callfunc.List; args != nil; args = args.Next {
+ arg = args.N
+ if arg.Op != OAS {
+ Yyerror("call arg not assignment")
+ }
+ lhs = arg.Left
+ if lhs.Op == ONAME {
+ // This is a temporary introduced by reorder1.
+ // The real store to the stack appears later in the arg list.
+ continue
+ }
+
+ if lhs.Op != OINDREG {
+ Yyerror("call argument store does not use OINDREG")
+ }
+
+ // can't really check this in machine-indep code.
+ //if(lhs->val.u.reg != D_SP)
+ // yyerror("call arg assign not indreg(SP)");
+ lhs.Xoffset += int64(adjust)
+ }
+}
+
+func walkstmt(np **Node) {
+ n := *np
+ if n == nil {
+ return
+ }
+ if n.Dodata == 2 { // don't walk, generated by anylit.
+ return
+ }
+
+ setlineno(n)
+
+ walkstmtlist(n.Ninit)
+
+ switch n.Op {
+ default:
+ if n.Op == ONAME {
+ Yyerror("%v is not a top level statement", n.Sym)
+ } else {
+ Yyerror("%v is not a top level statement", Oconv(int(n.Op), 0))
+ }
+ Dump("nottop", n)
+
+ case OAS,
+ OASOP,
+ OAS2,
+ OAS2DOTTYPE,
+ OAS2RECV,
+ OAS2FUNC,
+ OAS2MAPR,
+ OCLOSE,
+ OCOPY,
+ OCALLMETH,
+ OCALLINTER,
+ OCALL,
+ OCALLFUNC,
+ ODELETE,
+ OSEND,
+ OPRINT,
+ OPRINTN,
+ OPANIC,
+ OEMPTY,
+ ORECOVER,
+ OGETG:
+ if n.Typecheck == 0 {
+ Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ }
+ init := n.Ninit
+ n.Ninit = nil
+ walkexpr(&n, &init)
+ addinit(&n, init)
+ if (*np).Op == OCOPY && n.Op == OCONVNOP {
+ n.Op = OEMPTY // don't leave plain values as statements.
+ }
+
+ // special case for a receive where we throw away
+ // the value received.
+ case ORECV:
+ if n.Typecheck == 0 {
+ Fatalf("missing typecheck: %v", Nconv(n, obj.FmtSign))
+ }
+ init := n.Ninit
+ n.Ninit = nil
+
+ walkexpr(&n.Left, &init)
+ n = mkcall1(chanfn("chanrecv1", 2, n.Left.Type), nil, &init, typename(n.Left.Type), n.Left, nodnil())
+ walkexpr(&n, &init)
+
+ addinit(&n, init)
+
+ case OBREAK,
+ ODCL,
+ OCONTINUE,
+ OFALL,
+ OGOTO,
+ OLABEL,
+ ODCLCONST,
+ ODCLTYPE,
+ OCHECKNIL,
+ OVARKILL,
+ OVARLIVE:
+ break
+
+ case OBLOCK:
+ walkstmtlist(n.List)
+
+ case OXCASE:
+ Yyerror("case statement out of place")
+ n.Op = OCASE
+ fallthrough
+
+ case OCASE:
+ walkstmt(&n.Right)
+
+ case ODEFER:
+ hasdefer = true
+ switch n.Left.Op {
+ case OPRINT, OPRINTN:
+ walkprintfunc(&n.Left, &n.Ninit)
+
+ case OCOPY:
+ n.Left = copyany(n.Left, &n.Ninit, true)
+
+ default:
+ walkexpr(&n.Left, &n.Ninit)
+ }
+
+ // make room for size & fn arguments.
+ adjustargs(n, 2*Widthptr)
+
+ case OFOR:
+ if n.Left != nil {
+ walkstmtlist(n.Left.Ninit)
+ init := n.Left.Ninit
+ n.Left.Ninit = nil
+ walkexpr(&n.Left, &init)
+ addinit(&n.Left, init)
+ }
+
+ walkstmt(&n.Right)
+ walkstmtlist(n.Nbody)
+
+ case OIF:
+ walkexpr(&n.Left, &n.Ninit)
+ walkstmtlist(n.Nbody)
+ walkstmtlist(n.Rlist)
+
+ case OPROC:
+ switch n.Left.Op {
+ case OPRINT, OPRINTN:
+ walkprintfunc(&n.Left, &n.Ninit)
+
+ case OCOPY:
+ n.Left = copyany(n.Left, &n.Ninit, true)
+
+ default:
+ walkexpr(&n.Left, &n.Ninit)
+ }
+
+ // make room for size & fn arguments.
+ adjustargs(n, 2*Widthptr)
+
+ case ORETURN:
+ walkexprlist(n.List, &n.Ninit)
+ if n.List == nil {
+ break
+ }
+ if (Curfn.Type.Outnamed && count(n.List) > 1) || paramoutheap(Curfn) {
+ // assign to the function out parameters,
+ // so that reorder3 can fix up conflicts
+ var rl *NodeList
+
+ var cl Class
+ for ll := Curfn.Func.Dcl; ll != nil; ll = ll.Next {
+ cl = ll.N.Class &^ PHEAP
+ if cl == PAUTO {
+ break
+ }
+ if cl == PPARAMOUT {
+ rl = list(rl, ll.N)
+ }
+ }
+
+ if got, want := count(n.List), count(rl); got != want {
+ // order should have rewritten multi-value function calls
+ // with explicit OAS2FUNC nodes.
+ Fatalf("expected %v return arguments, have %v", want, got)
+ }
+
+ if samelist(rl, n.List) {
+ // special return in disguise
+ n.List = nil
+
+ break
+ }
+
+ // move function calls out, to make reorder3's job easier.
+ walkexprlistsafe(n.List, &n.Ninit)
+
+ ll := ascompatee(n.Op, rl, n.List, &n.Ninit)
+ n.List = reorder3(ll)
+ for lr := n.List; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, &n.Ninit)
+ }
+ break
+ }
+
+ ll := ascompatte(n.Op, nil, false, Getoutarg(Curfn.Type), n.List, 1, &n.Ninit)
+ n.List = ll
+
+ case ORETJMP:
+ break
+
+ case OSELECT:
+ walkselect(n)
+
+ case OSWITCH:
+ walkswitch(n)
+
+ case ORANGE:
+ walkrange(n)
+
+ case OXFALL:
+ Yyerror("fallthrough statement out of place")
+ n.Op = OFALL
+ }
+
+ if n.Op == ONAME {
+ Fatalf("walkstmt ended up with name: %v", Nconv(n, obj.FmtSign))
+ }
+
+ *np = n
+}
+
+func isSmallMakeSlice(n *Node) bool {
+ if n.Op != OMAKESLICE {
+ return false
+ }
+ l := n.Left
+ r := n.Right
+ if r == nil {
+ r = l
+ }
+ t := n.Type
+
+ return Smallintconst(l) && Smallintconst(r) && (t.Type.Width == 0 || Mpgetfix(r.Val().U.(*Mpint)) < (1<<16)/t.Type.Width)
+}
+
+// walk the whole tree of the body of an
+// expression or simple statement.
+// the types expressions are calculated.
+// compile-time constants are evaluated.
+// complex side effects like statements are appended to init
+func walkexprlist(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexprlistsafe(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ l.N = safeexpr(l.N, init)
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexprlistcheap(l *NodeList, init **NodeList) {
+ for ; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ walkexpr(&l.N, init)
+ }
+}
+
+func walkexpr(np **Node, init **NodeList) {
+ n := *np
+
+ if n == nil {
+ return
+ }
+
+ if init == &n.Ninit {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ Fatalf("walkexpr init == &n->ninit")
+ }
+
+ if n.Ninit != nil {
+ walkstmtlist(n.Ninit)
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ }
+
+ // annoying case - not typechecked
+ if n.Op == OKEY {
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ return
+ }
+
+ lno := setlineno(n)
+
+ if Debug['w'] > 1 {
+ Dump("walk-before", n)
+ }
+
+ if n.Typecheck != 1 {
+ Fatalf("missed typecheck: %v\n", Nconv(n, obj.FmtSign))
+ }
+
+opswitch:
+ switch n.Op {
+ default:
+ Dump("walk", n)
+ Fatalf("walkexpr: switch 1 unknown op %v", Nconv(n, obj.FmtShort|obj.FmtSign))
+
+ case OTYPE,
+ ONONAME,
+ OINDREG,
+ OEMPTY,
+ OPARAM,
+ OGETG:
+
+ case ONOT,
+ OMINUS,
+ OPLUS,
+ OCOM,
+ OREAL,
+ OIMAG,
+ ODOTMETH,
+ ODOTINTER:
+ walkexpr(&n.Left, init)
+
+ case OIND:
+ walkexpr(&n.Left, init)
+
+ case ODOT:
+ usefield(n)
+ walkexpr(&n.Left, init)
+
+ case ODOTPTR:
+ usefield(n)
+ if n.Op == ODOTPTR && n.Left.Type.Type.Width == 0 {
+ // No actual copy will be generated, so emit an explicit nil check.
+ n.Left = cheapexpr(n.Left, init)
+
+ checknil(n.Left, init)
+ }
+
+ walkexpr(&n.Left, init)
+
+ case OEFACE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ case OSPTR, OITAB:
+ walkexpr(&n.Left, init)
+
+ case OLEN, OCAP:
+ walkexpr(&n.Left, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t := n.Left.Type
+
+ if Isptr[t.Etype] {
+ t = t.Type
+ }
+ if Isfixedarray(t) {
+ safeexpr(n.Left, init)
+ Nodconst(n, n.Type, t.Bound)
+ n.Typecheck = 1
+ }
+
+ case OLSH, ORSH:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ t := n.Left.Type
+ n.Bounded = bounded(n.Right, 8*t.Width)
+ if Debug['m'] != 0 && n.Etype != 0 && !Isconst(n.Right, CTINT) {
+ Warn("shift bounds check elided")
+ }
+
+ // Use results from call expression as arguments for complex.
+ case OAND,
+ OSUB,
+ OHMUL,
+ OLT,
+ OLE,
+ OGE,
+ OGT,
+ OADD,
+ OCOMPLEX,
+ OLROT:
+ if n.Op == OCOMPLEX && n.Left == nil && n.Right == nil {
+ n.Left = n.List.N
+ n.Right = n.List.Next.N
+ }
+
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ case OOR, OXOR:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkrotate(&n)
+
+ case OEQ, ONE:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode := safemode
+
+ safemode = 0
+ walkcompare(&n, init)
+ safemode = old_safemode
+
+ case OANDAND, OOROR:
+ walkexpr(&n.Left, init)
+
+ // cannot put side effects from n.Right on init,
+ // because they cannot run before n.Left is checked.
+ // save elsewhere and store on the eventual n.Right.
+ var ll *NodeList
+
+ walkexpr(&n.Right, &ll)
+ addinit(&n.Right, ll)
+
+ case OPRINT, OPRINTN:
+ walkexprlist(n.List, init)
+ n = walkprint(n, init)
+
+ case OPANIC:
+ n = mkcall("gopanic", nil, init, n.Left)
+
+ case ORECOVER:
+ n = mkcall("gorecover", n.Type, init, Nod(OADDR, nodfp, nil))
+
+ case OLITERAL:
+ n.Addable = true
+
+ case OCLOSUREVAR, OCFUNC:
+ n.Addable = true
+
+ case ONAME:
+ if n.Class&PHEAP == 0 && n.Class != PPARAMREF {
+ n.Addable = true
+ }
+
+ case OCALLINTER:
+ t := n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ break
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+
+ case OCALLFUNC:
+ if n.Left.Op == OCLOSURE {
+ // Transform direct call of a closure to call of a normal function.
+ // transformclosure already did all preparation work.
+
+ // Prepend captured variables to argument list.
+ n.List = concat(n.Left.Func.Enter, n.List)
+
+ n.Left.Func.Enter = nil
+
+ // Replace OCLOSURE with ONAME/PFUNC.
+ n.Left = n.Left.Func.Closure.Func.Nname
+
+ // Update type of OCALLFUNC node.
+ // Output arguments had not changed, but their offsets could.
+ if n.Left.Type.Outtuple == 1 {
+ t := getoutargx(n.Left.Type).Type
+ if t.Etype == TFIELD {
+ t = t.Type
+ }
+ n.Type = t
+ } else {
+ n.Type = getoutargx(n.Left.Type)
+ }
+ }
+
+ t := n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ break
+ }
+
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+
+ if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" {
+ switch Thearch.Thechar {
+ case '5', '6', '7':
+ n.Op = OSQRT
+ n.Left = n.List.N
+ n.List = nil
+ break opswitch
+ }
+ }
+
+ ll := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
+ n.List = reorder1(ll)
+
+ case OCALLMETH:
+ t := n.Left.Type
+ if n.List != nil && n.List.N.Op == OAS {
+ break
+ }
+ walkexpr(&n.Left, init)
+ walkexprlist(n.List, init)
+ ll := ascompatte(n.Op, n, false, getthis(t), list1(n.Left.Left), 0, init)
+ lr := ascompatte(n.Op, n, n.Isddd, getinarg(t), n.List, 0, init)
+ ll = concat(ll, lr)
+ n.Left.Left = nil
+ ullmancalc(n.Left)
+ n.List = reorder1(ll)
+
+ case OAS:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+
+ walkexpr(&n.Left, init)
+ n.Left = safeexpr(n.Left, init)
+
+ if oaslit(n, init) {
+ break
+ }
+
+ if n.Right == nil || iszero(n.Right) && !instrumenting {
+ break
+ }
+
+ switch n.Right.Op {
+ default:
+ walkexpr(&n.Right, init)
+
+ case ODOTTYPE:
+ // TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
+ // It needs to be removed in all three places.
+ // That would allow inlining x.(struct{*int}) the same as x.(*int).
+ if isdirectiface(n.Right.Type) && !Isfat(n.Right.Type) && !instrumenting {
+ // handled directly during cgen
+ walkexpr(&n.Right, init)
+ break
+ }
+
+ // x = i.(T); n.Left is x, n.Right.Left is i.
+ // orderstmt made sure x is addressable.
+ walkexpr(&n.Right.Left, init)
+
+ n1 := Nod(OADDR, n.Left, nil)
+ r := n.Right // i.(T)
+
+ if Debug_typeassert > 0 {
+ Warn("type assertion not inlined")
+ }
+
+ buf := "assert" + type2IET(r.Left.Type) + "2" + type2IET(r.Type)
+ fn := syslook(buf, 1)
+ substArgTypes(fn, r.Left.Type, r.Type)
+
+ n = mkcall1(fn, nil, init, typename(r.Type), r.Left, n1)
+ walkexpr(&n, init)
+ break opswitch
+
+ case ORECV:
+ // x = <-c; n.Left is x, n.Right.Left is c.
+ // orderstmt made sure x is addressable.
+ walkexpr(&n.Right.Left, init)
+
+ n1 := Nod(OADDR, n.Left, nil)
+ r := n.Right.Left // the channel
+ n = mkcall1(chanfn("chanrecv1", 2, r.Type), nil, init, typename(r.Type), r, n1)
+ walkexpr(&n, init)
+ break opswitch
+
+ case OAPPEND:
+ // x = append(...)
+ r := n.Right
+ if r.Isddd {
+ r = appendslice(r, init) // also works for append(slice, string).
+ } else {
+ r = walkappend(r, init, n)
+ }
+ n.Right = r
+ if r.Op == OAPPEND {
+ // Left in place for back end.
+ // Do not add a new write barrier.
+ break opswitch
+ }
+ // Otherwise, lowered for race detector.
+ // Treat as ordinary assignment.
+ }
+
+ if n.Left != nil && n.Right != nil {
+ r := convas(Nod(OAS, n.Left, n.Right), init)
+ r.Dodata = n.Dodata
+ n = r
+ n = applywritebarrier(n, init)
+ }
+
+ case OAS2:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ walkexprlistsafe(n.List, init)
+ walkexprlistsafe(n.Rlist, init)
+ ll := ascompatee(OAS, n.List, n.Rlist, init)
+ ll = reorder3(ll)
+ for lr := ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(ll)
+
+ // a,b,... = fn()
+ case OAS2FUNC:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r := n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r, init)
+
+ ll := ascompatet(n.Op, n.List, &r.Type, 0, init)
+ for lr := ll; lr != nil; lr = lr.Next {
+ lr.N = applywritebarrier(lr.N, init)
+ }
+ n = liststmt(concat(list1(r), ll))
+
+ // x, y = <-c
+ // orderstmt made sure x is addressable.
+ case OAS2RECV:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r := n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ var n1 *Node
+ if isblank(n.List.N) {
+ n1 = nodnil()
+ } else {
+ n1 = Nod(OADDR, n.List.N, nil)
+ }
+ n1.Etype = 1 // addr does not escape
+ fn := chanfn("chanrecv2", 2, r.Left.Type)
+ r = mkcall1(fn, n.List.Next.N.Type, init, typename(r.Left.Type), r.Left, n1)
+ n = Nod(OAS, n.List.Next.N, r)
+ typecheck(&n, Etop)
+
+ // a,b = m[i];
+ case OAS2MAPR:
+ *init = concat(*init, n.Ninit)
+
+ n.Ninit = nil
+ r := n.Rlist.N
+ walkexprlistsafe(n.List, init)
+ walkexpr(&r.Left, init)
+ walkexpr(&r.Right, init)
+ t := r.Left.Type
+ p := ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch algtype(t.Down) {
+ case AMEM32:
+ p = "mapaccess2_fast32"
+ case AMEM64:
+ p = "mapaccess2_fast64"
+ case ASTRING:
+ p = "mapaccess2_faststr"
+ }
+ }
+
+ var key *Node
+ if p != "" {
+ // fast versions take key by value
+ key = r.Right
+ } else {
+ // standard version takes key by reference
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, r.Right, nil)
+
+ p = "mapaccess2"
+ }
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a := n.List.N
+
+ fn := mapfn(p, t)
+ r = mkcall1(fn, getoutargx(fn.Type), init, typename(t), r.Left, key)
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+ // the boolean result of i.(T) is now untyped so we make it the
+ // same type as the variable on the lhs.
+ if !isblank(n.List.Next.N) {
+ r.Type.Type.Down.Type = n.List.Next.N.Type
+ }
+ n.Rlist = list1(r)
+ n.Op = OAS2FUNC
+
+ // don't generate a = *var if a is _
+ if !isblank(a) {
+ var_ := temp(Ptrto(t.Type))
+ var_.Typecheck = 1
+ n.List.N = var_
+ walkexpr(&n, init)
+ *init = list(*init, n)
+ n = Nod(OAS, a, Nod(OIND, var_, nil))
+ }
+
+ typecheck(&n, Etop)
+ walkexpr(&n, init)
+
+ // TODO: ptr is always non-nil, so disable nil check for this OIND op.
+
+ case ODELETE:
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+ map_ := n.List.N
+ key := n.List.Next.N
+ walkexpr(&map_, init)
+ walkexpr(&key, init)
+
+ // orderstmt made sure key is addressable.
+ key = Nod(OADDR, key, nil)
+
+ t := map_.Type
+ n = mkcall1(mapfndel("mapdelete", t), nil, init, typename(t), map_, key)
+
+ case OAS2DOTTYPE:
+ e := n.Rlist.N // i.(T)
+ // TODO(rsc): The Isfat is for consistency with componentgen and orderexpr.
+ // It needs to be removed in all three places.
+ // That would allow inlining x.(struct{*int}) the same as x.(*int).
+ if isdirectiface(e.Type) && !Isfat(e.Type) && !instrumenting {
+ // handled directly during gen.
+ walkexprlistsafe(n.List, init)
+ walkexpr(&e.Left, init)
+ break
+ }
+
+ // res, ok = i.(T)
+ // orderstmt made sure a is addressable.
+ *init = concat(*init, n.Ninit)
+ n.Ninit = nil
+
+ walkexprlistsafe(n.List, init)
+ walkexpr(&e.Left, init)
+ t := e.Type // T
+ from := e.Left // i
+
+ oktype := Types[TBOOL]
+ ok := n.List.Next.N
+ if !isblank(ok) {
+ oktype = ok.Type
+ }
+
+ fromKind := type2IET(from.Type)
+ toKind := type2IET(t)
+
+ // Avoid runtime calls in a few cases of the form _, ok := i.(T).
+ // This is faster and shorter and allows the corresponding assertX2X2
+ // routines to skip nil checks on their last argument.
+ if isblank(n.List.N) {
+ var fast *Node
+ switch {
+ case fromKind == "E" && toKind == "T":
+ tab := Nod(OITAB, from, nil) // type:eface::tab:iface
+ typ := Nod(OCONVNOP, typename(t), nil)
+ typ.Type = Ptrto(Types[TUINTPTR])
+ fast = Nod(OEQ, tab, typ)
+ case fromKind == "I" && toKind == "E",
+ fromKind == "E" && toKind == "E":
+ tab := Nod(OITAB, from, nil)
+ fast = Nod(ONE, nodnil(), tab)
+ }
+ if fast != nil {
+ if Debug_typeassert > 0 {
+ Warn("type assertion (ok only) inlined")
+ }
+ n = Nod(OAS, ok, fast)
+ typecheck(&n, Etop)
+ break
+ }
+ }
+
+ var resptr *Node // &res
+ if isblank(n.List.N) {
+ resptr = nodnil()
+ } else {
+ resptr = Nod(OADDR, n.List.N, nil)
+ }
+ resptr.Etype = 1 // addr does not escape
+
+ if Debug_typeassert > 0 {
+ Warn("type assertion not inlined")
+ }
+ buf := "assert" + fromKind + "2" + toKind + "2"
+ fn := syslook(buf, 1)
+ substArgTypes(fn, from.Type, t)
+ call := mkcall1(fn, oktype, init, typename(t), from, resptr)
+ n = Nod(OAS, ok, call)
+ typecheck(&n, Etop)
+
+ case ODOTTYPE, ODOTTYPE2:
+ if !isdirectiface(n.Type) || Isfat(n.Type) {
+ Fatalf("walkexpr ODOTTYPE") // should see inside OAS only
+ }
+ walkexpr(&n.Left, init)
+
+ case OCONVIFACE:
+ walkexpr(&n.Left, init)
+
+ // Optimize convT2E as a two-word copy when T is pointer-shaped.
+ if isnilinter(n.Type) && isdirectiface(n.Left.Type) {
+ l := Nod(OEFACE, typename(n.Left.Type), n.Left)
+ l.Type = n.Type
+ l.Typecheck = n.Typecheck
+ n = l
+ break
+ }
+
+ // Build name of function: convI2E etc.
+ // Not all names are possible
+ // (e.g., we'll never generate convE2E or convE2I).
+ buf := "conv" + type2IET(n.Left.Type) + "2" + type2IET(n.Type)
+ fn := syslook(buf, 1)
+ var ll *NodeList
+ if !Isinter(n.Left.Type) {
+ ll = list(ll, typename(n.Left.Type))
+ }
+ if !isnilinter(n.Type) {
+ ll = list(ll, typename(n.Type))
+ }
+ if !Isinter(n.Left.Type) && !isnilinter(n.Type) {
+ sym := Pkglookup(Tconv(n.Left.Type, obj.FmtLeft)+"."+Tconv(n.Type, obj.FmtLeft), itabpkg)
+ if sym.Def == nil {
+ l := Nod(ONAME, nil, nil)
+ l.Sym = sym
+ l.Type = Ptrto(Types[TUINT8])
+ l.Addable = true
+ l.Class = PEXTERN
+ l.Xoffset = 0
+ sym.Def = l
+ ggloblsym(sym, int32(Widthptr), obj.DUPOK|obj.NOPTR)
+ }
+
+ l := Nod(OADDR, sym.Def, nil)
+ l.Addable = true
+ ll = list(ll, l)
+
+ if isdirectiface(n.Left.Type) {
+ // For pointer types, we can make a special form of optimization
+ //
+ // These statements are put onto the expression init list:
+ // Itab *tab = atomicloadtype(&cache);
+ // if(tab == nil)
+ // tab = typ2Itab(type, itype, &cache);
+ //
+ // The CONVIFACE expression is replaced with this:
+ // OEFACE{tab, ptr};
+ l := temp(Ptrto(Types[TUINT8]))
+
+ n1 := Nod(OAS, l, sym.Def)
+ typecheck(&n1, Etop)
+ *init = list(*init, n1)
+
+ fn := syslook("typ2Itab", 1)
+ n1 = Nod(OCALL, fn, nil)
+ n1.List = ll
+ typecheck(&n1, Erv)
+ walkexpr(&n1, init)
+
+ n2 := Nod(OIF, nil, nil)
+ n2.Left = Nod(OEQ, l, nodnil())
+ n2.Nbody = list1(Nod(OAS, l, n1))
+ n2.Likely = -1
+ typecheck(&n2, Etop)
+ *init = list(*init, n2)
+
+ l = Nod(OEFACE, l, n.Left)
+ l.Typecheck = n.Typecheck
+ l.Type = n.Type
+ n = l
+ break
+ }
+ }
+
+ if Isinter(n.Left.Type) {
+ ll = list(ll, n.Left)
+ } else {
+ // regular types are passed by reference to avoid C vararg calls
+ // orderexpr arranged for n.Left to be a temporary for all
+ // the conversions it could see. comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to orderstmt, so we
+ // have to fall back on allocating a temp here.
+ if islvalue(n.Left) {
+ ll = list(ll, Nod(OADDR, n.Left, nil))
+ } else {
+ ll = list(ll, Nod(OADDR, copyexpr(n.Left, n.Left.Type, init), nil))
+ }
+ dowidth(n.Left.Type)
+ r := nodnil()
+ if n.Esc == EscNone && n.Left.Type.Width <= 1024 {
+ // Allocate stack buffer for value stored in interface.
+ r = temp(n.Left.Type)
+ r = Nod(OAS, r, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, r.Left, nil)
+ typecheck(&r, Erv)
+ }
+ ll = list(ll, r)
+ }
+
+ if !Isinter(n.Left.Type) {
+ substArgTypes(fn, n.Left.Type, n.Left.Type, n.Type)
+ } else {
+ substArgTypes(fn, n.Left.Type, n.Type)
+ }
+ dowidth(fn.Type)
+ n = Nod(OCALL, fn, nil)
+ n.List = ll
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+
+ case OCONV, OCONVNOP:
+ if Thearch.Thechar == '5' {
+ if Isfloat[n.Left.Type.Etype] {
+ if n.Type.Etype == TINT64 {
+ n = mkcall("float64toint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ break
+ }
+
+ if n.Type.Etype == TUINT64 {
+ n = mkcall("float64touint64", n.Type, init, conv(n.Left, Types[TFLOAT64]))
+ break
+ }
+ }
+
+ if Isfloat[n.Type.Etype] {
+ if n.Left.Type.Etype == TINT64 {
+ n = mkcall("int64tofloat64", n.Type, init, conv(n.Left, Types[TINT64]))
+ break
+ }
+
+ if n.Left.Type.Etype == TUINT64 {
+ n = mkcall("uint64tofloat64", n.Type, init, conv(n.Left, Types[TUINT64]))
+ break
+ }
+ }
+ }
+
+ walkexpr(&n.Left, init)
+
+ case OANDNOT:
+ walkexpr(&n.Left, init)
+ n.Op = OAND
+ n.Right = Nod(OCOM, n.Right, nil)
+ typecheck(&n.Right, Erv)
+ walkexpr(&n.Right, init)
+
+ case OMUL:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ walkmul(&n, init)
+
+ case ODIV, OMOD:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ // rewrite complex div into function call.
+ et := n.Left.Type.Etype
+
+ if Iscomplex[et] && n.Op == ODIV {
+ t := n.Type
+ n = mkcall("complex128div", Types[TCOMPLEX128], init, conv(n.Left, Types[TCOMPLEX128]), conv(n.Right, Types[TCOMPLEX128]))
+ n = conv(n, t)
+ break
+ }
+
+ // Nothing to do for float divisions.
+ if Isfloat[et] {
+ break
+ }
+
+ // Try rewriting as shifts or magic multiplies.
+ walkdiv(&n, init)
+
+ // rewrite 64-bit div and mod into function calls
+ // on 32-bit architectures.
+ switch n.Op {
+ case OMOD, ODIV:
+ if Widthreg >= 8 || (et != TUINT64 && et != TINT64) {
+ break opswitch
+ }
+ var fn string
+ if et == TINT64 {
+ fn = "int64"
+ } else {
+ fn = "uint64"
+ }
+ if n.Op == ODIV {
+ fn += "div"
+ } else {
+ fn += "mod"
+ }
+ n = mkcall(fn, n.Type, init, conv(n.Left, Types[et]), conv(n.Right, Types[et]))
+ }
+
+ case OINDEX:
+ walkexpr(&n.Left, init)
+
+ // save the original node for bounds checking elision.
+ // If it was a ODIV/OMOD walk might rewrite it.
+ r := n.Right
+
+ walkexpr(&n.Right, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded {
+ break
+ }
+ t := n.Left.Type
+ if t != nil && Isptr[t.Etype] {
+ t = t.Type
+ }
+ if Isfixedarray(t) {
+ n.Bounded = bounded(r, t.Bound)
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) && !n.Bounded {
+ Yyerror("index out of bounds")
+ }
+ } else if Isconst(n.Left, CTSTR) {
+ n.Bounded = bounded(r, int64(len(n.Left.Val().U.(string))))
+ if Debug['m'] != 0 && n.Bounded && !Isconst(n.Right, CTINT) {
+ Warn("index bounds check elided")
+ }
+ if Smallintconst(n.Right) {
+ if !n.Bounded {
+ Yyerror("index out of bounds")
+ } else {
+ // replace "abc"[1] with 'b'.
+ // delayed until now because "abc"[1] is not
+ // an ideal constant.
+ v := Mpgetfix(n.Right.Val().U.(*Mpint))
+
+ Nodconst(n, n.Type, int64(n.Left.Val().U.(string)[v]))
+ n.Typecheck = 1
+ }
+ }
+ }
+
+ if Isconst(n.Right, CTINT) {
+ if Mpcmpfixfix(n.Right.Val().U.(*Mpint), &mpzero) < 0 || Mpcmpfixfix(n.Right.Val().U.(*Mpint), Maxintval[TINT]) > 0 {
+ Yyerror("index out of bounds")
+ }
+ }
+
+ case OINDEXMAP:
+ if n.Etype == 1 {
+ break
+ }
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+
+ t := n.Left.Type
+ p := ""
+ if t.Type.Width <= 128 { // Check ../../runtime/hashmap.go:maxValueSize before changing.
+ switch algtype(t.Down) {
+ case AMEM32:
+ p = "mapaccess1_fast32"
+ case AMEM64:
+ p = "mapaccess1_fast64"
+ case ASTRING:
+ p = "mapaccess1_faststr"
+ }
+ }
+
+ var key *Node
+ if p != "" {
+ // fast versions take key by value
+ key = n.Right
+ } else {
+ // standard version takes key by reference.
+ // orderexpr made sure key is addressable.
+ key = Nod(OADDR, n.Right, nil)
+
+ p = "mapaccess1"
+ }
+
+ n = mkcall1(mapfn(p, t), Ptrto(t.Type), init, typename(t), n.Left, key)
+ n = Nod(OIND, n, nil)
+ n.Type = t.Type
+ n.Typecheck = 1
+
+ case ORECV:
+ Fatalf("walkexpr ORECV") // should see inside OAS only
+
+ case OSLICE, OSLICEARR, OSLICESTR:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right.Left, init)
+ if n.Right.Left != nil && iszero(n.Right.Left) {
+ // Reduce x[0:j] to x[:j].
+ n.Right.Left = nil
+ }
+ walkexpr(&n.Right.Right, init)
+ n = reduceSlice(n)
+
+ case OSLICE3, OSLICE3ARR:
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right.Left, init)
+ if n.Right.Left != nil && iszero(n.Right.Left) {
+ // Reduce x[0:j:k] to x[:j:k].
+ n.Right.Left = nil
+ }
+ walkexpr(&n.Right.Right.Left, init)
+ walkexpr(&n.Right.Right.Right, init)
+
+ r := n.Right.Right.Right
+ if r != nil && r.Op == OCAP && samesafeexpr(n.Left, r.Left) {
+ // Reduce x[i:j:cap(x)] to x[i:j].
+ n.Right.Right = n.Right.Right.Left
+ if n.Op == OSLICE3 {
+ n.Op = OSLICE
+ } else {
+ n.Op = OSLICEARR
+ }
+ n = reduceSlice(n)
+ }
+
+ case OADDR:
+ walkexpr(&n.Left, init)
+
+ case ONEW:
+ if n.Esc == EscNone {
+ if n.Type.Type.Width >= 1<<16 {
+ Fatalf("large ONEW with EscNone: %v", n)
+ }
+ r := temp(n.Type.Type)
+ r = Nod(OAS, r, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, r.Left, nil)
+ typecheck(&r, Erv)
+ n = r
+ } else {
+ n = callnew(n.Type.Type)
+ }
+
+ // If one argument to the comparison is an empty string,
+ // comparing the lengths instead will yield the same result
+ // without the function call.
+ case OCMPSTR:
+ if (Isconst(n.Left, CTSTR) && len(n.Left.Val().U.(string)) == 0) || (Isconst(n.Right, CTSTR) && len(n.Right.Val().U.(string)) == 0) {
+ // TODO(marvin): Fix Node.EType type union.
+ r := Nod(Op(n.Etype), Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ break
+ }
+
+ // s + "badgerbadgerbadger" == "badgerbadgerbadger"
+ if (Op(n.Etype) == OEQ || Op(n.Etype) == ONE) && Isconst(n.Right, CTSTR) && n.Left.Op == OADDSTR && count(n.Left.List) == 2 && Isconst(n.Left.List.Next.N, CTSTR) && strlit(n.Right) == strlit(n.Left.List.Next.N) {
+ // TODO(marvin): Fix Node.EType type union.
+ r := Nod(Op(n.Etype), Nod(OLEN, n.Left.List.N, nil), Nodintconst(0))
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+ break
+ }
+
+ var r *Node
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ || Op(n.Etype) == ONE {
+ // prepare for rewrite below
+ n.Left = cheapexpr(n.Left, init)
+
+ n.Right = cheapexpr(n.Right, init)
+
+ r = mkcall("eqstring", Types[TBOOL], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ // quick check of len before full compare for == or !=
+ // eqstring assumes that the lengths are equal
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ {
+ // len(left) == len(right) && eqstring(left, right)
+ r = Nod(OANDAND, Nod(OEQ, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ } else {
+ // len(left) != len(right) || !eqstring(left, right)
+ r = Nod(ONOT, r, nil)
+
+ r = Nod(OOROR, Nod(ONE, Nod(OLEN, n.Left, nil), Nod(OLEN, n.Right, nil)), r)
+ }
+
+ typecheck(&r, Erv)
+ walkexpr(&r, nil)
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", Types[TINT], init, conv(n.Left, Types[TSTRING]), conv(n.Right, Types[TSTRING]))
+
+ // TODO(marvin): Fix Node.EType type union.
+ r = Nod(Op(n.Etype), r, Nodintconst(0))
+ }
+
+ typecheck(&r, Erv)
+ if n.Type.Etype != TBOOL {
+ Fatalf("cmp %v", n.Type)
+ }
+ r.Type = n.Type
+ n = r
+
+ case OADDSTR:
+ n = addstr(n, init)
+
+ case OAPPEND:
+ // order should make sure we only see OAS(node, OAPPEND), which we handle above.
+ Fatalf("append outside assignment")
+
+ case OCOPY:
+ n = copyany(n, init, instrumenting)
+
+ // cannot use chanfn - closechan takes any, not chan any
+ case OCLOSE:
+ fn := syslook("closechan", 1)
+
+ substArgTypes(fn, n.Left.Type)
+ n = mkcall1(fn, nil, init, n.Left)
+
+ case OMAKECHAN:
+ n = mkcall1(chanfn("makechan", 1, n.Type), n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]))
+
+ case OMAKEMAP:
+ t := n.Type
+
+ fn := syslook("makemap", 1)
+
+ a := nodnil() // hmap buffer
+ r := nodnil() // bucket buffer
+ if n.Esc == EscNone {
+ // Allocate hmap buffer on stack.
+ var_ := temp(hmap(t))
+
+ a = Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ a = Nod(OADDR, var_, nil)
+
+ // Allocate one bucket on stack.
+ // Maximum key/value size is 128 bytes, larger objects
+ // are stored with an indirection. So max bucket size is 2048+eps.
+ var_ = temp(mapbucket(t))
+
+ r = Nod(OAS, var_, nil) // zero temp
+ typecheck(&r, Etop)
+ *init = list(*init, r)
+ r = Nod(OADDR, var_, nil)
+ }
+
+ substArgTypes(fn, hmap(t), mapbucket(t), t.Down, t.Type)
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(n.Left, Types[TINT64]), a, r)
+
+ case OMAKESLICE:
+ l := n.Left
+ r := n.Right
+ if r == nil {
+ r = safeexpr(l, init)
+ l = r
+ }
+ t := n.Type
+ if n.Esc == EscNone {
+ if !isSmallMakeSlice(n) {
+ Fatalf("non-small OMAKESLICE with EscNone: %v", n)
+ }
+ // var arr [r]T
+ // n = arr[:l]
+ t = aindex(r, t.Type) // [r]T
+ var_ := temp(t)
+ a := Nod(OAS, var_, nil) // zero temp
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ r := Nod(OSLICE, var_, Nod(OKEY, nil, l)) // arr[:l]
+ r = conv(r, n.Type) // in case n.Type is named.
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ n = r
+ } else {
+ // makeslice(t *Type, nel int64, max int64) (ary []any)
+ fn := syslook("makeslice", 1)
+
+ substArgTypes(fn, t.Type) // any-1
+ n = mkcall1(fn, n.Type, init, typename(n.Type), conv(l, Types[TINT64]), conv(r, Types[TINT64]))
+ }
+
+ case ORUNESTR:
+ a := nodnil()
+ if n.Esc == EscNone {
+ t := aindex(Nodintconst(4), Types[TUINT8])
+ var_ := temp(t)
+ a = Nod(OADDR, var_, nil)
+ }
+
+ // intstring(*[4]byte, rune)
+ n = mkcall("intstring", n.Type, init, a, conv(n.Left, Types[TINT64]))
+
+ case OARRAYBYTESTR:
+ a := nodnil()
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ // slicebytetostring(*[32]byte, []byte) string;
+ n = mkcall("slicebytetostring", n.Type, init, a, n.Left)
+
+ // slicebytetostringtmp([]byte) string;
+ case OARRAYBYTESTRTMP:
+ n = mkcall("slicebytetostringtmp", n.Type, init, n.Left)
+
+ // slicerunetostring(*[32]byte, []rune) string;
+ case OARRAYRUNESTR:
+ a := nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for string on stack.
+ t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("slicerunetostring", n.Type, init, a, n.Left)
+
+ // stringtoslicebyte(*32[byte], string) []byte;
+ case OSTRARRAYBYTE:
+ a := nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicebyte", n.Type, init, a, conv(n.Left, Types[TSTRING]))
+
+ // stringtoslicebytetmp(string) []byte;
+ case OSTRARRAYBYTETMP:
+ n = mkcall("stringtoslicebytetmp", n.Type, init, conv(n.Left, Types[TSTRING]))
+
+ // stringtoslicerune(*[32]rune, string) []rune
+ case OSTRARRAYRUNE:
+ a := nodnil()
+
+ if n.Esc == EscNone {
+ // Create temporary buffer for slice on stack.
+ t := aindex(Nodintconst(tmpstringbufsize), Types[TINT32])
+
+ a = Nod(OADDR, temp(t), nil)
+ }
+
+ n = mkcall("stringtoslicerune", n.Type, init, a, n.Left)
+
+ // ifaceeq(i1 any-1, i2 any-2) (ret bool);
+ case OCMPIFACE:
+ if !Eqtype(n.Left.Type, n.Right.Type) {
+ Fatalf("ifaceeq %v %v %v", Oconv(int(n.Op), 0), n.Left.Type, n.Right.Type)
+ }
+ var fn *Node
+ if isnilinter(n.Left.Type) {
+ fn = syslook("efaceeq", 1)
+ } else {
+ fn = syslook("ifaceeq", 1)
+ }
+
+ n.Right = cheapexpr(n.Right, init)
+ n.Left = cheapexpr(n.Left, init)
+ substArgTypes(fn, n.Right.Type, n.Left.Type)
+ r := mkcall1(fn, n.Type, init, n.Left, n.Right)
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == ONE {
+ r = Nod(ONOT, r, nil)
+ }
+
+ // check itable/type before full compare.
+ // TODO(marvin): Fix Node.EType type union.
+ if Op(n.Etype) == OEQ {
+ r = Nod(OANDAND, Nod(OEQ, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ } else {
+ r = Nod(OOROR, Nod(ONE, Nod(OITAB, n.Left, nil), Nod(OITAB, n.Right, nil)), r)
+ }
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+ n = r
+
+ case OARRAYLIT, OMAPLIT, OSTRUCTLIT, OPTRLIT:
+ var_ := temp(n.Type)
+ anylit(0, n, var_, init)
+ n = var_
+
+ case OSEND:
+ n1 := n.Right
+ n1 = assignconv(n1, n.Left.Type.Type, "chan send")
+ walkexpr(&n1, init)
+ n1 = Nod(OADDR, n1, nil)
+ n = mkcall1(chanfn("chansend1", 2, n.Left.Type), nil, init, typename(n.Left.Type), n.Left, n1)
+
+ case OCLOSURE:
+ n = walkclosure(n, init)
+
+ case OCALLPART:
+ n = walkpartialcall(n, init)
+ }
+
+ // Expressions that are constant at run time but not
+ // considered const by the language spec are not turned into
+ // constants until walk. For example, if n is y%1 == 0, the
+ // walk of y%1 may have replaced it by 0.
+ // Check whether n with its updated args is itself now a constant.
+ t := n.Type
+
+ evconst(n)
+ n.Type = t
+ if n.Op == OLITERAL {
+ typecheck(&n, Erv)
+ }
+
+ ullmancalc(n)
+
+ if Debug['w'] != 0 && n != nil {
+ Dump("walk", n)
+ }
+
+ lineno = lno
+ *np = n
+}
+
+func reduceSlice(n *Node) *Node {
+ r := n.Right.Right
+ if r != nil && r.Op == OLEN && samesafeexpr(n.Left, r.Left) {
+ // Reduce x[i:len(x)] to x[i:].
+ n.Right.Right = nil
+ }
+ if (n.Op == OSLICE || n.Op == OSLICESTR) && n.Right.Left == nil && n.Right.Right == nil {
+ // Reduce x[:] to x.
+ if Debug_slice > 0 {
+ Warn("slice: omit slice operation")
+ }
+ return n.Left
+ }
+ return n
+}
+
+func ascompatee1(op Op, l *Node, r *Node, init **NodeList) *Node {
+ // convas will turn map assigns into function calls,
+ // making it impossible for reorder3 to work.
+ n := Nod(OAS, l, r)
+
+ if l.Op == OINDEXMAP {
+ return n
+ }
+
+ return convas(n, init)
+}
+
+func ascompatee(op Op, nl *NodeList, nr *NodeList, init **NodeList) *NodeList {
+ // check assign expression list to
+ // a expression list. called in
+ // expr-list = expr-list
+
+ // ensure order of evaluation for function calls
+ for ll := nl; ll != nil; ll = ll.Next {
+ ll.N = safeexpr(ll.N, init)
+ }
+ for lr := nr; lr != nil; lr = lr.Next {
+ lr.N = safeexpr(lr.N, init)
+ }
+
+ var nn *NodeList
+ ll := nl
+ lr := nr
+ for ; ll != nil && lr != nil; ll, lr = ll.Next, lr.Next {
+ // Do not generate 'x = x' during return. See issue 4014.
+ if op == ORETURN && ll.N == lr.N {
+ continue
+ }
+ nn = list(nn, ascompatee1(op, ll.N, lr.N, init))
+ }
+
+ // cannot happen: caller checked that lists had same length
+ if ll != nil || lr != nil {
+ Yyerror("error in shape across %v %v %v / %d %d [%s]", Hconv(nl, obj.FmtSign), Oconv(int(op), 0), Hconv(nr, obj.FmtSign), count(nl), count(nr), Curfn.Func.Nname.Sym.Name)
+ }
+ return nn
+}
+
+// l is an lv and rt is the type of an rv
+// return 1 if this implies a function call
+// evaluating the lv or a function call
+// in the conversion of the types
+func fncall(l *Node, rt *Type) bool {
+ if l.Ullman >= UINF || l.Op == OINDEXMAP {
+ return true
+ }
+ var r Node
+ if needwritebarrier(l, &r) {
+ return true
+ }
+ if Eqtype(l.Type, rt) {
+ return false
+ }
+ return true
+}
+
+func ascompatet(op Op, nl *NodeList, nr **Type, fp int, init **NodeList) *NodeList {
+ var l *Node
+ var tmp *Node
+ var a *Node
+ var ll *NodeList
+ var saver Iter
+
+ // check assign type list to
+ // a expression list. called in
+ // expr-list = func()
+ r := Structfirst(&saver, nr)
+
+ var nn *NodeList
+ var mm *NodeList
+ ucount := 0
+ for ll = nl; ll != nil; ll = ll.Next {
+ if r == nil {
+ break
+ }
+ l = ll.N
+ if isblank(l) {
+ r = structnext(&saver)
+ continue
+ }
+
+ // any lv that causes a fn call must be
+ // deferred until all the return arguments
+ // have been pulled from the output arguments
+ if fncall(l, r.Type) {
+ tmp = temp(r.Type)
+ typecheck(&tmp, Erv)
+ a = Nod(OAS, l, tmp)
+ a = convas(a, init)
+ mm = list(mm, a)
+ l = tmp
+ }
+
+ a = Nod(OAS, l, nodarg(r, fp))
+ a = convas(a, init)
+ ullmancalc(a)
+ if a.Ullman >= UINF {
+ Dump("ascompatet ucount", a)
+ ucount++
+ }
+
+ nn = list(nn, a)
+ r = structnext(&saver)
+ }
+
+ if ll != nil || r != nil {
+ Yyerror("ascompatet: assignment count mismatch: %d = %d", count(nl), structcount(*nr))
+ }
+
+ if ucount != 0 {
+ Fatalf("ascompatet: too many function calls evaluating parameters")
+ }
+ return concat(nn, mm)
+}
+
+// package all the arguments that match a ... T parameter into a []T.
+func mkdotargslice(lr0 *NodeList, nn *NodeList, l *Type, fp int, init **NodeList, ddd *Node) *NodeList {
+ esc := uint16(EscUnknown)
+ if ddd != nil {
+ esc = ddd.Esc
+ }
+
+ tslice := typ(TARRAY)
+ tslice.Type = l.Type.Type
+ tslice.Bound = -1
+
+ var n *Node
+ if count(lr0) == 0 {
+ n = nodnil()
+ n.Type = tslice
+ } else {
+ n = Nod(OCOMPLIT, nil, typenod(tslice))
+ if ddd != nil && prealloc[ddd] != nil {
+ prealloc[n] = prealloc[ddd] // temporary to use
+ }
+ n.List = lr0
+ n.Esc = esc
+ typecheck(&n, Erv)
+ if n.Type == nil {
+ Fatalf("mkdotargslice: typecheck failed")
+ }
+ walkexpr(&n, init)
+ }
+
+ a := Nod(OAS, nodarg(l, fp), n)
+ nn = list(nn, convas(a, init))
+ return nn
+}
+
+// helpers for shape errors
+func dumptypes(nl **Type, what string) string {
+ var savel Iter
+
+ fmt_ := ""
+ fmt_ += "\t"
+ first := 1
+ for l := Structfirst(&savel, nl); l != nil; l = structnext(&savel) {
+ if first != 0 {
+ first = 0
+ } else {
+ fmt_ += ", "
+ }
+ fmt_ += Tconv(l, 0)
+ }
+
+ if first != 0 {
+ fmt_ += fmt.Sprintf("[no arguments %s]", what)
+ }
+ return fmt_
+}
+
+func dumpnodetypes(l *NodeList, what string) string {
+ var r *Node
+
+ fmt_ := ""
+ fmt_ += "\t"
+ first := 1
+ for ; l != nil; l = l.Next {
+ r = l.N
+ if first != 0 {
+ first = 0
+ } else {
+ fmt_ += ", "
+ }
+ fmt_ += Tconv(r.Type, 0)
+ }
+
+ if first != 0 {
+ fmt_ += fmt.Sprintf("[no arguments %s]", what)
+ }
+ return fmt_
+}
+
+// check assign expression list to
+// a type list. called in
+// return expr-list
+// func(expr-list)
+func ascompatte(op Op, call *Node, isddd bool, nl **Type, lr *NodeList, fp int, init **NodeList) *NodeList {
+ var savel Iter
+
+ lr0 := lr
+ l := Structfirst(&savel, nl)
+ var r *Node
+ if lr != nil {
+ r = lr.N
+ }
+ var nn *NodeList
+
+ // f(g()) where g has multiple return values
+ var a *Node
+ var l2 string
+ var ll *Type
+ var l1 string
+ if r != nil && lr.Next == nil && r.Type.Etype == TSTRUCT && r.Type.Funarg {
+ // optimization - can do block copy
+ if eqtypenoname(r.Type, *nl) {
+ a := nodarg(*nl, fp)
+ r = Nod(OCONVNOP, r, nil)
+ r.Type = a.Type
+ nn = list1(convas(Nod(OAS, a, r), init))
+ goto ret
+ }
+
+ // conversions involved.
+ // copy into temporaries.
+ var alist *NodeList
+
+ for l := Structfirst(&savel, &r.Type); l != nil; l = structnext(&savel) {
+ a = temp(l.Type)
+ alist = list(alist, a)
+ }
+
+ a = Nod(OAS2, nil, nil)
+ a.List = alist
+ a.Rlist = lr
+ typecheck(&a, Etop)
+ walkstmt(&a)
+ *init = list(*init, a)
+ lr = alist
+ r = lr.N
+ l = Structfirst(&savel, nl)
+ }
+
+loop:
+ if l != nil && l.Isddd {
+ // the ddd parameter must be last
+ ll = structnext(&savel)
+
+ if ll != nil {
+ Yyerror("... must be last argument")
+ }
+
+ // special case --
+ // only if we are assigning a single ddd
+ // argument to a ddd parameter then it is
+ // passed thru unencapsulated
+ if r != nil && lr.Next == nil && isddd && Eqtype(l.Type, r.Type) {
+ a = Nod(OAS, nodarg(l, fp), r)
+ a = convas(a, init)
+ nn = list(nn, a)
+ goto ret
+ }
+
+ // normal case -- make a slice of all
+ // remaining arguments and pass it to
+ // the ddd parameter.
+ nn = mkdotargslice(lr, nn, l, fp, init, call.Right)
+
+ goto ret
+ }
+
+ if l == nil || r == nil {
+ if l != nil || r != nil {
+ l1 = dumptypes(nl, "expected")
+ l2 = dumpnodetypes(lr0, "given")
+ if l != nil {
+ Yyerror("not enough arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ } else {
+ Yyerror("too many arguments to %v\n%s\n%s", Oconv(int(op), 0), l1, l2)
+ }
+ }
+
+ goto ret
+ }
+
+ a = Nod(OAS, nodarg(l, fp), r)
+ a = convas(a, init)
+ nn = list(nn, a)
+
+ l = structnext(&savel)
+ r = nil
+ lr = lr.Next
+ if lr != nil {
+ r = lr.N
+ }
+ goto loop
+
+ret:
+ for lr = nn; lr != nil; lr = lr.Next {
+ lr.N.Typecheck = 1
+ }
+ return nn
+}
+
+// generate code for print
+func walkprint(nn *Node, init **NodeList) *Node {
+ var r *Node
+ var n *Node
+ var on *Node
+ var t *Type
+ var et EType
+
+ op := nn.Op
+ all := nn.List
+ var calls *NodeList
+ notfirst := false
+
+ // Hoist all the argument evaluation up before the lock.
+ walkexprlistcheap(all, init)
+
+ calls = list(calls, mkcall("printlock", nil, init))
+
+ for l := all; l != nil; l = l.Next {
+ if notfirst {
+ calls = list(calls, mkcall("printsp", nil, init))
+ }
+
+ notfirst = op == OPRINTN
+
+ n = l.N
+ if n.Op == OLITERAL {
+ switch n.Val().Ctype() {
+ case CTRUNE:
+ defaultlit(&n, runetype)
+
+ case CTINT:
+ defaultlit(&n, Types[TINT64])
+
+ case CTFLT:
+ defaultlit(&n, Types[TFLOAT64])
+ }
+ }
+
+ if n.Op != OLITERAL && n.Type != nil && n.Type.Etype == TIDEAL {
+ defaultlit(&n, Types[TINT64])
+ }
+ defaultlit(&n, nil)
+ l.N = n
+ if n.Type == nil || n.Type.Etype == TFORW {
+ continue
+ }
+
+ t = n.Type
+ et = n.Type.Etype
+ if Isinter(n.Type) {
+ if isnilinter(n.Type) {
+ on = syslook("printeface", 1)
+ } else {
+ on = syslook("printiface", 1)
+ }
+ substArgTypes(on, n.Type) // any-1
+ } else if Isptr[et] || et == TCHAN || et == TMAP || et == TFUNC || et == TUNSAFEPTR {
+ on = syslook("printpointer", 1)
+ substArgTypes(on, n.Type) // any-1
+ } else if Isslice(n.Type) {
+ on = syslook("printslice", 1)
+ substArgTypes(on, n.Type) // any-1
+ } else if Isint[et] {
+ if et == TUINT64 {
+ if (t.Sym.Pkg == Runtimepkg || compiling_runtime != 0) && t.Sym.Name == "hex" {
+ on = syslook("printhex", 0)
+ } else {
+ on = syslook("printuint", 0)
+ }
+ } else {
+ on = syslook("printint", 0)
+ }
+ } else if Isfloat[et] {
+ on = syslook("printfloat", 0)
+ } else if Iscomplex[et] {
+ on = syslook("printcomplex", 0)
+ } else if et == TBOOL {
+ on = syslook("printbool", 0)
+ } else if et == TSTRING {
+ on = syslook("printstring", 0)
+ } else {
+ badtype(OPRINT, n.Type, nil)
+ continue
+ }
+
+ t = *getinarg(on.Type)
+ if t != nil {
+ t = t.Type
+ }
+ if t != nil {
+ t = t.Type
+ }
+
+ if !Eqtype(t, n.Type) {
+ n = Nod(OCONV, n, nil)
+ n.Type = t
+ }
+
+ r = Nod(OCALL, on, nil)
+ r.List = list1(n)
+ calls = list(calls, r)
+ }
+
+ if op == OPRINTN {
+ calls = list(calls, mkcall("printnl", nil, nil))
+ }
+
+ calls = list(calls, mkcall("printunlock", nil, init))
+
+ typechecklist(calls, Etop)
+ walkexprlist(calls, init)
+
+ r = Nod(OEMPTY, nil, nil)
+ typecheck(&r, Etop)
+ walkexpr(&r, init)
+ r.Ninit = calls
+ return r
+}
+
+func callnew(t *Type) *Node {
+ dowidth(t)
+ fn := syslook("newobject", 1)
+ substArgTypes(fn, t)
+ return mkcall1(fn, Ptrto(t), nil, typename(t))
+}
+
+func iscallret(n *Node) bool {
+ n = outervalue(n)
+ return n.Op == OINDREG && n.Reg == int16(Thearch.REGSP)
+}
+
+func isstack(n *Node) bool {
+ n = outervalue(n)
+
+ // If n is *autotmp and autotmp = &foo, replace n with foo.
+ // We introduce such temps when initializing struct literals.
+ if n.Op == OIND && n.Left.Op == ONAME && strings.HasPrefix(n.Left.Sym.Name, "autotmp_") {
+ defn := n.Left.Name.Defn
+ if defn != nil && defn.Op == OAS && defn.Right.Op == OADDR {
+ n = defn.Right.Left
+ }
+ }
+
+ switch n.Op {
+ case OINDREG:
+ return n.Reg == int16(Thearch.REGSP)
+
+ case ONAME:
+ switch n.Class {
+ case PAUTO, PPARAM, PPARAMOUT:
+ return true
+ }
+ }
+
+ return false
+}
+
+func isglobal(n *Node) bool {
+ n = outervalue(n)
+
+ switch n.Op {
+ case ONAME:
+ switch n.Class {
+ case PEXTERN:
+ return true
+ }
+ }
+
+ return false
+}
+
+// Do we need a write barrier for the assignment l = r?
+func needwritebarrier(l *Node, r *Node) bool {
+ if use_writebarrier == 0 {
+ return false
+ }
+
+ if l == nil || isblank(l) {
+ return false
+ }
+
+ // No write barrier for write of non-pointers.
+ dowidth(l.Type)
+
+ if !haspointers(l.Type) {
+ return false
+ }
+
+ // No write barrier for write to stack.
+ if isstack(l) {
+ return false
+ }
+
+ // No write barrier for implicit zeroing.
+ if r == nil {
+ return false
+ }
+
+ // Ignore no-op conversions when making decision.
+ // Ensures that xp = unsafe.Pointer(&x) is treated
+ // the same as xp = &x.
+ for r.Op == OCONVNOP {
+ r = r.Left
+ }
+
+ // No write barrier for zeroing or initialization to constant.
+ if iszero(r) || r.Op == OLITERAL {
+ return false
+ }
+
+ // No write barrier for storing static (read-only) data.
+ if r.Op == ONAME && strings.HasPrefix(r.Sym.Name, "statictmp_") {
+ return false
+ }
+
+ // No write barrier for storing address of stack values,
+ // which are guaranteed only to be written to the stack.
+ if r.Op == OADDR && isstack(r.Left) {
+ return false
+ }
+
+ // No write barrier for storing address of global, which
+ // is live no matter what.
+ if r.Op == OADDR && isglobal(r.Left) {
+ return false
+ }
+
+ // Otherwise, be conservative and use write barrier.
+ return true
+}
+
+// TODO(rsc): Perhaps componentgen should run before this.
+
+func applywritebarrier(n *Node, init **NodeList) *Node {
+ if n.Left != nil && n.Right != nil && needwritebarrier(n.Left, n.Right) {
+ if Debug_wb > 1 {
+ Warnl(int(n.Lineno), "marking %v for barrier", Nconv(n.Left, 0))
+ }
+ n.Op = OASWB
+ return n
+ }
+ return n
+}
+
+func convas(n *Node, init **NodeList) *Node {
+ if n.Op != OAS {
+ Fatalf("convas: not OAS %v", Oconv(int(n.Op), 0))
+ }
+
+ n.Typecheck = 1
+
+ var lt *Type
+ var rt *Type
+ if n.Left == nil || n.Right == nil {
+ goto out
+ }
+
+ lt = n.Left.Type
+ rt = n.Right.Type
+ if lt == nil || rt == nil {
+ goto out
+ }
+
+ if isblank(n.Left) {
+ defaultlit(&n.Right, nil)
+ goto out
+ }
+
+ if n.Left.Op == OINDEXMAP {
+ map_ := n.Left.Left
+ key := n.Left.Right
+ val := n.Right
+ walkexpr(&map_, init)
+ walkexpr(&key, init)
+ walkexpr(&val, init)
+
+ // orderexpr made sure key and val are addressable.
+ key = Nod(OADDR, key, nil)
+
+ val = Nod(OADDR, val, nil)
+ n = mkcall1(mapfn("mapassign1", map_.Type), nil, init, typename(map_.Type), map_, key, val)
+ goto out
+ }
+
+ if !Eqtype(lt, rt) {
+ n.Right = assignconv(n.Right, lt, "assignment")
+ walkexpr(&n.Right, init)
+ }
+
+out:
+ ullmancalc(n)
+ return n
+}
+
+// from ascompat[te]
+// evaluating actual function arguments.
+// f(a,b)
+// if there is exactly one function expr,
+// then it is done first. otherwise must
+// make temp variables
+func reorder1(all *NodeList) *NodeList {
+ var n *Node
+
+ c := 0 // function calls
+ t := 0 // total parameters
+
+ for l := all; l != nil; l = l.Next {
+ n = l.N
+ t++
+ ullmancalc(n)
+ if n.Ullman >= UINF {
+ c++
+ }
+ }
+
+ if c == 0 || t == 1 {
+ return all
+ }
+
+ var g *NodeList // fncalls assigned to tempnames
+ var f *Node // last fncall assigned to stack
+ var r *NodeList // non fncalls and tempnames assigned to stack
+ d := 0
+ var a *Node
+ for l := all; l != nil; l = l.Next {
+ n = l.N
+ if n.Ullman < UINF {
+ r = list(r, n)
+ continue
+ }
+
+ d++
+ if d == c {
+ f = n
+ continue
+ }
+
+ // make assignment of fncall to tempname
+ a = temp(n.Right.Type)
+
+ a = Nod(OAS, a, n.Right)
+ g = list(g, a)
+
+ // put normal arg assignment on list
+ // with fncall replaced by tempname
+ n.Right = a.Left
+
+ r = list(r, n)
+ }
+
+ if f != nil {
+ g = list(g, f)
+ }
+ return concat(g, r)
+}
+
+// from ascompat[ee]
+// a,b = c,d
+// simultaneous assignment. there cannot
+// be later use of an earlier lvalue.
+//
+// function calls have been removed.
+func reorder3(all *NodeList) *NodeList {
+ var l *Node
+
+ // If a needed expression may be affected by an
+ // earlier assignment, make an early copy of that
+ // expression and use the copy instead.
+ var early *NodeList
+
+ var mapinit *NodeList
+ for list := all; list != nil; list = list.Next {
+ l = list.N.Left
+
+ // Save subexpressions needed on left side.
+ // Drill through non-dereferences.
+ for {
+ if l.Op == ODOT || l.Op == OPAREN {
+ l = l.Left
+ continue
+ }
+
+ if l.Op == OINDEX && Isfixedarray(l.Left.Type) {
+ reorder3save(&l.Right, all, list, &early)
+ l = l.Left
+ continue
+ }
+
+ break
+ }
+
+ switch l.Op {
+ default:
+ Fatalf("reorder3 unexpected lvalue %v", Oconv(int(l.Op), obj.FmtSharp))
+
+ case ONAME:
+ break
+
+ case OINDEX, OINDEXMAP:
+ reorder3save(&l.Left, all, list, &early)
+ reorder3save(&l.Right, all, list, &early)
+ if l.Op == OINDEXMAP {
+ list.N = convas(list.N, &mapinit)
+ }
+
+ case OIND, ODOTPTR:
+ reorder3save(&l.Left, all, list, &early)
+ }
+
+ // Save expression on right side.
+ reorder3save(&list.N.Right, all, list, &early)
+ }
+
+ early = concat(mapinit, early)
+ return concat(early, all)
+}
+
+// if the evaluation of *np would be affected by the
+// assignments in all up to but not including stop,
+// copy into a temporary during *early and
+// replace *np with that temp.
+func reorder3save(np **Node, all *NodeList, stop *NodeList, early **NodeList) {
+ n := *np
+ if !aliased(n, all, stop) {
+ return
+ }
+
+ q := temp(n.Type)
+ q = Nod(OAS, q, n)
+ typecheck(&q, Etop)
+ *early = list(*early, q)
+ *np = q.Left
+}
+
+// what's the outer value that a write to n affects?
+// outer value means containing struct or array.
+func outervalue(n *Node) *Node {
+ for {
+ if n.Op == OXDOT {
+ Fatalf("OXDOT in walk")
+ }
+ if n.Op == ODOT || n.Op == OPAREN || n.Op == OCONVNOP {
+ n = n.Left
+ continue
+ }
+
+ if n.Op == OINDEX && Isfixedarray(n.Left.Type) {
+ n = n.Left
+ continue
+ }
+
+ break
+ }
+
+ return n
+}
+
+// Is it possible that the computation of n might be
+// affected by writes in as up to but not including stop?
+func aliased(n *Node, all *NodeList, stop *NodeList) bool {
+ if n == nil {
+ return false
+ }
+
+ // Look for obvious aliasing: a variable being assigned
+ // during the all list and appearing in n.
+ // Also record whether there are any writes to main memory.
+ // Also record whether there are any writes to variables
+ // whose addresses have been taken.
+ memwrite := 0
+
+ varwrite := 0
+ var a *Node
+ for l := all; l != stop; l = l.Next {
+ a = outervalue(l.N.Left)
+ if a.Op != ONAME {
+ memwrite = 1
+ continue
+ }
+
+ switch n.Class {
+ default:
+ varwrite = 1
+ continue
+
+ case PAUTO, PPARAM, PPARAMOUT:
+ if n.Addrtaken {
+ varwrite = 1
+ continue
+ }
+
+ if vmatch2(a, n) {
+ // Direct hit.
+ return true
+ }
+ }
+ }
+
+ // The variables being written do not appear in n.
+ // However, n might refer to computed addresses
+ // that are being written.
+
+ // If no computed addresses are affected by the writes, no aliasing.
+ if memwrite == 0 && varwrite == 0 {
+ return false
+ }
+
+ // If n does not refer to computed addresses
+ // (that is, if n only refers to variables whose addresses
+ // have not been taken), no aliasing.
+ if varexpr(n) {
+ return false
+ }
+
+ // Otherwise, both the writes and n refer to computed memory addresses.
+ // Assume that they might conflict.
+ return true
+}
+
+// does the evaluation of n only refer to variables
+// whose addresses have not been taken?
+// (and no other memory)
+func varexpr(n *Node) bool {
+ if n == nil {
+ return true
+ }
+
+ switch n.Op {
+ case OLITERAL:
+ return true
+
+ case ONAME:
+ switch n.Class {
+ case PAUTO, PPARAM, PPARAMOUT:
+ if !n.Addrtaken {
+ return true
+ }
+ }
+
+ return false
+
+ case OADD,
+ OSUB,
+ OOR,
+ OXOR,
+ OMUL,
+ ODIV,
+ OMOD,
+ OLSH,
+ ORSH,
+ OAND,
+ OANDNOT,
+ OPLUS,
+ OMINUS,
+ OCOM,
+ OPAREN,
+ OANDAND,
+ OOROR,
+ ODOT, // but not ODOTPTR
+ OCONV,
+ OCONVNOP,
+ OCONVIFACE,
+ ODOTTYPE:
+ return varexpr(n.Left) && varexpr(n.Right)
+ }
+
+ // Be conservative.
+ return false
+}
+
+// is the name l mentioned in r?
+func vmatch2(l *Node, r *Node) bool {
+ if r == nil {
+ return false
+ }
+ switch r.Op {
+ // match each right given left
+ case ONAME:
+ return l == r
+
+ case OLITERAL:
+ return false
+ }
+
+ if vmatch2(l, r.Left) {
+ return true
+ }
+ if vmatch2(l, r.Right) {
+ return true
+ }
+ for ll := r.List; ll != nil; ll = ll.Next {
+ if vmatch2(l, ll.N) {
+ return true
+ }
+ }
+ return false
+}
+
+// is any name mentioned in l also mentioned in r?
+// called by sinit.go
+func vmatch1(l *Node, r *Node) bool {
+ // isolate all left sides
+ if l == nil || r == nil {
+ return false
+ }
+ switch l.Op {
+ case ONAME:
+ switch l.Class {
+ case PPARAM, PPARAMREF, PAUTO:
+ break
+
+ // assignment to non-stack variable
+ // must be delayed if right has function calls.
+ default:
+ if r.Ullman >= UINF {
+ return true
+ }
+ }
+
+ return vmatch2(l, r)
+
+ case OLITERAL:
+ return false
+ }
+
+ if vmatch1(l.Left, r) {
+ return true
+ }
+ if vmatch1(l.Right, r) {
+ return true
+ }
+ for ll := l.List; ll != nil; ll = ll.Next {
+ if vmatch1(ll.N, r) {
+ return true
+ }
+ }
+ return false
+}
+
+// walk through argin parameters.
+// generate and return code to allocate
+// copies of escaped parameters to the heap.
+func paramstoheap(argin **Type, out int) *NodeList {
+ var savet Iter
+ var v *Node
+ var as *Node
+
+ var nn *NodeList
+ for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ v = t.Nname
+ if v != nil && v.Sym != nil && v.Sym.Name[0] == '~' && v.Sym.Name[1] == 'r' { // unnamed result
+ v = nil
+ }
+
+ // For precise stacks, the garbage collector assumes results
+ // are always live, so zero them always.
+ if out != 0 {
+ // Defer might stop a panic and show the
+ // return values as they exist at the time of panic.
+ // Make sure to zero them on entry to the function.
+ nn = list(nn, Nod(OAS, nodarg(t, 1), nil))
+ }
+
+ if v == nil || v.Class&PHEAP == 0 {
+ continue
+ }
+
+ // generate allocation & copying code
+ if compiling_runtime != 0 {
+ Yyerror("%v escapes to heap, not allowed in runtime.", v)
+ }
+ if prealloc[v] == nil {
+ prealloc[v] = callnew(v.Type)
+ }
+ nn = list(nn, Nod(OAS, v.Name.Heapaddr, prealloc[v]))
+ if v.Class&^PHEAP != PPARAMOUT {
+ as = Nod(OAS, v, v.Name.Param.Stackparam)
+ v.Name.Param.Stackparam.Typecheck = 1
+ typecheck(&as, Etop)
+ as = applywritebarrier(as, &nn)
+ nn = list(nn, as)
+ }
+ }
+
+ return nn
+}
+
+// walk through argout parameters copying back to stack
+func returnsfromheap(argin **Type) *NodeList {
+ var savet Iter
+ var v *Node
+
+ var nn *NodeList
+ for t := Structfirst(&savet, argin); t != nil; t = structnext(&savet) {
+ v = t.Nname
+ if v == nil || v.Class != PHEAP|PPARAMOUT {
+ continue
+ }
+ nn = list(nn, Nod(OAS, v.Name.Param.Stackparam, v))
+ }
+
+ return nn
+}
+
+// take care of migrating any function in/out args
+// between the stack and the heap. adds code to
+// curfn's before and after lists.
+func heapmoves() {
+ lno := lineno
+ lineno = Curfn.Lineno
+ nn := paramstoheap(getthis(Curfn.Type), 0)
+ nn = concat(nn, paramstoheap(getinarg(Curfn.Type), 0))
+ nn = concat(nn, paramstoheap(Getoutarg(Curfn.Type), 1))
+ Curfn.Func.Enter = concat(Curfn.Func.Enter, nn)
+ lineno = Curfn.Func.Endlineno
+ Curfn.Func.Exit = returnsfromheap(Getoutarg(Curfn.Type))
+ lineno = lno
+}
+
+func vmkcall(fn *Node, t *Type, init **NodeList, va []*Node) *Node {
+ if fn.Type == nil || fn.Type.Etype != TFUNC {
+ Fatalf("mkcall %v %v", fn, fn.Type)
+ }
+
+ var args *NodeList
+ n := fn.Type.Intuple
+ for i := 0; i < n; i++ {
+ args = list(args, va[i])
+ }
+
+ r := Nod(OCALL, fn, nil)
+ r.List = args
+ if fn.Type.Outtuple > 0 {
+ typecheck(&r, Erv|Efnstruct)
+ } else {
+ typecheck(&r, Etop)
+ }
+ walkexpr(&r, init)
+ r.Type = t
+ return r
+}
+
+func mkcall(name string, t *Type, init **NodeList, args ...*Node) *Node {
+ return vmkcall(syslook(name, 0), t, init, args)
+}
+
+func mkcall1(fn *Node, t *Type, init **NodeList, args ...*Node) *Node {
+ return vmkcall(fn, t, init, args)
+}
+
+func conv(n *Node, t *Type) *Node {
+ if Eqtype(n.Type, t) {
+ return n
+ }
+ n = Nod(OCONV, n, nil)
+ n.Type = t
+ typecheck(&n, Erv)
+ return n
+}
+
+func chanfn(name string, n int, t *Type) *Node {
+ if t.Etype != TCHAN {
+ Fatalf("chanfn %v", t)
+ }
+ fn := syslook(name, 1)
+ switch n {
+ default:
+ Fatalf("chanfn %d", n)
+ case 1:
+ substArgTypes(fn, t.Type)
+ case 2:
+ substArgTypes(fn, t.Type, t.Type)
+ }
+ return fn
+}
+
+func mapfn(name string, t *Type) *Node {
+ if t.Etype != TMAP {
+ Fatalf("mapfn %v", t)
+ }
+ fn := syslook(name, 1)
+ substArgTypes(fn, t.Down, t.Type, t.Down, t.Type)
+ return fn
+}
+
+func mapfndel(name string, t *Type) *Node {
+ if t.Etype != TMAP {
+ Fatalf("mapfn %v", t)
+ }
+ fn := syslook(name, 1)
+ substArgTypes(fn, t.Down, t.Type, t.Down)
+ return fn
+}
+
+func writebarrierfn(name string, l *Type, r *Type) *Node {
+ fn := syslook(name, 1)
+ substArgTypes(fn, l, r)
+ return fn
+}
+
+func addstr(n *Node, init **NodeList) *Node {
+ // orderexpr rewrote OADDSTR to have a list of strings.
+ c := count(n.List)
+
+ if c < 2 {
+ Yyerror("addstr count %d too small", c)
+ }
+
+ buf := nodnil()
+ if n.Esc == EscNone {
+ sz := int64(0)
+ for l := n.List; l != nil; l = l.Next {
+ if n.Op == OLITERAL {
+ sz += int64(len(n.Val().U.(string)))
+ }
+ }
+
+ // Don't allocate the buffer if the result won't fit.
+ if sz < tmpstringbufsize {
+ // Create temporary buffer for result string on stack.
+ t := aindex(Nodintconst(tmpstringbufsize), Types[TUINT8])
+
+ buf = Nod(OADDR, temp(t), nil)
+ }
+ }
+
+ // build list of string arguments
+ args := list1(buf)
+
+ for l := n.List; l != nil; l = l.Next {
+ args = list(args, conv(l.N, Types[TSTRING]))
+ }
+
+ var fn string
+ if c <= 5 {
+ // small numbers of strings use direct runtime helpers.
+ // note: orderexpr knows this cutoff too.
+ fn = fmt.Sprintf("concatstring%d", c)
+ } else {
+ // large numbers of strings are passed to the runtime as a slice.
+ fn = "concatstrings"
+
+ t := typ(TARRAY)
+ t.Type = Types[TSTRING]
+ t.Bound = -1
+ slice := Nod(OCOMPLIT, nil, typenod(t))
+ if prealloc[n] != nil {
+ prealloc[slice] = prealloc[n]
+ }
+ slice.List = args.Next // skip buf arg
+ args = list1(buf)
+ args = list(args, slice)
+ slice.Esc = EscNone
+ }
+
+ cat := syslook(fn, 1)
+ r := Nod(OCALL, cat, nil)
+ r.List = args
+ typecheck(&r, Erv)
+ walkexpr(&r, init)
+ r.Type = n.Type
+
+ return r
+}
+
+// expand append(l1, l2...) to
+// init {
+// s := l1
+// if n := len(l1) + len(l2) - cap(s); n > 0 {
+// s = growslice_n(s, n)
+// }
+// s = s[:len(l1)+len(l2)]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendslice(n *Node, init **NodeList) *Node {
+ walkexprlistsafe(n.List, init)
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ for l := n.List; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ }
+
+ l1 := n.List.N
+ l2 := n.List.Next.N
+
+ s := temp(l1.Type) // var s []T
+ var l *NodeList
+ l = list(l, Nod(OAS, s, l1)) // s = l1
+
+ nt := temp(Types[TINT])
+
+ nif := Nod(OIF, nil, nil)
+
+ // n := len(s) + len(l2) - cap(s)
+ nif.Ninit = list1(Nod(OAS, nt, Nod(OSUB, Nod(OADD, Nod(OLEN, s, nil), Nod(OLEN, l2, nil)), Nod(OCAP, s, nil))))
+
+ nif.Left = Nod(OGT, nt, Nodintconst(0))
+
+ // instantiate growslice_n(Type*, []any, int) []any
+ fn := syslook("growslice_n", 1) // growslice_n(, old []T, n int64) (ret []T)
+ substArgTypes(fn, s.Type.Type, s.Type.Type)
+
+ // s = growslice_n(T, s, n)
+ nif.Nbody = list1(Nod(OAS, s, mkcall1(fn, s.Type, &nif.Ninit, typename(s.Type), s, nt)))
+
+ l = list(l, nif)
+
+ if haspointers(l1.Type.Type) {
+ // copy(s[len(l1):len(l1)+len(l2)], l2)
+ nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+ nptr1.Etype = 1
+ nptr2 := l2
+ fn := syslook("typedslicecopy", 1)
+ substArgTypes(fn, l1.Type, l2.Type)
+ nt := mkcall1(fn, Types[TINT], &l, typename(l1.Type.Type), nptr1, nptr2)
+ l = list(l, nt)
+ } else if instrumenting {
+ // rely on runtime to instrument copy.
+ // copy(s[len(l1):len(l1)+len(l2)], l2)
+ nptr1 := Nod(OSLICE, s, Nod(OKEY, Nod(OLEN, l1, nil), Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))))
+
+ nptr1.Etype = 1
+ nptr2 := l2
+ var fn *Node
+ if l2.Type.Etype == TSTRING {
+ fn = syslook("slicestringcopy", 1)
+ } else {
+ fn = syslook("slicecopy", 1)
+ }
+ substArgTypes(fn, l1.Type, l2.Type)
+ nt := mkcall1(fn, Types[TINT], &l, nptr1, nptr2, Nodintconst(s.Type.Type.Width))
+ l = list(l, nt)
+ } else {
+ // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+ nptr1 := Nod(OINDEX, s, Nod(OLEN, l1, nil))
+
+ nptr1.Bounded = true
+ nptr1 = Nod(OADDR, nptr1, nil)
+
+ nptr2 := Nod(OSPTR, l2, nil)
+
+ fn := syslook("memmove", 1)
+ substArgTypes(fn, s.Type.Type, s.Type.Type)
+
+ nwid := cheapexpr(conv(Nod(OLEN, l2, nil), Types[TUINTPTR]), &l)
+
+ nwid = Nod(OMUL, nwid, Nodintconst(s.Type.Type.Width))
+ nt := mkcall1(fn, nil, &l, nptr1, nptr2, nwid)
+ l = list(l, nt)
+ }
+
+ // s = s[:len(l1)+len(l2)]
+ nt = Nod(OADD, Nod(OLEN, l1, nil), Nod(OLEN, l2, nil))
+
+ nt = Nod(OSLICE, s, Nod(OKEY, nil, nt))
+ nt.Etype = 1
+ l = list(l, Nod(OAS, s, nt))
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return s
+}
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, len(s)+argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+func walkappend(n *Node, init **NodeList, dst *Node) *Node {
+ if !samesafeexpr(dst, n.List.N) {
+ l := n.List
+ l.N = safeexpr(l.N, init)
+ walkexpr(&l.N, init)
+ }
+ walkexprlistsafe(n.List.Next, init)
+
+ // walkexprlistsafe will leave OINDEX (s[n]) alone if both s
+ // and n are name or literal, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ // Using cheapexpr also makes sure that the evaluation
+ // of all arguments (and especially any panics) happen
+ // before we begin to modify the slice in a visible way.
+ for l := n.List.Next; l != nil; l = l.Next {
+ l.N = cheapexpr(l.N, init)
+ }
+
+ nsrc := n.List.N
+
+ // Resolve slice type of multi-valued return.
+ if Istype(nsrc.Type, TSTRUCT) {
+ nsrc.Type = nsrc.Type.Type.Type
+ }
+ argc := count(n.List) - 1
+ if argc < 1 {
+ return nsrc
+ }
+
+ // General case, with no function calls left as arguments.
+ // Leave for gen, except that instrumentation requires old form.
+ if !instrumenting {
+ return n
+ }
+
+ var l *NodeList
+
+ ns := temp(nsrc.Type)
+ l = list(l, Nod(OAS, ns, nsrc)) // s = src
+
+ na := Nodintconst(int64(argc)) // const argc
+ nx := Nod(OIF, nil, nil) // if cap(s) - len(s) < argc
+ nx.Left = Nod(OLT, Nod(OSUB, Nod(OCAP, ns, nil), Nod(OLEN, ns, nil)), na)
+
+ fn := syslook("growslice", 1) // growslice(, old []T, mincap int) (ret []T)
+ substArgTypes(fn, ns.Type.Type, ns.Type.Type)
+
+ nx.Nbody = list1(Nod(OAS, ns, mkcall1(fn, ns.Type, &nx.Ninit, typename(ns.Type), ns, Nod(OADD, Nod(OLEN, ns, nil), na))))
+
+ l = list(l, nx)
+
+ nn := temp(Types[TINT])
+ l = list(l, Nod(OAS, nn, Nod(OLEN, ns, nil))) // n = len(s)
+
+ nx = Nod(OSLICE, ns, Nod(OKEY, nil, Nod(OADD, nn, na))) // ...s[:n+argc]
+ nx.Etype = 1
+ l = list(l, Nod(OAS, ns, nx)) // s = s[:n+argc]
+
+ for a := n.List.Next; a != nil; a = a.Next {
+ nx = Nod(OINDEX, ns, nn) // s[n] ...
+ nx.Bounded = true
+ l = list(l, Nod(OAS, nx, a.N)) // s[n] = arg
+ if a.Next != nil {
+ l = list(l, Nod(OAS, nn, Nod(OADD, nn, Nodintconst(1)))) // n = n + 1
+ }
+ }
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return ns
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// memmove(a.ptr, b.ptr, n*sizeof(elem(a)))
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func copyany(n *Node, init **NodeList, runtimecall bool) *Node {
+ if haspointers(n.Left.Type.Type) {
+ fn := writebarrierfn("typedslicecopy", n.Left.Type, n.Right.Type)
+ return mkcall1(fn, n.Type, init, typename(n.Left.Type.Type), n.Left, n.Right)
+ }
+
+ if runtimecall {
+ var fn *Node
+ if n.Right.Type.Etype == TSTRING {
+ fn = syslook("slicestringcopy", 1)
+ } else {
+ fn = syslook("slicecopy", 1)
+ }
+ substArgTypes(fn, n.Left.Type, n.Right.Type)
+ return mkcall1(fn, n.Type, init, n.Left, n.Right, Nodintconst(n.Left.Type.Type.Width))
+ }
+
+ walkexpr(&n.Left, init)
+ walkexpr(&n.Right, init)
+ nl := temp(n.Left.Type)
+ nr := temp(n.Right.Type)
+ var l *NodeList
+ l = list(l, Nod(OAS, nl, n.Left))
+ l = list(l, Nod(OAS, nr, n.Right))
+
+ nfrm := Nod(OSPTR, nr, nil)
+ nto := Nod(OSPTR, nl, nil)
+
+ nlen := temp(Types[TINT])
+
+ // n = len(to)
+ l = list(l, Nod(OAS, nlen, Nod(OLEN, nl, nil)))
+
+ // if n > len(frm) { n = len(frm) }
+ nif := Nod(OIF, nil, nil)
+
+ nif.Left = Nod(OGT, nlen, Nod(OLEN, nr, nil))
+ nif.Nbody = list(nif.Nbody, Nod(OAS, nlen, Nod(OLEN, nr, nil)))
+ l = list(l, nif)
+
+ // Call memmove.
+ fn := syslook("memmove", 1)
+
+ substArgTypes(fn, nl.Type.Type, nl.Type.Type)
+ nwid := temp(Types[TUINTPTR])
+ l = list(l, Nod(OAS, nwid, conv(nlen, Types[TUINTPTR])))
+ nwid = Nod(OMUL, nwid, Nodintconst(nl.Type.Type.Width))
+ l = list(l, mkcall1(fn, nil, init, nto, nfrm, nwid))
+
+ typechecklist(l, Etop)
+ walkstmtlist(l)
+ *init = concat(*init, l)
+ return nlen
+}
+
+func eqfor(t *Type, needsize *int) *Node {
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled during type check (OCMPSTR etc).
+ a := algtype1(t, nil)
+
+ if a != AMEM && a != -1 {
+ Fatalf("eqfor %v", t)
+ }
+
+ if a == AMEM {
+ n := syslook("memequal", 1)
+ substArgTypes(n, t, t)
+ *needsize = 1
+ return n
+ }
+
+ sym := typesymprefix(".eq", t)
+ n := newname(sym)
+ n.Class = PFUNC
+ ntype := Nod(OTFUNC, nil, nil)
+ ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.List = list(ntype.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+ ntype.Rlist = list(ntype.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TBOOL])))
+ typecheck(&ntype, Etype)
+ n.Type = ntype.Type
+ *needsize = 0
+ return n
+}
+
+func countfield(t *Type) int {
+ n := 0
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ n++
+ }
+ return n
+}
+
+func walkcompare(np **Node, init **NodeList) {
+ n := *np
+
+ // Given interface value l and concrete value r, rewrite
+ // l == r
+ // to
+ // x, ok := l.(type(r)); ok && x == r
+ // Handle != similarly.
+ // This avoids the allocation that would be required
+ // to convert r to l for comparison.
+ var l *Node
+
+ var r *Node
+ if Isinter(n.Left.Type) && !Isinter(n.Right.Type) {
+ l = n.Left
+ r = n.Right
+ } else if !Isinter(n.Left.Type) && Isinter(n.Right.Type) {
+ l = n.Right
+ r = n.Left
+ }
+
+ if l != nil {
+ x := temp(r.Type)
+ if haspointers(r.Type) {
+ a := Nod(OAS, x, nil)
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+ }
+ ok := temp(Types[TBOOL])
+
+ // l.(type(r))
+ a := Nod(ODOTTYPE, l, nil)
+
+ a.Type = r.Type
+
+ // x, ok := l.(type(r))
+ expr := Nod(OAS2, nil, nil)
+
+ expr.List = list1(x)
+ expr.List = list(expr.List, ok)
+ expr.Rlist = list1(a)
+ typecheck(&expr, Etop)
+ walkexpr(&expr, init)
+
+ if n.Op == OEQ {
+ r = Nod(OANDAND, ok, Nod(OEQ, x, r))
+ } else {
+ r = Nod(OOROR, Nod(ONOT, ok, nil), Nod(ONE, x, r))
+ }
+ *init = list(*init, expr)
+ finishcompare(np, n, r, init)
+ return
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ t := n.Left.Type
+
+ switch t.Etype {
+ default:
+ return
+
+ case TARRAY:
+ if Isslice(t) {
+ return
+ }
+
+ case TSTRUCT:
+ break
+ }
+
+ cmpl := n.Left
+ for cmpl != nil && cmpl.Op == OCONVNOP {
+ cmpl = cmpl.Left
+ }
+ cmpr := n.Right
+ for cmpr != nil && cmpr.Op == OCONVNOP {
+ cmpr = cmpr.Left
+ }
+
+ if !islvalue(cmpl) || !islvalue(cmpr) {
+ Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ }
+
+ l = temp(Ptrto(t))
+ a := Nod(OAS, l, Nod(OADDR, cmpl, nil))
+ a.Right.Etype = 1 // addr does not escape
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+
+ r = temp(Ptrto(t))
+ a = Nod(OAS, r, Nod(OADDR, cmpr, nil))
+ a.Right.Etype = 1 // addr does not escape
+ typecheck(&a, Etop)
+ *init = list(*init, a)
+
+ var andor Op = OANDAND
+ if n.Op == ONE {
+ andor = OOROR
+ }
+
+ var expr *Node
+ if t.Etype == TARRAY && t.Bound <= 4 && issimple[t.Type.Etype] {
+ // Four or fewer elements of a basic type.
+ // Unroll comparisons.
+ var li *Node
+ var ri *Node
+ for i := 0; int64(i) < t.Bound; i++ {
+ li = Nod(OINDEX, l, Nodintconst(int64(i)))
+ ri = Nod(OINDEX, r, Nodintconst(int64(i)))
+ a = Nod(n.Op, li, ri)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = Nod(andor, expr, a)
+ }
+ }
+
+ if expr == nil {
+ expr = Nodbool(n.Op == OEQ)
+ }
+ finishcompare(np, n, expr, init)
+ return
+ }
+
+ if t.Etype == TSTRUCT && countfield(t) <= 4 {
+ // Struct of four or fewer fields.
+ // Inline comparisons.
+ var li *Node
+ var ri *Node
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ if isblanksym(t1.Sym) {
+ continue
+ }
+ li = Nod(OXDOT, l, newname(t1.Sym))
+ ri = Nod(OXDOT, r, newname(t1.Sym))
+ a = Nod(n.Op, li, ri)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = Nod(andor, expr, a)
+ }
+ }
+
+ if expr == nil {
+ expr = Nodbool(n.Op == OEQ)
+ }
+ finishcompare(np, n, expr, init)
+ return
+ }
+
+ // Chose not to inline. Call equality function directly.
+ var needsize int
+ call := Nod(OCALL, eqfor(t, &needsize), nil)
+
+ call.List = list(call.List, l)
+ call.List = list(call.List, r)
+ if needsize != 0 {
+ call.List = list(call.List, Nodintconst(t.Width))
+ }
+ r = call
+ if n.Op != OEQ {
+ r = Nod(ONOT, r, nil)
+ }
+
+ finishcompare(np, n, r, init)
+ return
+}
+
+func finishcompare(np **Node, n, r *Node, init **NodeList) {
+ // Using np here to avoid passing &r to typecheck.
+ *np = r
+ typecheck(np, Erv)
+ walkexpr(np, init)
+ r = *np
+ if r.Type != n.Type {
+ r = Nod(OCONVNOP, r, nil)
+ r.Type = n.Type
+ r.Typecheck = 1
+ *np = r
+ }
+}
+
+func samecheap(a *Node, b *Node) bool {
+ var ar *Node
+ var br *Node
+ for a != nil && b != nil && a.Op == b.Op {
+ switch a.Op {
+ default:
+ return false
+
+ case ONAME:
+ return a == b
+
+ case ODOT, ODOTPTR:
+ ar = a.Right
+ br = b.Right
+ if ar.Op != ONAME || br.Op != ONAME || ar.Sym != br.Sym {
+ return false
+ }
+
+ case OINDEX:
+ ar = a.Right
+ br = b.Right
+ if !Isconst(ar, CTINT) || !Isconst(br, CTINT) || Mpcmpfixfix(ar.Val().U.(*Mpint), br.Val().U.(*Mpint)) != 0 {
+ return false
+ }
+ }
+
+ a = a.Left
+ b = b.Left
+ }
+
+ return false
+}
+
+func walkrotate(np **Node) {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ return
+ }
+
+ n := *np
+
+ // Want << | >> or >> | << or << ^ >> or >> ^ << on unsigned value.
+ l := n.Left
+
+ r := n.Right
+ if (n.Op != OOR && n.Op != OXOR) || (l.Op != OLSH && l.Op != ORSH) || (r.Op != OLSH && r.Op != ORSH) || n.Type == nil || Issigned[n.Type.Etype] || l.Op == r.Op {
+ return
+ }
+
+ // Want same, side effect-free expression on lhs of both shifts.
+ if !samecheap(l.Left, r.Left) {
+ return
+ }
+
+ // Constants adding to width?
+ w := int(l.Type.Width * 8)
+
+ if Smallintconst(l.Right) && Smallintconst(r.Right) {
+ sl := int(Mpgetfix(l.Right.Val().U.(*Mpint)))
+ if sl >= 0 {
+ sr := int(Mpgetfix(r.Right.Val().U.(*Mpint)))
+ if sr >= 0 && sl+sr == w {
+ // Rewrite left shift half to left rotate.
+ if l.Op == OLSH {
+ n = l
+ } else {
+ n = r
+ }
+ n.Op = OLROT
+
+ // Remove rotate 0 and rotate w.
+ s := int(Mpgetfix(n.Right.Val().U.(*Mpint)))
+
+ if s == 0 || s == w {
+ n = n.Left
+ }
+
+ *np = n
+ return
+ }
+ }
+ return
+ }
+
+ // TODO: Could allow s and 32-s if s is bounded (maybe s&31 and 32-s&31).
+ return
+}
+
+// walkmul rewrites integer multiplication by powers of two as shifts.
+func walkmul(np **Node, init **NodeList) {
+ n := *np
+ if !Isint[n.Type.Etype] {
+ return
+ }
+
+ var nr *Node
+ var nl *Node
+ if n.Right.Op == OLITERAL {
+ nl = n.Left
+ nr = n.Right
+ } else if n.Left.Op == OLITERAL {
+ nl = n.Right
+ nr = n.Left
+ } else {
+ return
+ }
+
+ neg := 0
+
+ // x*0 is 0 (and side effects of x).
+ var pow int
+ var w int
+ if Mpgetfix(nr.Val().U.(*Mpint)) == 0 {
+ cheapexpr(nl, init)
+ Nodconst(n, n.Type, 0)
+ goto ret
+ }
+
+ // nr is a constant.
+ pow = powtwo(nr)
+
+ if pow < 0 {
+ return
+ }
+ if pow >= 1000 {
+ // negative power of 2, like -16
+ neg = 1
+
+ pow -= 1000
+ }
+
+ w = int(nl.Type.Width * 8)
+ if pow+1 >= w { // too big, shouldn't happen
+ return
+ }
+
+ nl = cheapexpr(nl, init)
+
+ if pow == 0 {
+ // x*1 is x
+ n = nl
+
+ goto ret
+ }
+
+ n = Nod(OLSH, nl, Nodintconst(int64(pow)))
+
+ret:
+ if neg != 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ *np = n
+}
+
+// walkdiv rewrites division by a constant as less expensive
+// operations.
+func walkdiv(np **Node, init **NodeList) {
+ // if >= 0, nr is 1<= 0, nr is 1<= 1000 {
+ // negative power of 2
+ s = 1
+
+ pow -= 1000
+ }
+
+ if pow+1 >= w {
+ // divisor too large.
+ return
+ }
+
+ if pow < 0 {
+ // try to do division by multiply by (2^w)/d
+ // see hacker's delight chapter 10
+ // TODO: support 64-bit magic multiply here.
+ var m Magic
+ m.W = w
+
+ if Issigned[nl.Type.Etype] {
+ m.Sd = Mpgetfix(nr.Val().U.(*Mpint))
+ Smagic(&m)
+ } else {
+ m.Ud = uint64(Mpgetfix(nr.Val().U.(*Mpint)))
+ Umagic(&m)
+ }
+
+ if m.Bad != 0 {
+ return
+ }
+
+ // We have a quick division method so use it
+ // for modulo too.
+ if n.Op == OMOD {
+ // rewrite as A%B = A - (A/B*B).
+ n1 := Nod(ODIV, nl, nr)
+
+ n2 := Nod(OMUL, n1, nr)
+ n = Nod(OSUB, nl, n2)
+ goto ret
+ }
+
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ // n1 = nl * magic >> w (HMUL)
+ case TUINT8, TUINT16, TUINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, int64(m.Um))
+ n1 := Nod(OHMUL, nl, nc)
+ typecheck(&n1, Erv)
+ if m.Ua != 0 {
+ // Select a Go type with (at least) twice the width.
+ var twide *Type
+ switch Simtype[nl.Type.Etype] {
+ default:
+ return
+
+ case TUINT8, TUINT16:
+ twide = Types[TUINT32]
+
+ case TUINT32:
+ twide = Types[TUINT64]
+
+ case TINT8, TINT16:
+ twide = Types[TINT32]
+
+ case TINT32:
+ twide = Types[TINT64]
+ }
+
+ // add numerator (might overflow).
+ // n2 = (n1 + nl)
+ n2 := Nod(OADD, conv(n1, twide), conv(nl, twide))
+
+ // shift by m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = conv(Nod(ORSH, n2, nc), nl.Type)
+ } else {
+ // n = n1 >> m.s
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n = Nod(ORSH, n1, nc)
+ }
+
+ // n1 = nl * magic >> w
+ case TINT8, TINT16, TINT32:
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, m.Sm)
+ n1 := Nod(OHMUL, nl, nc)
+ typecheck(&n1, Erv)
+ if m.Sm < 0 {
+ // add the numerator.
+ n1 = Nod(OADD, n1, nl)
+ }
+
+ // shift by m.s
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(m.S))
+ n2 := conv(Nod(ORSH, n1, nc), nl.Type)
+
+ // add 1 iff n1 is negative.
+ nc = Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[TUINT], int64(w)-1)
+ n3 := Nod(ORSH, nl, nc) // n4 = -1 iff n1 is negative.
+ n = Nod(OSUB, n2, n3)
+
+ // apply sign.
+ if m.Sd < 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ }
+
+ goto ret
+ }
+
+ switch pow {
+ case 0:
+ if n.Op == OMOD {
+ // nl % 1 is zero.
+ Nodconst(n, n.Type, 0)
+ } else if s != 0 {
+ // divide by -1
+ n.Op = OMINUS
+
+ n.Right = nil
+ } else {
+ // divide by 1
+ n = nl
+ }
+
+ default:
+ if Issigned[n.Type.Etype] {
+ if n.Op == OMOD {
+ // signed modulo 2^pow is like ANDing
+ // with the last pow bits, but if nl < 0,
+ // nl & (2^pow-1) is (nl+1)%2^pow - 1.
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+ n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+ if pow == 1 {
+ typecheck(&n1, Erv)
+ n1 = cheapexpr(n1, init)
+
+ // n = (nl+ε)&1 -ε where ε=1 iff nl<0.
+ n2 := Nod(OSUB, nl, n1)
+
+ nc := Nod(OXXX, nil, nil)
+ Nodconst(nc, nl.Type, 1)
+ n3 := Nod(OAND, n2, nc)
+ n = Nod(OADD, n3, n1)
+ } else {
+ // n = (nl+ε)&(nr-1) - ε where ε=2^pow-1 iff nl<0.
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, nl.Type, (1<= 0, nl >> n == nl / nr
+ // if nl < 0, we want to add 2^n-1 first.
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-1)
+ n1 := Nod(ORSH, nl, nc) // n1 = -1 iff nl < 0.
+ if pow == 1 {
+ // nl+1 is nl-(-1)
+ n.Left = Nod(OSUB, nl, n1)
+ } else {
+ // Do a logical right right on -1 to keep pow bits.
+ nc := Nod(OXXX, nil, nil)
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(w)-int64(pow))
+ n2 := Nod(ORSH, conv(n1, tounsigned(nl.Type)), nc)
+ n.Left = Nod(OADD, nl, conv(n2, nl.Type))
+ }
+
+ // n = (nl + 2^pow-1) >> pow
+ n.Op = ORSH
+
+ nc = Nod(OXXX, nil, nil)
+ Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+ n.Right = nc
+ n.Typecheck = 0
+ }
+
+ if s != 0 {
+ n = Nod(OMINUS, n, nil)
+ }
+ break
+ }
+
+ nc := Nod(OXXX, nil, nil)
+ if n.Op == OMOD {
+ // n = nl & (nr-1)
+ n.Op = OAND
+
+ Nodconst(nc, nl.Type, Mpgetfix(nr.Val().U.(*Mpint))-1)
+ } else {
+ // n = nl >> pow
+ n.Op = ORSH
+
+ Nodconst(nc, Types[Simtype[TUINT]], int64(pow))
+ }
+
+ n.Typecheck = 0
+ n.Right = nc
+ }
+
+ goto ret
+
+ret:
+ typecheck(&n, Erv)
+ walkexpr(&n, init)
+ *np = n
+}
+
+// return 1 if integer n must be in range [0, max), 0 otherwise
+func bounded(n *Node, max int64) bool {
+ if n.Type == nil || !Isint[n.Type.Etype] {
+ return false
+ }
+
+ sign := Issigned[n.Type.Etype]
+ bits := int32(8 * n.Type.Width)
+
+ if Smallintconst(n) {
+ v := Mpgetfix(n.Val().U.(*Mpint))
+ return 0 <= v && v < max
+ }
+
+ switch n.Op {
+ case OAND:
+ v := int64(-1)
+ if Smallintconst(n.Left) {
+ v = Mpgetfix(n.Left.Val().U.(*Mpint))
+ } else if Smallintconst(n.Right) {
+ v = Mpgetfix(n.Right.Val().U.(*Mpint))
+ }
+
+ if 0 <= v && v < max {
+ return true
+ }
+
+ case OMOD:
+ if !sign && Smallintconst(n.Right) {
+ v := Mpgetfix(n.Right.Val().U.(*Mpint))
+ if 0 <= v && v <= max {
+ return true
+ }
+ }
+
+ case ODIV:
+ if !sign && Smallintconst(n.Right) {
+ v := Mpgetfix(n.Right.Val().U.(*Mpint))
+ for bits > 0 && v >= 2 {
+ bits--
+ v >>= 1
+ }
+ }
+
+ case ORSH:
+ if !sign && Smallintconst(n.Right) {
+ v := Mpgetfix(n.Right.Val().U.(*Mpint))
+ if v > int64(bits) {
+ return true
+ }
+ bits -= int32(v)
+ }
+ }
+
+ if !sign && bits <= 62 && 1< 1 || slash == `\` && len(goroot) > 3 {
+ // if not "/" or "c:\", then strip trailing path separator
+ goroot = strings.TrimSuffix(goroot, slash)
+ }
+ if goroot == "" {
+ fatal("$GOROOT must be set")
+ }
+
+ goroot_final = os.Getenv("GOROOT_FINAL")
+ if goroot_final == "" {
+ goroot_final = goroot
+ }
+
+ b := os.Getenv("GOBIN")
+ if b == "" {
+ b = goroot + slash + "bin"
+ }
+ gobin = b
+
+ b = os.Getenv("GOOS")
+ if b == "" {
+ b = gohostos
+ }
+ goos = b
+ if find(goos, okgoos) < 0 {
+ fatal("unknown $GOOS %s", goos)
+ }
+
+ b = os.Getenv("GOARM")
+ if b == "" {
+ b = xgetgoarm()
+ }
+ goarm = b
+
+ b = os.Getenv("GO386")
+ if b == "" {
+ if cansse2() {
+ b = "sse2"
+ } else {
+ b = "387"
+ }
+ }
+ go386 = b
+
+ p := pathf("%s/src/all.bash", goroot)
+ if !isfile(p) {
+ fatal("$GOROOT is not set correctly or not exported\n"+
+ "\tGOROOT=%s\n"+
+ "\t%s does not exist", goroot, p)
+ }
+
+ b = os.Getenv("GOHOSTARCH")
+ if b != "" {
+ gohostarch = b
+ }
+
+ if find(gohostarch, okgoarch) < 0 {
+ fatal("unknown $GOHOSTARCH %s", gohostarch)
+ }
+
+ b = os.Getenv("GOARCH")
+ if b == "" {
+ b = gohostarch
+ }
+ goarch = b
+ if find(goarch, okgoarch) < 0 {
+ fatal("unknown $GOARCH %s", goarch)
+ }
+
+ b = os.Getenv("GO_EXTLINK_ENABLED")
+ if b != "" {
+ if b != "0" && b != "1" {
+ fatal("unknown $GO_EXTLINK_ENABLED %s", b)
+ }
+ goextlinkenabled = b
+ }
+
+ b = os.Getenv("CC")
+ if b == "" {
+ // Use clang on OS X, because gcc is deprecated there.
+ // Xcode for OS X 10.9 Mavericks will ship a fake "gcc" binary that
+ // actually runs clang. We prepare different command
+ // lines for the two binaries, so it matters what we call it.
+ // See golang.org/issue/5822.
+ if defaultclang {
+ b = "clang"
+ } else {
+ b = "gcc"
+ }
+ }
+ defaultcc = b
+
+ defaultcflags = os.Getenv("CFLAGS")
+
+ defaultldflags = os.Getenv("LDFLAGS")
+
+ b = os.Getenv("CC_FOR_TARGET")
+ if b == "" {
+ b = defaultcc
+ }
+ defaultcctarget = b
+
+ b = os.Getenv("CXX_FOR_TARGET")
+ if b == "" {
+ b = os.Getenv("CXX")
+ if b == "" {
+ if defaultclang {
+ b = "clang++"
+ } else {
+ b = "g++"
+ }
+ }
+ }
+ defaultcxxtarget = b
+
+ // For tools being invoked but also for os.ExpandEnv.
+ os.Setenv("GO386", go386)
+ os.Setenv("GOARCH", goarch)
+ os.Setenv("GOARM", goarm)
+ os.Setenv("GOHOSTARCH", gohostarch)
+ os.Setenv("GOHOSTOS", gohostos)
+ os.Setenv("GOOS", goos)
+ os.Setenv("GOROOT", goroot)
+ os.Setenv("GOROOT_FINAL", goroot_final)
+
+ // Make the environment more predictable.
+ os.Setenv("LANG", "C")
+ os.Setenv("LANGUAGE", "en_US.UTF8")
+
+ workdir = xworkdir()
+ xatexit(rmworkdir)
+
+ tooldir = pathf("%s/pkg/tool/%s_%s", goroot, gohostos, gohostarch)
+}
+
+// rmworkdir deletes the work directory.
+func rmworkdir() {
+ if vflag > 1 {
+ errprintf("rm -rf %s\n", workdir)
+ }
+ xremoveall(workdir)
+}
+
+// Remove trailing spaces.
+func chomp(s string) string {
+ return strings.TrimRight(s, " \t\r\n")
+}
+
+func branchtag(branch string) (tag string, precise bool) {
+ b := run(goroot, CheckExit, "git", "log", "--decorate=full", "--format=format:%d", "master.."+branch)
+ tag = branch
+ for _, line := range splitlines(b) {
+ // Each line is either blank, or looks like
+ // (tag: refs/tags/go1.4rc2, refs/remotes/origin/release-branch.go1.4, refs/heads/release-branch.go1.4)
+ // We need to find an element starting with refs/tags/.
+ i := strings.Index(line, " refs/tags/")
+ if i < 0 {
+ continue
+ }
+ i += len(" refs/tags/")
+ // The tag name ends at a comma or paren (prefer the first).
+ j := strings.Index(line[i:], ",")
+ if j < 0 {
+ j = strings.Index(line[i:], ")")
+ }
+ if j < 0 {
+ continue // malformed line; ignore it
+ }
+ tag = line[i : i+j]
+ if i == 0 {
+ precise = true // tag denotes HEAD
+ }
+ break
+ }
+ return
+}
+
+// findgoversion determines the Go version to use in the version string.
+func findgoversion() string {
+ // The $GOROOT/VERSION file takes priority, for distributions
+ // without the source repo.
+ path := pathf("%s/VERSION", goroot)
+ if isfile(path) {
+ b := chomp(readfile(path))
+ // Commands such as "dist version > VERSION" will cause
+ // the shell to create an empty VERSION file and set dist's
+ // stdout to its fd. dist in turn looks at VERSION and uses
+ // its content if available, which is empty at this point.
+ // Only use the VERSION file if it is non-empty.
+ if b != "" {
+ return b
+ }
+ }
+
+ // The $GOROOT/VERSION.cache file is a cache to avoid invoking
+ // git every time we run this command. Unlike VERSION, it gets
+ // deleted by the clean command.
+ path = pathf("%s/VERSION.cache", goroot)
+ if isfile(path) {
+ return chomp(readfile(path))
+ }
+
+ // Show a nicer error message if this isn't a Git repo.
+ if !isGitRepo() {
+ fatal("FAILED: not a Git repo; must put a VERSION file in $GOROOT")
+ }
+
+ // Otherwise, use Git.
+ // What is the current branch?
+ branch := chomp(run(goroot, CheckExit, "git", "rev-parse", "--abbrev-ref", "HEAD"))
+
+ // What are the tags along the current branch?
+ tag := "devel"
+ precise := false
+
+ // If we're on a release branch, use the closest matching tag
+ // that is on the release branch (and not on the master branch).
+ if strings.HasPrefix(branch, "release-branch.") {
+ tag, precise = branchtag(branch)
+ }
+
+ if !precise {
+ // Tag does not point at HEAD; add hash and date to version.
+ tag += chomp(run(goroot, CheckExit, "git", "log", "-n", "1", "--format=format: +%h %cd", "HEAD"))
+ }
+
+ // Cache version.
+ writefile(tag, path, 0)
+
+ return tag
+}
+
+// isGitRepo reports whether the working directory is inside a Git repository.
+func isGitRepo() bool {
+ // NB: simply checking the exit code of `git rev-parse --git-dir` would
+ // suffice here, but that requires deviating from the infrastructure
+ // provided by `run`.
+ gitDir := chomp(run(goroot, 0, "git", "rev-parse", "--git-dir"))
+ if !filepath.IsAbs(gitDir) {
+ gitDir = filepath.Join(goroot, gitDir)
+ }
+ fi, err := os.Stat(gitDir)
+ return err == nil && fi.IsDir()
+}
+
+/*
+ * Initial tree setup.
+ */
+
+// The old tools that no longer live in $GOBIN or $GOROOT/bin.
+var oldtool = []string{
+ "5a", "5c", "5g", "5l",
+ "6a", "6c", "6g", "6l",
+ "8a", "8c", "8g", "8l",
+ "9a", "9c", "9g", "9l",
+ "6cov",
+ "6nm",
+ "6prof",
+ "cgo",
+ "ebnflint",
+ "goapi",
+ "gofix",
+ "goinstall",
+ "gomake",
+ "gopack",
+ "gopprof",
+ "gotest",
+ "gotype",
+ "govet",
+ "goyacc",
+ "quietgcc",
+}
+
+// Unreleased directories (relative to $GOROOT) that should
+// not be in release branches.
+var unreleased = []string{
+ "src/cmd/newlink",
+ "src/cmd/objwriter",
+ "src/debug/goobj",
+ "src/old",
+}
+
+// setup sets up the tree for the initial build.
+func setup() {
+ // Create bin directory.
+ if p := pathf("%s/bin", goroot); !isdir(p) {
+ xmkdir(p)
+ }
+
+ // Create package directory.
+ if p := pathf("%s/pkg", goroot); !isdir(p) {
+ xmkdir(p)
+ }
+
+ p := pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch)
+ if rebuildall {
+ xremoveall(p)
+ }
+ xmkdirall(p)
+
+ if goos != gohostos || goarch != gohostarch {
+ p := pathf("%s/pkg/%s_%s", goroot, goos, goarch)
+ if rebuildall {
+ xremoveall(p)
+ }
+ xmkdirall(p)
+ }
+
+ // Create object directory.
+ // We keep it in pkg/ so that all the generated binaries
+ // are in one tree. If pkg/obj/libgc.a exists, it is a dreg from
+ // before we used subdirectories of obj. Delete all of obj
+ // to clean up.
+ if p := pathf("%s/pkg/obj/libgc.a", goroot); isfile(p) {
+ xremoveall(pathf("%s/pkg/obj", goroot))
+ }
+ p = pathf("%s/pkg/obj/%s_%s", goroot, gohostos, gohostarch)
+ if rebuildall {
+ xremoveall(p)
+ }
+ xmkdirall(p)
+
+ // Create tool directory.
+ // We keep it in pkg/, just like the object directory above.
+ if rebuildall {
+ xremoveall(tooldir)
+ }
+ xmkdirall(tooldir)
+
+ // Remove tool binaries from before the tool/gohostos_gohostarch
+ xremoveall(pathf("%s/bin/tool", goroot))
+
+ // Remove old pre-tool binaries.
+ for _, old := range oldtool {
+ xremove(pathf("%s/bin/%s", goroot, old))
+ }
+
+ // If $GOBIN is set and has a Go compiler, it must be cleaned.
+ for _, char := range "56789" {
+ if isfile(pathf("%s%s%c%s", gobin, slash, char, "g")) {
+ for _, old := range oldtool {
+ xremove(pathf("%s/%s", gobin, old))
+ }
+ break
+ }
+ }
+
+ // For release, make sure excluded things are excluded.
+ goversion := findgoversion()
+ if strings.HasPrefix(goversion, "release.") || (strings.HasPrefix(goversion, "go") && !strings.Contains(goversion, "beta")) {
+ for _, dir := range unreleased {
+ if p := pathf("%s/%s", goroot, dir); isdir(p) {
+ fatal("%s should not exist in release build", p)
+ }
+ }
+ }
+}
+
+/*
+ * Tool building
+ */
+
+// deptab lists changes to the default dependencies for a given prefix.
+// deps ending in /* read the whole directory; deps beginning with -
+// exclude files with that prefix.
+var deptab = []struct {
+ prefix string // prefix of target
+ dep []string // dependency tweaks for targets with that prefix
+}{
+ {"cmd/go", []string{
+ "zdefaultcc.go",
+ }},
+ {"runtime/internal/sys", []string{
+ "zversion.go",
+ }},
+}
+
+// depsuffix records the allowed suffixes for source files.
+var depsuffix = []string{
+ ".s",
+ ".go",
+}
+
+// gentab records how to generate some trivial files.
+var gentab = []struct {
+ nameprefix string
+ gen func(string, string)
+}{
+ {"zdefaultcc.go", mkzdefaultcc},
+ {"zversion.go", mkzversion},
+
+ // not generated anymore, but delete the file if we see it
+ {"enam.c", nil},
+ {"anames5.c", nil},
+ {"anames6.c", nil},
+ {"anames8.c", nil},
+ {"anames9.c", nil},
+}
+
+// installed maps from a dir name (as given to install) to a chan
+// closed when the dir's package is installed.
+var installed = make(map[string]chan struct{})
+
+// install installs the library, package, or binary associated with dir,
+// which is relative to $GOROOT/src.
+func install(dir string) {
+ if ch, ok := installed[dir]; ok {
+ defer close(ch)
+ }
+ for _, dep := range builddeps[dir] {
+ <-installed[dep]
+ }
+
+ if vflag > 0 {
+ if goos != gohostos || goarch != gohostarch {
+ errprintf("%s (%s/%s)\n", dir, goos, goarch)
+ } else {
+ errprintf("%s\n", dir)
+ }
+ }
+
+ workdir := pathf("%s/%s", workdir, dir)
+ xmkdirall(workdir)
+
+ var clean []string
+ defer func() {
+ for _, name := range clean {
+ xremove(name)
+ }
+ }()
+
+ // path = full path to dir.
+ path := pathf("%s/src/%s", goroot, dir)
+ name := filepath.Base(dir)
+
+ ispkg := !strings.HasPrefix(dir, "cmd/") || strings.HasPrefix(dir, "cmd/internal/") || strings.HasPrefix(dir, "cmd/asm/internal/")
+
+ // Start final link command line.
+ // Note: code below knows that link.p[targ] is the target.
+ var (
+ link []string
+ targ int
+ ispackcmd bool
+ )
+ if ispkg {
+ // Go library (package).
+ ispackcmd = true
+ link = []string{"pack", pathf("%s/pkg/%s_%s/%s.a", goroot, goos, goarch, dir)}
+ targ = len(link) - 1
+ xmkdirall(filepath.Dir(link[targ]))
+ } else {
+ // Go command.
+ elem := name
+ if elem == "go" {
+ elem = "go_bootstrap"
+ }
+ link = []string{pathf("%s/link", tooldir), "-o", pathf("%s/%s%s", tooldir, elem, exe)}
+ targ = len(link) - 1
+ }
+ ttarg := mtime(link[targ])
+
+ // Gather files that are sources for this target.
+ // Everything in that directory, and any target-specific
+ // additions.
+ files := xreaddir(path)
+
+ // Remove files beginning with . or _,
+ // which are likely to be editor temporary files.
+ // This is the same heuristic build.ScanDir uses.
+ // There do exist real C files beginning with _,
+ // so limit that check to just Go files.
+ files = filter(files, func(p string) bool {
+ return !strings.HasPrefix(p, ".") && (!strings.HasPrefix(p, "_") || !strings.HasSuffix(p, ".go"))
+ })
+
+ for _, dt := range deptab {
+ if dir == dt.prefix || strings.HasSuffix(dt.prefix, "/") && strings.HasPrefix(dir, dt.prefix) {
+ for _, p := range dt.dep {
+ p = os.ExpandEnv(p)
+ files = append(files, p)
+ }
+ }
+ }
+ files = uniq(files)
+
+ // Convert to absolute paths.
+ for i, p := range files {
+ if !isabs(p) {
+ files[i] = pathf("%s/%s", path, p)
+ }
+ }
+
+ // Is the target up-to-date?
+ var gofiles, missing []string
+ stale := rebuildall
+ files = filter(files, func(p string) bool {
+ for _, suf := range depsuffix {
+ if strings.HasSuffix(p, suf) {
+ goto ok
+ }
+ }
+ return false
+ ok:
+ t := mtime(p)
+ if !t.IsZero() && !strings.HasSuffix(p, ".a") && !shouldbuild(p, dir) {
+ return false
+ }
+ if strings.HasSuffix(p, ".go") {
+ gofiles = append(gofiles, p)
+ }
+ if t.After(ttarg) {
+ stale = true
+ }
+ if t.IsZero() {
+ missing = append(missing, p)
+ }
+ return true
+ })
+
+ // If there are no files to compile, we're done.
+ if len(files) == 0 {
+ return
+ }
+
+ if !stale {
+ return
+ }
+
+ // For package runtime, copy some files into the work space.
+ if dir == "runtime" || strings.HasPrefix(dir, "runtime/internal/") {
+ xmkdirall(pathf("%s/pkg/include", goroot))
+ // For use by assembly and C files.
+ copyfile(pathf("%s/pkg/include/textflag.h", goroot),
+ pathf("%s/src/runtime/textflag.h", goroot), 0)
+ copyfile(pathf("%s/pkg/include/funcdata.h", goroot),
+ pathf("%s/src/runtime/funcdata.h", goroot), 0)
+ copyfile(pathf("%s/pkg/include/asm_ppc64x.h", goroot),
+ pathf("%s/src/runtime/asm_ppc64x.h", goroot), 0)
+ }
+
+ // Generate any missing files; regenerate existing ones.
+ for _, p := range files {
+ elem := filepath.Base(p)
+ for _, gt := range gentab {
+ if gt.gen == nil {
+ continue
+ }
+ if strings.HasPrefix(elem, gt.nameprefix) {
+ if vflag > 1 {
+ errprintf("generate %s\n", p)
+ }
+ gt.gen(path, p)
+ // Do not add generated file to clean list.
+ // In runtime, we want to be able to
+ // build the package with the go tool,
+ // and it assumes these generated files already
+ // exist (it does not know how to build them).
+ // The 'clean' command can remove
+ // the generated files.
+ goto built
+ }
+ }
+ // Did not rebuild p.
+ if find(p, missing) >= 0 {
+ fatal("missing file %s", p)
+ }
+ built:
+ }
+
+ if goos != gohostos || goarch != gohostarch {
+ // We've generated the right files; the go command can do the build.
+ if vflag > 1 {
+ errprintf("skip build for cross-compile %s\n", dir)
+ }
+ return
+ }
+
+ var archive string
+ // The next loop will compile individual non-Go files.
+ // Hand the Go files to the compiler en masse.
+ // For package runtime, this writes go_asm.h, which
+ // the assembly files will need.
+ pkg := dir
+ if strings.HasPrefix(dir, "cmd/") {
+ pkg = "main"
+ }
+ b := pathf("%s/_go_.a", workdir)
+ clean = append(clean, b)
+ if !ispackcmd {
+ link = append(link, b)
+ } else {
+ archive = b
+ }
+ compile := []string{pathf("%s/compile", tooldir), "-pack", "-o", b, "-p", pkg}
+ if dir == "runtime" {
+ compile = append(compile, "-+", "-asmhdr", pathf("%s/go_asm.h", workdir))
+ }
+ compile = append(compile, gofiles...)
+ run(path, CheckExit|ShowOutput, compile...)
+
+ // Compile the files.
+ var wg sync.WaitGroup
+ for _, p := range files {
+ if !strings.HasSuffix(p, ".s") {
+ continue
+ }
+
+ var compile []string
+ // Assembly file for a Go package.
+ compile = []string{
+ pathf("%s/asm", tooldir),
+ "-I", workdir,
+ "-I", pathf("%s/pkg/include", goroot),
+ "-D", "GOOS_" + goos,
+ "-D", "GOARCH_" + goarch,
+ "-D", "GOOS_GOARCH_" + goos + "_" + goarch,
+ }
+
+ doclean := true
+ b := pathf("%s/%s", workdir, filepath.Base(p))
+
+ // Change the last character of the output file (which was c or s).
+ b = b[:len(b)-1] + "o"
+ compile = append(compile, "-o", b, p)
+ bgrun(&wg, path, compile...)
+
+ link = append(link, b)
+ if doclean {
+ clean = append(clean, b)
+ }
+ }
+ bgwait(&wg)
+
+ if ispackcmd {
+ xremove(link[targ])
+ dopack(link[targ], archive, link[targ+1:])
+ return
+ }
+
+ // Remove target before writing it.
+ xremove(link[targ])
+ run("", CheckExit|ShowOutput, link...)
+}
+
+// matchfield reports whether the field (x,y,z) matches this build.
+// all the elements in the field must be satisfied.
+func matchfield(f string) bool {
+ for _, tag := range strings.Split(f, ",") {
+ if !matchtag(tag) {
+ return false
+ }
+ }
+ return true
+}
+
+// matchtag reports whether the tag (x or !x) matches this build.
+func matchtag(tag string) bool {
+ if tag == "" {
+ return false
+ }
+ if tag[0] == '!' {
+ if len(tag) == 1 || tag[1] == '!' {
+ return false
+ }
+ return !matchtag(tag[1:])
+ }
+ return tag == goos || tag == goarch || tag == "cmd_go_bootstrap" || tag == "go1.1" || (goos == "android" && tag == "linux")
+}
+
// shouldbuild reports whether we should build this file.
// It applies the same rules that are used with context tags
// in package go/build, except that the GOOS and GOARCH
// can appear anywhere in the file name, not just after _.
// In particular, they can be the entire file name (like windows.c).
// We also allow the special tag cmd_go_bootstrap.
// See ../go/bootstrap.go and package go/build.
func shouldbuild(file, dir string) bool {
	// Check file name for GOOS or GOARCH.
	name := filepath.Base(file)
	// excluded reports whether name mentions some entry of list other
	// than ok (the value for this build), with the entry followed by
	// end-of-name, '.' or '_' — i.e. the file targets a different
	// OS/arch. Note it checks only the first occurrence of each entry.
	excluded := func(list []string, ok string) bool {
		for _, x := range list {
			if x == ok {
				continue
			}
			i := strings.Index(name, x)
			if i < 0 {
				continue
			}
			i += len(x)
			if i == len(name) || name[i] == '.' || name[i] == '_' {
				return true
			}
		}
		return false
	}
	if excluded(okgoos, goos) || excluded(okgoarch, goarch) {
		return false
	}

	// Omit test files.
	if strings.Contains(name, "_test") {
		return false
	}

	// Check file contents for // +build lines.
	for _, p := range splitlines(readfile(file)) {
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		// Files declaring package documentation, or package main
		// outside cmd/go and cmd/cgo, are not part of the build.
		if strings.Contains(p, "package documentation") {
			return false
		}
		if strings.Contains(p, "package main") && dir != "cmd/go" && dir != "cmd/cgo" {
			return false
		}
		// Build constraints may appear only before the first
		// non-comment, non-blank line.
		if !strings.HasPrefix(p, "//") {
			break
		}
		if !strings.Contains(p, "+build") {
			continue
		}
		fields := splitfields(p)
		if len(fields) < 2 || fields[1] != "+build" {
			continue
		}
		// A +build line is satisfied if any of its fields matches;
		// every +build line must be satisfied.
		for _, p := range fields[2:] {
			if matchfield(p) {
				goto fieldmatch
			}
		}
		return false
	fieldmatch:
	}

	return true
}
+
// copyfile copies the file src to dst, via memory
// (so only good for small files). flag is passed through
// to writefile (e.g. to mark the destination executable).
func copyfile(dst, src string, flag int) {
	if vflag > 1 {
		errprintf("cp %s %s\n", src, dst)
	}
	writefile(readfile(src), dst, flag)
}
+
+// dopack copies the package src to dst,
+// appending the files listed in extra.
+// The archive format is the traditional Unix ar format.
+func dopack(dst, src string, extra []string) {
+ bdst := bytes.NewBufferString(readfile(src))
+ for _, file := range extra {
+ b := readfile(file)
+ // find last path element for archive member name
+ i := strings.LastIndex(file, "/") + 1
+ j := strings.LastIndex(file, `\`) + 1
+ if i < j {
+ i = j
+ }
+ fmt.Fprintf(bdst, "%-16.16s%-12d%-6d%-6d%-8o%-10d`\n", file[i:], 0, 0, 0, 0644, len(b))
+ bdst.WriteString(b)
+ if len(b)&1 != 0 {
+ bdst.WriteByte(0)
+ }
+ }
+ writefile(bdst.String(), dst, 0)
+}
+
+// builddeps records the build dependencies for the 'go bootstrap' command.
+// It is a map[string][]string and generated by mkdeps.bash into deps.go.
+
+// buildlist is the list of directories being built, sorted by name.
+var buildlist = makeBuildlist()
+
+func makeBuildlist() []string {
+ var all []string
+ for dir := range builddeps {
+ all = append(all, dir)
+ }
+ sort.Strings(all)
+ return all
+}
+
// runtimegen lists generated files in the runtime package
// that clean removes (see clean below).
var runtimegen = []string{
	"zaexperiment.h",
	"zversion.go",
}
+
+func clean() {
+ for _, name := range buildlist {
+ path := pathf("%s/src/%s", goroot, name)
+ // Remove generated files.
+ for _, elem := range xreaddir(path) {
+ for _, gt := range gentab {
+ if strings.HasPrefix(elem, gt.nameprefix) {
+ xremove(pathf("%s/%s", path, elem))
+ }
+ }
+ }
+ // Remove generated binary named for directory.
+ if strings.HasPrefix(name, "cmd/") {
+ xremove(pathf("%s/%s", path, name[4:]))
+ }
+ }
+
+ // remove runtimegen files.
+ path := pathf("%s/src/runtime", goroot)
+ for _, elem := range runtimegen {
+ xremove(pathf("%s/%s", path, elem))
+ }
+
+ if rebuildall {
+ // Remove object tree.
+ xremoveall(pathf("%s/pkg/obj/%s_%s", goroot, gohostos, gohostarch))
+
+ // Remove installed packages and tools.
+ xremoveall(pathf("%s/pkg/%s_%s", goroot, gohostos, gohostarch))
+ xremoveall(pathf("%s/pkg/%s_%s", goroot, goos, goarch))
+ xremoveall(pathf("%s/pkg/%s_%s_race", goroot, gohostos, gohostarch))
+ xremoveall(pathf("%s/pkg/%s_%s_race", goroot, goos, goarch))
+ xremoveall(tooldir)
+
+ // Remove cached version info.
+ xremove(pathf("%s/VERSION.cache", goroot))
+ }
+}
+
+/*
+ * command implementations
+ */
+
+func usage() {
+ xprintf("usage: go tool dist [command]\n" +
+ "Commands are:\n" +
+ "\n" +
+ "banner print installation banner\n" +
+ "bootstrap rebuild everything\n" +
+ "clean deletes all built files\n" +
+ "env [-p] print environment (-p: include $PATH)\n" +
+ "install [dir] install individual directory\n" +
+ "test [-h] run Go test(s)\n" +
+ "version print Go version\n" +
+ "\n" +
+ "All commands take -v flags to emit extra information.\n",
+ )
+ xexit(2)
+}
+
+// The env command prints the default environment.
+func cmdenv() {
+ path := flag.Bool("p", false, "emit updated PATH")
+ plan9 := flag.Bool("9", false, "emit plan 9 syntax")
+ windows := flag.Bool("w", false, "emit windows syntax")
+ xflagparse(0)
+
+ format := "%s=\"%s\"\n"
+ switch {
+ case *plan9:
+ format = "%s='%s'\n"
+ case *windows:
+ format = "set %s=%s\r\n"
+ }
+
+ xprintf(format, "CC", defaultcc)
+ xprintf(format, "CC_FOR_TARGET", defaultcctarget)
+ xprintf(format, "GOROOT", goroot)
+ xprintf(format, "GOBIN", gobin)
+ xprintf(format, "GOARCH", goarch)
+ xprintf(format, "GOOS", goos)
+ xprintf(format, "GOHOSTARCH", gohostarch)
+ xprintf(format, "GOHOSTOS", gohostos)
+ xprintf(format, "GOTOOLDIR", tooldir)
+ if goarch == "arm" {
+ xprintf(format, "GOARM", goarm)
+ }
+ if goarch == "386" {
+ xprintf(format, "GO386", go386)
+ }
+
+ if *path {
+ sep := ":"
+ if gohostos == "windows" {
+ sep = ";"
+ }
+ xprintf(format, "PATH", fmt.Sprintf("%s%s%s", gobin, sep, os.Getenv("PATH")))
+ }
+}
+
// The bootstrap command runs a build from scratch,
// stopping at having installed the go_bootstrap command.
func cmdbootstrap() {
	flag.BoolVar(&rebuildall, "a", rebuildall, "rebuild all")
	xflagparse(0)

	// Refuse to build if the pre-Go1.5 src/pkg directory still exists:
	// its stale files would confuse the build.
	if isdir(pathf("%s/src/pkg", goroot)) {
		fatal("\n\n"+
			"The Go package sources have moved to $GOROOT/src.\n"+
			"*** %s still exists. ***\n"+
			"It probably contains stale files that may confuse the build.\n"+
			"Please (check what's there and) remove it and try again.\n"+
			"See https://golang.org/s/go14nopkg\n",
			pathf("%s/src/pkg", goroot))
	}

	if rebuildall {
		clean()
	}

	setup()

	checkCC()
	bootstrapBuildTools()

	// For the main bootstrap, building for host os/arch.
	// Save the requested target os/arch; restored below.
	oldgoos = goos
	oldgoarch = goarch
	goos = gohostos
	goarch = gohostarch
	os.Setenv("GOHOSTARCH", gohostarch)
	os.Setenv("GOHOSTOS", gohostos)
	os.Setenv("GOARCH", goarch)
	os.Setenv("GOOS", goos)

	// TODO(rsc): Enable when appropriate.
	// This step is only needed if we believe that the Go compiler built from Go 1.4
	// will produce different object files than the Go compiler built from itself.
	// In the absence of bugs, that should not happen.
	// And if there are bugs, they're more likely in the current development tree
	// than in a standard release like Go 1.4, so don't do this rebuild by default.
	if false {
		xprintf("##### Building Go toolchain using itself.\n")
		for _, dir := range buildlist {
			installed[dir] = make(chan struct{})
		}
		var wg sync.WaitGroup
		for _, dir := range builddeps["cmd/go"] {
			wg.Add(1)
			dir := dir // capture loop variable for the goroutine below
			go func() {
				defer wg.Done()
				install(dir)
			}()
		}
		wg.Wait()
		xprintf("\n")
	}

	xprintf("##### Building go_bootstrap for host, %s/%s.\n", gohostos, gohostarch)
	// Create every directory's "installed" channel before starting any
	// install goroutine, then wait for cmd/go to be installed.
	for _, dir := range buildlist {
		installed[dir] = make(chan struct{})
	}
	for _, dir := range buildlist {
		go install(dir)
	}
	<-installed["cmd/go"]

	// Restore the target os/arch saved above.
	goos = oldgoos
	goarch = oldgoarch
	os.Setenv("GOARCH", goarch)
	os.Setenv("GOOS", goos)

	// Build runtime for actual goos/goarch too.
	if goos != gohostos || goarch != gohostarch {
		installed["runtime"] = make(chan struct{})
		install("runtime")
	}
}
+
// Copied from go/build/build.go.
// Cannot use go/build directly because cmd/dist for a new release
// builds against an old release's go/build, which may be out of sync.
// Entries are kept in alphabetical order.
var cgoEnabled = map[string]bool{
	"android/386":     true,
	"android/amd64":   true,
	"android/arm":     true,
	"darwin/386":      true,
	"darwin/amd64":    true,
	"darwin/arm":      true,
	"darwin/arm64":    true,
	"dragonfly/amd64": true,
	"freebsd/386":     true,
	"freebsd/amd64":   true,
	"linux/386":       true,
	"linux/amd64":     true,
	"linux/arm":       true,
	"linux/arm64":     true,
	"linux/ppc64le":   true,
	"netbsd/386":      true,
	"netbsd/amd64":    true,
	"netbsd/arm":      true,
	"openbsd/386":     true,
	"openbsd/amd64":   true,
	"solaris/amd64":   true,
	"windows/386":     true,
	"windows/amd64":   true,
}
+
+func needCC() bool {
+ switch os.Getenv("CGO_ENABLED") {
+ case "1":
+ return true
+ case "0":
+ return false
+ }
+ return cgoEnabled[gohostos+"/"+gohostarch]
+}
+
+func checkCC() {
+ if !needCC() {
+ return
+ }
+ if output, err := exec.Command(defaultcc, "--help").CombinedOutput(); err != nil {
+ outputHdr := ""
+ if len(output) > 0 {
+ outputHdr = "\nCommand output:\n\n"
+ }
+ fatal("cannot invoke C compiler %q: %v\n\n"+
+ "Go needs a system C compiler for use with cgo.\n"+
+ "To set a C compiler, export CC=the-compiler.\n"+
+ "To disable cgo, export CGO_ENABLED=0.\n%s%s", defaultcc, err, outputHdr, output)
+ }
+}
+
+func defaulttarg() string {
+ // xgetwd might return a path with symlinks fully resolved, and if
+ // there happens to be symlinks in goroot, then the hasprefix test
+ // will never succeed. Instead, we use xrealwd to get a canonical
+ // goroot/src before the comparison to avoid this problem.
+ pwd := xgetwd()
+ src := pathf("%s/src/", goroot)
+ real_src := xrealwd(src)
+ if !strings.HasPrefix(pwd, real_src) {
+ fatal("current directory %s is not under %s", pwd, real_src)
+ }
+ pwd = pwd[len(real_src):]
+ // guard againt xrealwd return the directory without the trailing /
+ pwd = strings.TrimPrefix(pwd, "/")
+
+ return pwd
+}
+
+// Install installs the list of packages named on the command line.
+func cmdinstall() {
+ xflagparse(-1)
+
+ if flag.NArg() == 0 {
+ install(defaulttarg())
+ }
+
+ for _, arg := range flag.Args() {
+ install(arg)
+ }
+}
+
// Clean deletes temporary objects.
// It takes no arguments and delegates to clean above.
func cmdclean() {
	xflagparse(0)
	clean()
}
+
+// Banner prints the 'now you've installed Go' banner.
+func cmdbanner() {
+ xflagparse(0)
+
+ xprintf("\n")
+ xprintf("---\n")
+ xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot)
+ xprintf("Installed commands in %s\n", gobin)
+
+ if !xsamefile(goroot_final, goroot) {
+ // If the files are to be moved, don't check that gobin
+ // is on PATH; assume they know what they are doing.
+ } else if gohostos == "plan9" {
+ // Check that gobin is bound before /bin.
+ pid := strings.Replace(readfile("#c/pid"), " ", "", -1)
+ ns := fmt.Sprintf("/proc/%s/ns", pid)
+ if !strings.Contains(readfile(ns), fmt.Sprintf("bind -b %s /bin", gobin)) {
+ xprintf("*** You need to bind %s before /bin.\n", gobin)
+ }
+ } else {
+ // Check that gobin appears in $PATH.
+ pathsep := ":"
+ if gohostos == "windows" {
+ pathsep = ";"
+ }
+ if !strings.Contains(pathsep+os.Getenv("PATH")+pathsep, pathsep+gobin+pathsep) {
+ xprintf("*** You need to add %s to your PATH.\n", gobin)
+ }
+ }
+
+ if !xsamefile(goroot_final, goroot) {
+ xprintf("\n"+
+ "The binaries expect %s to be copied or moved to %s\n",
+ goroot, goroot_final)
+ }
+}
+
// Version prints the Go version, as computed by findgoversion.
func cmdversion() {
	xflagparse(0)
	xprintf("%s\n", findgoversion())
}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/buildtool.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/buildtool.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/buildtool.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/buildtool.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Build toolchain using Go 1.4.
+//
+// The general strategy is to copy the source files we need into
+// a new GOPATH workspace, adjust import paths appropriately,
+// invoke the Go 1.4 go command to build those sources,
+// and then copy the binaries back.
+
+package main
+
+import (
+ "os"
+ "strings"
+)
+
// bootstrapDirs is a list of directories holding code that must be
// compiled with a Go 1.4 toolchain to produce the bootstrapTargets.
// All directories in this list are relative to and must be below $GOROOT/src/cmd.
// The list is assumed to have two kinds of entries: names without slashes,
// which are commands, and entries beginning with internal/, which are
// packages supporting the commands.
// (Entries are kept sorted.)
var bootstrapDirs = []string{
	"asm",
	"asm/internal/arch",
	"asm/internal/asm",
	"asm/internal/flags",
	"asm/internal/lex",
	"compile",
	"compile/internal/amd64",
	"compile/internal/arm",
	"compile/internal/arm64",
	"compile/internal/big",
	"compile/internal/gc",
	"compile/internal/mips64",
	"compile/internal/ppc64",
	"compile/internal/x86",
	"internal/gcprog",
	"internal/obj",
	"internal/obj/arm",
	"internal/obj/arm64",
	"internal/obj/mips",
	"internal/obj/ppc64",
	"internal/obj/x86",
	"link",
	"link/internal/amd64",
	"link/internal/arm",
	"link/internal/arm64",
	"link/internal/ld",
	"link/internal/mips64",
	"link/internal/ppc64",
	"link/internal/x86",
}
+
// bootstrapBuildTools builds the toolchain packages listed in
// bootstrapDirs using the Go 1.4 toolchain in $GOROOT_BOOTSTRAP
// (default $HOME/go1.4): it copies their sources into
// $GOROOT/pkg/bootstrap with rewritten import paths, builds them
// there, and copies the resulting command binaries into tooldir.
func bootstrapBuildTools() {
	goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
	if goroot_bootstrap == "" {
		goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME"))
	}
	xprintf("##### Building Go toolchain using %s.\n", goroot_bootstrap)

	mkzbootstrap(pathf("%s/src/cmd/internal/obj/zbootstrap.go", goroot))

	// Use $GOROOT/pkg/bootstrap as the bootstrap workspace root.
	// We use a subdirectory of $GOROOT/pkg because that's the
	// space within $GOROOT where we store all generated objects.
	// We could use a temporary directory outside $GOROOT instead,
	// but it is easier to debug on failure if the files are in a known location.
	workspace := pathf("%s/pkg/bootstrap", goroot)
	xremoveall(workspace)
	base := pathf("%s/src/bootstrap", workspace)
	xmkdirall(base)

	// Copy source code into $GOROOT/pkg/bootstrap and rewrite import paths.
	for _, dir := range bootstrapDirs {
		src := pathf("%s/src/cmd/%s", goroot, dir)
		dst := pathf("%s/%s", base, dir)
		xmkdirall(dst)
		for _, name := range xreaddirfiles(src) {
			srcFile := pathf("%s/%s", src, name)
			text := readfile(srcFile)
			text = bootstrapFixImports(text, srcFile)
			writefile(text, pathf("%s/%s", dst, name), 0)
		}
	}

	// Set up environment for invoking Go 1.4 go command.
	// GOROOT points at Go 1.4 GOROOT,
	// GOPATH points at our bootstrap workspace,
	// GOBIN is empty, so that binaries are installed to GOPATH/bin,
	// and GOOS, GOHOSTOS, GOARCH, and GOHOSTOS are empty,
	// so that Go 1.4 builds whatever kind of binary it knows how to build.
	// Restore GOROOT, GOPATH, and GOBIN when done.
	// Don't bother with GOOS, GOHOSTOS, GOARCH, and GOHOSTARCH,
	// because setup will take care of those when bootstrapBuildTools returns.
	// (The deferred Setenv calls capture the current values now and
	// restore them when this function returns.)

	defer os.Setenv("GOROOT", os.Getenv("GOROOT"))
	os.Setenv("GOROOT", goroot_bootstrap)

	defer os.Setenv("GOPATH", os.Getenv("GOPATH"))
	os.Setenv("GOPATH", workspace)

	defer os.Setenv("GOBIN", os.Getenv("GOBIN"))
	os.Setenv("GOBIN", "")

	os.Setenv("GOOS", "")
	os.Setenv("GOHOSTOS", "")
	os.Setenv("GOARCH", "")
	os.Setenv("GOHOSTARCH", "")

	// Run Go 1.4 to build binaries.
	run(workspace, ShowOutput|CheckExit, pathf("%s/bin/go", goroot_bootstrap), "install", "-v", "bootstrap/...")

	// Copy binaries into tool binary directory.
	// Entries without a slash are commands (see bootstrapDirs).
	for _, name := range bootstrapDirs {
		if !strings.Contains(name, "/") {
			copyfile(pathf("%s/%s%s", tooldir, name, exe), pathf("%s/bin/%s%s", workspace, name, exe), writeExec)
		}
	}

	xprintf("\n")
}
+
// bootstrapFixImports rewrites the import paths in text (the contents
// of srcFile) so that "cmd/..." imports refer to their "bootstrap/..."
// copies, and prepends a header marking the file as a generated copy,
// with a //line directive pointing back at the original.
func bootstrapFixImports(text, srcFile string) string {
	lines := strings.SplitAfter(text, "\n")
	inBlock := false
	for i, line := range lines {
		// Track whether we are inside an import ( ... ) block.
		if strings.HasPrefix(line, "import (") {
			inBlock = true
			continue
		}
		if inBlock && strings.HasPrefix(line, ")") {
			inBlock = false
			continue
		}
		// Single-line imports, or quoted lines within an import block.
		isImport := strings.HasPrefix(line, `import "`) || strings.HasPrefix(line, `import . "`)
		if !isImport && inBlock {
			isImport = strings.HasPrefix(line, "\t\"") || strings.HasPrefix(line, "\t. \"")
		}
		if isImport {
			lines[i] = strings.Replace(line, `"cmd/`, `"bootstrap/`, -1)
		}
	}

	lines[0] = "// Do not edit. Bootstrap copy of " + srcFile + "\n\n//line " + srcFile + ":1\n" + lines[0]

	return strings.Join(lines, "")
}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/test.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,1021 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "errors"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
// cmdtest implements 'go tool dist test': it parses the
// test-selection flags into a tester and runs it.
func cmdtest() {
	var t tester
	var noRebuild bool
	flag.BoolVar(&t.listMode, "list", false, "list available tests")
	flag.BoolVar(&t.rebuild, "rebuild", false, "rebuild everything first")
	flag.BoolVar(&noRebuild, "no-rebuild", false, "overrides -rebuild (historical dreg)")
	flag.BoolVar(&t.keepGoing, "k", false, "keep going even when error occurred")
	flag.BoolVar(&t.race, "race", false, "run in race builder mode (different set of tests)")
	flag.StringVar(&t.banner, "banner", "##### ", "banner prefix; blank means no section banners")
	flag.StringVar(&t.runRxStr, "run", os.Getenv("GOTESTONLY"),
		"run only those tests matching the regular expression; empty means to run all. "+
			"Special exception: if the string begins with '!', the match is inverted.")
	xflagparse(-1) // any number of args
	// -no-rebuild overrides -rebuild regardless of flag order.
	if noRebuild {
		t.rebuild = false
	}
	t.run()
}
+
// tester executes cmdtest.
type tester struct {
	race      bool // run in race builder mode (different set of tests)
	listMode  bool // print test names instead of running them
	rebuild   bool // run 'go install -a std cmd' before testing
	failed    bool // set when any test has failed
	keepGoing bool // continue past failures (-k)
	runRxStr  string // raw -run value (may begin with '!')
	runRx     *regexp.Regexp
	runRxWant bool     // want runRx to match (true) or not match (false)
	runNames  []string // tests to run, exclusive with runRx; empty means all
	banner    string   // prefix, or "" for none
	lastHeading string // last dir heading printed

	goroot     string
	goarch     string
	gohostarch string
	goos       string
	gohostos   string
	cgoEnabled bool // from 'go env CGO_ENABLED'
	partial    bool // some tests were excluded from this run
	haveTime   bool // the 'time' binary is available

	tests        []distTest
	timeoutScale int // timeout multiplier for slow platforms

	worklist []*work // pending background test commands (see runPending)
}
+
// work is one background test command managed by runPending
// (not shown in this view).
type work struct {
	dt    *distTest // test this command belongs to
	cmd   *exec.Cmd // command to run
	start chan bool // presumably gates command start — see runPending
	out   []byte    // command output, filled in by the runner
	err   error     // command result, filled in by the runner
	end   chan bool // presumably signals completion — see runPending
}
+
// A distTest is a test run by dist test.
// Each test has a unique name and belongs to a group (heading)
type distTest struct {
	name    string // unique test name; may be filtered with -run flag
	heading string // group section; this header is printed before the test is run.
	fn      func(*distTest) error // runs the test; receives its own distTest
}
+
// mustEnv returns the value of the environment variable k,
// terminating the process if k is unset or empty.
func mustEnv(k string) string {
	if v := os.Getenv(k); v != "" {
		return v
	}
	log.Fatalf("Unset environment variable %v", k)
	return "" // unreachable; log.Fatalf exits
}
+
// run executes the dist test suite: it reads the build configuration
// from the environment, registers the tests selected by the flags,
// runs them, and exits non-zero if any failed.
func (t *tester) run() {
	t.goroot = mustEnv("GOROOT")
	t.goos = mustEnv("GOOS")
	t.gohostos = mustEnv("GOHOSTOS")
	t.goarch = mustEnv("GOARCH")
	t.gohostarch = mustEnv("GOHOSTARCH")
	slurp, err := exec.Command("go", "env", "CGO_ENABLED").Output()
	if err != nil {
		log.Fatalf("Error running go env CGO_ENABLED: %v", err)
	}
	t.cgoEnabled, _ = strconv.ParseBool(strings.TrimSpace(string(slurp)))
	if flag.NArg() > 0 && t.runRxStr != "" {
		log.Fatalf("the -run regular expression flag is mutually exclusive with test name arguments")
	}
	t.runNames = flag.Args()

	// The 'time' binary is only used where bash is available.
	if t.hasBash() {
		if _, err := exec.LookPath("time"); err == nil {
			t.haveTime = true
		}
	}

	if t.rebuild {
		t.out("Building packages and commands.")
		cmd := exec.Command("go", "install", "-a", "-v", "std", "cmd")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Fatalf("building packages and commands: %v", err)
		}
	}

	if t.iOS() {
		// Install the Mach exception handler used to intercept
		// EXC_BAD_ACCESS and convert it into a Go panic. This is
		// necessary for a Go program running under lldb (the way
		// we run tests). It is disabled by default because iOS
		// apps are not allowed to access the exc_server symbol.
		cmd := exec.Command("go", "install", "-a", "-tags", "lldb", "runtime/cgo")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		if err := cmd.Run(); err != nil {
			log.Fatalf("building mach exception handler: %v", err)
		}

		// Reinstall the default (non-lldb) runtime/cgo when done.
		defer func() {
			cmd := exec.Command("go", "install", "-a", "runtime/cgo")
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			if err := cmd.Run(); err != nil {
				log.Fatalf("reverting mach exception handler: %v", err)
			}
		}()
	}

	// Slower platforms get doubled timeouts;
	// $GO_TEST_TIMEOUT_SCALE overrides the default.
	t.timeoutScale = 1
	if t.goarch == "arm" || t.goos == "windows" {
		t.timeoutScale = 2
	}
	if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
		t.timeoutScale, err = strconv.Atoi(s)
		if err != nil {
			log.Fatalf("failed to parse $GO_TEST_TIMEOUT_SCALE = %q as integer: %v", s, err)
		}
	}

	// A leading '!' on -run inverts the regexp match.
	if t.runRxStr != "" {
		if t.runRxStr[0] == '!' {
			t.runRxWant = false
			t.runRxStr = t.runRxStr[1:]
		} else {
			t.runRxWant = true
		}
		t.runRx = regexp.MustCompile(t.runRxStr)
	}

	t.registerTests()
	if t.listMode {
		for _, tt := range t.tests {
			fmt.Println(tt.name)
		}
		return
	}

	// we must unset GOROOT_FINAL before tests, because runtime/debug requires
	// correct access to source code, so if we have GOROOT_FINAL in effect,
	// at least runtime/debug test will fail.
	os.Unsetenv("GOROOT_FINAL")

	// Reject unknown test names before running anything.
	for _, name := range t.runNames {
		if !t.isRegisteredTestName(name) {
			log.Fatalf("unknown test %q", name)
		}
	}

	for _, dt := range t.tests {
		if !t.shouldRunTest(dt.name) {
			t.partial = true
			continue
		}
		dt := dt // dt used in background after this iteration
		if err := dt.fn(&dt); err != nil {
			t.runPending(&dt) // in case that hasn't been done yet
			t.failed = true
			if t.keepGoing {
				log.Printf("Failed: %v", err)
			} else {
				log.Fatalf("Failed: %v", err)
			}
		}
	}
	t.runPending(nil)
	if t.failed {
		fmt.Println("\nFAILED")
		os.Exit(1)
	} else if t.partial {
		fmt.Println("\nALL TESTS PASSED (some were excluded)")
	} else {
		fmt.Println("\nALL TESTS PASSED")
	}
}
+
+func (t *tester) shouldRunTest(name string) bool {
+ if t.runRx != nil {
+ return t.runRx.MatchString(name) == t.runRxWant
+ }
+ if len(t.runNames) == 0 {
+ return true
+ }
+ for _, runName := range t.runNames {
+ if runName == name {
+ return true
+ }
+ }
+ return false
+}
+
+func (t *tester) tags() string {
+ if t.iOS() {
+ return "-tags=lldb"
+ }
+ return "-tags="
+}
+
+func (t *tester) timeout(sec int) string {
+ return "-timeout=" + fmt.Sprint(time.Duration(sec)*time.Second*time.Duration(t.timeoutScale))
+}
+
+// ranGoTest and stdMatches are state closed over by the stdlib
+// testing func in registerStdTest below. The tests are run
+// sequentially, so there's no need for locks.
+//
+// ranGoBench and benchMatches are the same, but are only used
+// in -race mode.
var (
	ranGoTest  bool     // the single combined 'go test' for stdMatches has run
	stdMatches []string // std/cmd packages selected for that combined run

	ranGoBench   bool     // the single combined benchmark run has happened
	benchMatches []string // packages selected for the race benchmark run
)
+
// registerStdTest registers the test "go_test:pkg".
// All selected std packages accumulate in stdMatches; the first such
// test to fire runs them all with a single 'go test' invocation
// (guarded by ranGoTest), and the rest become no-ops.
func (t *tester) registerStdTest(pkg string) {
	testName := "go_test:" + pkg
	if t.runRx == nil || t.runRx.MatchString(testName) {
		stdMatches = append(stdMatches, pkg)
	}
	t.tests = append(t.tests, distTest{
		name:    testName,
		heading: "Testing packages.",
		fn: func(dt *distTest) error {
			if ranGoTest {
				return nil
			}
			t.runPending(dt)
			ranGoTest = true
			args := []string{
				"test",
				"-short",
				t.tags(),
				t.timeout(180),
				"-gcflags=" + os.Getenv("GO_GCFLAGS"),
			}
			if t.race {
				args = append(args, "-race")
			}
			args = append(args, stdMatches...)
			cmd := exec.Command("go", args...)
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			return cmd.Run()
		},
	})
}
+
// registerRaceBenchTest registers the test "go_test_bench:pkg".
// Like registerStdTest, all selected packages accumulate in
// benchMatches and run in one combined invocation (guarded by
// ranGoBench) that briefly runs benchmarks under the race detector.
func (t *tester) registerRaceBenchTest(pkg string) {
	testName := "go_test_bench:" + pkg
	if t.runRx == nil || t.runRx.MatchString(testName) {
		benchMatches = append(benchMatches, pkg)
	}
	t.tests = append(t.tests, distTest{
		name:    testName,
		heading: "Running benchmarks briefly.",
		fn: func(dt *distTest) error {
			if ranGoBench {
				return nil
			}
			t.runPending(dt)
			ranGoBench = true
			args := []string{
				"test",
				"-short",
				"-race",
				"-run=^$", // nothing. only benchmarks.
				"-bench=.*",
				"-benchtime=.1s",
				"-cpu=4",
			}
			args = append(args, benchMatches...)
			cmd := exec.Command("go", args...)
			cmd.Stdout = os.Stdout
			cmd.Stderr = os.Stderr
			return cmd.Run()
		},
	})
}
+
// registerTests populates t.tests. When explicit test names were
// given it registers only those (avoiding the costly 'go list');
// otherwise it lists std/cmd packages with tests and then adds the
// runtime, cgo, race, documentation, and ../test suites appropriate
// for this platform and build mode.
func (t *tester) registerTests() {
	// Fast path to avoid the ~1 second of `go list std cmd` when
	// the caller lists specific tests to run. (as the continuous
	// build coordinator does).
	if len(t.runNames) > 0 {
		for _, name := range t.runNames {
			if strings.HasPrefix(name, "go_test:") {
				t.registerStdTest(strings.TrimPrefix(name, "go_test:"))
			}
			if strings.HasPrefix(name, "go_test_bench:") {
				t.registerRaceBenchTest(strings.TrimPrefix(name, "go_test_bench:"))
			}
		}
	} else {
		// Use a format string to only list packages and commands that have tests.
		const format = "{{if (or .TestGoFiles .XTestGoFiles)}}{{.ImportPath}}{{end}}"
		cmd := exec.Command("go", "list", "-f", format)
		if t.race {
			cmd.Args = append(cmd.Args, "-tags", "race")
		}
		cmd.Args = append(cmd.Args, "std")
		if !t.race {
			cmd.Args = append(cmd.Args, "cmd")
		}
		all, err := cmd.CombinedOutput()
		if err != nil {
			log.Fatalf("Error running go list std cmd: %v, %s", err, all)
		}
		pkgs := strings.Fields(string(all))
		for _, pkg := range pkgs {
			t.registerStdTest(pkg)
		}
		if t.race {
			for _, pkg := range pkgs {
				t.registerRaceBenchTest(pkg)
			}
		}
	}

	// In race mode only the std tests and benchmarks above run.
	if t.race {
		return
	}

	// Runtime CPU tests.
	testName := "runtime:cpu124"
	t.tests = append(t.tests, distTest{
		name:    testName,
		heading: "GOMAXPROCS=2 runtime -cpu=1,2,4",
		fn: func(dt *distTest) error {
			cmd := t.addCmd(dt, "src", "go", "test", "-short", t.timeout(300), t.tags(), "runtime", "-cpu=1,2,4")
			// We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code,
			// creation of first goroutines and first garbage collections in the parallel setting.
			cmd.Env = mergeEnvLists([]string{"GOMAXPROCS=2"}, os.Environ())
			return nil
		},
	})

	// Test that internal linking of standard packages does not
	// require libgcc. This ensures that we can install a Go
	// release on a system that does not have a C compiler
	// installed and still build Go programs (that don't use cgo).
	for _, pkg := range cgoPackages {

		// Internal linking is not currently supported on Dragonfly.
		if t.goos == "dragonfly" {
			break
		}

		// ARM libgcc may be Thumb, which internal linking does not support.
		if t.goarch == "arm" {
			break
		}

		// Darwin/Android ARM64 fails with internal linking.
		if (t.goos == "darwin" || t.goos == "android") && t.goarch == "arm64" {
			break
		}

		pkg := pkg // capture for the closure below
		var run string
		if pkg == "net" {
			run = "TestTCPStress"
		}
		t.tests = append(t.tests, distTest{
			name:    "nolibgcc:" + pkg,
			heading: "Testing without libgcc.",
			fn: func(dt *distTest) error {
				t.addCmd(dt, "src", "go", "test", "-short", "-ldflags=-linkmode=internal -libgcc=none", t.tags(), pkg, "-run="+run)
				return nil
			},
		})
	}

	// sync tests
	t.tests = append(t.tests, distTest{
		name:    "sync_cpu",
		heading: "sync -cpu=10",
		fn: func(dt *distTest) error {
			t.addCmd(dt, "src", "go", "test", "sync", "-short", t.timeout(120), t.tags(), "-cpu=10")
			return nil
		},
	})

	if t.cgoEnabled && t.goos != "android" && !t.iOS() {
		// Disabled on android and iOS. golang.org/issue/8345
		t.tests = append(t.tests, distTest{
			name:    "cgo_stdio",
			heading: "../misc/cgo/stdio",
			fn: func(dt *distTest) error {
				t.addCmd(dt, "misc/cgo/stdio", "go", "run", filepath.Join(os.Getenv("GOROOT"), "test/run.go"), "-", ".")
				return nil
			},
		})
		t.tests = append(t.tests, distTest{
			name:    "cgo_life",
			heading: "../misc/cgo/life",
			fn: func(dt *distTest) error {
				t.addCmd(dt, "misc/cgo/life", "go", "run", filepath.Join(os.Getenv("GOROOT"), "test/run.go"), "-", ".")
				return nil
			},
		})
	}
	if t.cgoEnabled && t.goos != "android" && !t.iOS() {
		// TODO(crawshaw): reenable on android and iOS
		// golang.org/issue/8345
		//
		// These tests are not designed to run off the host.
		t.tests = append(t.tests, distTest{
			name:    "cgo_test",
			heading: "../misc/cgo/test",
			fn:      t.cgoTest,
		})
	}

	if t.raceDetectorSupported() {
		t.tests = append(t.tests, distTest{
			name:    "race",
			heading: "Testing race detector",
			fn:      t.raceTest,
		})
	}

	if t.hasBash() && t.cgoEnabled && t.goos != "android" && t.goos != "darwin" {
		t.registerTest("testgodefs", "../misc/cgo/testgodefs", "./test.bash")
	}
	if t.cgoEnabled {
		if t.cgoTestSOSupported() {
			t.tests = append(t.tests, distTest{
				name:    "testso",
				heading: "../misc/cgo/testso",
				fn: func(dt *distTest) error {
					return t.cgoTestSO(dt, "misc/cgo/testso")
				},
			})
			t.tests = append(t.tests, distTest{
				name:    "testsovar",
				heading: "../misc/cgo/testsovar",
				fn: func(dt *distTest) error {
					return t.cgoTestSO(dt, "misc/cgo/testsovar")
				},
			})
		}
		if t.supportedBuildmode("c-archive") {
			t.registerTest("testcarchive", "../misc/cgo/testcarchive", "./test.bash")
		}
		if t.supportedBuildmode("c-shared") {
			t.registerTest("testcshared", "../misc/cgo/testcshared", "./test.bash")
		}
		if t.supportedBuildmode("shared") {
			t.registerTest("testshared", "../misc/cgo/testshared", "go", "test")
		}
		if t.gohostos == "linux" && t.goarch == "amd64" {
			t.registerTest("testasan", "../misc/cgo/testasan", "go", "run", "main.go")
		}
		if t.gohostos == "linux" && t.goarch == "amd64" {
			t.registerTest("testsanitizers", "../misc/cgo/testsanitizers", "./test.bash")
		}
		if t.hasBash() && t.goos != "android" && !t.iOS() && t.gohostos != "windows" {
			t.registerTest("cgo_errors", "../misc/cgo/errors", "./test.bash")
		}
		if t.gohostos == "linux" && t.extLink() {
			t.registerTest("testsigfwd", "../misc/cgo/testsigfwd", "go", "run", "main.go")
		}
	}

	// Doc tests only run on builders.
	// They find problems approximately never.
	if t.hasBash() && t.goos != "nacl" && t.goos != "android" && !t.iOS() && os.Getenv("GO_BUILDER_NAME") != "" {
		t.registerTest("doc_progs", "../doc/progs", "time", "go", "run", "run.go")
		t.registerTest("wiki", "../doc/articles/wiki", "./test.bash")
		t.registerTest("codewalk", "../doc/codewalk", "time", "./run")
	}

	if t.goos != "android" && !t.iOS() {
		t.registerTest("bench_go1", "../test/bench/go1", "go", "test", t.timeout(600))
	}
	if t.goos != "android" && !t.iOS() {
		// The ../test directory is run in shards to bound the
		// time of any one test invocation.
		const nShards = 5
		for shard := 0; shard < nShards; shard++ {
			shard := shard // capture for the closure below
			t.tests = append(t.tests, distTest{
				name:    fmt.Sprintf("test:%d_%d", shard, nShards),
				heading: "../test",
				fn:      func(dt *distTest) error { return t.testDirTest(dt, shard, nShards) },
			})
		}
	}
	if t.goos != "nacl" && t.goos != "android" && !t.iOS() {
		t.tests = append(t.tests, distTest{
			name:    "api",
			heading: "API check",
			fn: func(dt *distTest) error {
				t.addCmd(dt, "src", "go", "run", filepath.Join(t.goroot, "src/cmd/api/run.go"))
				return nil
			},
		})
	}
}
+
+// isRegisteredTestName reports whether a test named testName has already
+// been registered.
+func (t *tester) isRegisteredTestName(testName string) bool {
+ for _, tt := range t.tests {
+ if tt.name == testName {
+ return true
+ }
+ }
+ return false
+}
+
+func (t *tester) registerTest1(seq bool, name, dirBanner, bin string, args ...string) {
+ if bin == "time" && !t.haveTime {
+ bin, args = args[0], args[1:]
+ }
+ if t.isRegisteredTestName(name) {
+ panic("duplicate registered test name " + name)
+ }
+ t.tests = append(t.tests, distTest{
+ name: name,
+ heading: dirBanner,
+ fn: func(dt *distTest) error {
+ if seq {
+ t.runPending(dt)
+ return t.dirCmd(filepath.Join(t.goroot, "src", dirBanner), bin, args...).Run()
+ }
+ t.addCmd(dt, filepath.Join(t.goroot, "src", dirBanner), bin, args...)
+ return nil
+ },
+ })
+}
+
+func (t *tester) registerTest(name, dirBanner, bin string, args ...string) {
+ t.registerTest1(false, name, dirBanner, bin, args...)
+}
+
+func (t *tester) registerSeqTest(name, dirBanner, bin string, args ...string) {
+ t.registerTest1(true, name, dirBanner, bin, args...)
+}
+
+func (t *tester) bgDirCmd(dir, bin string, args ...string) *exec.Cmd {
+ cmd := exec.Command(bin, args...)
+ if filepath.IsAbs(dir) {
+ cmd.Dir = dir
+ } else {
+ cmd.Dir = filepath.Join(t.goroot, dir)
+ }
+ return cmd
+}
+
+func (t *tester) dirCmd(dir, bin string, args ...string) *exec.Cmd {
+ cmd := t.bgDirCmd(dir, bin, args...)
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ if vflag > 1 {
+ errprintf("%s\n", strings.Join(cmd.Args, " "))
+ }
+ return cmd
+}
+
+func (t *tester) addCmd(dt *distTest, dir, bin string, args ...string) *exec.Cmd {
+ w := &work{
+ dt: dt,
+ cmd: t.bgDirCmd(dir, bin, args...),
+ }
+ t.worklist = append(t.worklist, w)
+ return w.cmd
+}
+
+func (t *tester) iOS() bool {
+ return t.goos == "darwin" && (t.goarch == "arm" || t.goarch == "arm64")
+}
+
+func (t *tester) out(v string) {
+ if t.banner == "" {
+ return
+ }
+ fmt.Println("\n" + t.banner + v)
+}
+
+func (t *tester) extLink() bool {
+ pair := t.gohostos + "-" + t.goarch
+ switch pair {
+ case "android-arm",
+ "darwin-arm", "darwin-arm64",
+ "dragonfly-386", "dragonfly-amd64",
+ "freebsd-386", "freebsd-amd64", "freebsd-arm",
+ "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le",
+ "netbsd-386", "netbsd-amd64",
+ "openbsd-386", "openbsd-amd64",
+ "windows-386", "windows-amd64":
+ return true
+ case "darwin-386", "darwin-amd64":
+ // linkmode=external fails on OS X 10.6 and earlier == Darwin
+ // 10.8 and earlier.
+ unameR, err := exec.Command("uname", "-r").Output()
+ if err != nil {
+ log.Fatalf("uname -r: %v", err)
+ }
+ major, _ := strconv.Atoi(string(unameR[:bytes.IndexByte(unameR, '.')]))
+ return major > 10
+ }
+ return false
+}
+
+func (t *tester) supportedBuildmode(mode string) bool {
+	pair := t.goos + "-" + t.goarch
+	switch mode {
+	case "c-archive":
+		if !t.extLink() {
+			return false
+		}
+		switch pair {
+		case "darwin-386", "darwin-amd64", "darwin-arm", "darwin-arm64",
+			"linux-amd64", "linux-386":
+			return true
+		}
+		return false
+	case "c-shared":
+		switch pair {
+		case "linux-386", "linux-amd64", "linux-arm", "linux-arm64",
+			"darwin-amd64", "darwin-386",
+			"android-arm", "android-arm64", "android-386":
+			return true
+		}
+		return false
+	case "shared":
+		switch pair {
+		case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le":
+			return true
+		}
+		return false
+	default:
+		log.Fatalf("internal error: unknown buildmode %s", mode) // Fatalf, not Fatal: the message is a printf format
+		return false
+	}
+}
+
+func (t *tester) cgoTest(dt *distTest) error {
+ env := mergeEnvLists([]string{"GOTRACEBACK=2"}, os.Environ())
+
+ if t.goos == "android" || t.iOS() {
+ cmd := t.dirCmd("misc/cgo/test", "go", "test", t.tags())
+ cmd.Env = env
+ return cmd.Run()
+ }
+
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", t.tags(), "-ldflags", "-linkmode=auto")
+ cmd.Env = env
+
+ if t.gohostos != "dragonfly" {
+ // linkmode=internal fails on dragonfly since errno is a TLS relocation.
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=internal")
+ cmd.Env = env
+ }
+
+ pair := t.gohostos + "-" + t.goarch
+ switch pair {
+ case "darwin-386", "darwin-amd64",
+ "openbsd-386", "openbsd-amd64",
+ "windows-386", "windows-amd64":
+ // test linkmode=external, but __thread not supported, so skip testtls.
+ if !t.extLink() {
+ break
+ }
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external")
+ cmd.Env = env
+ cmd = t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external -s")
+ cmd.Env = env
+ case "android-arm",
+ "dragonfly-386", "dragonfly-amd64",
+ "freebsd-386", "freebsd-amd64", "freebsd-arm",
+ "linux-386", "linux-amd64", "linux-arm",
+ "netbsd-386", "netbsd-amd64":
+
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external")
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", "-linkmode=auto")
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", "-linkmode=external")
+ cmd.Env = env
+
+ switch pair {
+ case "netbsd-386", "netbsd-amd64":
+ // no static linking
+ case "freebsd-arm":
+ // -fPIC compiled tls code will use __tls_get_addr instead
+ // of __aeabi_read_tp, however, on FreeBSD/ARM, __tls_get_addr
+ // is implemented in rtld-elf, so -fPIC isn't compatible with
+ // static linking on FreeBSD/ARM with clang. (cgo depends on
+ // -fPIC fundamentally.)
+ default:
+ cc := mustEnv("CC")
+ cmd := t.dirCmd("misc/cgo/test",
+ cc, "-xc", "-o", "/dev/null", "-static", "-")
+ cmd.Env = env
+ cmd.Stdin = strings.NewReader("int main() {}")
+ if err := cmd.Run(); err != nil {
+ fmt.Println("No support for static linking found (lacks libc.a?), skip cgo static linking test.")
+ } else {
+ cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", `-linkmode=external -extldflags "-static -pthread"`)
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test")
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external`)
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external -extldflags "-static -pthread"`)
+ cmd.Env = env
+ }
+
+ if pair != "freebsd-amd64" { // clang -pie fails to link misc/cgo/test
+ cmd := t.dirCmd("misc/cgo/test",
+ cc, "-xc", "-o", "/dev/null", "-pie", "-")
+ cmd.Env = env
+ cmd.Stdin = strings.NewReader("int main() {}")
+ if err := cmd.Run(); err != nil {
+ fmt.Println("No support for -pie found, skip cgo PIE test.")
+ } else {
+ cmd = t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`)
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/testtls", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`)
+ cmd.Env = env
+
+ cmd = t.addCmd(dt, "misc/cgo/nocgo", "go", "test", "-ldflags", `-linkmode=external -extldflags "-pie"`)
+ cmd.Env = env
+
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+// run pending test commands, in parallel, emitting headers as appropriate.
+// When finished, emit header for nextTest, which is going to run after the
+// pending commands are done (and runPending returns).
+// A test should call runPending if it wants to make sure that it is not
+// running in parallel with earlier tests, or if it has some other reason
+// for needing the earlier tests to be done.
+func (t *tester) runPending(nextTest *distTest) {
+ worklist := t.worklist
+ t.worklist = nil
+ for _, w := range worklist {
+ w.start = make(chan bool)
+ w.end = make(chan bool)
+ go func(w *work) {
+ if !<-w.start {
+ w.out = []byte(fmt.Sprintf("skipped due to earlier error\n"))
+ } else {
+ w.out, w.err = w.cmd.CombinedOutput()
+ }
+ w.end <- true
+ }(w)
+ }
+
+ started := 0
+ ended := 0
+ var last *distTest
+ for ended < len(worklist) {
+ for started < len(worklist) && started-ended < maxbg {
+ //println("start", started)
+ w := worklist[started]
+ started++
+ w.start <- !t.failed || t.keepGoing
+ }
+ w := worklist[ended]
+ dt := w.dt
+ if dt.heading != "" && t.lastHeading != dt.heading {
+ t.lastHeading = dt.heading
+ t.out(dt.heading)
+ }
+ if dt != last {
+ // Assumes all the entries for a single dt are in one worklist.
+ last = w.dt
+ if vflag > 0 {
+ fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
+ }
+ }
+ if vflag > 1 {
+ errprintf("%s\n", strings.Join(w.cmd.Args, " "))
+ }
+ //println("wait", ended)
+ ended++
+ <-w.end
+ os.Stdout.Write(w.out)
+ if w.err != nil {
+ log.Printf("Failed: %v", w.err)
+ t.failed = true
+ }
+ }
+ if t.failed && !t.keepGoing {
+ log.Fatal("FAILED")
+ }
+
+ if dt := nextTest; dt != nil {
+ if dt.heading != "" && t.lastHeading != dt.heading {
+ t.lastHeading = dt.heading
+ t.out(dt.heading)
+ }
+ if vflag > 0 {
+ fmt.Printf("# go tool dist test -run=^%s$\n", dt.name)
+ }
+ }
+}
+
+func (t *tester) cgoTestSOSupported() bool {
+ if t.goos == "android" || t.iOS() {
+ // No exec facility on Android or iOS.
+ return false
+ }
+ if t.goarch == "ppc64" {
+ // External linking not implemented on ppc64 (issue #8912).
+ return false
+ }
+ if t.goarch == "mips64le" || t.goarch == "mips64" {
+ // External linking not implemented on mips64.
+ return false
+ }
+ return true
+}
+
+func (t *tester) cgoTestSO(dt *distTest, testpath string) error {
+ t.runPending(dt)
+
+ dir := filepath.Join(t.goroot, testpath)
+
+ // build shared object
+ output, err := exec.Command("go", "env", "CC").Output()
+ if err != nil {
+ return fmt.Errorf("Error running go env CC: %v", err)
+ }
+ cc := strings.TrimSuffix(string(output), "\n")
+ if cc == "" {
+ return errors.New("CC environment variable (go env CC) cannot be empty")
+ }
+ output, err = exec.Command("go", "env", "GOGCCFLAGS").Output()
+ if err != nil {
+ return fmt.Errorf("Error running go env GOGCCFLAGS: %v", err)
+ }
+ gogccflags := strings.Split(strings.TrimSuffix(string(output), "\n"), " ")
+
+ ext := "so"
+ args := append(gogccflags, "-shared")
+ switch t.goos {
+ case "darwin":
+ ext = "dylib"
+ args = append(args, "-undefined", "suppress", "-flat_namespace")
+ case "windows":
+ ext = "dll"
+ args = append(args, "-DEXPORT_DLL")
+ }
+ sofname := "libcgosotest." + ext
+ args = append(args, "-o", sofname, "cgoso_c.c")
+
+ if err := t.dirCmd(dir, cc, args...).Run(); err != nil {
+ return err
+ }
+ defer os.Remove(filepath.Join(dir, sofname))
+
+ if err := t.dirCmd(dir, "go", "build", "-o", "main.exe", "main.go").Run(); err != nil {
+ return err
+ }
+ defer os.Remove(filepath.Join(dir, "main.exe"))
+
+ cmd := t.dirCmd(dir, "./main.exe")
+ if t.goos != "windows" {
+ s := "LD_LIBRARY_PATH"
+ if t.goos == "darwin" {
+ s = "DYLD_LIBRARY_PATH"
+ }
+ cmd.Env = mergeEnvLists([]string{s + "=."}, os.Environ())
+
+ // On FreeBSD 64-bit architectures, the 32-bit linker looks for
+ // different environment variables.
+ if t.goos == "freebsd" && t.gohostarch == "386" {
+ cmd.Env = mergeEnvLists([]string{"LD_32_LIBRARY_PATH=."}, cmd.Env)
+ }
+ }
+ return cmd.Run()
+}
+
+func (t *tester) hasBash() bool {
+ switch t.gohostos {
+ case "windows", "plan9":
+ return false
+ }
+ return true
+}
+
+func (t *tester) raceDetectorSupported() bool {
+ switch t.gohostos {
+ case "linux", "darwin", "freebsd", "windows":
+ return t.cgoEnabled && t.goarch == "amd64" && t.gohostos == t.goos
+ }
+ return false
+}
+
+func (t *tester) raceTest(dt *distTest) error {
+ t.addCmd(dt, "src", "go", "test", "-race", "-i", "runtime/race", "flag", "os/exec")
+ t.addCmd(dt, "src", "go", "test", "-race", "-run=Output", "runtime/race")
+ t.addCmd(dt, "src", "go", "test", "-race", "-short", "-run=TestParse|TestEcho", "flag", "os/exec")
+ // We don't want the following line, because it
+ // slows down all.bash (by 10 seconds on my laptop).
+ // The race builder should catch any error here, but doesn't.
+ // TODO(iant): Figure out how to catch this.
+ // t.addCmd(dt, "src", "go", "test", "-race", "-run=TestParallelTest", "cmd/go")
+ if t.cgoEnabled {
+ env := mergeEnvLists([]string{"GOTRACEBACK=2"}, os.Environ())
+ cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-race", "-short")
+ cmd.Env = env
+ }
+ if t.extLink() {
+ // Test with external linking; see issue 9133.
+ t.addCmd(dt, "src", "go", "test", "-race", "-short", "-ldflags=-linkmode=external", "-run=TestParse|TestEcho", "flag", "os/exec")
+ }
+ return nil
+}
+
+var runtest struct {
+ sync.Once
+ exe string
+ err error
+}
+
+func (t *tester) testDirTest(dt *distTest, shard, shards int) error {
+ runtest.Do(func() {
+ const exe = "runtest.exe" // named exe for Windows, but harmless elsewhere
+ cmd := t.dirCmd("test", "go", "build", "-o", exe, "run.go")
+ cmd.Env = mergeEnvLists([]string{"GOOS=" + t.gohostos, "GOARCH=" + t.gohostarch, "GOMAXPROCS="}, os.Environ())
+ runtest.exe = filepath.Join(cmd.Dir, exe)
+ if err := cmd.Run(); err != nil {
+ runtest.err = err
+ return
+ }
+ xatexit(func() {
+ os.Remove(runtest.exe)
+ })
+ })
+ if runtest.err != nil {
+ return runtest.err
+ }
+
+ t.addCmd(dt, "test", runtest.exe,
+ fmt.Sprintf("--shard=%d", shard),
+ fmt.Sprintf("--shards=%d", shards),
+ )
+ return nil
+}
+
+// mergeEnvLists merges the two environment lists such that
+// variables with the same name in "in" replace those in "out".
+// out may be mutated.
+func mergeEnvLists(in, out []string) []string {
+NextVar:
+ for _, inkv := range in {
+ k := strings.SplitAfterN(inkv, "=", 2)[0]
+ for i, outkv := range out {
+ if strings.HasPrefix(outkv, k) {
+ out[i] = inkv
+ continue NextVar
+ }
+ }
+ out = append(out, inkv)
+ }
+ return out
+}
+
+// cgoPackages is the standard packages that use cgo.
+var cgoPackages = []string{
+ "crypto/x509",
+ "net",
+ "os/user",
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/util.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/util.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/dist/util.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/dist/util.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,557 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "debug/elf"
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+// pathf is fmt.Sprintf for generating paths
+// (on windows it turns / into \ after the printf).
+func pathf(format string, args ...interface{}) string {
+ return filepath.Clean(fmt.Sprintf(format, args...))
+}
+
+// filter returns a slice containing the elements x from list for which f(x) == true.
+func filter(list []string, f func(string) bool) []string {
+ var out []string
+ for _, x := range list {
+ if f(x) {
+ out = append(out, x)
+ }
+ }
+ return out
+}
+
+// uniq returns a sorted slice containing the unique elements of list.
+func uniq(list []string) []string {
+ out := make([]string, len(list))
+ copy(out, list)
+ sort.Strings(out)
+ keep := out[:0]
+ for _, x := range out {
+ if len(keep) == 0 || keep[len(keep)-1] != x {
+ keep = append(keep, x)
+ }
+ }
+ return keep
+}
+
+// splitlines returns a slice with the result of splitting
+// the input p after each \n.
+func splitlines(p string) []string {
+ return strings.SplitAfter(p, "\n")
+}
+
+// splitfields splits the input p into non-empty fields
+// containing no spaces, returning the resulting slice.
+func splitfields(p string) []string {
+	return strings.Fields(p)
+}
+
+const (
+ CheckExit = 1 << iota
+ ShowOutput
+ Background
+)
+
+var outputLock sync.Mutex
+
+// run runs the command line cmd in dir.
+// If mode has ShowOutput set and Background unset, run passes cmd's output to
+// stdout/stderr directly. Otherwise, run returns cmd's output as a string.
+// If mode has CheckExit set and the command fails, run calls fatal.
+// If mode has Background set, this command is being run as a
+// Background job. Only bgrun should use the Background mode,
+// not other callers.
+func run(dir string, mode int, cmd ...string) string {
+ if vflag > 1 {
+ errprintf("run: %s\n", strings.Join(cmd, " "))
+ }
+
+ xcmd := exec.Command(cmd[0], cmd[1:]...)
+ xcmd.Dir = dir
+ var data []byte
+ var err error
+
+ // If we want to show command output and this is not
+ // a background command, assume it's the only thing
+ // running, so we can just let it write directly stdout/stderr
+ // as it runs without fear of mixing the output with some
+ // other command's output. Not buffering lets the output
+ // appear as it is printed instead of once the command exits.
+ // This is most important for the invocation of 'go1.4 build -v bootstrap/...'.
+ if mode&(Background|ShowOutput) == ShowOutput {
+ xcmd.Stdout = os.Stdout
+ xcmd.Stderr = os.Stderr
+ err = xcmd.Run()
+ } else {
+ data, err = xcmd.CombinedOutput()
+ }
+ if err != nil && mode&CheckExit != 0 {
+ outputLock.Lock()
+ if len(data) > 0 {
+ xprintf("%s\n", data)
+ }
+ outputLock.Unlock()
+ if mode&Background != 0 {
+ // Prevent fatal from waiting on our own goroutine's
+ // bghelper to exit:
+ bghelpers.Done()
+ }
+ fatal("FAILED: %v: %v", strings.Join(cmd, " "), err)
+ }
+ if mode&ShowOutput != 0 {
+ outputLock.Lock()
+ os.Stdout.Write(data)
+ outputLock.Unlock()
+ }
+ if vflag > 2 {
+ errprintf("run: %s DONE\n", strings.Join(cmd, " "))
+ }
+ return string(data)
+}
+
+var maxbg = 4 /* maximum number of jobs to run at once */
+
+var (
+ bgwork = make(chan func(), 1e5)
+ bgdone = make(chan struct{}, 1e5)
+
+ bghelpers sync.WaitGroup
+
+ dieOnce sync.Once // guards close of dying
+ dying = make(chan struct{})
+)
+
+func bginit() {
+ bghelpers.Add(maxbg)
+ for i := 0; i < maxbg; i++ {
+ go bghelper()
+ }
+}
+
+func bghelper() {
+ defer bghelpers.Done()
+ for {
+ select {
+ case <-dying:
+ return
+ case w := <-bgwork:
+ // Dying takes precedence over doing more work.
+ select {
+ case <-dying:
+ return
+ default:
+ w()
+ }
+ }
+ }
+}
+
+// bgrun is like run but runs the command in the background.
+// CheckExit|ShowOutput mode is implied (since output cannot be returned).
+// bgrun adds 1 to wg immediately, and calls Done when the work completes.
+func bgrun(wg *sync.WaitGroup, dir string, cmd ...string) {
+ wg.Add(1)
+ bgwork <- func() {
+ defer wg.Done()
+ run(dir, CheckExit|ShowOutput|Background, cmd...)
+ }
+}
+
+// bgwait waits for pending bgruns to finish.
+// bgwait must be called from only a single goroutine at a time.
+func bgwait(wg *sync.WaitGroup) {
+ done := make(chan struct{})
+ go func() {
+ wg.Wait()
+ close(done)
+ }()
+ select {
+ case <-done:
+ case <-dying:
+ }
+}
+
+// xgetwd returns the current directory.
+func xgetwd() string {
+ wd, err := os.Getwd()
+ if err != nil {
+ fatal("%s", err)
+ }
+ return wd
+}
+
+// xrealwd returns the 'real' name for the given path.
+// real is defined as what xgetwd returns in that directory.
+func xrealwd(path string) string {
+ old := xgetwd()
+ if err := os.Chdir(path); err != nil {
+ fatal("chdir %s: %v", path, err)
+ }
+ real := xgetwd()
+ if err := os.Chdir(old); err != nil {
+ fatal("chdir %s: %v", old, err)
+ }
+ return real
+}
+
+// isdir reports whether p names an existing directory.
+func isdir(p string) bool {
+ fi, err := os.Stat(p)
+ return err == nil && fi.IsDir()
+}
+
+// isfile reports whether p names an existing file.
+func isfile(p string) bool {
+ fi, err := os.Stat(p)
+ return err == nil && fi.Mode().IsRegular()
+}
+
+// mtime returns the modification time of the file p.
+func mtime(p string) time.Time {
+ fi, err := os.Stat(p)
+ if err != nil {
+ return time.Time{}
+ }
+ return fi.ModTime()
+}
+
+// isabs reports whether p is an absolute path.
+func isabs(p string) bool {
+ return filepath.IsAbs(p)
+}
+
+// readfile returns the content of the named file.
+func readfile(file string) string {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ fatal("%v", err)
+ }
+ return string(data)
+}
+
+const (
+ writeExec = 1 << iota
+ writeSkipSame
+)
+
+// writefile writes b to the named file, creating it if needed.
+// If flag&writeExec is set, the file is marked executable (mode 0777).
+// If flag&writeSkipSame is set and the file already exists with the
+// expected content, it is not rewritten, to avoid changing the time stamp.
+func writefile(b, file string, flag int) {
+	new := []byte(b)
+	if flag&writeSkipSame != 0 {
+		old, err := ioutil.ReadFile(file)
+		if err == nil && bytes.Equal(old, new) {
+			return
+		}
+	}
+	mode := os.FileMode(0666)
+	if flag&writeExec != 0 {
+		mode = 0777
+	}
+	err := ioutil.WriteFile(file, new, mode)
+	if err != nil {
+		fatal("%v", err)
+	}
+}
+
+// xmkdir creates the directory p.
+func xmkdir(p string) {
+ err := os.Mkdir(p, 0777)
+ if err != nil {
+ fatal("%v", err)
+ }
+}
+
+// xmkdirall creates the directory p and its parents, as needed.
+func xmkdirall(p string) {
+ err := os.MkdirAll(p, 0777)
+ if err != nil {
+ fatal("%v", err)
+ }
+}
+
+// xremove removes the file p.
+func xremove(p string) {
+ if vflag > 2 {
+ errprintf("rm %s\n", p)
+ }
+ os.Remove(p)
+}
+
+// xremoveall removes the file or directory tree rooted at p.
+func xremoveall(p string) {
+ if vflag > 2 {
+ errprintf("rm -r %s\n", p)
+ }
+ os.RemoveAll(p)
+}
+
+// xreaddir returns a list of the names of the files and subdirectories in dir.
+// The names are relative to dir; they are not full paths.
+func xreaddir(dir string) []string {
+	f, err := os.Open(dir)
+	if err != nil {
+		fatal("%v", err)
+	}
+	defer f.Close()
+	names, err := f.Readdirnames(-1)
+	if err != nil {
+		fatal("reading %s: %v", dir, err)
+	}
+	return names
+}
+
+// xreaddirfiles returns a list of the names of the files (not subdirectories) in dir.
+// The names are relative to dir; they are not full paths.
+func xreaddirfiles(dir string) []string {
+	f, err := os.Open(dir)
+	if err != nil {
+		fatal("%v", err)
+	}
+	defer f.Close()
+	infos, err := f.Readdir(-1)
+	if err != nil {
+		fatal("reading %s: %v", dir, err)
+	}
+	var names []string
+	for _, fi := range infos {
+		if !fi.IsDir() {
+			names = append(names, fi.Name())
+		}
+	}
+	return names
+}
+
+// xworkdir creates a new temporary directory to hold object files
+// and returns the name of that directory.
+func xworkdir() string {
+ name, err := ioutil.TempDir("", "go-tool-dist-")
+ if err != nil {
+ fatal("%v", err)
+ }
+ return name
+}
+
+// fatal prints an error message to standard error and exits.
+func fatal(format string, args ...interface{}) {
+ fmt.Fprintf(os.Stderr, "go tool dist: %s\n", fmt.Sprintf(format, args...))
+
+ dieOnce.Do(func() { close(dying) })
+
+ // Wait for background goroutines to finish,
+ // so that exit handler that removes the work directory
+ // is not fighting with active writes or open files.
+ bghelpers.Wait()
+
+ xexit(2)
+}
+
+var atexits []func()
+
+// xexit exits the process with return code n.
+func xexit(n int) {
+ for i := len(atexits) - 1; i >= 0; i-- {
+ atexits[i]()
+ }
+ os.Exit(n)
+}
+
+// xatexit schedules the exit-handler f to be run when the program exits.
+func xatexit(f func()) {
+ atexits = append(atexits, f)
+}
+
+// xprintf prints a message to standard output.
+func xprintf(format string, args ...interface{}) {
+ fmt.Printf(format, args...)
+}
+
+// errprintf prints a message to standard error.
+func errprintf(format string, args ...interface{}) {
+	fmt.Fprintf(os.Stderr, format, args...)
+}
+
+// main takes care of OS-specific startup and dispatches to xmain.
+func main() {
+ os.Setenv("TERM", "dumb") // disable escape codes in clang errors
+
+ slash = string(filepath.Separator)
+
+ gohostos = runtime.GOOS
+ switch gohostos {
+ case "darwin":
+ // Even on 64-bit platform, darwin uname -m prints i386.
+ // We don't support any of the OS X versions that run on 32-bit-only hardware anymore.
+ gohostarch = "amd64"
+ case "freebsd":
+ // Since FreeBSD 10 gcc is no longer part of the base system.
+ defaultclang = true
+ case "solaris":
+ // Even on 64-bit platform, solaris uname -m prints i86pc.
+ out := run("", CheckExit, "isainfo", "-n")
+ if strings.Contains(out, "amd64") {
+ gohostarch = "amd64"
+ }
+ if strings.Contains(out, "i386") {
+ gohostarch = "386"
+ }
+ case "plan9":
+ gohostarch = os.Getenv("objtype")
+ if gohostarch == "" {
+ fatal("$objtype is unset")
+ }
+ case "windows":
+ exe = ".exe"
+ }
+
+ sysinit()
+
+ if gohostarch == "" {
+ // Default Unix system.
+ out := run("", CheckExit, "uname", "-m")
+ switch {
+ case strings.Contains(out, "x86_64"), strings.Contains(out, "amd64"):
+ gohostarch = "amd64"
+ case strings.Contains(out, "86"):
+ gohostarch = "386"
+ case strings.Contains(out, "arm"):
+ gohostarch = "arm"
+ case strings.Contains(out, "aarch64"):
+ gohostarch = "arm64"
+ case strings.Contains(out, "ppc64le"):
+ gohostarch = "ppc64le"
+ case strings.Contains(out, "ppc64"):
+ gohostarch = "ppc64"
+ case strings.Contains(out, "mips64"):
+ file, err := elf.Open(os.Args[0])
+ if err != nil {
+ fatal("failed to open %s to determine endianness: %v", os.Args[0], err)
+ }
+ if file.FileHeader.ByteOrder == binary.BigEndian {
+ gohostarch = "mips64"
+ } else {
+ gohostarch = "mips64le"
+ }
+ case gohostos == "darwin":
+ if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM_") {
+ gohostarch = "arm"
+ }
+ default:
+ fatal("unknown architecture: %s", out)
+ }
+ }
+
+ if gohostarch == "arm" || gohostarch == "mips64" || gohostarch == "mips64le" {
+ maxbg = min(maxbg, runtime.NumCPU())
+ }
+ bginit()
+
+ // The OS X 10.6 linker does not support external linking mode.
+ // See golang.org/issue/5130.
+ //
+ // OS X 10.6 does not work with clang either, but OS X 10.9 requires it.
+ // It seems to work with OS X 10.8, so we default to clang for 10.8 and later.
+ // See golang.org/issue/5822.
+ //
+ // Roughly, OS X 10.N shows up as uname release (N+4),
+ // so OS X 10.6 is uname version 10 and OS X 10.8 is uname version 12.
+ if gohostos == "darwin" {
+ rel := run("", CheckExit, "uname", "-r")
+ if i := strings.Index(rel, "."); i >= 0 {
+ rel = rel[:i]
+ }
+ osx, _ := strconv.Atoi(rel)
+ if osx <= 6+4 {
+ goextlinkenabled = "0"
+ }
+ if osx >= 8+4 {
+ defaultclang = true
+ }
+ }
+
+ if len(os.Args) > 1 && os.Args[1] == "-check-goarm" {
+ useVFPv1() // might fail with SIGILL
+ println("VFPv1 OK.")
+ useVFPv3() // might fail with SIGILL
+ println("VFPv3 OK.")
+ os.Exit(0)
+ }
+
+ xinit()
+ xmain()
+ xexit(0)
+}
+
+// xsamefile reports whether f1 and f2 are the same file (or dir)
+func xsamefile(f1, f2 string) bool {
+ fi1, err1 := os.Stat(f1)
+ fi2, err2 := os.Stat(f2)
+ if err1 != nil || err2 != nil {
+ return f1 == f2
+ }
+ return os.SameFile(fi1, fi2)
+}
+
+func xgetgoarm() string {
+	if goos == "nacl" {
+		// NaCl guarantees VFPv3 and is always cross-compiled.
+		return "7"
+	}
+	if goos == "darwin" {
+		// Assume all darwin/arm devices have VFPv3. This
+		// port is also mostly cross-compiled, so it makes little
+		// sense to auto-detect the setting.
+		return "7"
+	}
+	if gohostarch != "arm" || goos != gohostos {
+		// Conservative default for cross-compilation.
+		return "5"
+	}
+	if goos == "freebsd" || goos == "openbsd" {
+		// FreeBSD has broken VFP support.
+		// OpenBSD currently only supports softfloat.
+		return "5"
+	}
+
+	// Try to exec ourselves in a mode to detect VFP support.
+	// Seeing how far it gets determines which instructions failed.
+	// The test is OS-agnostic.
+	out := run("", 0, os.Args[0], "-check-goarm")
+	v1ok := strings.Contains(out, "VFPv1 OK.")
+	v3ok := strings.Contains(out, "VFPv3 OK.")
+
+	if v1ok && v3ok {
+		return "7"
+	}
+	if v1ok {
+		return "6"
+	}
+	return "5"
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/go/build.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/go/build.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/go/build.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/go/build.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,3520 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "container/heap"
+ "debug/elf"
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var cmdBuild = &Command{
+ UsageLine: "build [-o output] [-i] [build flags] [packages]",
+ Short: "compile packages and dependencies",
+ Long: `
+Build compiles the packages named by the import paths,
+along with their dependencies, but it does not install the results.
+
+If the arguments to build are a list of .go files, build treats
+them as a list of source files specifying a single package.
+
+When compiling a single main package, build writes
+the resulting executable to an output file named after
+the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
+or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe').
+The '.exe' suffix is added when writing a Windows executable.
+
+When compiling multiple packages or a single non-main package,
+build compiles the packages but discards the resulting object,
+serving only as a check that the packages can be built.
+
+The -o flag, only allowed when compiling a single package,
+forces build to write the resulting executable or object
+to the named output file, instead of the default behavior described
+in the last two paragraphs.
+
+The -i flag installs the packages that are dependencies of the target.
+
+The build flags are shared by the build, clean, get, install, list, run,
+and test commands:
+
+ -a
+ force rebuilding of packages that are already up-to-date.
+ -n
+ print the commands but do not run them.
+ -p n
+ the number of programs, such as build commands or
+ test binaries, that can be run in parallel.
+ The default is the number of CPUs available, except
+ on darwin/arm which defaults to 1.
+ -race
+ enable data race detection.
+ Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
+ -msan
+ enable interoperation with memory sanitizer.
+ Supported only on linux/amd64,
+ and only with Clang/LLVM as the host C compiler.
+ -v
+ print the names of packages as they are compiled.
+ -work
+ print the name of the temporary work directory and
+ do not delete it when exiting.
+ -x
+ print the commands.
+
+ -asmflags 'flag list'
+ arguments to pass on each go tool asm invocation.
+ -buildmode mode
+ build mode to use. See 'go help buildmode' for more.
+ -compiler name
+ name of compiler to use, as in runtime.Compiler (gccgo or gc).
+ -gccgoflags 'arg list'
+ arguments to pass on each gccgo compiler/linker invocation.
+ -gcflags 'arg list'
+ arguments to pass on each go tool compile invocation.
+ -installsuffix suffix
+ a suffix to use in the name of the package installation directory,
+ in order to keep output separate from default builds.
+ If using the -race flag, the install suffix is automatically set to race
+ or, if set explicitly, has _race appended to it. Likewise for the -msan
+ flag. Using a -buildmode option that requires non-default compile flags
+ has a similar effect.
+ -ldflags 'flag list'
+ arguments to pass on each go tool link invocation.
+ -linkshared
+ link against shared libraries previously created with
+ -buildmode=shared.
+ -pkgdir dir
+ install and load all packages from dir instead of the usual locations.
+ For example, when building with a non-standard configuration,
+ use -pkgdir to keep generated packages in a separate location.
+ -tags 'tag list'
+ a list of build tags to consider satisfied during the build.
+ For more information about build tags, see the description of
+ build constraints in the documentation for the go/build package.
+ -toolexec 'cmd args'
+ a program to use to invoke toolchain programs like vet and asm.
+ For example, instead of running asm, the go command will run
+ 'cmd args /path/to/asm '.
+
+The list flags accept a space-separated list of strings. To embed spaces
+in an element in the list, surround it with either single or double quotes.
+
+For more about specifying packages, see 'go help packages'.
+For more about where packages and binaries are installed,
+run 'go help gopath'.
+For more about calling between Go and C/C++, run 'go help c'.
+
+Note: Build adheres to certain conventions such as those described
+by 'go help gopath'. Not all projects can follow these conventions,
+however. Installations that have their own conventions or that use
+a separate software build system may choose to use lower-level
+invocations such as 'go tool compile' and 'go tool link' to avoid
+some of the overheads and design decisions of the build tool.
+
+See also: go install, go get, go clean.
+ `,
+}
+
+func init() {
+ // break init cycle
+ cmdBuild.Run = runBuild
+ cmdInstall.Run = runInstall
+
+ cmdBuild.Flag.BoolVar(&buildI, "i", false, "")
+
+ addBuildFlags(cmdBuild)
+ addBuildFlags(cmdInstall)
+
+ if buildContext.GOOS == "darwin" {
+ switch buildContext.GOARCH {
+ case "arm", "arm64":
+ // darwin/arm cannot run multiple tests simultaneously.
+ // Parallelism is limited in go_darwin_arm_exec, but
+ // also needs to be limited here so go test std does not
+ // timeout tests that waiting to run.
+ buildP = 1
+ }
+ }
+}
+
+// Flags set by multiple commands.
+var buildA bool // -a flag
+var buildN bool // -n flag
+var buildP = runtime.NumCPU() // -p flag
+var buildV bool // -v flag
+var buildX bool // -x flag
+var buildI bool // -i flag
+var buildO = cmdBuild.Flag.String("o", "", "output file")
+var buildWork bool // -work flag
+var buildAsmflags []string // -asmflags flag
+var buildGcflags []string // -gcflags flag
+var buildLdflags []string // -ldflags flag
+var buildGccgoflags []string // -gccgoflags flag
+var buildRace bool // -race flag
+var buildMSan bool // -msan flag
+var buildToolExec []string // -toolexec flag
+var buildBuildmode string // -buildmode flag
+var buildLinkshared bool // -linkshared flag
+var buildPkgdir string // -pkgdir flag
+
+var buildContext = build.Default
+var buildToolchain toolchain = noToolchain{}
+var ldBuildmode string
+
+// buildCompiler implements flag.Var.
+// It implements Set by updating both
+// buildToolchain and buildContext.Compiler.
+type buildCompiler struct{}
+
+func (c buildCompiler) Set(value string) error {
+ switch value {
+ case "gc":
+ buildToolchain = gcToolchain{}
+ case "gccgo":
+ buildToolchain = gccgoToolchain{}
+ default:
+ return fmt.Errorf("unknown compiler %q", value)
+ }
+ buildContext.Compiler = value
+ return nil
+}
+
+func (c buildCompiler) String() string {
+ return buildContext.Compiler
+}
+
+func init() {
+ switch build.Default.Compiler {
+ case "gc":
+ buildToolchain = gcToolchain{}
+ case "gccgo":
+ buildToolchain = gccgoToolchain{}
+ }
+}
+
+// addBuildFlags adds the flags common to the build, clean, get,
+// install, list, run, and test commands.
+func addBuildFlags(cmd *Command) {
+ cmd.Flag.BoolVar(&buildA, "a", false, "")
+ cmd.Flag.BoolVar(&buildN, "n", false, "")
+ cmd.Flag.IntVar(&buildP, "p", buildP, "")
+ cmd.Flag.BoolVar(&buildV, "v", false, "")
+ cmd.Flag.BoolVar(&buildX, "x", false, "")
+
+ cmd.Flag.Var((*stringsFlag)(&buildAsmflags), "asmflags", "")
+ cmd.Flag.Var(buildCompiler{}, "compiler", "")
+ cmd.Flag.StringVar(&buildBuildmode, "buildmode", "default", "")
+ cmd.Flag.Var((*stringsFlag)(&buildGcflags), "gcflags", "")
+ cmd.Flag.Var((*stringsFlag)(&buildGccgoflags), "gccgoflags", "")
+ cmd.Flag.StringVar(&buildContext.InstallSuffix, "installsuffix", "", "")
+ cmd.Flag.Var((*stringsFlag)(&buildLdflags), "ldflags", "")
+ cmd.Flag.BoolVar(&buildLinkshared, "linkshared", false, "")
+ cmd.Flag.StringVar(&buildPkgdir, "pkgdir", "", "")
+ cmd.Flag.BoolVar(&buildRace, "race", false, "")
+ cmd.Flag.BoolVar(&buildMSan, "msan", false, "")
+ cmd.Flag.Var((*stringsFlag)(&buildContext.BuildTags), "tags", "")
+ cmd.Flag.Var((*stringsFlag)(&buildToolExec), "toolexec", "")
+ cmd.Flag.BoolVar(&buildWork, "work", false, "")
+}
+
+func addBuildFlagsNX(cmd *Command) {
+ cmd.Flag.BoolVar(&buildN, "n", false, "")
+ cmd.Flag.BoolVar(&buildX, "x", false, "")
+}
+
+func isSpaceByte(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+// fileExtSplit expects a filename and returns the name
+// and ext (without the dot). If the file has no
+// extension, ext will be empty.
+func fileExtSplit(file string) (name, ext string) {
+ dotExt := filepath.Ext(file)
+ name = file[:len(file)-len(dotExt)]
+ if dotExt != "" {
+ ext = dotExt[1:]
+ }
+ return
+}
+
+type stringsFlag []string
+
+func (v *stringsFlag) Set(s string) error {
+ var err error
+ *v, err = splitQuotedFields(s)
+ if *v == nil {
+ *v = []string{}
+ }
+ return err
+}
+
+func splitQuotedFields(s string) ([]string, error) {
+ // Split fields allowing '' or "" around elements.
+ // Quotes further inside the string do not count.
+ var f []string
+ for len(s) > 0 {
+ for len(s) > 0 && isSpaceByte(s[0]) {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ break
+ }
+ // Accepted quoted string. No unescaping inside.
+ if s[0] == '"' || s[0] == '\'' {
+ quote := s[0]
+ s = s[1:]
+ i := 0
+ for i < len(s) && s[i] != quote {
+ i++
+ }
+ if i >= len(s) {
+ return nil, fmt.Errorf("unterminated %c string", quote)
+ }
+ f = append(f, s[:i])
+ s = s[i+1:]
+ continue
+ }
+ i := 0
+ for i < len(s) && !isSpaceByte(s[i]) {
+ i++
+ }
+ f = append(f, s[:i])
+ s = s[i:]
+ }
+ return f, nil
+}
+
+func (v *stringsFlag) String() string {
+ return ""
+}
+
+func pkgsMain(pkgs []*Package) (res []*Package) {
+ for _, p := range pkgs {
+ if p.Name == "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+func pkgsNotMain(pkgs []*Package) (res []*Package) {
+ for _, p := range pkgs {
+ if p.Name != "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+var pkgsFilter = func(pkgs []*Package) []*Package { return pkgs }
+
+func buildModeInit() {
+ _, gccgo := buildToolchain.(gccgoToolchain)
+ var codegenArg string
+ platform := goos + "/" + goarch
+ switch buildBuildmode {
+ case "archive":
+ pkgsFilter = pkgsNotMain
+ case "c-archive":
+ pkgsFilter = func(p []*Package) []*Package {
+ if len(p) != 1 || p[0].Name != "main" {
+ fatalf("-buildmode=c-archive requires exactly one main package")
+ }
+ return p
+ }
+ exeSuffix = ".a"
+ ldBuildmode = "c-archive"
+ case "c-shared":
+ pkgsFilter = pkgsMain
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/amd64", "linux/arm", "linux/arm64", "linux/386",
+ "android/amd64", "android/arm", "android/arm64", "android/386":
+ codegenArg = "-shared"
+ case "darwin/amd64", "darwin/386":
+ default:
+ fatalf("-buildmode=c-shared not supported on %s\n", platform)
+ }
+ }
+ ldBuildmode = "c-shared"
+ case "default":
+ switch platform {
+ case "android/arm", "android/arm64", "android/amd64", "android/386":
+ codegenArg = "-shared"
+ ldBuildmode = "pie"
+ default:
+ ldBuildmode = "exe"
+ }
+ case "exe":
+ pkgsFilter = pkgsMain
+ ldBuildmode = "exe"
+ case "pie":
+ if gccgo {
+ fatalf("-buildmode=pie not supported by gccgo")
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le",
+ "android/amd64", "android/arm", "android/arm64", "android/386":
+ codegenArg = "-shared"
+ default:
+ fatalf("-buildmode=pie not supported on %s\n", platform)
+ }
+ }
+ ldBuildmode = "pie"
+ case "shared":
+ pkgsFilter = pkgsNotMain
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
+ default:
+ fatalf("-buildmode=shared not supported on %s\n", platform)
+ }
+ codegenArg = "-dynlink"
+ }
+ if *buildO != "" {
+ fatalf("-buildmode=shared and -o not supported together")
+ }
+ ldBuildmode = "shared"
+ default:
+ fatalf("buildmode=%s not supported", buildBuildmode)
+ }
+ if buildLinkshared {
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
+ buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
+ default:
+ fatalf("-linkshared not supported on %s\n", platform)
+ }
+ codegenArg = "-dynlink"
+ // TODO(mwhudson): remove -w when that gets fixed in linker.
+ buildLdflags = append(buildLdflags, "-linkshared", "-w")
+ }
+ }
+ if codegenArg != "" {
+ if gccgo {
+ buildGccgoflags = append(buildGccgoflags, codegenArg)
+ } else {
+ buildAsmflags = append(buildAsmflags, codegenArg)
+ buildGcflags = append(buildGcflags, codegenArg)
+ }
+ if buildContext.InstallSuffix != "" {
+ buildContext.InstallSuffix += "_"
+ }
+ buildContext.InstallSuffix += codegenArg[1:]
+ }
+}
+
+func runBuild(cmd *Command, args []string) {
+ instrumentInit()
+ buildModeInit()
+ var b builder
+ b.init()
+
+ pkgs := packagesForBuild(args)
+
+ if len(pkgs) == 1 && pkgs[0].Name == "main" && *buildO == "" {
+ _, *buildO = path.Split(pkgs[0].ImportPath)
+ *buildO += exeSuffix
+ }
+
+ // sanity check some often mis-used options
+ switch buildContext.Compiler {
+ case "gccgo":
+ if len(buildGcflags) != 0 {
+ fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags")
+ }
+ if len(buildLdflags) != 0 {
+ fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags")
+ }
+ case "gc":
+ if len(buildGccgoflags) != 0 {
+ fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags")
+ }
+ }
+
+ depMode := modeBuild
+ if buildI {
+ depMode = modeInstall
+ }
+
+ if *buildO != "" {
+ if len(pkgs) > 1 {
+ fatalf("go build: cannot use -o with multiple packages")
+ } else if len(pkgs) == 0 {
+ fatalf("no packages to build")
+ }
+ p := pkgs[0]
+ p.target = *buildO
+ p.Stale = true // must build - not up to date
+ a := b.action(modeInstall, depMode, p)
+ b.do(a)
+ return
+ }
+
+ var a *action
+ if buildBuildmode == "shared" {
+ pkgs := pkgsFilter(packages(args))
+ if libName, err := libname(args, pkgs); err != nil {
+ fatalf("%s", err.Error())
+ } else {
+ a = b.libaction(libName, pkgs, modeBuild, depMode)
+ }
+ } else {
+ a = &action{}
+ for _, p := range pkgsFilter(packages(args)) {
+ a.deps = append(a.deps, b.action(modeBuild, depMode, p))
+ }
+ }
+ b.do(a)
+}
+
+var cmdInstall = &Command{
+ UsageLine: "install [build flags] [packages]",
+ Short: "compile and install packages and dependencies",
+ Long: `
+Install compiles and installs the packages named by the import paths,
+along with their dependencies.
+
+For more about the build flags, see 'go help build'.
+For more about specifying packages, see 'go help packages'.
+
+See also: go build, go get, go clean.
+ `,
+}
+
+// isMetaPackage checks if name is a reserved package name that expands to multiple packages
+func isMetaPackage(name string) bool {
+ return name == "std" || name == "cmd" || name == "all"
+}
+
+// libname returns the filename to use for the shared library when using
+// -buildmode=shared. The rules we use are:
+// Use arguments for special 'meta' packages:
+// std --> libstd.so
+// std cmd --> libstd,cmd.so
+// A single non-meta argument with trailing "/..." is special cased:
+// foo/... --> libfoo.so
+// (A relative path like "./..." expands the "." first)
+// Use import paths for other cases, changing '/' to '-':
+// somelib --> libsubdir-somelib.so
+// ./ or ../ --> libsubdir-somelib.so
+// gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
+// a/... b/... ---> liba/c,b/d.so - all matching import paths
+// Name parts are joined with ','.
+func libname(args []string, pkgs []*Package) (string, error) {
+ var libname string
+ appendName := func(arg string) {
+ if libname == "" {
+ libname = arg
+ } else {
+ libname += "," + arg
+ }
+ }
+ var haveNonMeta bool
+ for _, arg := range args {
+ if isMetaPackage(arg) {
+ appendName(arg)
+ } else {
+ haveNonMeta = true
+ }
+ }
+ if len(libname) == 0 { // non-meta packages only. use import paths
+ if len(args) == 1 && strings.HasSuffix(args[0], "/...") {
+ // Special case of "foo/..." as mentioned above.
+ arg := strings.TrimSuffix(args[0], "/...")
+ if build.IsLocalImport(arg) {
+ cwd, _ := os.Getwd()
+ bp, _ := buildContext.ImportDir(filepath.Join(cwd, arg), build.FindOnly)
+ if bp.ImportPath != "" && bp.ImportPath != "." {
+ arg = bp.ImportPath
+ }
+ }
+ appendName(strings.Replace(arg, "/", "-", -1))
+ } else {
+ for _, pkg := range pkgs {
+ appendName(strings.Replace(pkg.ImportPath, "/", "-", -1))
+ }
+ }
+ } else if haveNonMeta { // have both meta package and a non-meta one
+ return "", errors.New("mixing of meta and non-meta packages is not allowed")
+ }
+ // TODO(mwhudson): Needs to change for platforms that use different naming
+ // conventions...
+ return "lib" + libname + ".so", nil
+}
+
+func runInstall(cmd *Command, args []string) {
+ if gobin != "" && !filepath.IsAbs(gobin) {
+ fatalf("cannot install, GOBIN must be an absolute path")
+ }
+
+ instrumentInit()
+ buildModeInit()
+ pkgs := pkgsFilter(packagesForBuild(args))
+
+ for _, p := range pkgs {
+ if p.Target == "" && (!p.Standard || p.ImportPath != "unsafe") {
+ switch {
+ case p.gobinSubdir:
+ errorf("go install: cannot install cross-compiled binaries when GOBIN is set")
+ case p.cmdline:
+ errorf("go install: no install location for .go files listed on command line (GOBIN not set)")
+ case p.ConflictDir != "":
+ errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
+ default:
+ errorf("go install: no install location for directory %s outside GOPATH\n"+
+ "\tFor more details see: go help gopath", p.Dir)
+ }
+ }
+ }
+ exitIfErrors()
+
+ var b builder
+ b.init()
+ var a *action
+ if buildBuildmode == "shared" {
+ if libName, err := libname(args, pkgs); err != nil {
+ fatalf("%s", err.Error())
+ } else {
+ a = b.libaction(libName, pkgs, modeInstall, modeInstall)
+ }
+ } else {
+ a = &action{}
+ var tools []*action
+ for _, p := range pkgs {
+ // If p is a tool, delay the installation until the end of the build.
+ // This avoids installing assemblers/compilers that are being executed
+ // by other steps in the build.
+ // cmd/cgo is handled specially in b.action, so that we can
+ // both build and use it in the same 'go install'.
+ action := b.action(modeInstall, modeInstall, p)
+ if goTools[p.ImportPath] == toTool && p.ImportPath != "cmd/cgo" {
+ a.deps = append(a.deps, action.deps...)
+ action.deps = append(action.deps, a)
+ tools = append(tools, action)
+ continue
+ }
+ a.deps = append(a.deps, action)
+ }
+ if len(tools) > 0 {
+ a = &action{
+ deps: tools,
+ }
+ }
+ }
+ b.do(a)
+ exitIfErrors()
+
+ // Success. If this command is 'go install' with no arguments
+ // and the current directory (the implicit argument) is a command,
+ // remove any leftover command binary from a previous 'go build'.
+ // The binary is installed; it's not needed here anymore.
+ // And worse it might be a stale copy, which you don't want to find
+ // instead of the installed one if $PATH contains dot.
+ // One way to view this behavior is that it is as if 'go install' first
+ // runs 'go build' and the moves the generated file to the install dir.
+ // See issue 9645.
+ if len(args) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" {
+ // Compute file 'go build' would have created.
+ // If it exists and is an executable file, remove it.
+ _, targ := filepath.Split(pkgs[0].ImportPath)
+ targ += exeSuffix
+ if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target { // maybe $GOBIN is the current directory
+ fi, err := os.Stat(targ)
+ if err == nil {
+ m := fi.Mode()
+ if m.IsRegular() {
+ if m&0111 != 0 || goos == "windows" { // windows never sets executable bit
+ os.Remove(targ)
+ }
+ }
+ }
+ }
+ }
+}
+
+// Global build parameters (used during package load)
+var (
+ goarch string
+ goos string
+ exeSuffix string
+ gopath []string
+)
+
+func init() {
+ goarch = buildContext.GOARCH
+ goos = buildContext.GOOS
+ if goos == "windows" {
+ exeSuffix = ".exe"
+ }
+ gopath = filepath.SplitList(buildContext.GOPATH)
+}
+
+// A builder holds global state about a build.
+// It does not hold per-package state, because we
+// build packages in parallel, and the builder is shared.
+type builder struct {
+ work string // the temporary work directory (ends in filepath.Separator)
+ actionCache map[cacheKey]*action // a cache of already-constructed actions
+ mkdirCache map[string]bool // a cache of created directories
+ print func(args ...interface{}) (int, error)
+
+ output sync.Mutex
+ scriptDir string // current directory in printed script
+
+ exec sync.Mutex
+ readySema chan bool
+ ready actionQueue
+}
+
+// An action represents a single action in the action graph.
+type action struct {
+ p *Package // the package this action works on
+ deps []*action // actions that must happen before this one
+ triggers []*action // inverse of deps
+ cgo *action // action for cgo binary if needed
+ args []string // additional args for runProgram
+ testOutput *bytes.Buffer // test output buffer
+
+ f func(*builder, *action) error // the action itself (nil = no-op)
+ ignoreFail bool // whether to run f even if dependencies fail
+
+ // Generated files, directories.
+ link bool // target is executable, not just package
+ pkgdir string // the -I or -L argument to use when importing this package
+ objdir string // directory for intermediate objects
+ objpkg string // the intermediate package .a file created during the action
+ target string // goal of the action: the created package or executable
+
+ // Execution state.
+ pending int // number of deps yet to complete
+ priority int // relative execution priority
+ failed bool // whether the action failed
+}
+
+// cacheKey is the key for the action cache.
+type cacheKey struct {
+ mode buildMode
+ p *Package
+ shlib string
+}
+
+// buildMode specifies the build mode:
+// are we just building things or also installing the results?
+type buildMode int
+
+const (
+ modeBuild buildMode = iota
+ modeInstall
+)
+
+var (
+ goroot = filepath.Clean(runtime.GOROOT())
+ gobin = os.Getenv("GOBIN")
+ gorootBin = filepath.Join(goroot, "bin")
+ gorootPkg = filepath.Join(goroot, "pkg")
+ gorootSrc = filepath.Join(goroot, "src")
+)
+
+func (b *builder) init() {
+ var err error
+ b.print = func(a ...interface{}) (int, error) {
+ return fmt.Fprint(os.Stderr, a...)
+ }
+ b.actionCache = make(map[cacheKey]*action)
+ b.mkdirCache = make(map[string]bool)
+
+ if buildN {
+ b.work = "$WORK"
+ } else {
+ b.work, err = ioutil.TempDir("", "go-build")
+ if err != nil {
+ fatalf("%s", err)
+ }
+ if buildX || buildWork {
+ fmt.Fprintf(os.Stderr, "WORK=%s\n", b.work)
+ }
+ if !buildWork {
+ workdir := b.work
+ atexit(func() { os.RemoveAll(workdir) })
+ }
+ }
+}
+
+// goFilesPackage creates a package for building a collection of Go files
+// (typically named on the command line). The target is named p.a for
+// package p or named after the first Go file for package main.
+func goFilesPackage(gofiles []string) *Package {
+ // TODO: Remove this restriction.
+ for _, f := range gofiles {
+ if !strings.HasSuffix(f, ".go") {
+ fatalf("named files must be .go files")
+ }
+ }
+
+ var stk importStack
+ ctxt := buildContext
+ ctxt.UseAllFiles = true
+
+ // Synthesize fake "directory" that only shows the named files,
+ // to make it look like this is a standard package or
+ // command directory. So that local imports resolve
+ // consistently, the files must all be in the same directory.
+ var dirent []os.FileInfo
+ var dir string
+ for _, file := range gofiles {
+ fi, err := os.Stat(file)
+ if err != nil {
+ fatalf("%s", err)
+ }
+ if fi.IsDir() {
+ fatalf("%s is a directory, should be a Go file", file)
+ }
+ dir1, _ := filepath.Split(file)
+ if dir1 == "" {
+ dir1 = "./"
+ }
+ if dir == "" {
+ dir = dir1
+ } else if dir != dir1 {
+ fatalf("named files must all be in one directory; have %s and %s", dir, dir1)
+ }
+ dirent = append(dirent, fi)
+ }
+ ctxt.ReadDir = func(string) ([]os.FileInfo, error) { return dirent, nil }
+
+ var err error
+ if dir == "" {
+ dir = cwd
+ }
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ fatalf("%s", err)
+ }
+
+ bp, err := ctxt.ImportDir(dir, 0)
+ pkg := new(Package)
+ pkg.local = true
+ pkg.cmdline = true
+ stk.push("main")
+ pkg.load(&stk, bp, err)
+ stk.pop()
+ pkg.localPrefix = dirToImportPath(dir)
+ pkg.ImportPath = "command-line-arguments"
+ pkg.target = ""
+
+ if pkg.Name == "main" {
+ _, elem := filepath.Split(gofiles[0])
+ exe := elem[:len(elem)-len(".go")] + exeSuffix
+ if *buildO == "" {
+ *buildO = exe
+ }
+ if gobin != "" {
+ pkg.target = filepath.Join(gobin, exe)
+ }
+ }
+
+ pkg.Target = pkg.target
+ pkg.Stale = true
+
+ computeStale(pkg)
+ return pkg
+}
+
+// readpkglist returns the list of packages that were built into the shared library
+// at shlibpath. For the native toolchain this list is stored, newline separated, in
+// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
+// .go_export section.
+func readpkglist(shlibpath string) (pkgs []*Package) {
+ var stk importStack
+ if _, gccgo := buildToolchain.(gccgoToolchain); gccgo {
+ f, _ := elf.Open(shlibpath)
+ sect := f.Section(".go_export")
+ data, _ := sect.Data()
+ scanner := bufio.NewScanner(bytes.NewBuffer(data))
+ for scanner.Scan() {
+ t := scanner.Text()
+ if strings.HasPrefix(t, "pkgpath ") {
+ t = strings.TrimPrefix(t, "pkgpath ")
+ t = strings.TrimSuffix(t, ";")
+ pkgs = append(pkgs, loadPackage(t, &stk))
+ }
+ }
+ } else {
+ pkglistbytes, err := readELFNote(shlibpath, "Go\x00\x00", 1)
+ if err != nil {
+ fatalf("readELFNote failed: %v", err)
+ }
+ scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes))
+ for scanner.Scan() {
+ t := scanner.Text()
+ pkgs = append(pkgs, loadPackage(t, &stk))
+ }
+ }
+ return
+}
+
+// action returns the action for applying the given operation (mode) to the package.
+// depMode is the action to use when building dependencies.
+// action never looks for p in a shared library, but may find p's dependencies in a
+// shared library if buildLinkshared is true.
+func (b *builder) action(mode buildMode, depMode buildMode, p *Package) *action {
+ return b.action1(mode, depMode, p, false, "")
+}
+
+// action1 returns the action for applying the given operation (mode) to the package.
+// depMode is the action to use when building dependencies.
+// action1 will look for p in a shared library if lookshared is true.
+// forShlib is the shared library that p will become part of, if any.
+func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, lookshared bool, forShlib string) *action {
+ shlib := ""
+ if lookshared {
+ shlib = p.Shlib
+ }
+ key := cacheKey{mode, p, shlib}
+
+ a := b.actionCache[key]
+ if a != nil {
+ return a
+ }
+ if shlib != "" {
+ key2 := cacheKey{modeInstall, nil, shlib}
+ a = b.actionCache[key2]
+ if a != nil {
+ b.actionCache[key] = a
+ return a
+ }
+ pkgs := readpkglist(shlib)
+ a = b.libaction(filepath.Base(shlib), pkgs, modeInstall, depMode)
+ b.actionCache[key2] = a
+ b.actionCache[key] = a
+ return a
+ }
+
+ a = &action{p: p, pkgdir: p.build.PkgRoot}
+ if p.pkgdir != "" { // overrides p.t
+ a.pkgdir = p.pkgdir
+ }
+ b.actionCache[key] = a
+
+ for _, p1 := range p.imports {
+ if forShlib != "" {
+ // p is part of a shared library.
+ if p1.Shlib != "" && p1.Shlib != forShlib {
+ // p1 is explicitly part of a different shared library.
+ // Put the action for that shared library into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, true, p1.Shlib))
+ } else {
+ // p1 is (implicitly or not) part of this shared library.
+ // Put the action for p1 into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, false, forShlib))
+ }
+ } else {
+ // p is not part of a shared library.
+ // If p1 is in a shared library, put the action for that into
+ // a.deps, otherwise put the action for p1 into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, buildLinkshared, p1.Shlib))
+ }
+ }
+
+ // If we are not doing a cross-build, then record the binary we'll
+ // generate for cgo as a dependency of the build of any package
+ // using cgo, to make sure we do not overwrite the binary while
+ // a package is using it. If this is a cross-build, then the cgo we
+ // are writing is not the cgo we need to use.
+ if goos == runtime.GOOS && goarch == runtime.GOARCH && !buildRace && !buildMSan {
+ if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !buildLinkshared && buildBuildmode != "shared" {
+ var stk importStack
+ p1 := loadPackage("cmd/cgo", &stk)
+ if p1.Error != nil {
+ fatalf("load cmd/cgo: %v", p1.Error)
+ }
+ a.cgo = b.action(depMode, depMode, p1)
+ a.deps = append(a.deps, a.cgo)
+ }
+ }
+
+ if p.Standard {
+ switch p.ImportPath {
+ case "builtin", "unsafe":
+ // Fake packages - nothing to build.
+ return a
+ }
+ // gccgo standard library is "fake" too.
+ if _, ok := buildToolchain.(gccgoToolchain); ok {
+ // the target name is needed for cgo.
+ a.target = p.target
+ return a
+ }
+ }
+
+ if !p.Stale && p.target != "" {
+ // p.Stale==false implies that p.target is up-to-date.
+ // Record target name for use by actions depending on this one.
+ a.target = p.target
+ return a
+ }
+
+ if p.local && p.target == "" {
+ // Imported via local path. No permanent target.
+ mode = modeBuild
+ }
+ work := p.pkgdir
+ if work == "" {
+ work = b.work
+ }
+ a.objdir = filepath.Join(work, a.p.ImportPath, "_obj") + string(filepath.Separator)
+ a.objpkg = buildToolchain.pkgpath(work, a.p)
+ a.link = p.Name == "main"
+
+ switch mode {
+ case modeInstall:
+ a.f = (*builder).install
+ a.deps = []*action{b.action1(modeBuild, depMode, p, lookshared, forShlib)}
+ a.target = a.p.target
+
+ // Install header for cgo in c-archive and c-shared modes.
+ if p.usesCgo() && (buildBuildmode == "c-archive" || buildBuildmode == "c-shared") {
+ hdrTarget := a.target[:len(a.target)-len(filepath.Ext(a.target))] + ".h"
+ if buildContext.Compiler == "gccgo" {
+ // For the header file, remove the "lib"
+ // added by go/build, so we generate pkg.h
+ // rather than libpkg.h.
+ dir, file := filepath.Split(hdrTarget)
+ file = strings.TrimPrefix(file, "lib")
+ hdrTarget = filepath.Join(dir, file)
+ }
+ ah := &action{
+ p: a.p,
+ deps: []*action{a.deps[0]},
+ f: (*builder).installHeader,
+ pkgdir: a.pkgdir,
+ objdir: a.objdir,
+ target: hdrTarget,
+ }
+ a.deps = append(a.deps, ah)
+ }
+
+ case modeBuild:
+ a.f = (*builder).build
+ a.target = a.objpkg
+ if a.link {
+ // An executable file. (This is the name of a temporary file.)
+ // Because we run the temporary file in 'go run' and 'go test',
+ // the name will show up in ps listings. If the caller has specified
+ // a name, use that instead of a.out. The binary is generated
+ // in an otherwise empty subdirectory named exe to avoid
+ // naming conflicts. The only possible conflict is if we were
+ // to create a top-level package named exe.
+ name := "a.out"
+ if p.exeName != "" {
+ name = p.exeName
+ } else if goos == "darwin" && buildBuildmode == "c-shared" && p.target != "" {
+ // On OS X, the linker output name gets recorded in the
+ // shared library's LC_ID_DYLIB load command.
+ // The code invoking the linker knows to pass only the final
+ // path element. Arrange that the path element matches what
+ // we'll install it as; otherwise the library is only loadable as "a.out".
+ _, name = filepath.Split(p.target)
+ }
+ a.target = a.objdir + filepath.Join("exe", name) + exeSuffix
+ }
+ }
+
+ return a
+}
+
// libaction returns the action for building or installing the shared
// library libname from pkgs. mode selects building vs installing the
// library itself; depMode is the mode used for the per-package
// dependency actions.
func (b *builder) libaction(libname string, pkgs []*Package, mode, depMode buildMode) *action {
	a := &action{}
	switch mode {
	default:
		fatalf("unrecognized mode %v", mode)

	case modeBuild:
		// Build the library into the temporary work directory.
		a.f = (*builder).linkShared
		a.target = filepath.Join(b.work, libname)
		for _, p := range pkgs {
			if p.target == "" {
				// Packages with no install target (e.g. commands built
				// for run) contribute nothing to the library.
				continue
			}
			a.deps = append(a.deps, b.action(depMode, depMode, p))
		}

	case modeInstall:
		// Currently build mode shared forces external linking mode, and
		// external linking mode forces an import of runtime/cgo (and
		// math on arm). So if it was not passed on the command line and
		// it is not present in another shared library, add it here.
		_, gccgo := buildToolchain.(gccgoToolchain)
		if !gccgo {
			seencgo := false
			for _, p := range pkgs {
				seencgo = seencgo || (p.Standard && p.ImportPath == "runtime/cgo")
			}
			if !seencgo {
				var stk importStack
				p := loadPackage("runtime/cgo", &stk)
				if p.Error != nil {
					fatalf("load runtime/cgo: %v", p.Error)
				}
				computeStale(p)
				// If runtime/cgo is in another shared library, then that's
				// also the shared library that contains runtime, so
				// something will depend on it and so runtime/cgo's staleness
				// will be checked when processing that library.
				if p.Shlib == "" || p.Shlib == libname {
					// Copy pkgs before appending so the caller's slice
					// is not mutated.
					pkgs = append([]*Package{}, pkgs...)
					pkgs = append(pkgs, p)
				}
			}
			if goarch == "arm" {
				seenmath := false
				for _, p := range pkgs {
					seenmath = seenmath || (p.Standard && p.ImportPath == "math")
				}
				if !seenmath {
					var stk importStack
					p := loadPackage("math", &stk)
					if p.Error != nil {
						fatalf("load math: %v", p.Error)
					}
					computeStale(p)
					// If math is in another shared library, then that's
					// also the shared library that contains runtime, so
					// something will depend on it and so math's staleness
					// will be checked when processing that library.
					if p.Shlib == "" || p.Shlib == libname {
						pkgs = append([]*Package{}, pkgs...)
						pkgs = append(pkgs, p)
					}
				}
			}
		}

		// Figure out where the library will go.
		// All packages must share a single target root; otherwise
		// there is no unambiguous install location for the library.
		var libdir string
		for _, p := range pkgs {
			plibdir := p.build.PkgTargetRoot
			if gccgo {
				plibdir = filepath.Join(plibdir, "shlibs")
			}
			if libdir == "" {
				libdir = plibdir
			} else if libdir != plibdir {
				fatalf("multiple roots %s & %s", libdir, plibdir)
			}
		}
		a.target = filepath.Join(libdir, libname)

		// Now we can check whether we need to rebuild it.
		// The library is stale if any member package is stale or has
		// an archive newer than the installed library.
		stale := false
		var built time.Time
		if fi, err := os.Stat(a.target); err == nil {
			built = fi.ModTime()
		}
		for _, p := range pkgs {
			if p.target == "" {
				continue
			}
			stale = stale || p.Stale
			lstat, err := os.Stat(p.target)
			if err != nil || lstat.ModTime().After(built) {
				stale = true
			}
			a.deps = append(a.deps, b.action1(depMode, depMode, p, false, a.target))
		}

		if stale {
			a.f = (*builder).install
			buildAction := b.libaction(libname, pkgs, modeBuild, depMode)
			a.deps = []*action{buildAction}
			for _, p := range pkgs {
				if p.target == "" {
					continue
				}
				// Also install a .shlibname file recording which shared
				// library provides the package.
				// NOTE(review): this assumes p.target ends in a
				// two-character suffix (".a") — confirm for all toolchains.
				shlibnameaction := &action{}
				shlibnameaction.f = (*builder).installShlibname
				shlibnameaction.target = p.target[:len(p.target)-2] + ".shlibname"
				a.deps = append(a.deps, shlibnameaction)
				shlibnameaction.deps = append(shlibnameaction.deps, buildAction)
			}
		}
	}
	return a
}
+
+// actionList returns the list of actions in the dag rooted at root
+// as visited in a depth-first post-order traversal.
+func actionList(root *action) []*action {
+ seen := map[*action]bool{}
+ all := []*action{}
+ var walk func(*action)
+ walk = func(a *action) {
+ if seen[a] {
+ return
+ }
+ seen[a] = true
+ for _, a1 := range a.deps {
+ walk(a1)
+ }
+ all = append(all, a)
+ }
+ walk(root)
+ return all
+}
+
// allArchiveActions returns a list of the archive dependencies of root.
// This is needed because if package p depends on package q that is in libr.so, the
// action graph looks like p->libr.so->q and so just scanning through p's
// dependencies does not find the import dir for q.
func allArchiveActions(root *action) []*action {
	seen := map[*action]bool{}
	r := []*action{}
	var walk func(*action)
	walk = func(a *action) {
		if seen[a] {
			return
		}
		seen[a] = true
		// Descend through shared-library actions (and the root itself)
		// to find the archives they contain; collect .a targets directly.
		if strings.HasSuffix(a.target, ".so") || a == root {
			for _, a1 := range a.deps {
				walk(a1)
			}
		} else if strings.HasSuffix(a.target, ".a") {
			r = append(r, a)
		}
	}
	walk(root)
	return r
}
+
// do runs the action graph rooted at root, executing independent
// actions in parallel (up to buildP workers).
func (b *builder) do(root *action) {
	// Build list of all actions, assigning depth-first post-order priority.
	// The original implementation here was a true queue
	// (using a channel) but it had the effect of getting
	// distracted by low-level leaf actions to the detriment
	// of completing higher-level actions. The order of
	// work does not matter much to overall execution time,
	// but when running "go test std" it is nice to see each test
	// results as soon as possible. The priorities assigned
	// ensure that, all else being equal, the execution prefers
	// to do what it would have done first in a simple depth-first
	// dependency order traversal.
	all := actionList(root)
	for i, a := range all {
		a.priority = i
	}

	// readySema counts entries available in b.ready. Its capacity is
	// len(all), so the sends below can never block.
	b.readySema = make(chan bool, len(all))

	// Initialize per-action execution state.
	for _, a := range all {
		for _, a1 := range a.deps {
			a1.triggers = append(a1.triggers, a)
		}
		a.pending = len(a.deps)
		if a.pending == 0 {
			// No dependencies: runnable immediately.
			b.ready.push(a)
			b.readySema <- true
		}
	}

	// Handle runs a single action and takes care of triggering
	// any actions that are runnable as a result.
	handle := func(a *action) {
		var err error
		if a.f != nil && (!a.failed || a.ignoreFail) {
			err = a.f(b, a)
		}

		// The actions run in parallel but all the updates to the
		// shared work state are serialized through b.exec.
		b.exec.Lock()
		defer b.exec.Unlock()

		if err != nil {
			if err == errPrintedOutput {
				setExitStatus(2)
			} else {
				errorf("%s", err)
			}
			a.failed = true
		}

		// Propagate failure and release any dependents whose last
		// dependency just finished.
		for _, a0 := range a.triggers {
			if a.failed {
				a0.failed = true
			}
			if a0.pending--; a0.pending == 0 {
				b.ready.push(a0)
				b.readySema <- true
			}
		}

		// root is always last (post-order), so finishing it means the
		// whole graph is done; closing readySema stops the workers.
		if a == root {
			close(b.readySema)
		}
	}

	var wg sync.WaitGroup

	// Kick off goroutines according to parallelism.
	// If we are using the -n flag (just printing commands)
	// drop the parallelism to 1, both to make the output
	// deterministic and because there is no real work anyway.
	par := buildP
	if buildN {
		par = 1
	}
	for i := 0; i < par; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for {
				select {
				case _, ok := <-b.readySema:
					if !ok {
						return
					}
					// Receiving a value from b.readySema entitles
					// us to take from the ready queue.
					b.exec.Lock()
					a := b.ready.pop()
					b.exec.Unlock()
					handle(a)
				case <-interrupted:
					setExitStatus(1)
					return
				}
			}
		}()
	}

	wg.Wait()
}
+
// hasString reports whether s appears in the list of strings.
func hasString(strings []string, s string) bool {
	for i := range strings {
		if strings[i] == s {
			return true
		}
	}
	return false
}
+
// build is the action for building a single package or command.
// It compiles Go sources (optionally after SWIG/cgo/cover
// preprocessing), compiles C/assembly files, packs everything into
// an archive, and links an executable when a.link is set.
func (b *builder) build(a *action) (err error) {
	// Return an error if the package has CXX files but it's not using
	// cgo nor SWIG, since the CXX files can only be processed by cgo
	// and SWIG.
	if len(a.p.CXXFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() {
		return fmt.Errorf("can't build package %s because it contains C++ files (%s) but it's not using cgo nor SWIG",
			a.p.ImportPath, strings.Join(a.p.CXXFiles, ","))
	}
	// Same as above for Objective-C files
	if len(a.p.MFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() {
		return fmt.Errorf("can't build package %s because it contains Objective-C files (%s) but it's not using cgo nor SWIG",
			a.p.ImportPath, strings.Join(a.p.MFiles, ","))
	}
	// Wrap any error (other than the already-printed sentinel) with the
	// package's import path for context.
	defer func() {
		if err != nil && err != errPrintedOutput {
			err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err)
		}
	}()
	if buildN {
		// In -n mode, print a banner between packages.
		// The banner is five lines so that when changes to
		// different sections of the bootstrap script have to
		// be merged, the banners give patch something
		// to use to find its context.
		b.print("\n#\n# " + a.p.ImportPath + "\n#\n\n")
	}

	if buildV {
		b.print(a.p.ImportPath + "\n")
	}

	// Make build directory.
	obj := a.objdir
	if err := b.mkdir(obj); err != nil {
		return err
	}

	// make target directory
	dir, _ := filepath.Split(a.target)
	if dir != "" {
		if err := b.mkdir(dir); err != nil {
			return err
		}
	}

	var gofiles, cgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string

	gofiles = append(gofiles, a.p.GoFiles...)
	cgofiles = append(cgofiles, a.p.CgoFiles...)
	cfiles = append(cfiles, a.p.CFiles...)
	sfiles = append(sfiles, a.p.SFiles...)
	cxxfiles = append(cxxfiles, a.p.CXXFiles...)

	// Packages using cgo or SWIG may need compiler/linker flags
	// from pkg-config.
	if a.p.usesCgo() || a.p.usesSwig() {
		if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.p); err != nil {
			return
		}
	}

	// Run SWIG on each .swig and .swigcxx file.
	// Each run will generate two files, a .go file and a .c or .cxx file.
	// The .go file will use import "C" and is to be processed by cgo.
	if a.p.usesSwig() {
		outGo, outC, outCXX, err := b.swig(a.p, obj, pcCFLAGS)
		if err != nil {
			return err
		}
		cgofiles = append(cgofiles, outGo...)
		cfiles = append(cfiles, outC...)
		cxxfiles = append(cxxfiles, outCXX...)
	}

	// Run cgo.
	if a.p.usesCgo() || a.p.usesSwig() {
		// In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
		// There is one exception: runtime/cgo's job is to bridge the
		// cgo and non-cgo worlds, so it necessarily has files in both.
		// In that case gcc only gets the gcc_* files.
		var gccfiles []string
		if a.p.Standard && a.p.ImportPath == "runtime/cgo" {
			filter := func(files, nongcc, gcc []string) ([]string, []string) {
				for _, f := range files {
					if strings.HasPrefix(f, "gcc_") {
						gcc = append(gcc, f)
					} else {
						nongcc = append(nongcc, f)
					}
				}
				return nongcc, gcc
			}
			// files[:0] reuses the existing backing array while splitting.
			cfiles, gccfiles = filter(cfiles, cfiles[:0], gccfiles)
			sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
		} else {
			gccfiles = append(cfiles, sfiles...)
			cfiles = nil
			sfiles = nil
		}

		// Prefer a freshly built cgo tool from the action graph when
		// available (e.g. while bootstrapping).
		cgoExe := tool("cgo")
		if a.cgo != nil && a.cgo.target != "" {
			cgoExe = a.cgo.target
		}
		outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles)
		if err != nil {
			return err
		}
		cgoObjects = append(cgoObjects, outObj...)
		gofiles = append(gofiles, outGo...)
	}

	if len(gofiles) == 0 {
		return &build.NoGoError{Dir: a.p.Dir}
	}

	// If we're doing coverage, preprocess the .go files and put them in the work directory
	if a.p.coverMode != "" {
		for i, file := range gofiles {
			var sourceFile string
			var coverFile string
			var key string
			if strings.HasSuffix(file, ".cgo1.go") {
				// cgo files have absolute paths
				base := filepath.Base(file)
				sourceFile = file
				coverFile = filepath.Join(obj, base)
				key = strings.TrimSuffix(base, ".cgo1.go") + ".go"
			} else {
				sourceFile = filepath.Join(a.p.Dir, file)
				coverFile = filepath.Join(obj, file)
				key = file
			}
			cover := a.p.coverVars[key]
			if cover == nil || isTestFile(file) {
				// Not covering this file.
				continue
			}
			if err := b.cover(a, coverFile, sourceFile, 0666, cover.Var); err != nil {
				return err
			}
			// Compile the annotated copy instead of the original.
			gofiles[i] = coverFile
		}
	}

	// Prepare Go import path list.
	inc := b.includeArgs("-I", allArchiveActions(a))

	// Compile Go.
	ofile, out, err := buildToolchain.gc(b, a.p, a.objpkg, obj, len(sfiles) > 0, inc, gofiles)
	if len(out) > 0 {
		b.showOutput(a.p.Dir, a.p.ImportPath, b.processOutput(out))
		if err != nil {
			return errPrintedOutput
		}
	}
	if err != nil {
		return err
	}
	if ofile != a.objpkg {
		objects = append(objects, ofile)
	}

	// Copy .h files named for goos or goarch or goos_goarch
	// to names using GOOS and GOARCH.
	// For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
	_goos_goarch := "_" + goos + "_" + goarch
	_goos := "_" + goos
	_goarch := "_" + goarch
	for _, file := range a.p.HFiles {
		name, ext := fileExtSplit(file)
		switch {
		case strings.HasSuffix(name, _goos_goarch):
			targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext
			if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
				return err
			}
		case strings.HasSuffix(name, _goarch):
			targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext
			if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
				return err
			}
		case strings.HasSuffix(name, _goos):
			targ := file[:len(name)-len(_goos)] + "_GOOS." + ext
			if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
				return err
			}
		}
	}

	// Compile C files with the toolchain's C compiler.
	for _, file := range cfiles {
		out := file[:len(file)-len(".c")] + ".o"
		if err := buildToolchain.cc(b, a.p, obj, obj+out, file); err != nil {
			return err
		}
		objects = append(objects, out)
	}

	// Assemble .s files.
	for _, file := range sfiles {
		out := file[:len(file)-len(".s")] + ".o"
		if err := buildToolchain.asm(b, a.p, obj, obj+out, file); err != nil {
			return err
		}
		objects = append(objects, out)
	}

	// NOTE(rsc): On Windows, it is critically important that the
	// gcc-compiled objects (cgoObjects) be listed after the ordinary
	// objects in the archive. I do not know why this is.
	// https://golang.org/issue/2601
	objects = append(objects, cgoObjects...)

	// Add system object files.
	for _, syso := range a.p.SysoFiles {
		objects = append(objects, filepath.Join(a.p.Dir, syso))
	}

	// Pack into archive in obj directory.
	// If the Go compiler wrote an archive, we only need to add the
	// object files for non-Go sources to the archive.
	// If the Go compiler wrote an archive and the package is entirely
	// Go sources, there is no pack to execute at all.
	if len(objects) > 0 {
		if err := buildToolchain.pack(b, a.p, obj, a.objpkg, objects); err != nil {
			return err
		}
	}

	// Link if needed.
	if a.link {
		// The compiler only cares about direct imports, but the
		// linker needs the whole dependency tree.
		all := actionList(a)
		all = all[:len(all)-1] // drop a
		if err := buildToolchain.ld(b, a, a.target, all, a.objpkg, objects); err != nil {
			return err
		}
	}

	return nil
}
+
// getPkgConfigFlags calls pkg-config if needed and returns the
// cflags/ldflags needed to build the package p.
// On pkg-config failure it shows the tool's output and returns the
// errPrintedOutput sentinel so callers do not re-report the error.
func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) {
	if pkgs := p.CgoPkgConfig; len(pkgs) > 0 {
		var out []byte
		out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs)
		if err != nil {
			b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out))
			b.print(err.Error() + "\n")
			err = errPrintedOutput
			return
		}
		if len(out) > 0 {
			// Whitespace-separated flags, e.g. "-I/usr/include/foo".
			cflags = strings.Fields(string(out))
		}
		out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs)
		if err != nil {
			b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out))
			b.print(err.Error() + "\n")
			err = errPrintedOutput
			return
		}
		if len(out) > 0 {
			ldflags = strings.Fields(string(out))
		}
	}
	return
}
+
// installShlibname writes a .shlibname file (a.target) containing the
// base name of the shared library built by the dependency action,
// recording which shared library provides the package.
func (b *builder) installShlibname(a *action) error {
	a1 := a.deps[0]
	err := ioutil.WriteFile(a.target, []byte(filepath.Base(a1.target)+"\n"), 0666)
	if err != nil {
		return err
	}
	if buildX {
		// With -x, show a shell equivalent of the write above.
		b.showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.target), a.target)
	}
	return nil
}
+
// linkShared is the action for linking the shared library a.target
// from the packages built by a.deps.
func (b *builder) linkShared(a *action) (err error) {
	allactions := actionList(a)
	// actionList is post-order, so the final element is a itself;
	// drop it, since it is not an input to the link.
	allactions = allactions[:len(allactions)-1]
	return buildToolchain.ldShared(b, a.deps, a.target, allactions)
}
+
// install is the action for installing a single package or executable:
// it moves (or copies) the result built by a.deps[0] to a.target.
func (b *builder) install(a *action) (err error) {
	// Wrap errors (other than the already-printed sentinel) with context.
	defer func() {
		if err != nil && err != errPrintedOutput {
			err = fmt.Errorf("go install %s: %v", a.p.ImportPath, err)
		}
	}()
	a1 := a.deps[0]
	perm := os.FileMode(0666)
	if a1.link {
		// Linked outputs get the execute bit, except for the
		// c-archive/c-shared build modes, whose outputs are libraries.
		switch buildBuildmode {
		case "c-archive", "c-shared":
		default:
			perm = 0777
		}
	}

	// make target directory
	dir, _ := filepath.Split(a.target)
	if dir != "" {
		if err := b.mkdir(dir); err != nil {
			return err
		}
	}

	// remove object dir to keep the amount of
	// garbage down in a large build. On an operating system
	// with aggressive buffering, cleaning incrementally like
	// this keeps the intermediate objects from hitting the disk.
	if !buildWork {
		defer os.RemoveAll(a1.objdir)
		defer os.Remove(a1.target)
	}

	return b.moveOrCopyFile(a, a.target, a1.target, perm, false)
}
+
// includeArgs returns the -I or -L directory list for access
// to the results of the list of actions.
// flag is the compiler/linker flag to emit before each directory.
func (b *builder) includeArgs(flag string, all []*action) []string {
	inc := []string{}
	// incMap records directories already emitted (or deliberately
	// excluded) so each appears at most once.
	incMap := map[string]bool{
		b.work:    true, // handled later
		gorootPkg: true,
		"":        true, // ignore empty strings
	}

	// Look in the temporary space for results of test-specific actions.
	// This is the $WORK/my/package/_test directory for the
	// package being built, so there are few of these.
	for _, a1 := range all {
		if a1.p == nil {
			continue
		}
		if dir := a1.pkgdir; dir != a1.p.build.PkgRoot && !incMap[dir] {
			incMap[dir] = true
			inc = append(inc, flag, dir)
		}
	}

	// Also look in $WORK for any non-test packages that have
	// been built but not installed.
	inc = append(inc, flag, b.work)

	// Finally, look in the installed package directories for each action.
	// First add the package dirs corresponding to GOPATH entries
	// in the original GOPATH order.
	need := map[string]*build.Package{}
	for _, a1 := range all {
		if a1.p != nil && a1.pkgdir == a1.p.build.PkgRoot {
			need[a1.p.build.Root] = a1.p.build
		}
	}
	for _, root := range gopath {
		if p := need[root]; p != nil && !incMap[p.PkgRoot] {
			incMap[p.PkgRoot] = true
			inc = append(inc, flag, p.PkgTargetRoot)
		}
	}

	// Then add anything that's left.
	for _, a1 := range all {
		if a1.p == nil {
			continue
		}
		if dir := a1.pkgdir; dir == a1.p.build.PkgRoot && !incMap[dir] {
			incMap[dir] = true
			inc = append(inc, flag, a1.p.build.PkgTargetRoot)
		}
	}

	return inc
}
+
// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
// It prefers a cheap rename (after fixing src's mode) and falls back
// to copyFile when the rename is not possible (e.g. across devices).
func (b *builder) moveOrCopyFile(a *action, dst, src string, perm os.FileMode, force bool) error {
	if buildN {
		b.showcmd("", "mv %s %s", src, dst)
		return nil
	}

	// If we can update the mode and rename to the dst, do it.
	// Otherwise fall back to standard copy.

	// The perm argument is meant to be adjusted according to umask,
	// but we don't know what the umask is.
	// Create a dummy file to find out.
	// This avoids build tags and works even on systems like Plan 9
	// where the file mask computation incorporates other information.
	mode := perm
	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
	if err == nil {
		fi, err := f.Stat()
		if err == nil {
			mode = fi.Mode() & 0777
		}
		name := f.Name()
		f.Close()
		os.Remove(name)
	}

	if err := os.Chmod(src, mode); err == nil {
		if err := os.Rename(src, dst); err == nil {
			if buildX {
				b.showcmd("", "mv %s %s", src, dst)
			}
			return nil
		}
	}

	return b.copyFile(a, dst, src, perm, force)
}
+
// copyFile is like 'cp src dst'.
// Unless force is set, it refuses to overwrite an existing dst that is
// a directory or a regular file that does not look like an object file,
// to avoid clobbering unrelated user files.
func (b *builder) copyFile(a *action, dst, src string, perm os.FileMode, force bool) error {
	if buildN || buildX {
		b.showcmd("", "cp %s %s", src, dst)
		if buildN {
			return nil
		}
	}

	sf, err := os.Open(src)
	if err != nil {
		return err
	}
	defer sf.Close()

	// Be careful about removing/overwriting dst.
	// Do not remove/overwrite if dst exists and is a directory
	// or a non-object file.
	if fi, err := os.Stat(dst); err == nil {
		if fi.IsDir() {
			return fmt.Errorf("build output %q already exists and is a directory", dst)
		}
		if !force && fi.Mode().IsRegular() && !isObject(dst) {
			return fmt.Errorf("build output %q already exists and is not an object file", dst)
		}
	}

	// On Windows, remove lingering ~ file from last attempt.
	if toolIsWindows {
		if _, err := os.Stat(dst + "~"); err == nil {
			os.Remove(dst + "~")
		}
	}

	mayberemovefile(dst)
	df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	if err != nil && toolIsWindows {
		// Windows does not allow deletion of a binary file
		// while it is executing. Try to move it out of the way.
		// If the move fails, which is likely, we'll try again the
		// next time we do an install of this binary.
		if err := os.Rename(dst, dst+"~"); err == nil {
			os.Remove(dst + "~")
		}
		df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
	}
	if err != nil {
		return err
	}

	_, err = io.Copy(df, sf)
	df.Close()
	if err != nil {
		// Do not leave a partially written dst behind.
		mayberemovefile(dst)
		return fmt.Errorf("copying %s to %s: %v", src, dst, err)
	}
	return nil
}
+
// installHeader installs the cgo export header file
// (_cgo_install.h from the object directory) to a.target,
// if there is one.
func (b *builder) installHeader(a *action) error {
	src := a.objdir + "_cgo_install.h"
	if _, err := os.Stat(src); os.IsNotExist(err) {
		// If the file does not exist, there are no exported
		// functions, and we do not install anything.
		return nil
	}

	// make target directory
	dir, _ := filepath.Split(a.target)
	if dir != "" {
		if err := b.mkdir(dir); err != nil {
			return err
		}
	}

	return b.moveOrCopyFile(a, a.target, src, 0666, true)
}
+
// cover runs, in effect,
//	go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
// NOTE(review): perm is currently unused here (the cover tool creates
// dst itself); it is kept for interface compatibility with callers.
func (b *builder) cover(a *action, dst, src string, perm os.FileMode, varName string) error {
	return b.run(a.objdir, "cover "+a.p.ImportPath, nil,
		buildToolExec,
		tool("cover"),
		"-mode", a.p.coverMode,
		"-var", varName,
		"-o", dst,
		src)
}
+
// objectMagic lists the leading byte sequences ("magic numbers") that
// identify the object and executable file formats recognized by
// isObject.
var objectMagic = [][]byte{
	{'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive
	{'\x7F', 'E', 'L', 'F'},                   // ELF
	{0xFE, 0xED, 0xFA, 0xCE},                  // Mach-O big-endian 32-bit
	{0xFE, 0xED, 0xFA, 0xCF},                  // Mach-O big-endian 64-bit
	{0xCE, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 32-bit
	{0xCF, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 64-bit
	{0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00},      // PE (Windows) as generated by 6l/8l and gcc
	{0x00, 0x00, 0x01, 0xEB},                  // Plan 9 i386
	{0x00, 0x00, 0x8a, 0x97},                  // Plan 9 amd64
	{0x00, 0x00, 0x06, 0x47},                  // Plan 9 arm
}
+
// isObject reports whether the file s begins with one of the known
// object-file magic numbers. A file that cannot be opened is treated
// as not an object.
func isObject(s string) bool {
	f, err := os.Open(s)
	if err != nil {
		return false
	}
	defer f.Close()
	buf := make([]byte, 64)
	// A short read is fine: unread bytes stay zero and simply fail
	// the prefix checks below, so the error is deliberately ignored.
	io.ReadFull(f, buf)
	for _, magic := range objectMagic {
		if bytes.HasPrefix(buf, magic) {
			return true
		}
	}
	return false
}
+
// mayberemovefile removes a file only if it is a regular file.
// When running as a user with sufficient privileges, we may delete
// even device files, for example, which is not intended.
func mayberemovefile(s string) {
	fi, err := os.Lstat(s)
	if err == nil && !fi.Mode().IsRegular() {
		// Not a regular file (directory, device, symlink, ...):
		// leave it alone.
		return
	}
	os.Remove(s)
}
+
// fmtcmd formats a command in the manner of fmt.Sprintf but also:
//
//	If dir is non-empty and the script is not in dir right now,
//	fmtcmd inserts "cd dir\n" before the command.
//
//	fmtcmd replaces the value of b.work with $WORK.
//	fmtcmd replaces the value of goroot with $GOROOT.
//	fmtcmd replaces the value of b.gobin with $GOBIN.
//
//	fmtcmd replaces the name of the current directory with dot (.)
//	but only when it is at the beginning of a space-separated token.
//
func (b *builder) fmtcmd(dir string, format string, args ...interface{}) string {
	cmd := fmt.Sprintf(format, args...)
	if dir != "" && dir != "/" {
		// The leading space is added so dir only matches at token
		// starts; it is stripped again with [1:].
		cmd = strings.Replace(" "+cmd, " "+dir, " .", -1)[1:]
		if b.scriptDir != dir {
			// Track the script's notional working directory so "cd"
			// lines are emitted only when it changes.
			b.scriptDir = dir
			cmd = "cd " + dir + "\n" + cmd
		}
	}
	if b.work != "" {
		cmd = strings.Replace(cmd, b.work, "$WORK", -1)
	}
	return cmd
}
+
// showcmd prints the given command to standard output
// for the implementation of -n or -x.
// Output is serialized through b.output so parallel actions do not
// interleave their command lines.
func (b *builder) showcmd(dir string, format string, args ...interface{}) {
	b.output.Lock()
	defer b.output.Unlock()
	b.print(b.fmtcmd(dir, format, args...) + "\n")
}
+
// showOutput prints "# desc" followed by the given output.
// The output is expected to contain references to 'dir', usually
// the source directory for the package that has failed to build.
// showOutput rewrites mentions of dir with a relative path to dir
// when the relative path is shorter. This is usually more pleasant.
// For example, if fmt doesn't compile and we are in src/html,
// the output is
//
//	$ go build
//	# fmt
//	../fmt/print.go:1090: undefined: asdf
//	$
//
// instead of
//
//	$ go build
//	# fmt
//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
//	$
//
// showOutput also replaces references to the work directory with $WORK.
//
func (b *builder) showOutput(dir, desc, out string) {
	prefix := "# " + desc
	suffix := "\n" + out
	if reldir := shortPath(dir); reldir != dir {
		// Replace dir only at token starts (after a space or newline)
		// to avoid corrupting longer paths that merely contain it.
		suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1)
		suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1)
	}
	suffix = strings.Replace(suffix, " "+b.work, " $WORK", -1)

	// Serialize with other output so parallel actions do not interleave.
	b.output.Lock()
	defer b.output.Unlock()
	b.print(prefix, suffix)
}
+
// shortPath returns an absolute or relative name for path, whatever is shorter.
// The relative form is computed against the package-level cwd.
func shortPath(path string) string {
	if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
		return rel
	}
	return path
}
+
// relPaths returns a copy of paths with absolute paths
// made relative to the current directory if they would be shorter.
func relPaths(paths []string) []string {
	cur, _ := os.Getwd()
	var shortened []string
	for _, p := range paths {
		// Prefer the relative form only when it is strictly shorter;
		// on Rel failure (e.g. non-absolute input) keep p unchanged.
		if rel, err := filepath.Rel(cur, p); err == nil && len(rel) < len(p) {
			shortened = append(shortened, rel)
		} else {
			shortened = append(shortened, p)
		}
	}
	return shortened
}
+
// errPrintedOutput is a special error indicating that a command failed
// but that it generated output as well, and that output has already
// been printed, so there's no point showing 'exit status 1' or whatever
// the wait status was. The main executor, builder.do, knows not to
// print this error.
var errPrintedOutput = errors.New("already printed output - no need to show error")

// cgoLine matches the bracketed cgo file/line annotations embedded in
// compiler diagnostics, e.g. "[/tmp/.../x.cgo1.go:18]".
var cgoLine = regexp.MustCompile(`\[[^\[\]]+\.cgo1\.go:[0-9]+\]`)

// cgoTypeSigRe matches the "_Ctype_" prefix of cgo-mangled type names
// so processOutput can rewrite it to the friendlier "C." form.
var cgoTypeSigRe = regexp.MustCompile(`\b_Ctype_\B`)
+
// run runs the command given by cmdline in the directory dir.
// If the command fails, run prints information about the failure
// and returns a non-nil error.
func (b *builder) run(dir string, desc string, env []string, cmdargs ...interface{}) error {
	out, err := b.runOut(dir, desc, env, cmdargs...)
	if len(out) > 0 {
		if desc == "" {
			// Fall back to the command line itself as the description.
			desc = b.fmtcmd(dir, "%s", strings.Join(stringList(cmdargs...), " "))
		}
		b.showOutput(dir, desc, b.processOutput(out))
		if err != nil {
			// The output has been shown; suppress the raw wait status.
			err = errPrintedOutput
		}
	}
	return err
}
+
+// processOutput prepares the output of runOut to be output to the console.
+func (b *builder) processOutput(out []byte) string {
+ if out[len(out)-1] != '\n' {
+ out = append(out, '\n')
+ }
+ messages := string(out)
+ // Fix up output referring to cgo-generated code to be more readable.
+ // Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
+ // Replace *[100]_Ctype_foo with *[100]C.foo.
+ // If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
+ if !buildX && cgoLine.MatchString(messages) {
+ messages = cgoLine.ReplaceAllString(messages, "")
+ messages = cgoTypeSigRe.ReplaceAllString(messages, "C.")
+ }
+ return messages
+}
+
// runOut runs the command given by cmdline in the directory dir.
// It returns the command output (stdout and stderr combined) and any
// errors that occurred. In -n mode it only prints the command.
func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...interface{}) ([]byte, error) {
	cmdline := stringList(cmdargs...)
	if buildN || buildX {
		// Show the command, prefixed by any environment overrides.
		var envcmdline string
		for i := range env {
			envcmdline += env[i]
			envcmdline += " "
		}
		envcmdline += joinUnambiguously(cmdline)
		b.showcmd(dir, "%s", envcmdline)
		if buildN {
			return nil, nil
		}
	}

	// nbusy counts "text file busy" retries; see the comment below.
	nbusy := 0
	for {
		var buf bytes.Buffer
		cmd := exec.Command(cmdline[0], cmdline[1:]...)
		cmd.Stdout = &buf
		cmd.Stderr = &buf
		cmd.Dir = dir
		cmd.Env = mergeEnvLists(env, envForDir(cmd.Dir, os.Environ()))
		err := cmd.Run()

		// cmd.Run will fail on Unix if some other process has the binary
		// we want to run open for writing. This can happen here because
		// we build and install the cgo command and then run it.
		// If another command was kicked off while we were writing the
		// cgo binary, the child process for that command may be holding
		// a reference to the fd, keeping us from running exec.
		//
		// But, you might reasonably wonder, how can this happen?
		// The cgo fd, like all our fds, is close-on-exec, so that we need
		// not worry about other processes inheriting the fd accidentally.
		// The answer is that running a command is fork and exec.
		// A child forked while the cgo fd is open inherits that fd.
		// Until the child has called exec, it holds the fd open and the
		// kernel will not let us run cgo. Even if the child were to close
		// the fd explicitly, it would still be open from the time of the fork
		// until the time of the explicit close, and the race would remain.
		//
		// On Unix systems, this results in ETXTBSY, which formats
		// as "text file busy". Rather than hard-code specific error cases,
		// we just look for that string. If this happens, sleep a little
		// and try again. We let this happen three times, with increasing
		// sleep lengths: 100+200+400 ms = 0.7 seconds.
		//
		// An alternate solution might be to split the cmd.Run into
		// separate cmd.Start and cmd.Wait, and then use an RWLock
		// to make sure that copyFile only executes when no cmd.Start
		// call is in progress. However, cmd.Start (really syscall.forkExec)
		// only guarantees that when it returns, the exec is committed to
		// happen and succeed. It uses a close-on-exec file descriptor
		// itself to determine this, so we know that when cmd.Start returns,
		// at least one close-on-exec file descriptor has been closed.
		// However, we cannot be sure that all of them have been closed,
		// so the program might still encounter ETXTBSY even with such
		// an RWLock. The race window would be smaller, perhaps, but not
		// guaranteed to be gone.
		//
		// Sleeping when we observe the race seems to be the most reliable
		// option we have.
		//
		// https://golang.org/issue/3001
		//
		if err != nil && nbusy < 3 && strings.Contains(err.Error(), "text file busy") {
			time.Sleep(100 * time.Millisecond << uint(nbusy))
			nbusy++
			continue
		}

		// err can be something like 'exit status 1'.
		// Add information about what program was running.
		// Note that if buf.Bytes() is non-empty, the caller usually
		// shows buf.Bytes() and does not print err at all, so the
		// prefix here does not make most output any more verbose.
		if err != nil {
			err = errors.New(cmdline[0] + ": " + err.Error())
		}
		return buf.Bytes(), err
	}
}
+
// joinUnambiguously prints the slice, quoting where necessary to make the
// output unambiguous.
// TODO: See issue 5279. The printing of commands needs a complete redo.
func joinUnambiguously(a []string) string {
	var out bytes.Buffer
	for i, arg := range a {
		if i != 0 {
			out.WriteByte(' ')
		}
		quoted := strconv.Quote(arg)
		// An argument can be printed bare only if it is non-empty,
		// contains no spaces, and quoting would add nothing beyond
		// the surrounding quote characters.
		bare := arg != "" && !strings.Contains(arg, " ") && len(quoted) <= len(arg)+2
		if bare {
			out.WriteString(arg)
		} else {
			out.WriteString(quoted)
		}
	}
	return out.String()
}
+
// mkdir makes the named directory (and any missing parents).
// Repeated calls for the same directory are cached; the cache (and
// this method) is serialized through b.exec.
func (b *builder) mkdir(dir string) error {
	b.exec.Lock()
	defer b.exec.Unlock()
	// We can be a little aggressive about being
	// sure directories exist. Skip repeated calls.
	if b.mkdirCache[dir] {
		return nil
	}
	b.mkdirCache[dir] = true

	if buildN || buildX {
		b.showcmd("", "mkdir -p %s", dir)
		if buildN {
			return nil
		}
	}

	if err := os.MkdirAll(dir, 0777); err != nil {
		return err
	}
	return nil
}
+
+// mkAbs returns an absolute path corresponding to
+// evaluating f in the directory dir.
+// We always pass absolute paths of source files so that
+// the error messages will include the full path to a file
+// in need of attention.
+func mkAbs(dir, f string) string {
+ // Leave absolute paths alone.
+ // Also, during -n mode we use the pseudo-directory $WORK
+ // instead of creating an actual work directory that won't be used.
+ // Leave paths beginning with $WORK alone too.
+ if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") {
+ return f
+ }
+ return filepath.Join(dir, f)
+}
+
+type toolchain interface {
+ // gc runs the compiler in a specific directory on a set of files
+ // and returns the name of the generated output file.
+ // The compiler runs in the directory dir.
+ gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error)
+ // cc runs the toolchain's C compiler in a directory on a C file
+ // to produce an output file.
+ cc(b *builder, p *Package, objdir, ofile, cfile string) error
+ // asm runs the assembler in a specific directory on a specific file
+ // to generate the named output file.
+ asm(b *builder, p *Package, obj, ofile, sfile string) error
+ // pkgpath builds an appropriate path for a temporary package file.
+ pkgpath(basedir string, p *Package) string
+ // pack runs the archive packer in a specific directory to create
+ // an archive from a set of object files.
+ // Typically it is run in the object directory.
+ pack(b *builder, p *Package, objDir, afile string, ofiles []string) error
+ // ld runs the linker to create an executable starting at mainpkg.
+ ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error
+ // ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions
+ ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error
+
+ compiler() string
+ linker() string
+}
+
+type noToolchain struct{}
+
+func noCompiler() error {
+ log.Fatalf("unknown compiler %q", buildContext.Compiler)
+ return nil
+}
+
+func (noToolchain) compiler() string {
+ noCompiler()
+ return ""
+}
+
+func (noToolchain) linker() string {
+ noCompiler()
+ return ""
+}
+
+func (noToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) {
+ return "", nil, noCompiler()
+}
+
+func (noToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+ return noCompiler()
+}
+
+func (noToolchain) pkgpath(basedir string, p *Package) string {
+ noCompiler()
+ return ""
+}
+
+func (noToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+ return noCompiler()
+}
+
+func (noToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+ return noCompiler()
+}
+
+func (noToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+ return noCompiler()
+}
+
+func (noToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+ return noCompiler()
+}
+
+// The Go toolchain.
+type gcToolchain struct{}
+
+func (gcToolchain) compiler() string {
+ return tool("compile")
+}
+
+func (gcToolchain) linker() string {
+ return tool("link")
+}
+
+func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+ if archive != "" {
+ ofile = archive
+ } else {
+ out := "_go_.o"
+ ofile = obj + out
+ }
+
+ gcargs := []string{"-p", p.ImportPath}
+ if p.Name == "main" {
+ gcargs[1] = "main"
+ }
+ if p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
+ // runtime compiles with a special gc flag to emit
+ // additional reflect type data.
+ gcargs = append(gcargs, "-+")
+ }
+
+ // If we're giving the compiler the entire package (no C etc files), tell it that,
+ // so that it can give good error messages about forward declarations.
+ // Exceptions: a few standard packages have forward declarations for
+ // pieces supplied behind-the-scenes by package runtime.
+ extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
+ if p.Standard {
+ switch p.ImportPath {
+ case "bytes", "net", "os", "runtime/pprof", "sync", "time":
+ extFiles++
+ }
+ }
+ if extFiles == 0 {
+ gcargs = append(gcargs, "-complete")
+ }
+ if buildContext.InstallSuffix != "" {
+ gcargs = append(gcargs, "-installsuffix", buildContext.InstallSuffix)
+ }
+ if p.buildID != "" {
+ gcargs = append(gcargs, "-buildid", p.buildID)
+ }
+
+ for _, path := range p.Imports {
+ if i := strings.LastIndex(path, "/vendor/"); i >= 0 {
+ gcargs = append(gcargs, "-importmap", path[i+len("/vendor/"):]+"="+path)
+ } else if strings.HasPrefix(path, "vendor/") {
+ gcargs = append(gcargs, "-importmap", path[len("vendor/"):]+"="+path)
+ }
+ }
+
+ args := []interface{}{buildToolExec, tool("compile"), "-o", ofile, "-trimpath", b.work, buildGcflags, gcargs, "-D", p.localPrefix, importArgs}
+ if ofile == archive {
+ args = append(args, "-pack")
+ }
+ if asmhdr {
+ args = append(args, "-asmhdr", obj+"go_asm.h")
+ }
+ for _, f := range gofiles {
+ args = append(args, mkAbs(p.Dir, f))
+ }
+
+ output, err = b.runOut(p.Dir, p.ImportPath, nil, args...)
+ return ofile, output, err
+}
+
+func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+ // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
+ inc := filepath.Join(goroot, "pkg", "include")
+ sfile = mkAbs(p.Dir, sfile)
+ args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile}
+ if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
+ return err
+ }
+ return nil
+}
+
+ // toolVerify checks that the command line args write the same output file
+// if run using newTool instead.
+// Unused now but kept around for future use.
+func toolVerify(b *builder, p *Package, newTool string, ofile string, args []interface{}) error {
+ newArgs := make([]interface{}, len(args))
+ copy(newArgs, args)
+ newArgs[1] = tool(newTool)
+ newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
+ if err := b.run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
+ return err
+ }
+ data1, err := ioutil.ReadFile(ofile)
+ if err != nil {
+ return err
+ }
+ data2, err := ioutil.ReadFile(ofile + ".new")
+ if err != nil {
+ return err
+ }
+ if !bytes.Equal(data1, data2) {
+ return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(stringList(args...), " "), strings.Join(stringList(newArgs...), " "))
+ }
+ os.Remove(ofile + ".new")
+ return nil
+}
+
+func (gcToolchain) pkgpath(basedir string, p *Package) string {
+ end := filepath.FromSlash(p.ImportPath + ".a")
+ return filepath.Join(basedir, end)
+}
+
+func (gcToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+ var absOfiles []string
+ for _, f := range ofiles {
+ absOfiles = append(absOfiles, mkAbs(objDir, f))
+ }
+ absAfile := mkAbs(objDir, afile)
+
+ // The archive file should have been created by the compiler.
+ // Since it used to not work that way, verify.
+ if _, err := os.Stat(absAfile); err != nil {
+ fatalf("os.Stat of archive file failed: %v", err)
+ }
+
+ if buildN || buildX {
+ cmdline := stringList("pack", "r", absAfile, absOfiles)
+ b.showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
+ }
+ if buildN {
+ return nil
+ }
+ if err := packInternal(b, absAfile, absOfiles); err != nil {
+ b.showOutput(p.Dir, p.ImportPath, err.Error()+"\n")
+ return errPrintedOutput
+ }
+ return nil
+}
+
+func packInternal(b *builder, afile string, ofiles []string) error {
+ dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0)
+ if err != nil {
+ return err
+ }
+ defer dst.Close() // only for error returns or panics
+ w := bufio.NewWriter(dst)
+
+ for _, ofile := range ofiles {
+ src, err := os.Open(ofile)
+ if err != nil {
+ return err
+ }
+ fi, err := src.Stat()
+ if err != nil {
+ src.Close()
+ return err
+ }
+ // Note: Not using %-16.16s format because we care
+ // about bytes, not runes.
+ name := fi.Name()
+ if len(name) > 16 {
+ name = name[:16]
+ } else {
+ name += strings.Repeat(" ", 16-len(name))
+ }
+ size := fi.Size()
+ fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n",
+ name, 0, 0, 0, 0644, size)
+ n, err := io.Copy(w, src)
+ src.Close()
+ if err == nil && n < size {
+ err = io.ErrUnexpectedEOF
+ } else if err == nil && n > size {
+ err = fmt.Errorf("file larger than size reported by stat")
+ }
+ if err != nil {
+ return fmt.Errorf("copying %s to %s: %v", ofile, afile, err)
+ }
+ if size&1 != 0 {
+ w.WriteByte(0)
+ }
+ }
+
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ return dst.Close()
+}
+
+// setextld sets the appropriate linker flags for the specified compiler.
+func setextld(ldflags []string, compiler []string) []string {
+ for _, f := range ldflags {
+ if f == "-extld" || strings.HasPrefix(f, "-extld=") {
+ // don't override -extld if supplied
+ return ldflags
+ }
+ }
+ ldflags = append(ldflags, "-extld="+compiler[0])
+ if len(compiler) > 1 {
+ extldflags := false
+ add := strings.Join(compiler[1:], " ")
+ for i, f := range ldflags {
+ if f == "-extldflags" && i+1 < len(ldflags) {
+ ldflags[i+1] = add + " " + ldflags[i+1]
+ extldflags = true
+ break
+ } else if strings.HasPrefix(f, "-extldflags=") {
+ ldflags[i] = "-extldflags=" + add + " " + ldflags[i][len("-extldflags="):]
+ extldflags = true
+ break
+ }
+ }
+ if !extldflags {
+ ldflags = append(ldflags, "-extldflags="+add)
+ }
+ }
+ return ldflags
+}
+
+func (gcToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+ importArgs := b.includeArgs("-L", allactions)
+ cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
+ for _, a := range allactions {
+ if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
+ cxx = true
+ }
+ }
+ var ldflags []string
+ if buildContext.InstallSuffix != "" {
+ ldflags = append(ldflags, "-installsuffix", buildContext.InstallSuffix)
+ }
+ if root.p.omitDWARF {
+ ldflags = append(ldflags, "-w")
+ }
+
+ // If the user has not specified the -extld option, then specify the
+ // appropriate linker. In case of C++ code, use the compiler named
+ // by the CXX environment variable or defaultCXX if CXX is not set.
+ // Else, use the CC environment variable and defaultCC as fallback.
+ var compiler []string
+ if cxx {
+ compiler = envList("CXX", defaultCXX)
+ } else {
+ compiler = envList("CC", defaultCC)
+ }
+ ldflags = setextld(ldflags, compiler)
+ ldflags = append(ldflags, "-buildmode="+ldBuildmode)
+ if root.p.buildID != "" {
+ ldflags = append(ldflags, "-buildid="+root.p.buildID)
+ }
+ ldflags = append(ldflags, buildLdflags...)
+
+ // On OS X when using external linking to build a shared library,
+ // the argument passed here to -o ends up recorded in the final
+ // shared library in the LC_ID_DYLIB load command.
+ // To avoid putting the temporary output directory name there
+ // (and making the resulting shared library useless),
+ // run the link in the output directory so that -o can name
+ // just the final path element.
+ dir := "."
+ if goos == "darwin" && buildBuildmode == "c-shared" {
+ dir, out = filepath.Split(out)
+ }
+
+ return b.run(dir, root.p.ImportPath, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags, mainpkg)
+}
+
+func (gcToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+ importArgs := b.includeArgs("-L", allactions)
+ ldflags := []string{"-installsuffix", buildContext.InstallSuffix}
+ ldflags = append(ldflags, "-buildmode=shared")
+ ldflags = append(ldflags, buildLdflags...)
+ cxx := false
+ for _, a := range allactions {
+ if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
+ cxx = true
+ }
+ }
+ // If the user has not specified the -extld option, then specify the
+ // appropriate linker. In case of C++ code, use the compiler named
+ // by the CXX environment variable or defaultCXX if CXX is not set.
+ // Else, use the CC environment variable and defaultCC as fallback.
+ var compiler []string
+ if cxx {
+ compiler = envList("CXX", defaultCXX)
+ } else {
+ compiler = envList("CC", defaultCC)
+ }
+ ldflags = setextld(ldflags, compiler)
+ for _, d := range toplevelactions {
+ if !strings.HasSuffix(d.target, ".a") { // omit unsafe etc and actions for other shared libraries
+ continue
+ }
+ ldflags = append(ldflags, d.p.ImportPath+"="+d.target)
+ }
+ return b.run(".", out, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags)
+}
+
+func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+ return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(p.Dir, cfile))
+}
+
+// The Gccgo toolchain.
+type gccgoToolchain struct{}
+
+var gccgoName, gccgoBin string
+
+func init() {
+ gccgoName = os.Getenv("GCCGO")
+ if gccgoName == "" {
+ gccgoName = "gccgo"
+ }
+ gccgoBin, _ = exec.LookPath(gccgoName)
+}
+
+func (gccgoToolchain) compiler() string {
+ return gccgoBin
+}
+
+func (gccgoToolchain) linker() string {
+ return gccgoBin
+}
+
+func (tools gccgoToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+ out := "_go_.o"
+ ofile = obj + out
+ gcargs := []string{"-g"}
+ gcargs = append(gcargs, b.gccArchArgs()...)
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath)
+ }
+ if p.localPrefix != "" {
+ gcargs = append(gcargs, "-fgo-relative-import-path="+p.localPrefix)
+ }
+ args := stringList(tools.compiler(), importArgs, "-c", gcargs, "-o", ofile, buildGccgoflags)
+ for _, f := range gofiles {
+ args = append(args, mkAbs(p.Dir, f))
+ }
+
+ output, err = b.runOut(p.Dir, p.ImportPath, nil, args)
+ return ofile, output, err
+}
+
+func (tools gccgoToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+ sfile = mkAbs(p.Dir, sfile)
+ defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
+ if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
+ defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath)
+ }
+ defs = tools.maybePIC(defs)
+ defs = append(defs, b.gccArchArgs()...)
+ return b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", obj, "-c", "-o", ofile, defs, sfile)
+}
+
+func (gccgoToolchain) pkgpath(basedir string, p *Package) string {
+ end := filepath.FromSlash(p.ImportPath + ".a")
+ afile := filepath.Join(basedir, end)
+ // add "lib" to the final element
+ return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile))
+}
+
+func (gccgoToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+ var absOfiles []string
+ for _, f := range ofiles {
+ absOfiles = append(absOfiles, mkAbs(objDir, f))
+ }
+ return b.run(p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objDir, afile), absOfiles)
+}
+
+func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+ // gccgo needs explicit linking with all package dependencies,
+ // and all LDFLAGS from cgo dependencies.
+ apackagesSeen := make(map[*Package]bool)
+ afiles := []string{}
+ shlibs := []string{}
+ xfiles := []string{}
+ ldflags := b.gccArchArgs()
+ cgoldflags := []string{}
+ usesCgo := false
+ cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
+ objc := len(root.p.MFiles) > 0
+
+ actionsSeen := make(map[*action]bool)
+ // Make a pre-order depth-first traversal of the action graph, taking note of
+ // whether a shared library action has been seen on the way to an action (the
+ // construction of the graph means that if any path to a node passes through
+ // a shared library action, they all do).
+ var walk func(a *action, seenShlib bool)
+ walk = func(a *action, seenShlib bool) {
+ if actionsSeen[a] {
+ return
+ }
+ actionsSeen[a] = true
+ if a.p != nil && !seenShlib {
+ if a.p.Standard {
+ return
+ }
+ // We record the target of the first time we see a .a file
+ // for a package to make sure that we prefer the 'install'
+ // rather than the 'build' location (which may not exist any
+ // more). We still need to traverse the dependencies of the
+ // build action though so saying
+ // if apackagesSeen[a.p] { return }
+ // doesn't work.
+ if !apackagesSeen[a.p] {
+ apackagesSeen[a.p] = true
+ if a.p.fake && a.p.external {
+ // External _tests, if present, must come before
+ // internal _tests. Store these on a separate list
+ // and place them at the head after this loop.
+ xfiles = append(xfiles, a.target)
+ } else if a.p.fake {
+ // move _test files to the top of the link order
+ afiles = append([]string{a.target}, afiles...)
+ } else {
+ afiles = append(afiles, a.target)
+ }
+ }
+ }
+ if strings.HasSuffix(a.target, ".so") {
+ shlibs = append(shlibs, a.target)
+ seenShlib = true
+ }
+ for _, a1 := range a.deps {
+ walk(a1, seenShlib)
+ }
+ }
+ for _, a1 := range root.deps {
+ walk(a1, false)
+ }
+ afiles = append(xfiles, afiles...)
+
+ for _, a := range allactions {
+ // Gather CgoLDFLAGS, but not from standard packages.
+ // The go tool can dig up runtime/cgo from GOROOT and
+ // think that it should use its CgoLDFLAGS, but gccgo
+ // doesn't use runtime/cgo.
+ if a.p == nil {
+ continue
+ }
+ if !a.p.Standard {
+ cgoldflags = append(cgoldflags, a.p.CgoLDFLAGS...)
+ }
+ if len(a.p.CgoFiles) > 0 {
+ usesCgo = true
+ }
+ if a.p.usesSwig() {
+ usesCgo = true
+ }
+ if len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0 {
+ cxx = true
+ }
+ if len(a.p.MFiles) > 0 {
+ objc = true
+ }
+ }
+
+ ldflags = append(ldflags, "-Wl,--whole-archive")
+ ldflags = append(ldflags, afiles...)
+ ldflags = append(ldflags, "-Wl,--no-whole-archive")
+
+ ldflags = append(ldflags, cgoldflags...)
+ ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
+ ldflags = append(ldflags, root.p.CgoLDFLAGS...)
+
+ ldflags = stringList("-Wl,-(", ldflags, "-Wl,-)")
+
+ for _, shlib := range shlibs {
+ ldflags = append(
+ ldflags,
+ "-L"+filepath.Dir(shlib),
+ "-Wl,-rpath="+filepath.Dir(shlib),
+ "-l"+strings.TrimSuffix(
+ strings.TrimPrefix(filepath.Base(shlib), "lib"),
+ ".so"))
+ }
+
+ var realOut string
+ switch ldBuildmode {
+ case "exe":
+ if usesCgo && goos == "linux" {
+ ldflags = append(ldflags, "-Wl,-E")
+ }
+
+ case "c-archive":
+ // Link the Go files into a single .o, and also link
+ // in -lgolibbegin.
+ //
+ // We need to use --whole-archive with -lgolibbegin
+ // because it doesn't define any symbols that will
+ // cause the contents to be pulled in; it's just
+ // initialization code.
+ //
+ // The user remains responsible for linking against
+ // -lgo -lpthread -lm in the final link. We can't use
+ // -r to pick them up because we can't combine
+ // split-stack and non-split-stack code in a single -r
+ // link, and libgo picks up non-split-stack code from
+ // libffi.
+ ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive")
+
+ if b.gccSupportsNoPie() {
+ ldflags = append(ldflags, "-no-pie")
+ }
+
+ // We are creating an object file, so we don't want a build ID.
+ ldflags = b.disableBuildID(ldflags)
+
+ realOut = out
+ out = out + ".o"
+
+ case "c-shared":
+ ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
+
+ default:
+ fatalf("-buildmode=%s not supported for gccgo", ldBuildmode)
+ }
+
+ switch ldBuildmode {
+ case "exe", "c-shared":
+ if cxx {
+ ldflags = append(ldflags, "-lstdc++")
+ }
+ if objc {
+ ldflags = append(ldflags, "-lobjc")
+ }
+ }
+
+ if err := b.run(".", root.p.ImportPath, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil {
+ return err
+ }
+
+ switch ldBuildmode {
+ case "c-archive":
+ if err := b.run(".", root.p.ImportPath, nil, "ar", "rc", realOut, out); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (tools gccgoToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+ args := []string{"-o", out, "-shared", "-nostdlib", "-zdefs", "-Wl,--whole-archive"}
+ for _, a := range toplevelactions {
+ args = append(args, a.target)
+ }
+ args = append(args, "-Wl,--no-whole-archive", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
+ shlibs := []string{}
+ for _, a := range allactions {
+ if strings.HasSuffix(a.target, ".so") {
+ shlibs = append(shlibs, a.target)
+ }
+ }
+ for _, shlib := range shlibs {
+ args = append(
+ args,
+ "-L"+filepath.Dir(shlib),
+ "-Wl,-rpath="+filepath.Dir(shlib),
+ "-l"+strings.TrimSuffix(
+ strings.TrimPrefix(filepath.Base(shlib), "lib"),
+ ".so"))
+ }
+ return b.run(".", out, nil, tools.linker(), args, buildGccgoflags)
+}
+
+func (tools gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+ inc := filepath.Join(goroot, "pkg", "include")
+ cfile = mkAbs(p.Dir, cfile)
+ defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
+ defs = append(defs, b.gccArchArgs()...)
+ if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
+ defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
+ }
+ switch goarch {
+ case "386", "amd64":
+ defs = append(defs, "-fsplit-stack")
+ }
+ defs = tools.maybePIC(defs)
+ return b.run(p.Dir, p.ImportPath, nil, envList("CC", defaultCC), "-Wall", "-g",
+ "-I", objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
+}
+
+// maybePIC adds -fPIC to the list of arguments if needed.
+func (tools gccgoToolchain) maybePIC(args []string) []string {
+ switch buildBuildmode {
+ case "c-shared", "shared":
+ args = append(args, "-fPIC")
+ }
+ return args
+}
+
+func gccgoPkgpath(p *Package) string {
+ if p.build.IsCommand() && !p.forceLibrary {
+ return ""
+ }
+ return p.ImportPath
+}
+
+func gccgoCleanPkgpath(p *Package) string {
+ clean := func(r rune) rune {
+ switch {
+ case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
+ '0' <= r && r <= '9':
+ return r
+ }
+ return '_'
+ }
+ return strings.Map(clean, gccgoPkgpath(p))
+}
+
+// gcc runs the gcc C compiler to create an object from a single C file.
+func (b *builder) gcc(p *Package, out string, flags []string, cfile string) error {
+ return b.ccompile(p, out, flags, cfile, b.gccCmd(p.Dir))
+}
+
+// gxx runs the g++ C++ compiler to create an object from a single C++ file.
+func (b *builder) gxx(p *Package, out string, flags []string, cxxfile string) error {
+ return b.ccompile(p, out, flags, cxxfile, b.gxxCmd(p.Dir))
+}
+
+// ccompile runs the given C or C++ compiler and creates an object from a single source file.
+func (b *builder) ccompile(p *Package, out string, flags []string, file string, compiler []string) error {
+ file = mkAbs(p.Dir, file)
+ return b.run(p.Dir, p.ImportPath, nil, compiler, flags, "-o", out, "-c", file)
+}
+
+// gccld runs the gcc linker to create an executable from a set of object files.
+func (b *builder) gccld(p *Package, out string, flags []string, obj []string) error {
+ var cmd []string
+ if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
+ cmd = b.gxxCmd(p.Dir)
+ } else {
+ cmd = b.gccCmd(p.Dir)
+ }
+ return b.run(p.Dir, p.ImportPath, nil, cmd, "-o", out, obj, flags)
+}
+
+// gccCmd returns a gcc command line prefix
+// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
+func (b *builder) gccCmd(objdir string) []string {
+ return b.ccompilerCmd("CC", defaultCC, objdir)
+}
+
+// gxxCmd returns a g++ command line prefix
+// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+func (b *builder) gxxCmd(objdir string) []string {
+ return b.ccompilerCmd("CXX", defaultCXX, objdir)
+}
+
+// ccompilerCmd returns a command line prefix for the given environment
+// variable and using the default command when the variable is empty.
+func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
+ // NOTE: env.go's mkEnv knows that the first three
+ // strings returned are "gcc", "-I", objdir (and cuts them off).
+
+ compiler := envList(envvar, defcmd)
+ a := []string{compiler[0], "-I", objdir}
+ a = append(a, compiler[1:]...)
+
+ // Definitely want -fPIC but on Windows gcc complains
+ // "-fPIC ignored for target (all code is position independent)"
+ if goos != "windows" {
+ a = append(a, "-fPIC")
+ }
+ a = append(a, b.gccArchArgs()...)
+ // gcc-4.5 and beyond require explicit "-pthread" flag
+ // for multithreading with pthread library.
+ if buildContext.CgoEnabled {
+ switch goos {
+ case "windows":
+ a = append(a, "-mthreads")
+ default:
+ a = append(a, "-pthread")
+ }
+ }
+
+ if strings.Contains(a[0], "clang") {
+ // disable ASCII art in clang errors, if possible
+ a = append(a, "-fno-caret-diagnostics")
+ // clang is too smart about command-line arguments
+ a = append(a, "-Qunused-arguments")
+ }
+
+ // disable word wrapping in error messages
+ a = append(a, "-fmessage-length=0")
+
+ // On OS X, some of the compilers behave as if -fno-common
+ // is always set, and the Mach-O linker in 6l/8l assumes this.
+ // See https://golang.org/issue/3253.
+ if goos == "darwin" {
+ a = append(a, "-fno-common")
+ }
+
+ return a
+}
+
+// On systems with PIE (position independent executables) enabled by default,
+// -no-pie must be passed when doing a partial link with -Wl,-r. But -no-pie is
+// not supported by all compilers.
+func (b *builder) gccSupportsNoPie() bool {
+ if goos != "linux" {
+ // On some BSD platforms, error messages from the
+ // compiler make it to the console despite cmd.Std*
+ // all being nil. As -no-pie is only required on linux
+ // systems so far, we only test there.
+ return false
+ }
+ src := filepath.Join(b.work, "trivial.c")
+ if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
+ return false
+ }
+ cmdArgs := b.gccCmd(b.work)
+ cmdArgs = append(cmdArgs, "-no-pie", "-c", "trivial.c")
+ if buildN || buildX {
+ b.showcmd(b.work, "%s", joinUnambiguously(cmdArgs))
+ if buildN {
+ return false
+ }
+ }
+ cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+ cmd.Dir = b.work
+ cmd.Env = envForDir(cmd.Dir, os.Environ())
+ out, err := cmd.CombinedOutput()
+ return err == nil && !bytes.Contains(out, []byte("unrecognized"))
+}
+
+// gccArchArgs returns arguments to pass to gcc based on the architecture.
+func (b *builder) gccArchArgs() []string {
+ switch goarch {
+ case "386":
+ return []string{"-m32"}
+ case "amd64", "amd64p32":
+ return []string{"-m64"}
+ case "arm":
+ return []string{"-marm"} // not thumb
+ }
+ return nil
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+func envList(key, def string) []string {
+ v := os.Getenv(key)
+ if v == "" {
+ v = def
+ }
+ return strings.Fields(v)
+}
+
+ // cflags returns the flags to use when invoking the C or C++ compilers, or cgo.
+func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
+ var defaults string
+ if def {
+ defaults = "-g -O2"
+ }
+
+ cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
+ cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
+ cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
+ ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
+ return
+}
+
+var cgoRe = regexp.MustCompile(`[/\\:]`)
+
+func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) {
+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoLDFLAGS := b.cflags(p, true)
+ _, cgoexeCFLAGS, _, _ := b.cflags(p, false)
+ cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
+ cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
+ // If we are compiling Objective-C code, then we need to link against libobjc
+ if len(mfiles) > 0 {
+ cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc")
+ }
+
+ if buildMSan && p.ImportPath != "runtime/cgo" {
+ cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
+ cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
+ }
+
+ // Allows including _cgo_export.h from .[ch] files in the package.
+ cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj)
+
+ // cgo
+ // TODO: CGO_FLAGS?
+ gofiles := []string{obj + "_cgo_gotypes.go"}
+ cfiles := []string{"_cgo_main.c", "_cgo_export.c"}
+ for _, fn := range cgofiles {
+ f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_")
+ gofiles = append(gofiles, obj+f+"cgo1.go")
+ cfiles = append(cfiles, f+"cgo2.c")
+ }
+ defunC := obj + "_cgo_defun.c"
+
+ cgoflags := []string{}
+ // TODO: make cgo not depend on $GOARCH?
+
+ if p.Standard && p.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+ }
+ if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo") {
+ cgoflags = append(cgoflags, "-import_syscall=false")
+ }
+
+ // Update $CGO_LDFLAGS with p.CgoLDFLAGS.
+ var cgoenv []string
+ if len(cgoLDFLAGS) > 0 {
+ flags := make([]string, len(cgoLDFLAGS))
+ for i, f := range cgoLDFLAGS {
+ flags[i] = strconv.Quote(f)
+ }
+ cgoenv = []string{"CGO_LDFLAGS=" + strings.Join(flags, " ")}
+ }
+
+ if _, ok := buildToolchain.(gccgoToolchain); ok {
+ switch goarch {
+ case "386", "amd64":
+ cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack")
+ }
+ cgoflags = append(cgoflags, "-gccgo")
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath)
+ }
+ }
+
+ switch buildBuildmode {
+ case "c-archive", "c-shared":
+ // Tell cgo that if there are any exported functions
+ // it should generate a header file that C code can
+ // #include.
+ cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h")
+ }
+
+ if err := b.run(p.Dir, p.ImportPath, cgoenv, buildToolExec, cgoExe, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoexeCFLAGS, cgofiles); err != nil {
+ return nil, nil, err
+ }
+ outGo = append(outGo, gofiles...)
+
+ // cc _cgo_defun.c
+ _, gccgo := buildToolchain.(gccgoToolchain)
+ if gccgo {
+ defunObj := obj + "_cgo_defun.o"
+ if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil {
+ return nil, nil, err
+ }
+ outObj = append(outObj, defunObj)
+ }
+
+ // gcc
+ var linkobj []string
+
+ var bareLDFLAGS []string
+ // When linking relocatable objects, various flags need to be
+ // filtered out as they are inapplicable and can cause some linkers
+ // to fail.
+ for i := 0; i < len(cgoLDFLAGS); i++ {
+ f := cgoLDFLAGS[i]
+ switch {
+ // skip "-lc" or "-l somelib"
+ case strings.HasPrefix(f, "-l"):
+ if f == "-l" {
+ i++
+ }
+ // skip "-framework X" on Darwin
+ case goos == "darwin" && f == "-framework":
+ i++
+ // skip "*.{dylib,so,dll}"
+ case strings.HasSuffix(f, ".dylib"),
+ strings.HasSuffix(f, ".so"),
+ strings.HasSuffix(f, ".dll"):
+ // Remove any -fsanitize=foo flags.
+ // Otherwise the compiler driver thinks that we are doing final link
+ // and links sanitizer runtime into the object file. But we are not doing
+ // the final link, we will link the resulting object file again. And
+ // so the program ends up with two copies of sanitizer runtime.
+ // See issue 8788 for details.
+ case strings.HasPrefix(f, "-fsanitize="):
+ continue
+ // runpath flags not applicable unless building a shared
+ // object or executable; see issue 12115 for details. This
+ // is necessary as Go currently does not offer a way to
+ // specify the set of LDFLAGS that only apply to shared
+ // objects.
+ case strings.HasPrefix(f, "-Wl,-rpath"):
+ if f == "-Wl,-rpath" || f == "-Wl,-rpath-link" {
+ // Skip following argument to -rpath* too.
+ i++
+ }
+ default:
+ bareLDFLAGS = append(bareLDFLAGS, f)
+ }
+ }
+
+ var staticLibs []string
+ if goos == "windows" {
+ // libmingw32 and libmingwex have some inter-dependencies,
+ // so must use linker groups.
+ staticLibs = []string{"-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group"}
+ }
+
+ cflags := stringList(cgoCPPFLAGS, cgoCFLAGS)
+ for _, cfile := range cfiles {
+ ofile := obj + cfile[:len(cfile)-1] + "o"
+ if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil {
+ return nil, nil, err
+ }
+ linkobj = append(linkobj, ofile)
+ if !strings.HasSuffix(ofile, "_cgo_main.o") {
+ outObj = append(outObj, ofile)
+ }
+ }
+
+ for _, file := range gccfiles {
+ ofile := obj + cgoRe.ReplaceAllString(file[:len(file)-1], "_") + "o"
+ if err := b.gcc(p, ofile, cflags, file); err != nil {
+ return nil, nil, err
+ }
+ linkobj = append(linkobj, ofile)
+ outObj = append(outObj, ofile)
+ }
+
+ cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS)
+ for _, file := range gxxfiles {
+ // Append .o to the file, just in case the pkg has file.c and file.cpp
+ ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
+ if err := b.gxx(p, ofile, cxxflags, file); err != nil {
+ return nil, nil, err
+ }
+ linkobj = append(linkobj, ofile)
+ outObj = append(outObj, ofile)
+ }
+
+ for _, file := range mfiles {
+ // Append .o to the file, just in case the pkg has file.c and file.m
+ ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
+ if err := b.gcc(p, ofile, cflags, file); err != nil {
+ return nil, nil, err
+ }
+ linkobj = append(linkobj, ofile)
+ outObj = append(outObj, ofile)
+ }
+
+ linkobj = append(linkobj, p.SysoFiles...)
+ dynobj := obj + "_cgo_.o"
+ pie := (goarch == "arm" && goos == "linux") || goos == "android"
+ if pie { // we need to use -pie for Linux/ARM to get accurate imported sym
+ cgoLDFLAGS = append(cgoLDFLAGS, "-pie")
+ }
+ if err := b.gccld(p, dynobj, cgoLDFLAGS, linkobj); err != nil {
+ return nil, nil, err
+ }
+ if pie { // but we don't need -pie for normal cgo programs
+ cgoLDFLAGS = cgoLDFLAGS[0 : len(cgoLDFLAGS)-1]
+ }
+
+ if _, ok := buildToolchain.(gccgoToolchain); ok {
+ // we don't use dynimport when using gccgo.
+ return outGo, outObj, nil
+ }
+
+ // cgo -dynimport
+ importGo := obj + "_cgo_import.go"
+ cgoflags = []string{}
+ if p.Standard && p.ImportPath == "runtime/cgo" {
+ cgoflags = append(cgoflags, "-dynlinker") // record path to dynamic linker
+ }
+ if err := b.run(p.Dir, p.ImportPath, nil, buildToolExec, cgoExe, "-objdir", obj, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags); err != nil {
+ return nil, nil, err
+ }
+ outGo = append(outGo, importGo)
+
+ ofile := obj + "_all.o"
+ var gccObjs, nonGccObjs []string
+ for _, f := range outObj {
+ if strings.HasSuffix(f, ".o") {
+ gccObjs = append(gccObjs, f)
+ } else {
+ nonGccObjs = append(nonGccObjs, f)
+ }
+ }
+ ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs)
+
+ if b.gccSupportsNoPie() {
+ ldflags = append(ldflags, "-no-pie")
+ }
+
+ // We are creating an object file, so we don't want a build ID.
+ ldflags = b.disableBuildID(ldflags)
+
+ if err := b.gccld(p, ofile, ldflags, gccObjs); err != nil {
+ return nil, nil, err
+ }
+
+ // NOTE(rsc): The importObj is a 5c/6c/8c object and on Windows
+ // must be processed before the gcc-generated objects.
+ // Put it first. https://golang.org/issue/2601
+ outObj = stringList(nonGccObjs, ofile)
+
+ return outGo, outObj, nil
+}
+
+// Run SWIG on all SWIG input files.
+// TODO: Don't build a shared library, once SWIG emits the necessary
+// pragmas for external linking.
+func (b *builder) swig(p *Package, obj string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+ if err := b.swigVersionCheck(); err != nil {
+ return nil, nil, nil, err
+ }
+
+ intgosize, err := b.swigIntSize(obj)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ for _, f := range p.SwigFiles {
+ goFile, cFile, err := b.swigOne(p, f, obj, pcCFLAGS, false, intgosize)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if goFile != "" {
+ outGo = append(outGo, goFile)
+ }
+ if cFile != "" {
+ outC = append(outC, cFile)
+ }
+ }
+ for _, f := range p.SwigCXXFiles {
+ goFile, cxxFile, err := b.swigOne(p, f, obj, pcCFLAGS, true, intgosize)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ if goFile != "" {
+ outGo = append(outGo, goFile)
+ }
+ if cxxFile != "" {
+ outCXX = append(outCXX, cxxFile)
+ }
+ }
+ return outGo, outC, outCXX, nil
+}
+
+// Make sure SWIG is new enough.
+var (
+ swigCheckOnce sync.Once
+ swigCheck error
+)
+
+func (b *builder) swigDoVersionCheck() error {
+ out, err := b.runOut("", "", nil, "swig", "-version")
+ if err != nil {
+ return err
+ }
+ re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`)
+ matches := re.FindSubmatch(out)
+ if matches == nil {
+ // Can't find version number; hope for the best.
+ return nil
+ }
+
+ major, err := strconv.Atoi(string(matches[1]))
+ if err != nil {
+ // Can't find version number; hope for the best.
+ return nil
+ }
+ const errmsg = "must have SWIG version >= 3.0.6"
+ if major < 3 {
+ return errors.New(errmsg)
+ }
+ if major > 3 {
+ // 4.0 or later
+ return nil
+ }
+
+ // We have SWIG version 3.x.
+ if len(matches[2]) > 0 {
+ minor, err := strconv.Atoi(string(matches[2][1:]))
+ if err != nil {
+ return nil
+ }
+ if minor > 0 {
+ // 3.1 or later
+ return nil
+ }
+ }
+
+ // We have SWIG version 3.0.x.
+ if len(matches[3]) > 0 {
+ patch, err := strconv.Atoi(string(matches[3][1:]))
+ if err != nil {
+ return nil
+ }
+ if patch < 6 {
+ // Before 3.0.6.
+ return errors.New(errmsg)
+ }
+ }
+
+ return nil
+}
+
+func (b *builder) swigVersionCheck() error {
+ swigCheckOnce.Do(func() {
+ swigCheck = b.swigDoVersionCheck()
+ })
+ return swigCheck
+}
+
+// This code fails to build if sizeof(int) <= 32
+const swigIntSizeCode = `
+package main
+const i int = 1 << 32
+`
+
+// Determine the size of int on the target system for the -intgosize option
+// of swig >= 2.0.9
+func (b *builder) swigIntSize(obj string) (intsize string, err error) {
+ if buildN {
+ return "$INTBITS", nil
+ }
+ src := filepath.Join(b.work, "swig_intsize.go")
+ if err = ioutil.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil {
+ return
+ }
+ srcs := []string{src}
+
+ p := goFilesPackage(srcs)
+
+ if _, _, e := buildToolchain.gc(b, p, "", obj, false, nil, srcs); e != nil {
+ return "32", nil
+ }
+ return "64", nil
+}
+
+// Run SWIG on one SWIG input file.
+func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
+ cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true)
+ var cflags []string
+ if cxx {
+ cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
+ } else {
+ cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS)
+ }
+
+ n := 5 // length of ".swig"
+ if cxx {
+ n = 8 // length of ".swigcxx"
+ }
+ base := file[:len(file)-n]
+ goFile := base + ".go"
+ gccBase := base + "_wrap."
+ gccExt := "c"
+ if cxx {
+ gccExt = "cxx"
+ }
+
+ _, gccgo := buildToolchain.(gccgoToolchain)
+
+ // swig
+ args := []string{
+ "-go",
+ "-cgo",
+ "-intgosize", intgosize,
+ "-module", base,
+ "-o", obj + gccBase + gccExt,
+ "-outdir", obj,
+ }
+
+ for _, f := range cflags {
+ if len(f) > 3 && f[:2] == "-I" {
+ args = append(args, f)
+ }
+ }
+
+ if gccgo {
+ args = append(args, "-gccgo")
+ if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+ args = append(args, "-go-pkgpath", pkgpath)
+ }
+ }
+ if cxx {
+ args = append(args, "-c++")
+ }
+
+ out, err := b.runOut(p.Dir, p.ImportPath, nil, "swig", args, file)
+ if err != nil {
+ if len(out) > 0 {
+ if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) {
+ return "", "", errors.New("must have SWIG version >= 3.0.6")
+ }
+ b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig error
+ return "", "", errPrintedOutput
+ }
+ return "", "", err
+ }
+ if len(out) > 0 {
+ b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning
+ }
+
+ return obj + goFile, obj + gccBase + gccExt, nil
+}
+
+// disableBuildID adjusts a linker command line to avoid creating a
+// build ID when creating an object file rather than an executable or
+// shared library. Some systems, such as Ubuntu, always add
+// --build-id to every link, but we don't want a build ID when we are
+// producing an object file. On some of those system a plain -r (not
+// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
+// plain -r. I don't know how to turn off --build-id when using clang
+// other than passing a trailing --build-id=none. So that is what we
+// do, but only on systems likely to support it, which is to say,
+// systems that normally use gold or the GNU linker.
+func (b *builder) disableBuildID(ldflags []string) []string {
+ switch goos {
+ case "android", "dragonfly", "linux", "netbsd":
+ ldflags = append(ldflags, "-Wl,--build-id=none")
+ }
+ return ldflags
+}
+
+// An actionQueue is a priority queue of actions.
+type actionQueue []*action
+
+// Implement heap.Interface
+func (q *actionQueue) Len() int { return len(*q) }
+func (q *actionQueue) Swap(i, j int) { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
+func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
+func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*action)) }
+func (q *actionQueue) Pop() interface{} {
+ n := len(*q) - 1
+ x := (*q)[n]
+ *q = (*q)[:n]
+ return x
+}
+
+func (q *actionQueue) push(a *action) {
+ heap.Push(q, a)
+}
+
+func (q *actionQueue) pop() *action {
+ return heap.Pop(q).(*action)
+}
+
+func instrumentInit() {
+ if !buildRace && !buildMSan {
+ return
+ }
+ if buildRace && buildMSan {
+ fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously", flag.Args()[0])
+ os.Exit(2)
+ }
+ if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" {
+ fmt.Fprintf(os.Stderr, "go %s: -race and -msan are only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0])
+ os.Exit(2)
+ }
+ if !buildContext.CgoEnabled {
+ fmt.Fprintf(os.Stderr, "go %s: -race requires cgo; enable cgo by setting CGO_ENABLED=1\n", flag.Args()[0])
+ os.Exit(2)
+ }
+ if buildRace {
+ buildGcflags = append(buildGcflags, "-race")
+ buildLdflags = append(buildLdflags, "-race")
+ } else {
+ buildGcflags = append(buildGcflags, "-msan")
+ buildLdflags = append(buildLdflags, "-msan")
+ }
+ if buildContext.InstallSuffix != "" {
+ buildContext.InstallSuffix += "_"
+ }
+
+ if buildRace {
+ buildContext.InstallSuffix += "race"
+ buildContext.BuildTags = append(buildContext.BuildTags, "race")
+ } else {
+ buildContext.InstallSuffix += "msan"
+ buildContext.BuildTags = append(buildContext.BuildTags, "msan")
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/link.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/link.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/link.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/link.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,699 @@
+// Derived from Inferno utils/6l/l.h and related files.
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package obj
+
+import "encoding/binary"
+
+// An Addr is an argument to an instruction.
+// The general forms and their encodings are:
+//
+// sym±offset(symkind)(reg)(index*scale)
+// Memory reference at address &sym(symkind) + offset + reg + index*scale.
+// Any of sym(symkind), ±offset, (reg), (index*scale), and *scale can be omitted.
+// If (reg) and *scale are both omitted, the resulting expression (index) is parsed as (reg).
+// To force a parsing as index*scale, write (index*1).
+// Encoding:
+// type = TYPE_MEM
+// name = symkind (NAME_AUTO, ...) or 0 (NAME_NONE)
+// sym = sym
+// offset = ±offset
+// reg = reg (REG_*)
+// index = index (REG_*)
+// scale = scale (1, 2, 4, 8)
+//
+// $
+// Effective address of memory reference , defined above.
+// Encoding: same as memory reference, but type = TYPE_ADDR.
+//
+// $<±integer value>
+// This is a special case of $, in which only ±offset is present.
+// It has a separate type for easy recognition.
+// Encoding:
+// type = TYPE_CONST
+// offset = ±integer value
+//
+// *
+// Indirect reference through memory reference , defined above.
+// Only used on x86 for CALL/JMP *sym(SB), which calls/jumps to a function
+// pointer stored in the data word sym(SB), not a function named sym(SB).
+// Encoding: same as above, but type = TYPE_INDIR.
+//
+// $*$
+// No longer used.
+// On machines with actual SB registers, $*$ forced the
+// instruction encoding to use a full 32-bit constant, never a
+// reference relative to SB.
+//
+// $
+// Floating point constant value.
+// Encoding:
+// type = TYPE_FCONST
+// val = floating point value
+//
+// $
+// String literal value (raw bytes used for DATA instruction).
+// Encoding:
+// type = TYPE_SCONST
+// val = string
+//
+//
+// Any register: integer, floating point, control, segment, and so on.
+// If looking for specific register kind, must check type and reg value range.
+// Encoding:
+// type = TYPE_REG
+// reg = reg (REG_*)
+//
+// x(PC)
+// Encoding:
+// type = TYPE_BRANCH
+// val = Prog* reference OR ELSE offset = target pc (branch takes priority)
+//
+// $±x-±y
+// Final argument to TEXT, specifying local frame size x and argument size y.
+// In this form, x and y are integer literals only, not arbitrary expressions.
+// This avoids parsing ambiguities due to the use of - as a separator.
+// The ± are optional.
+// If the final argument to TEXT omits the -±y, the encoding should still
+// use TYPE_TEXTSIZE (not TYPE_CONST), with u.argsize = ArgsSizeUnknown.
+// Encoding:
+// type = TYPE_TEXTSIZE
+// offset = x
+// val = int32(y)
+//
+// reg<>shift, reg->shift, reg@>shift
+// Shifted register value, for ARM.
+// In this form, reg must be a register and shift can be a register or an integer constant.
+// Encoding:
+// type = TYPE_SHIFT
+// offset = (reg&15) | shifttype<<5 | count
+// shifttype = 0, 1, 2, 3 for <<, >>, ->, @>
+// count = (reg&15)<<8 | 1<<4 for a register shift count, (n&31)<<7 for an integer constant.
+//
+// (reg, reg)
+// A destination register pair. When used as the last argument of an instruction,
+// this form makes clear that both registers are destinations.
+// Encoding:
+// type = TYPE_REGREG
+// reg = first register
+// offset = second register
+//
+// [reg, reg, reg-reg]
+// Register list for ARM.
+// Encoding:
+// type = TYPE_REGLIST
+// offset = bit mask of registers in list; R0 is low bit.
+//
+// reg, reg
+// Register pair for ARM.
+// TYPE_REGREG2
+//
+// (reg+reg)
+// Register pair for PPC64.
+// Encoding:
+// type = TYPE_MEM
+// reg = first register
+// index = second register
+// scale = 1
+//
+type Addr struct {
+ Type int16
+ Reg int16
+ Index int16
+ Scale int16 // Sometimes holds a register.
+ Name int8
+ Class int8
+ Etype uint8
+ Offset int64
+ Width int64
+ Sym *LSym
+ Gotype *LSym
+
+ // argument value:
+ // for TYPE_SCONST, a string
+ // for TYPE_FCONST, a float64
+ // for TYPE_BRANCH, a *Prog (optional)
+ // for TYPE_TEXTSIZE, an int32 (optional)
+ Val interface{}
+
+ Node interface{} // for use by compiler
+}
+
+const (
+ NAME_NONE = 0 + iota
+ NAME_EXTERN
+ NAME_STATIC
+ NAME_AUTO
+ NAME_PARAM
+ // A reference to name@GOT(SB) is a reference to the entry in the global offset
+ // table for 'name'.
+ NAME_GOTREF
+)
+
+const (
+ TYPE_NONE = 0
+)
+
+const (
+ TYPE_BRANCH = 5 + iota
+ TYPE_TEXTSIZE
+ TYPE_MEM
+ TYPE_CONST
+ TYPE_FCONST
+ TYPE_SCONST
+ TYPE_REG
+ TYPE_ADDR
+ TYPE_SHIFT
+ TYPE_REGREG
+ TYPE_REGREG2
+ TYPE_INDIR
+ TYPE_REGLIST
+)
+
+// TODO(rsc): Describe prog.
+// TODO(rsc): Describe TEXT/GLOBL flag in from3, DATA width in from3.
+type Prog struct {
+ Ctxt *Link
+ Link *Prog
+ From Addr
+ From3 *Addr // optional
+ To Addr
+ Opt interface{}
+ Forwd *Prog
+ Pcond *Prog
+ Rel *Prog // Source of forward jumps on x86; pcrel on arm
+ Pc int64
+ Lineno int32
+ Spadj int32
+ As int16
+ Reg int16
+ RegTo2 int16 // 2nd register output operand
+ Mark uint16
+ Optab uint16
+ Scond uint8
+ Back uint8
+ Ft uint8
+ Tt uint8
+ Isize uint8
+ Mode int8
+
+ Info ProgInfo
+}
+
+// From3Type returns From3.Type, or TYPE_NONE when From3 is nil.
+func (p *Prog) From3Type() int16 {
+ if p.From3 == nil {
+ return TYPE_NONE
+ }
+ return p.From3.Type
+}
+
+// From3Offset returns From3.Offset, or 0 when From3 is nil.
+func (p *Prog) From3Offset() int64 {
+ if p.From3 == nil {
+ return 0
+ }
+ return p.From3.Offset
+}
+
+// ProgInfo holds information about the instruction for use
+// by clients such as the compiler. The exact meaning of this
+// data is up to the client and is not interpreted by the cmd/internal/obj/... packages.
+type ProgInfo struct {
+ _ struct{} // to prevent unkeyed literals. Trailing zero-sized field will take space.
+ Flags uint32 // flag bits
+ Reguse uint64 // registers implicitly used by this instruction
+ Regset uint64 // registers implicitly set by this instruction
+ Regindex uint64 // registers used by addressing mode
+}
+
+// Prog.as opcodes.
+// These are the portable opcodes, common to all architectures.
+// Each architecture defines many more arch-specific opcodes,
+// with values starting at A_ARCHSPECIFIC.
+// Each architecture adds an offset to this so each machine has
+// distinct space for its instructions. The offset is a power of
+// two so it can be masked to return to origin zero.
+// See the definitions of ABase386 etc.
+const (
+ AXXX = 0 + iota
+ ACALL
+ ACHECKNIL
+ ADATA
+ ADUFFCOPY
+ ADUFFZERO
+ AEND
+ AFUNCDATA
+ AGLOBL
+ AJMP
+ ANOP
+ APCDATA
+ ARET
+ ATEXT
+ ATYPE
+ AUNDEF
+ AUSEFIELD
+ AVARDEF
+ AVARKILL
+ AVARLIVE
+ A_ARCHSPECIFIC
+)
+
+// An LSym is the sort of symbol that is written to an object file.
+type LSym struct {
+ Name string
+ Type int16
+ Version int16
+ Dupok uint8
+ Cfunc uint8
+ Nosplit uint8
+ Leaf uint8
+ Seenglobl uint8
+ Onlist uint8
+ // Local means make the symbol local even when compiling Go code to reference Go
+ // symbols in other shared libraries, as in this mode symbols are global by
+ // default. "local" here means in the sense of the dynamic linker, i.e. not
+ // visible outside of the module (shared library or executable) that contains its
+ // definition. (When not compiling to support Go shared libraries, all symbols are
+ // local in this sense unless there is a cgo_export_* directive).
+ Local bool
+ Args int32
+ Locals int32
+ Value int64
+ Size int64
+ Next *LSym
+ Gotype *LSym
+ Autom *Auto
+ Text *Prog
+ Etext *Prog
+ Pcln *Pcln
+ P []byte
+ R []Reloc
+}
+
+type Pcln struct {
+ Pcsp Pcdata
+ Pcfile Pcdata
+ Pcline Pcdata
+ Pcdata []Pcdata
+ Funcdata []*LSym
+ Funcdataoff []int64
+ File []*LSym
+ Lastfile *LSym
+ Lastindex int
+}
+
+// LSym.type
+const (
+ Sxxx = iota
+ STEXT
+ SELFRXSECT
+
+ STYPE
+ SSTRING
+ SGOSTRING
+ SGOFUNC
+ SGCBITS
+ SRODATA
+ SFUNCTAB
+
+ // Types STYPE-SFUNCTAB above are written to the .rodata section by default.
+ // When linking a shared object, some conceptually "read only" types need to
+ // be written to by relocations and putting them in a section called
+ // ".rodata" interacts poorly with the system linkers. The GNU linkers
+ // support this situation by arranging for sections of the name
+ // ".data.rel.ro.XXX" to be mprotected read only by the dynamic linker after
+ // relocations have applied, so when the Go linker is creating a shared
+ // object it checks all objects of the above types and bumps any object that
+ // has a relocation to it to the corresponding type below, which are then
+ // written to sections with appropriate magic names.
+ STYPERELRO
+ SSTRINGRELRO
+ SGOSTRINGRELRO
+ SGOFUNCRELRO
+ SGCBITSRELRO
+ SRODATARELRO
+ SFUNCTABRELRO
+
+ STYPELINK
+ SSYMTAB
+ SPCLNTAB
+ SELFROSECT
+ SMACHOPLT
+ SELFSECT
+ SMACHO
+ SMACHOGOT
+ SWINDOWS
+ SELFGOT
+ SNOPTRDATA
+ SINITARR
+ SDATA
+ SBSS
+ SNOPTRBSS
+ STLSBSS
+ SXREF
+ SMACHOSYMSTR
+ SMACHOSYMTAB
+ SMACHOINDIRECTPLT
+ SMACHOINDIRECTGOT
+ SFILE
+ SFILEPATH
+ SCONST
+ SDYNIMPORT
+ SHOSTOBJ
+ SSUB = 1 << 8
+ SMASK = SSUB - 1
+ SHIDDEN = 1 << 9
+ SCONTAINER = 1 << 10 // has a sub-symbol
+)
+
+type Reloc struct {
+ Off int32
+ Siz uint8
+ Type int32
+ Add int64
+ Sym *LSym
+}
+
+// Reloc.type
+const (
+ R_ADDR = 1 + iota
+ // R_ADDRPOWER relocates a pair of "D-form" instructions (instructions with 16-bit
+ // immediates in the low half of the instruction word), usually addis followed by
+ // another add or a load, inserting the "high adjusted" 16 bits of the address of
+ // the referenced symbol into the immediate field of the first instruction and the
+ // low 16 bits into that of the second instruction.
+ R_ADDRPOWER
+ // R_ADDRARM64 relocates an adrp, add pair to compute the address of the
+ // referenced symbol.
+ R_ADDRARM64
+ // R_ADDRMIPS (only used on mips64) resolves to a 32-bit external address,
+ // by loading the address into a register with two instructions (lui, ori).
+ R_ADDRMIPS
+ R_SIZE
+ R_CALL
+ R_CALLARM
+ R_CALLARM64
+ R_CALLIND
+ R_CALLPOWER
+ // R_CALLMIPS (only used on mips64) resolves to non-PC-relative target address
+ // of a CALL (JAL) instruction, by encoding the address into the instruction.
+ R_CALLMIPS
+ R_CONST
+ R_PCREL
+ // R_TLS_LE, used on 386, amd64, and ARM, resolves to the offset of the
+ // thread-local symbol from the thread local base and is used to implement the
+ // "local exec" model for tls access (r.Sym is not set on intel platforms but is
+ // set to a TLS symbol -- runtime.tlsg -- in the linker when externally linking).
+ R_TLS_LE
+ // R_TLS_IE, used 386, amd64, and ARM resolves to the PC-relative offset to a GOT
+ // slot containing the offset from the thread-local symbol from the thread local
+ // base and is used to implemented the "initial exec" model for tls access (r.Sym
+ // is not set on intel platforms but is set to a TLS symbol -- runtime.tlsg -- in
+ // the linker when externally linking).
+ R_TLS_IE
+ R_GOTOFF
+ R_PLT0
+ R_PLT1
+ R_PLT2
+ R_USEFIELD
+ R_POWER_TOC
+ R_GOTPCREL
+ // R_JMPMIPS (only used on mips64) resolves to non-PC-relative target address
+ // of a JMP instruction, by encoding the address into the instruction.
+ // The stack nosplit check ignores this since it is not a function call.
+ R_JMPMIPS
+
+ // Platform dependent relocations. Architectures with fixed width instructions
+ // have the inherent issue that a 32-bit (or 64-bit!) displacement cannot be
+ // stuffed into a 32-bit instruction, so an address needs to be spread across
+ // several instructions, and in turn this requires a sequence of relocations, each
+ // updating a part of an instruction. This leads to relocation codes that are
+ // inherently processor specific.
+
+ // Arm64.
+
+ // Set a MOV[NZ] immediate field to bits [15:0] of the offset from the thread
+ // local base to the thread local variable defined by the referenced (thread
+ // local) symbol. Error if the offset does not fit into 16 bits.
+ R_ARM64_TLS_LE
+
+ // Relocates an ADRP; LD64 instruction sequence to load the offset between
+ // the thread local base and the thread local variable defined by the
+ // referenced (thread local) symbol from the GOT.
+ R_ARM64_TLS_IE
+
+ // R_ARM64_GOTPCREL relocates an adrp, ld64 pair to compute the address of the GOT
+ // slot of the referenced symbol.
+ R_ARM64_GOTPCREL
+
+ // PPC64.
+
+ // R_POWER_TLS_LE is used to implement the "local exec" model for tls
+ // access. It resolves to the offset of the thread-local symbol from the
+ // thread pointer (R13) and inserts this value into the low 16 bits of an
+ // instruction word.
+ R_POWER_TLS_LE
+
+ // R_POWER_TLS_IE is used to implement the "initial exec" model for tls access. It
+ // relocates a D-form, DS-form instruction sequence like R_ADDRPOWER_DS. It
+ // inserts to the offset of GOT slot for the thread-local symbol from the TOC (the
+ // GOT slot is filled by the dynamic linker with the offset of the thread-local
+ // symbol from the thread pointer (R13)).
+ R_POWER_TLS_IE
+
+ // R_POWER_TLS marks an X-form instruction such as "MOVD 0(R13)(R31*1), g" as
+ // accessing a particular thread-local symbol. It does not affect code generation
+ // but is used by the system linker when relaxing "initial exec" model code to
+ // "local exec" model code.
+ R_POWER_TLS
+
+ // R_ADDRPOWER_DS is similar to R_ADDRPOWER above, but assumes the second
+ // instruction is a "DS-form" instruction, which has an immediate field occupying
+ // bits [15:2] of the instruction word. Bits [15:2] of the address of the
+ // relocated symbol are inserted into this field; it is an error if the last two
+ // bits of the address are not 0.
+ R_ADDRPOWER_DS
+
+ // R_ADDRPOWER_PCREL relocates a D-form, DS-form instruction sequence like
+ // R_ADDRPOWER_DS but inserts the offset of the GOT slot for the referenced symbol
+ // from the TOC rather than the symbol's address.
+ R_ADDRPOWER_GOT
+
+ // R_ADDRPOWER_PCREL relocates two D-form instructions like R_ADDRPOWER, but
+ // inserts the displacement from the place being relocated to the address of the
+ // the relocated symbol instead of just its address.
+ R_ADDRPOWER_PCREL
+
+ // R_ADDRPOWER_TOCREL relocates two D-form instructions like R_ADDRPOWER, but
+ // inserts the offset from the TOC to the address of the the relocated symbol
+ // rather than the symbol's address.
+ R_ADDRPOWER_TOCREL
+
+ // R_ADDRPOWER_TOCREL relocates a D-form, DS-form instruction sequence like
+ // R_ADDRPOWER_DS but inserts the offset from the TOC to the address of the the
+ // relocated symbol rather than the symbol's address.
+ R_ADDRPOWER_TOCREL_DS
+)
+
+type Auto struct {
+ Asym *LSym
+ Link *Auto
+ Aoffset int32
+ Name int16
+ Gotype *LSym
+}
+
+// Auto.name
+const (
+ A_AUTO = 1 + iota
+ A_PARAM
+)
+
+type Pcdata struct {
+ P []byte
+}
+
+// Pcdata iterator.
+// for(pciterinit(ctxt, &it, &pcd); !it.done; pciternext(&it)) { it.value holds in [it.pc, it.nextpc) }
+type Pciter struct {
+ d Pcdata
+ p []byte
+ pc uint32
+ nextpc uint32
+ pcscale uint32
+ value int32
+ start int
+ done int
+}
+
+// symbol version, incremented each time a file is loaded.
+// version==1 is reserved for savehist.
+const (
+ HistVersion = 1
+)
+
+// Link holds the context for writing object code from a compiler
+// to be linker input or for reading that input into the linker.
+type Link struct {
+ Goarm int32
+ Headtype int
+ Arch *LinkArch
+ Debugasm int32
+ Debugvlog int32
+ Debugdivmod int32
+ Debugpcln int32
+ Flag_shared int32
+ Flag_dynlink bool
+ Bso *Biobuf
+ Pathname string
+ Windows int32
+ Goroot string
+ Goroot_final string
+ Enforce_data_order int32
+ Hash map[SymVer]*LSym
+ LineHist LineHist
+ Imports []string
+ Plist *Plist
+ Plast *Plist
+ Sym_div *LSym
+ Sym_divu *LSym
+ Sym_mod *LSym
+ Sym_modu *LSym
+ Plan9privates *LSym
+ Curp *Prog
+ Printp *Prog
+ Blitrl *Prog
+ Elitrl *Prog
+ Rexflag int
+ Vexflag int
+ Rep int
+ Repn int
+ Lock int
+ Asmode int
+ Andptr []byte
+ And [100]uint8
+ Instoffset int64
+ Autosize int32
+ Armsize int32
+ Pc int64
+ DiagFunc func(string, ...interface{})
+ Mode int
+ Cursym *LSym
+ Version int
+ Textp *LSym
+ Etextp *LSym
+ Errors int
+
+ // state for writing objects
+ Text *LSym
+ Data *LSym
+ Etext *LSym
+ Edata *LSym
+}
+
+func (ctxt *Link) Diag(format string, args ...interface{}) {
+ ctxt.Errors++
+ ctxt.DiagFunc(format, args...)
+}
+
+// The smallest possible offset from the hardware stack pointer to a local
+// variable on the stack. Architectures that use a link register save its value
+// on the stack in the function prologue and so always have a pointer between
+// the hardware stack pointer and the local variable area.
+func (ctxt *Link) FixedFrameSize() int64 {
+ switch ctxt.Arch.Thechar {
+ case '6', '8':
+ return 0
+ case '9':
+ // PIC code on ppc64le requires 32 bytes of stack, and it's easier to
+ // just use that much stack always on ppc64x.
+ return int64(4 * ctxt.Arch.Ptrsize)
+ default:
+ return int64(ctxt.Arch.Ptrsize)
+ }
+}
+
+type SymVer struct {
+ Name string
+ Version int // TODO: make int16 to match LSym.Version?
+}
+
+// LinkArch is the definition of a single architecture.
+type LinkArch struct {
+ ByteOrder binary.ByteOrder
+ Name string
+ Thechar int
+ Preprocess func(*Link, *LSym)
+ Assemble func(*Link, *LSym)
+ Follow func(*Link, *LSym)
+ Progedit func(*Link, *Prog)
+ UnaryDst map[int]bool // Instruction takes one operand, a destination.
+ Minlc int
+ Ptrsize int
+ Regsize int
+}
+
+/* executable header types */
+const (
+ Hunknown = 0 + iota
+ Hdarwin
+ Hdragonfly
+ Helf
+ Hfreebsd
+ Hlinux
+ Hnacl
+ Hnetbsd
+ Hopenbsd
+ Hplan9
+ Hsolaris
+ Hwindows
+)
+
+type Plist struct {
+ Name *LSym
+ Firstpc *Prog
+ Recur int
+ Link *Plist
+}
+
+/*
+ * start a new Prog list.
+ */
+func Linknewplist(ctxt *Link) *Plist {
+ pl := new(Plist)
+ if ctxt.Plist == nil {
+ ctxt.Plist = pl
+ } else {
+ ctxt.Plast.Link = pl
+ }
+ ctxt.Plast = pl
+ return pl
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/util.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/util.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/util.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/obj/util.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,651 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package obj
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "strings"
+ "time"
+)
+
+const REG_NONE = 0
+
+var start time.Time
+
+func Cputime() float64 {
+ if start.IsZero() {
+ start = time.Now()
+ }
+ return time.Since(start).Seconds()
+}
+
+type Biobuf struct {
+ f *os.File
+ r *bufio.Reader
+ w *bufio.Writer
+ linelen int
+}
+
+func Bopenw(name string) (*Biobuf, error) {
+ f, err := os.Create(name)
+ if err != nil {
+ return nil, err
+ }
+ return &Biobuf{f: f, w: bufio.NewWriter(f)}, nil
+}
+
+func Bopenr(name string) (*Biobuf, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ return &Biobuf{f: f, r: bufio.NewReader(f)}, nil
+}
+
+func Binitw(w io.Writer) *Biobuf {
+ return &Biobuf{w: bufio.NewWriter(w)}
+}
+
+func Binitr(r io.Reader) *Biobuf {
+ return &Biobuf{r: bufio.NewReader(r)}
+}
+
+func (b *Biobuf) Write(p []byte) (int, error) {
+ return b.w.Write(p)
+}
+
+func Bwritestring(b *Biobuf, p string) (int, error) {
+ return b.w.WriteString(p)
+}
+
+func Bseek(b *Biobuf, offset int64, whence int) int64 {
+ if b.w != nil {
+ if err := b.w.Flush(); err != nil {
+ log.Fatalf("writing output: %v", err)
+ }
+ } else if b.r != nil {
+ if whence == 1 {
+ offset -= int64(b.r.Buffered())
+ }
+ }
+ off, err := b.f.Seek(offset, whence)
+ if err != nil {
+ log.Fatalf("seeking in output: %v", err)
+ }
+ if b.r != nil {
+ b.r.Reset(b.f)
+ }
+ return off
+}
+
+func Boffset(b *Biobuf) int64 {
+ if b.w != nil {
+ if err := b.w.Flush(); err != nil {
+ log.Fatalf("writing output: %v", err)
+ }
+ }
+ off, err := b.f.Seek(0, 1)
+ if err != nil {
+ log.Fatalf("seeking in output [0, 1]: %v", err)
+ }
+ if b.r != nil {
+ off -= int64(b.r.Buffered())
+ }
+ return off
+}
+
+func (b *Biobuf) Flush() error {
+ return b.w.Flush()
+}
+
+func Bputc(b *Biobuf, c byte) {
+ b.w.WriteByte(c)
+}
+
+const Beof = -1
+
+func Bread(b *Biobuf, p []byte) int {
+ n, err := io.ReadFull(b.r, p)
+ if n == 0 {
+ if err != nil && err != io.EOF {
+ n = -1
+ }
+ }
+ return n
+}
+
+func Bgetc(b *Biobuf) int {
+ c, err := b.r.ReadByte()
+ if err != nil {
+ return -1
+ }
+ return int(c)
+}
+
+func Bgetrune(b *Biobuf) int {
+ r, _, err := b.r.ReadRune()
+ if err != nil {
+ return -1
+ }
+ return int(r)
+}
+
+func Bungetrune(b *Biobuf) {
+ b.r.UnreadRune()
+}
+
+func (b *Biobuf) Read(p []byte) (int, error) {
+ return b.r.Read(p)
+}
+
+func (b *Biobuf) Peek(n int) ([]byte, error) {
+ return b.r.Peek(n)
+}
+
+func Brdline(b *Biobuf, delim int) string {
+ s, err := b.r.ReadBytes(byte(delim))
+ if err != nil {
+ log.Fatalf("reading input: %v", err)
+ }
+ b.linelen = len(s)
+ return string(s)
+}
+
+func Brdstr(b *Biobuf, delim int, cut int) string {
+ s, err := b.r.ReadString(byte(delim))
+ if err != nil {
+ log.Fatalf("reading input: %v", err)
+ }
+ if len(s) > 0 && cut > 0 {
+ s = s[:len(s)-1]
+ }
+ return s
+}
+
+func Blinelen(b *Biobuf) int {
+ return b.linelen
+}
+
+func Bterm(b *Biobuf) error {
+ var err error
+ if b.w != nil {
+ err = b.w.Flush()
+ }
+ err1 := b.f.Close()
+ if err == nil {
+ err = err1
+ }
+ return err
+}
+
+func envOr(key, value string) string {
+ if x := os.Getenv(key); x != "" {
+ return x
+ }
+ return value
+}
+
+func Getgoroot() string {
+ return envOr("GOROOT", defaultGOROOT)
+}
+
+func Getgoarch() string {
+ return envOr("GOARCH", defaultGOARCH)
+}
+
+func Getgoos() string {
+ return envOr("GOOS", defaultGOOS)
+}
+
+func Getgoarm() int32 {
+ switch v := envOr("GOARM", defaultGOARM); v {
+ case "5":
+ return 5
+ case "6":
+ return 6
+ case "7":
+ return 7
+ }
+ // Fail here, rather than validate at multiple call sites.
+ log.Fatalf("Invalid GOARM value. Must be 5, 6, or 7.")
+ panic("unreachable")
+}
+
+func Getgo386() string {
+ // Validated by cmd/compile.
+ return envOr("GO386", defaultGO386)
+}
+
+func Getgoextlinkenabled() string {
+ return envOr("GO_EXTLINK_ENABLED", defaultGO_EXTLINK_ENABLED)
+}
+
+func Getgoversion() string {
+ return version
+}
+
+func (p *Prog) Line() string {
+ return p.Ctxt.LineHist.LineString(int(p.Lineno))
+}
+
+var armCondCode = []string{
+ ".EQ",
+ ".NE",
+ ".CS",
+ ".CC",
+ ".MI",
+ ".PL",
+ ".VS",
+ ".VC",
+ ".HI",
+ ".LS",
+ ".GE",
+ ".LT",
+ ".GT",
+ ".LE",
+ "",
+ ".NV",
+}
+
+/* ARM scond byte */
+const (
+ C_SCOND = (1 << 4) - 1
+ C_SBIT = 1 << 4
+ C_PBIT = 1 << 5
+ C_WBIT = 1 << 6
+ C_FBIT = 1 << 7
+ C_UBIT = 1 << 7
+ C_SCOND_XOR = 14
+)
+
+// CConv formats ARM condition codes.
+func CConv(s uint8) string {
+ if s == 0 {
+ return ""
+ }
+ sc := armCondCode[(s&C_SCOND)^C_SCOND_XOR]
+ if s&C_SBIT != 0 {
+ sc += ".S"
+ }
+ if s&C_PBIT != 0 {
+ sc += ".P"
+ }
+ if s&C_WBIT != 0 {
+ sc += ".W"
+ }
+ if s&C_UBIT != 0 { /* ambiguous with FBIT */
+ sc += ".U"
+ }
+ return sc
+}
+
+func (p *Prog) String() string {
+ if p.Ctxt == nil {
+ return ""
+ }
+
+ sc := CConv(p.Scond)
+
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, "%.5d (%v)\t%v%s", p.Pc, p.Line(), Aconv(int(p.As)), sc)
+ sep := "\t"
+ if p.From.Type != TYPE_NONE {
+ fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.From))
+ sep = ", "
+ }
+ if p.Reg != REG_NONE {
+ // Should not happen but might as well show it if it does.
+ fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.Reg)))
+ sep = ", "
+ }
+ if p.From3Type() != TYPE_NONE {
+ if p.From3.Type == TYPE_CONST && (p.As == ADATA || p.As == ATEXT || p.As == AGLOBL) {
+ // Special case - omit $.
+ fmt.Fprintf(&buf, "%s%d", sep, p.From3.Offset)
+ } else {
+ fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, p.From3))
+ }
+ sep = ", "
+ }
+ if p.To.Type != TYPE_NONE {
+ fmt.Fprintf(&buf, "%s%v", sep, Dconv(p, &p.To))
+ }
+ if p.RegTo2 != REG_NONE {
+ fmt.Fprintf(&buf, "%s%v", sep, Rconv(int(p.RegTo2)))
+ }
+ return buf.String()
+}
+
+func (ctxt *Link) NewProg() *Prog {
+ p := new(Prog) // should be the only call to this; all others should use ctxt.NewProg
+ p.Ctxt = ctxt
+ return p
+}
+
+func (ctxt *Link) Line(n int) string {
+ return ctxt.LineHist.LineString(n)
+}
+
+func Getcallerpc(interface{}) uintptr {
+ return 1
+}
+
+func (ctxt *Link) Dconv(a *Addr) string {
+ return Dconv(nil, a)
+}
+
+func Dconv(p *Prog, a *Addr) string {
+ var str string
+
+ switch a.Type {
+ default:
+ str = fmt.Sprintf("type=%d", a.Type)
+
+ case TYPE_NONE:
+ str = ""
+ if a.Name != NAME_NONE || a.Reg != 0 || a.Sym != nil {
+ str = fmt.Sprintf("%v(%v)(NONE)", Mconv(a), Rconv(int(a.Reg)))
+ }
+
+ case TYPE_REG:
+ // TODO(rsc): This special case is for x86 instructions like
+ // PINSRQ CX,$1,X6
+ // where the $1 is included in the p->to Addr.
+ // Move into a new field.
+ if a.Offset != 0 {
+ str = fmt.Sprintf("$%d,%v", a.Offset, Rconv(int(a.Reg)))
+ break
+ }
+
+ str = Rconv(int(a.Reg))
+ if a.Name != TYPE_NONE || a.Sym != nil {
+ str = fmt.Sprintf("%v(%v)(REG)", Mconv(a), Rconv(int(a.Reg)))
+ }
+
+ case TYPE_BRANCH:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s(SB)", a.Sym.Name)
+ } else if p != nil && p.Pcond != nil {
+ str = fmt.Sprint(p.Pcond.Pc)
+ } else if a.Val != nil {
+ str = fmt.Sprint(a.Val.(*Prog).Pc)
+ } else {
+ str = fmt.Sprintf("%d(PC)", a.Offset)
+ }
+
+ case TYPE_INDIR:
+ str = fmt.Sprintf("*%s", Mconv(a))
+
+ case TYPE_MEM:
+ str = Mconv(a)
+ if a.Index != REG_NONE {
+ str += fmt.Sprintf("(%v*%d)", Rconv(int(a.Index)), int(a.Scale))
+ }
+
+ case TYPE_CONST:
+ if a.Reg != 0 {
+ str = fmt.Sprintf("$%v(%v)", Mconv(a), Rconv(int(a.Reg)))
+ } else {
+ str = fmt.Sprintf("$%v", Mconv(a))
+ }
+
+ case TYPE_TEXTSIZE:
+ if a.Val.(int32) == ArgsSizeUnknown {
+ str = fmt.Sprintf("$%d", a.Offset)
+ } else {
+ str = fmt.Sprintf("$%d-%d", a.Offset, a.Val.(int32))
+ }
+
+ case TYPE_FCONST:
+ str = fmt.Sprintf("%.17g", a.Val.(float64))
+ // Make sure 1 prints as 1.0
+ if !strings.ContainsAny(str, ".e") {
+ str += ".0"
+ }
+ str = fmt.Sprintf("$(%s)", str)
+
+ case TYPE_SCONST:
+ str = fmt.Sprintf("$%q", a.Val.(string))
+
+ case TYPE_ADDR:
+ str = fmt.Sprintf("$%s", Mconv(a))
+
+ case TYPE_SHIFT:
+ v := int(a.Offset)
+ op := string("<<>>->@>"[((v>>5)&3)<<1:])
+ if v&(1<<4) != 0 {
+ str = fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ str = fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+ if a.Reg != 0 {
+ str += fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+ }
+
+ case TYPE_REGREG:
+ str = fmt.Sprintf("(%v, %v)", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
+
+ case TYPE_REGREG2:
+ str = fmt.Sprintf("%v, %v", Rconv(int(a.Reg)), Rconv(int(a.Offset)))
+
+ case TYPE_REGLIST:
+ str = regListConv(int(a.Offset))
+ }
+
+ return str
+}
+
+func Mconv(a *Addr) string {
+ var str string
+
+ switch a.Name {
+ default:
+ str = fmt.Sprintf("name=%d", a.Name)
+
+ case NAME_NONE:
+ switch {
+ case a.Reg == REG_NONE:
+ str = fmt.Sprint(a.Offset)
+ case a.Offset == 0:
+ str = fmt.Sprintf("(%v)", Rconv(int(a.Reg)))
+ case a.Offset != 0:
+ str = fmt.Sprintf("%d(%v)", a.Offset, Rconv(int(a.Reg)))
+ }
+
+ case NAME_EXTERN:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s%s(SB)", a.Sym.Name, offConv(a.Offset))
+ } else {
+ str = fmt.Sprintf("%s(SB)", offConv(a.Offset))
+ }
+
+ case NAME_GOTREF:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s%s@GOT(SB)", a.Sym.Name, offConv(a.Offset))
+ } else {
+ str = fmt.Sprintf("%s@GOT(SB)", offConv(a.Offset))
+ }
+
+ case NAME_STATIC:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s<>%s(SB)", a.Sym.Name, offConv(a.Offset))
+ } else {
+ str = fmt.Sprintf("<>%s(SB)", offConv(a.Offset))
+ }
+
+ case NAME_AUTO:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s%s(SP)", a.Sym.Name, offConv(a.Offset))
+ } else {
+ str = fmt.Sprintf("%s(SP)", offConv(a.Offset))
+ }
+
+ case NAME_PARAM:
+ if a.Sym != nil {
+ str = fmt.Sprintf("%s%s(FP)", a.Sym.Name, offConv(a.Offset))
+ } else {
+ str = fmt.Sprintf("%s(FP)", offConv(a.Offset))
+ }
+ }
+ return str
+}
+
+func offConv(off int64) string {
+ if off == 0 {
+ return ""
+ }
+ return fmt.Sprintf("%+d", off)
+}
+
+type regSet struct {
+ lo int
+ hi int
+ Rconv func(int) string
+}
+
+// Few enough architectures that a linear scan is fastest.
+// Not even worth sorting.
+var regSpace []regSet
+
+/*
+ Each architecture defines a register space as a unique
+ integer range.
+ Here is the list of architectures and the base of their register spaces.
+*/
+
+const (
+ // Because of masking operations in the encodings, each register
+ // space should start at 0 modulo some power of 2.
+ RBase386 = 1 * 1024
+ RBaseAMD64 = 2 * 1024
+ RBaseARM = 3 * 1024
+ RBasePPC64 = 4 * 1024 // range [4k, 8k)
+ RBaseARM64 = 8 * 1024 // range [8k, 13k)
+ RBaseMIPS64 = 13 * 1024 // range [13k, 14k)
+)
+
+// RegisterRegister binds a pretty-printer (Rconv) for register
+// numbers to a given register number range. Lo is inclusive,
+// hi exclusive (valid registers are lo through hi-1).
+func RegisterRegister(lo, hi int, Rconv func(int) string) {
+ regSpace = append(regSpace, regSet{lo, hi, Rconv})
+}
+
+func Rconv(reg int) string {
+ if reg == REG_NONE {
+ return "NONE"
+ }
+ for i := range regSpace {
+ rs := ®Space[i]
+ if rs.lo <= reg && reg < rs.hi {
+ return rs.Rconv(reg)
+ }
+ }
+ return fmt.Sprintf("R???%d", reg)
+}
+
+func regListConv(list int) string {
+ str := ""
+
+ for i := 0; i < 16; i++ { // TODO: 16 is ARM-specific.
+ if list&(1< 0 {
+ s := d.syms[i-1]
+ if s.Addr != 0 && s.Addr <= addr && addr < s.Addr+uint64(s.Size) {
+ return s.Name, s.Addr
+ }
+ }
+ return "", 0
+}
+
+// base returns the final element in the path.
+// It works on both Windows and Unix paths,
+// regardless of host operating system.
+func base(path string) string {
+ path = path[strings.LastIndex(path, "/")+1:]
+ path = path[strings.LastIndex(path, `\`)+1:]
+ return path
+}
+
+// Print prints a disassembly of the file to w.
+// If filter is non-nil, the disassembly only includes functions with names matching filter.
+// The disassembly only includes functions that overlap the range [start, end).
+func (d *Disasm) Print(w io.Writer, filter *regexp.Regexp, start, end uint64) {
+ if start < d.textStart {
+ start = d.textStart
+ }
+ if end > d.textEnd {
+ end = d.textEnd
+ }
+ printed := false
+ bw := bufio.NewWriter(w)
+ for _, sym := range d.syms {
+ symStart := sym.Addr
+ symEnd := sym.Addr + uint64(sym.Size)
+ if sym.Code != 'T' && sym.Code != 't' ||
+ symStart < d.textStart ||
+ symEnd <= start || end <= symStart ||
+ filter != nil && !filter.MatchString(sym.Name) {
+ continue
+ }
+ if printed {
+ fmt.Fprintf(bw, "\n")
+ }
+ printed = true
+
+ file, _, _ := d.pcln.PCToLine(sym.Addr)
+ fmt.Fprintf(bw, "TEXT %s(SB) %s\n", sym.Name, file)
+
+ tw := tabwriter.NewWriter(bw, 1, 8, 1, '\t', 0)
+ if symEnd > end {
+ symEnd = end
+ }
+ code := d.text[:end-d.textStart]
+ d.Decode(symStart, symEnd, func(pc, size uint64, file string, line int, text string) {
+ i := pc - d.textStart
+ fmt.Fprintf(tw, "\t%s:%d\t%#x\t", base(file), line, pc)
+ if size%4 != 0 || d.goarch == "386" || d.goarch == "amd64" {
+ // Print instruction as bytes.
+ fmt.Fprintf(tw, "%x", code[i:i+size])
+ } else {
+ // Print instruction as 32-bit words.
+ for j := uint64(0); j < size; j += 4 {
+ if j > 0 {
+ fmt.Fprintf(tw, " ")
+ }
+ fmt.Fprintf(tw, "%08x", d.byteOrder.Uint32(code[i+j:]))
+ }
+ }
+ fmt.Fprintf(tw, "\t%s\n", text)
+ })
+ tw.Flush()
+ }
+ bw.Flush()
+}
+
+// Decode disassembles the text segment range [start, end), calling f for each instruction.
+func (d *Disasm) Decode(start, end uint64, f func(pc, size uint64, file string, line int, text string)) {
+ if start < d.textStart {
+ start = d.textStart
+ }
+ if end > d.textEnd {
+ end = d.textEnd
+ }
+ code := d.text[:end-d.textStart]
+ lookup := d.lookup
+ for pc := start; pc < end; {
+ i := pc - d.textStart
+ text, size := d.disasm(code[i:], pc, lookup)
+ file, line, _ := d.pcln.PCToLine(pc)
+ f(pc, uint64(size), file, line, text)
+ pc += uint64(size)
+ }
+}
+
+type lookupFunc func(addr uint64) (sym string, base uint64)
+type disasmFunc func(code []byte, pc uint64, lookup lookupFunc) (text string, size int)
+
+func disasm_386(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ return disasm_x86(code, pc, lookup, 32)
+}
+
+func disasm_amd64(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ return disasm_x86(code, pc, lookup, 64)
+}
+
+func disasm_x86(code []byte, pc uint64, lookup lookupFunc, arch int) (string, int) {
+ inst, err := x86asm.Decode(code, 64)
+ var text string
+ size := inst.Len
+ if err != nil || size == 0 || inst.Op == 0 {
+ size = 1
+ text = "?"
+ } else {
+ text = x86asm.GoSyntax(inst, pc, lookup)
+ }
+ return text, size
+}
+
+type textReader struct {
+ code []byte
+ pc uint64
+}
+
+func (r textReader) ReadAt(data []byte, off int64) (n int, err error) {
+ if off < 0 || uint64(off) < r.pc {
+ return 0, io.EOF
+ }
+ d := uint64(off) - r.pc
+ if d >= uint64(len(r.code)) {
+ return 0, io.EOF
+ }
+ n = copy(data, r.code[d:])
+ if n < len(data) {
+ err = io.ErrUnexpectedEOF
+ }
+ return
+}
+
+func disasm_arm(code []byte, pc uint64, lookup lookupFunc) (string, int) {
+ inst, err := armasm.Decode(code, armasm.ModeARM)
+ var text string
+ size := inst.Len
+ if err != nil || size == 0 || inst.Op == 0 {
+ size = 4
+ text = "?"
+ } else {
+ text = armasm.GoSyntax(inst, pc, lookup, textReader{code, pc})
+ }
+ return text, size
+}
+
+var disasms = map[string]disasmFunc{
+ "386": disasm_386,
+ "amd64": disasm_amd64,
+ "arm": disasm_arm,
+}
+
+var byteOrders = map[string]binary.ByteOrder{
+ "386": binary.LittleEndian,
+ "amd64": binary.LittleEndian,
+ "arm": binary.LittleEndian,
+ "ppc64": binary.BigEndian,
+ "ppc64le": binary.LittleEndian,
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/objfile/elf.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/objfile/elf.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/internal/objfile/elf.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/internal/objfile/elf.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,104 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parsing of ELF executables (Linux, FreeBSD, and so on).
+
+package objfile
+
+import (
+ "debug/elf"
+ "fmt"
+ "os"
+)
+
+type elfFile struct {
+ elf *elf.File
+}
+
+func openElf(r *os.File) (rawFile, error) {
+ f, err := elf.NewFile(r)
+ if err != nil {
+ return nil, err
+ }
+ return &elfFile{f}, nil
+}
+
+func (f *elfFile) symbols() ([]Sym, error) {
+ elfSyms, err := f.elf.Symbols()
+ if err != nil {
+ return nil, err
+ }
+
+ var syms []Sym
+ for _, s := range elfSyms {
+ sym := Sym{Addr: s.Value, Name: s.Name, Size: int64(s.Size), Code: '?'}
+ switch s.Section {
+ case elf.SHN_UNDEF:
+ sym.Code = 'U'
+ case elf.SHN_COMMON:
+ sym.Code = 'B'
+ default:
+ i := int(s.Section)
+ if i < 0 || i >= len(f.elf.Sections) {
+ break
+ }
+ sect := f.elf.Sections[i]
+ switch sect.Flags & (elf.SHF_WRITE | elf.SHF_ALLOC | elf.SHF_EXECINSTR) {
+ case elf.SHF_ALLOC | elf.SHF_EXECINSTR:
+ sym.Code = 'T'
+ case elf.SHF_ALLOC:
+ sym.Code = 'R'
+ case elf.SHF_ALLOC | elf.SHF_WRITE:
+ sym.Code = 'D'
+ }
+ }
+ if elf.ST_BIND(s.Info) == elf.STB_LOCAL {
+ sym.Code += 'a' - 'A'
+ }
+ syms = append(syms, sym)
+ }
+
+ return syms, nil
+}
+
+func (f *elfFile) pcln() (textStart uint64, symtab, pclntab []byte, err error) {
+ if sect := f.elf.Section(".text"); sect != nil {
+ textStart = sect.Addr
+ }
+ if sect := f.elf.Section(".gosymtab"); sect != nil {
+ if symtab, err = sect.Data(); err != nil {
+ return 0, nil, nil, err
+ }
+ }
+ if sect := f.elf.Section(".gopclntab"); sect != nil {
+ if pclntab, err = sect.Data(); err != nil {
+ return 0, nil, nil, err
+ }
+ }
+ return textStart, symtab, pclntab, nil
+}
+
+func (f *elfFile) text() (textStart uint64, text []byte, err error) {
+ sect := f.elf.Section(".text")
+ if sect == nil {
+ return 0, nil, fmt.Errorf("text section not found")
+ }
+ textStart = sect.Addr
+ text, err = sect.Data()
+ return
+}
+
+func (f *elfFile) goarch() string {
+ switch f.elf.Machine {
+ case elf.EM_386:
+ return "386"
+ case elf.EM_X86_64:
+ return "amd64"
+ case elf.EM_ARM:
+ return "arm"
+ case elf.EM_PPC64:
+ return "ppc64"
+ }
+ return ""
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/arch.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/arch.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/arch.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/arch.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,88 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ld
+
+import "encoding/binary"
+
+var Linkarm = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "arm",
+ Thechar: '5',
+ Minlc: 4,
+ Ptrsize: 4,
+ Regsize: 4,
+}
+
+var Linkarm64 = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "arm64",
+ Thechar: '7',
+ Minlc: 4,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkamd64 = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "amd64",
+ Thechar: '6',
+ Minlc: 1,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkamd64p32 = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "amd64p32",
+ Thechar: '6',
+ Minlc: 1,
+ Ptrsize: 4,
+ Regsize: 8,
+}
+
+var Link386 = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "386",
+ Thechar: '8',
+ Minlc: 1,
+ Ptrsize: 4,
+ Regsize: 4,
+}
+
+var Linkppc64 = LinkArch{
+ ByteOrder: binary.BigEndian,
+ Name: "ppc64",
+ Thechar: '9',
+ Minlc: 4,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkppc64le = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "ppc64le",
+ Thechar: '9',
+ Minlc: 4,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkmips64 = LinkArch{
+ ByteOrder: binary.BigEndian,
+ Name: "mips64",
+ Thechar: '0',
+ Minlc: 4,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkmips64le = LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "mips64le",
+ Thechar: '0',
+ Minlc: 4,
+ Ptrsize: 8,
+ Regsize: 8,
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/data.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/data.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/data.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/data.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,1830 @@
+// Derived from Inferno utils/6l/obj.c and utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+ "cmd/internal/gcprog"
+ "cmd/internal/obj"
+ "fmt"
+ "log"
+ "os"
+ "strconv"
+ "strings"
+)
+
+func Symgrow(ctxt *Link, s *LSym, siz int64) {
+ if int64(int(siz)) != siz {
+ log.Fatalf("symgrow size %d too long", siz)
+ }
+ if int64(len(s.P)) >= siz {
+ return
+ }
+ for cap(s.P) < int(siz) {
+ s.P = append(s.P[:len(s.P)], 0)
+ }
+ s.P = s.P[:siz]
+}
+
+func Addrel(s *LSym) *Reloc {
+ s.R = append(s.R, Reloc{})
+ return &s.R[len(s.R)-1]
+}
+
+func setuintxx(ctxt *Link, s *LSym, off int64, v uint64, wid int64) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ if s.Size < off+wid {
+ s.Size = off + wid
+ Symgrow(ctxt, s, s.Size)
+ }
+
+ switch wid {
+ case 1:
+ s.P[off] = uint8(v)
+ case 2:
+ ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(v))
+ case 4:
+ ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(v))
+ case 8:
+ ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(v))
+ }
+
+ return off + wid
+}
+
+func adduintxx(ctxt *Link, s *LSym, v uint64, wid int) int64 {
+ off := s.Size
+ setuintxx(ctxt, s, off, v, int64(wid))
+ return off
+}
+
+func Adduint8(ctxt *Link, s *LSym, v uint8) int64 {
+ return adduintxx(ctxt, s, uint64(v), 1)
+}
+
+func Adduint16(ctxt *Link, s *LSym, v uint16) int64 {
+ return adduintxx(ctxt, s, uint64(v), 2)
+}
+
+func Adduint32(ctxt *Link, s *LSym, v uint32) int64 {
+ return adduintxx(ctxt, s, uint64(v), 4)
+}
+
+func Adduint64(ctxt *Link, s *LSym, v uint64) int64 {
+ return adduintxx(ctxt, s, v, 8)
+}
+
+func adduint(ctxt *Link, s *LSym, v uint64) int64 {
+ return adduintxx(ctxt, s, v, Thearch.Intsize)
+}
+
+func setuint8(ctxt *Link, s *LSym, r int64, v uint8) int64 {
+ return setuintxx(ctxt, s, r, uint64(v), 1)
+}
+
+func setuint32(ctxt *Link, s *LSym, r int64, v uint32) int64 {
+ return setuintxx(ctxt, s, r, uint64(v), 4)
+}
+
+func Addaddrplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ i := s.Size
+ s.Size += int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ r := Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = obj.R_ADDR
+ r.Add = add
+ return i + int64(r.Siz)
+}
+
+func Addpcrelplus(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ i := s.Size
+ s.Size += 4
+ Symgrow(ctxt, s, s.Size)
+ r := Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Add = add
+ r.Type = obj.R_PCREL
+ r.Siz = 4
+ return i + int64(r.Siz)
+}
+
+func Addaddr(ctxt *Link, s *LSym, t *LSym) int64 {
+ return Addaddrplus(ctxt, s, t, 0)
+}
+
+func setaddrplus(ctxt *Link, s *LSym, off int64, t *LSym, add int64) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ if off+int64(ctxt.Arch.Ptrsize) > s.Size {
+ s.Size = off + int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ }
+
+ r := Addrel(s)
+ r.Sym = t
+ r.Off = int32(off)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = obj.R_ADDR
+ r.Add = add
+ return off + int64(r.Siz)
+}
+
+func setaddr(ctxt *Link, s *LSym, off int64, t *LSym) int64 {
+ return setaddrplus(ctxt, s, off, t, 0)
+}
+
+func addsize(ctxt *Link, s *LSym, t *LSym) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ i := s.Size
+ s.Size += int64(ctxt.Arch.Ptrsize)
+ Symgrow(ctxt, s, s.Size)
+ r := Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = uint8(ctxt.Arch.Ptrsize)
+ r.Type = obj.R_SIZE
+ return i + int64(r.Siz)
+}
+
+func addaddrplus4(ctxt *Link, s *LSym, t *LSym, add int64) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SDATA
+ }
+ s.Reachable = true
+ i := s.Size
+ s.Size += 4
+ Symgrow(ctxt, s, s.Size)
+ r := Addrel(s)
+ r.Sym = t
+ r.Off = int32(i)
+ r.Siz = 4
+ r.Type = obj.R_ADDR
+ r.Add = add
+ return i + int64(r.Siz)
+}
+
+/*
+ * divide-and-conquer list-link
+ * sort of LSym* structures.
+ * Used for the data block.
+ */
+func datcmp(s1 *LSym, s2 *LSym) int {
+ if s1.Type != s2.Type {
+ return int(s1.Type) - int(s2.Type)
+ }
+
+ // For ppc64, we want to interleave the .got and .toc sections
+ // from input files. Both are type SELFGOT, so in that case
+ // fall through to the name comparison (conveniently, .got
+ // sorts before .toc).
+ if s1.Type != obj.SELFGOT && s1.Size != s2.Size {
+ if s1.Size < s2.Size {
+ return -1
+ }
+ return +1
+ }
+
+ return stringsCompare(s1.Name, s2.Name)
+}
+
+func listnextp(s *LSym) **LSym {
+ return &s.Next
+}
+
+func listsubp(s *LSym) **LSym {
+ return &s.Sub
+}
+
+func listsort(l *LSym, cmp func(*LSym, *LSym) int, nextp func(*LSym) **LSym) *LSym {
+ if l == nil || *nextp(l) == nil {
+ return l
+ }
+
+ l1 := l
+ l2 := l
+ for {
+ l2 = *nextp(l2)
+ if l2 == nil {
+ break
+ }
+ l2 = *nextp(l2)
+ if l2 == nil {
+ break
+ }
+ l1 = *nextp(l1)
+ }
+
+ l2 = *nextp(l1)
+ *nextp(l1) = nil
+ l1 = listsort(l, cmp, nextp)
+ l2 = listsort(l2, cmp, nextp)
+
+ /* set up lead element */
+ if cmp(l1, l2) < 0 {
+ l = l1
+ l1 = *nextp(l1)
+ } else {
+ l = l2
+ l2 = *nextp(l2)
+ }
+
+ le := l
+
+ for {
+ if l1 == nil {
+ for l2 != nil {
+ *nextp(le) = l2
+ le = l2
+ l2 = *nextp(l2)
+ }
+
+ *nextp(le) = nil
+ break
+ }
+
+ if l2 == nil {
+ for l1 != nil {
+ *nextp(le) = l1
+ le = l1
+ l1 = *nextp(l1)
+ }
+
+ break
+ }
+
+ if cmp(l1, l2) < 0 {
+ *nextp(le) = l1
+ le = l1
+ l1 = *nextp(l1)
+ } else {
+ *nextp(le) = l2
+ le = l2
+ l2 = *nextp(l2)
+ }
+ }
+
+ *nextp(le) = nil
+ return l
+}
+
+func relocsym(s *LSym) {
+ var r *Reloc
+ var rs *LSym
+ var i16 int16
+ var off int32
+ var siz int32
+ var fl int32
+ var o int64
+
+ Ctxt.Cursym = s
+ for ri := int32(0); ri < int32(len(s.R)); ri++ {
+ r = &s.R[ri]
+ r.Done = 1
+ off = r.Off
+ siz = int32(r.Siz)
+ if off < 0 || off+siz > int32(len(s.P)) {
+ Diag("%s: invalid relocation %d+%d not in [%d,%d)", s.Name, off, siz, 0, len(s.P))
+ continue
+ }
+
+ if r.Sym != nil && (r.Sym.Type&(obj.SMASK|obj.SHIDDEN) == 0 || r.Sym.Type&obj.SMASK == obj.SXREF) {
+ // When putting the runtime but not main into a shared library
+ // these symbols are undefined and that's OK.
+ if Buildmode == BuildmodeShared && (r.Sym.Name == "main.main" || r.Sym.Name == "main.init") {
+ r.Sym.Type = obj.SDYNIMPORT
+ } else {
+ Diag("%s: not defined", r.Sym.Name)
+ continue
+ }
+ }
+
+ if r.Type >= 256 {
+ continue
+ }
+ if r.Siz == 0 { // informational relocation - no work to do
+ continue
+ }
+
+ // We need to be able to reference dynimport symbols when linking against
+ // shared libraries, and Solaris needs it always
+ if HEADTYPE != obj.Hsolaris && r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT && !DynlinkingGo() {
+ if !(Thearch.Thechar == '9' && Linkmode == LinkExternal && r.Sym.Name == ".TOC.") {
+ Diag("unhandled relocation for %s (type %d rtype %d)", r.Sym.Name, r.Sym.Type, r.Type)
+ }
+ }
+ if r.Sym != nil && r.Sym.Type != obj.STLSBSS && !r.Sym.Reachable {
+ Diag("unreachable sym in relocation: %s %s", s.Name, r.Sym.Name)
+ }
+
+ switch r.Type {
+ default:
+ switch siz {
+ default:
+ Diag("bad reloc size %#x for %s", uint32(siz), r.Sym.Name)
+ case 1:
+ o = int64(s.P[off])
+ case 2:
+ o = int64(Ctxt.Arch.ByteOrder.Uint16(s.P[off:]))
+ case 4:
+ o = int64(Ctxt.Arch.ByteOrder.Uint32(s.P[off:]))
+ case 8:
+ o = int64(Ctxt.Arch.ByteOrder.Uint64(s.P[off:]))
+ }
+ if Thearch.Archreloc(r, s, &o) < 0 {
+ Diag("unknown reloc %d", r.Type)
+ }
+
+ case obj.R_TLS_LE:
+ isAndroidX86 := goos == "android" && (Thearch.Thechar == '6' || Thearch.Thechar == '8')
+
+ if Linkmode == LinkExternal && Iself && HEADTYPE != obj.Hopenbsd && !isAndroidX86 {
+ r.Done = 0
+ if r.Sym == nil {
+ r.Sym = Ctxt.Tlsg
+ }
+ r.Xsym = r.Sym
+ r.Xadd = r.Add
+ o = 0
+ if Thearch.Thechar != '6' {
+ o = r.Add
+ }
+ break
+ }
+
+ if Iself && Thearch.Thechar == '5' {
+ // On ELF ARM, the thread pointer is 8 bytes before
+ // the start of the thread-local data block, so add 8
+ // to the actual TLS offset (r->sym->value).
+ // This 8 seems to be a fundamental constant of
+ // ELF on ARM (or maybe Glibc on ARM); it is not
+ // related to the fact that our own TLS storage happens
+ // to take up 8 bytes.
+ o = 8 + r.Sym.Value
+ } else if Iself || Ctxt.Headtype == obj.Hplan9 || Ctxt.Headtype == obj.Hdarwin || isAndroidX86 {
+ o = int64(Ctxt.Tlsoffset) + r.Add
+ } else if Ctxt.Headtype == obj.Hwindows {
+ o = r.Add
+ } else {
+ log.Fatalf("unexpected R_TLS_LE relocation for %s", Headstr(Ctxt.Headtype))
+ }
+
+ case obj.R_TLS_IE:
+ isAndroidX86 := goos == "android" && (Thearch.Thechar == '6' || Thearch.Thechar == '8')
+
+ if Linkmode == LinkExternal && Iself && HEADTYPE != obj.Hopenbsd && !isAndroidX86 {
+ r.Done = 0
+ if r.Sym == nil {
+ r.Sym = Ctxt.Tlsg
+ }
+ r.Xsym = r.Sym
+ r.Xadd = r.Add
+ o = 0
+ if Thearch.Thechar != '6' {
+ o = r.Add
+ }
+ break
+ }
+ log.Fatalf("cannot handle R_TLS_IE when linking internally")
+
+ case obj.R_ADDR:
+ if Linkmode == LinkExternal && r.Sym.Type != obj.SCONST {
+ r.Done = 0
+
+ // set up addend for eventual relocation via outer symbol.
+ rs = r.Sym
+
+ r.Xadd = r.Add
+ for rs.Outer != nil {
+ r.Xadd += Symaddr(rs) - Symaddr(rs.Outer)
+ rs = rs.Outer
+ }
+
+ if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
+ Diag("missing section for %s", rs.Name)
+ }
+ r.Xsym = rs
+
+ o = r.Xadd
+ if Iself {
+ if Thearch.Thechar == '6' {
+ o = 0
+ }
+ } else if HEADTYPE == obj.Hdarwin {
+ // ld64 for arm64 has a bug where if the address pointed to by o exists in the
+ // symbol table (dynid >= 0), or is inside a symbol that exists in the symbol
+ // table, then it will add o twice into the relocated value.
+ // The workaround is that on arm64 don't ever add symaddr to o and always use
+ // extern relocation by requiring rs->dynid >= 0.
+ if rs.Type != obj.SHOSTOBJ {
+ if Thearch.Thechar == '7' && rs.Dynid < 0 {
+ Diag("R_ADDR reloc to %s+%d is not supported on darwin/arm64", rs.Name, o)
+ }
+ if Thearch.Thechar != '7' {
+ o += Symaddr(rs)
+ }
+ }
+ } else if HEADTYPE == obj.Hwindows {
+ // nothing to do
+ } else {
+ Diag("unhandled pcrel relocation for %s", headstring)
+ }
+
+ break
+ }
+
+ o = Symaddr(r.Sym) + r.Add
+
+ // On amd64, 4-byte offsets will be sign-extended, so it is impossible to
+ // access more than 2GB of static data; fail at link time is better than
+ // fail at runtime. See https://golang.org/issue/7980.
+ // Instead of special casing only amd64, we treat this as an error on all
+ // 64-bit architectures so as to be future-proof.
+ if int32(o) < 0 && Thearch.Ptrsize > 4 && siz == 4 {
+ Diag("non-pc-relative relocation address is too big: %#x (%#x + %#x)", uint64(o), Symaddr(r.Sym), r.Add)
+ errorexit()
+ }
+
+ // r->sym can be null when CALL $(constant) is transformed from absolute PC to relative PC call.
+ case obj.R_CALL, obj.R_GOTPCREL, obj.R_PCREL:
+ if Linkmode == LinkExternal && r.Sym != nil && r.Sym.Type != obj.SCONST && (r.Sym.Sect != Ctxt.Cursym.Sect || r.Type == obj.R_GOTPCREL) {
+ r.Done = 0
+
+ // set up addend for eventual relocation via outer symbol.
+ rs = r.Sym
+
+ r.Xadd = r.Add
+ for rs.Outer != nil {
+ r.Xadd += Symaddr(rs) - Symaddr(rs.Outer)
+ rs = rs.Outer
+ }
+
+ r.Xadd -= int64(r.Siz) // relative to address after the relocated chunk
+ if rs.Type != obj.SHOSTOBJ && rs.Type != obj.SDYNIMPORT && rs.Sect == nil {
+ Diag("missing section for %s", rs.Name)
+ }
+ r.Xsym = rs
+
+ o = r.Xadd
+ if Iself {
+ if Thearch.Thechar == '6' {
+ o = 0
+ }
+ } else if HEADTYPE == obj.Hdarwin {
+ if r.Type == obj.R_CALL {
+ if rs.Type != obj.SHOSTOBJ {
+ o += int64(uint64(Symaddr(rs)) - rs.Sect.Vaddr)
+ }
+ o -= int64(r.Off) // relative to section offset, not symbol
+ } else {
+ o += int64(r.Siz)
+ }
+ } else if HEADTYPE == obj.Hwindows && Thearch.Thechar == '6' { // only amd64 needs PCREL
+ // PE/COFF's PC32 relocation uses the address after the relocated
+ // bytes as the base. Compensate by skewing the addend.
+ o += int64(r.Siz)
+ // GNU ld always add VirtualAddress of the .text section to the
+ // relocated address, compensate that.
+ o -= int64(s.Sect.Vaddr - PEBASE)
+ } else {
+ Diag("unhandled pcrel relocation for %s", headstring)
+ }
+
+ break
+ }
+
+ o = 0
+ if r.Sym != nil {
+ o += Symaddr(r.Sym)
+ }
+
+ // NOTE: The (int32) cast on the next line works around a bug in Plan 9's 8c
+ // compiler. The expression s->value + r->off + r->siz is int32 + int32 +
+ // uchar, and Plan 9 8c incorrectly treats the expression as type uint32
+ // instead of int32, causing incorrect values when sign extended for adding
+ // to o. The bug only occurs on Plan 9, because this C program is compiled by
+ // the standard host compiler (gcc on most other systems).
+ o += r.Add - (s.Value + int64(r.Off) + int64(int32(r.Siz)))
+
+ case obj.R_SIZE:
+ o = r.Sym.Size + r.Add
+ }
+
+ if r.Variant != RV_NONE {
+ o = Thearch.Archrelocvariant(r, s, o)
+ }
+
+ if false {
+ nam := ""
+ if r.Sym != nil {
+ nam = r.Sym.Name
+ }
+ fmt.Printf("relocate %s %#x (%#x+%#x, size %d) => %s %#x +%#x [type %d/%d, %x]\n", s.Name, s.Value+int64(off), s.Value, r.Off, r.Siz, nam, Symaddr(r.Sym), r.Add, r.Type, r.Variant, o)
+ }
+ switch siz {
+ default:
+ Ctxt.Cursym = s
+ Diag("bad reloc size %#x for %s", uint32(siz), r.Sym.Name)
+ fallthrough
+
+ // TODO(rsc): Remove.
+ case 1:
+ s.P[off] = byte(int8(o))
+
+ case 2:
+ if o != int64(int16(o)) {
+ Diag("relocation address is too big: %#x", o)
+ }
+ i16 = int16(o)
+ Ctxt.Arch.ByteOrder.PutUint16(s.P[off:], uint16(i16))
+
+ case 4:
+ if r.Type == obj.R_PCREL || r.Type == obj.R_CALL {
+ if o != int64(int32(o)) {
+ Diag("pc-relative relocation address is too big: %#x", o)
+ }
+ } else {
+ if o != int64(int32(o)) && o != int64(uint32(o)) {
+ Diag("non-pc-relative relocation address is too big: %#x", uint64(o))
+ }
+ }
+
+ fl = int32(o)
+ Ctxt.Arch.ByteOrder.PutUint32(s.P[off:], uint32(fl))
+
+ case 8:
+ Ctxt.Arch.ByteOrder.PutUint64(s.P[off:], uint64(o))
+ }
+ }
+}
+
+func reloc() {
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f reloc\n", obj.Cputime())
+ }
+ Bso.Flush()
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ relocsym(s)
+ }
+ for s := datap; s != nil; s = s.Next {
+ relocsym(s)
+ }
+}
+
// dynrelocsym records the dynamic relocations required by the
// relocations of s. On internally linked Windows binaries it instead
// builds a table of JMP thunks (the ".rel" symbol) for DLL-imported
// functions; on all other targets it defers to the architecture's
// Adddynrel hook.
func dynrelocsym(s *LSym) {
	if HEADTYPE == obj.Hwindows && Linkmode != LinkExternal {
		rel := Linklookup(Ctxt, ".rel", 0)
		if s == rel {
			// Do not process the thunk table itself.
			return
		}
		var r *Reloc
		var targ *LSym
		for ri := 0; ri < len(s.R); ri++ {
			r = &s.R[ri]
			targ = r.Sym
			if targ == nil {
				continue
			}
			if !targ.Reachable {
				Diag("internal inconsistency: dynamic symbol %s is not reachable.", targ.Name)
			}
			if r.Sym.Plt == -2 && r.Sym.Got != -2 { // make dynimport JMP table for PE object files.
				// First reference to this import: append a thunk to rel
				// and point the relocation at it.
				targ.Plt = int32(rel.Size)
				r.Sym = rel
				r.Add = int64(targ.Plt)

				// jmp *addr
				if Thearch.Thechar == '8' {
					// 386: ff 25 <addr32>, padded with two nops.
					Adduint8(Ctxt, rel, 0xff)
					Adduint8(Ctxt, rel, 0x25)
					Addaddr(Ctxt, rel, targ)
					Adduint8(Ctxt, rel, 0x90)
					Adduint8(Ctxt, rel, 0x90)
				} else {
					// amd64: ff 24 25 <addr32>, padded with one nop.
					Adduint8(Ctxt, rel, 0xff)
					Adduint8(Ctxt, rel, 0x24)
					Adduint8(Ctxt, rel, 0x25)
					addaddrplus4(Ctxt, rel, targ, 0)
					Adduint8(Ctxt, rel, 0x90)
				}
			} else if r.Sym.Plt >= 0 {
				// Thunk already exists; just redirect the relocation at it.
				r.Sym = rel
				r.Add = int64(targ.Plt)
			}
		}

		return
	}

	var r *Reloc
	for ri := 0; ri < len(s.R); ri++ {
		r = &s.R[ri]
		// Relocations against dynamically imported symbols, or raw
		// ELF relocation types (numbered >= 256), need dynamic-linker
		// bookkeeping recorded by the architecture backend.
		if r.Sym != nil && r.Sym.Type == obj.SDYNIMPORT || r.Type >= 256 {
			if r.Sym != nil && !r.Sym.Reachable {
				Diag("internal inconsistency: dynamic symbol %s is not reachable.", r.Sym.Name)
			}
			Thearch.Adddynrel(s, r)
		}
	}
}
+
+func dynreloc() {
+ // -d suppresses dynamic loader format, so we may as well not
+ // compute these sections or mark their symbols as reachable.
+ if Debug['d'] != 0 && HEADTYPE != obj.Hwindows {
+ return
+ }
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f reloc\n", obj.Cputime())
+ }
+ Bso.Flush()
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ dynrelocsym(s)
+ }
+ for s := datap; s != nil; s = s.Next {
+ dynrelocsym(s)
+ }
+ if Iself {
+ elfdynhash()
+ }
+}
+
// blk writes the contents of the symbols in the list starting at start
// that fall inside the address window [addr, addr+size) to the output,
// zero-filling any gaps between and after them. Sub-symbols (SSUB) are
// emitted as part of their outer symbol and skipped here.
func blk(start *LSym, addr int64, size int64) {
	var sym *LSym

	// Skip symbols that end before the window starts.
	for sym = start; sym != nil; sym = sym.Next {
		if sym.Type&obj.SSUB == 0 && sym.Value >= addr {
			break
		}
	}

	eaddr := addr + size
	var ep []byte
	var p []byte
	for ; sym != nil; sym = sym.Next {
		if sym.Type&obj.SSUB != 0 {
			continue
		}
		if sym.Value >= eaddr {
			break
		}
		Ctxt.Cursym = sym
		// Symbols must be sorted by address; a symbol behind the
		// current write position means the layout phase went wrong.
		if sym.Value < addr {
			Diag("phase error: addr=%#x but sym=%#x type=%d", int64(addr), int64(sym.Value), sym.Type)
			errorexit()
		}

		// Zero-fill the gap up to the symbol's address.
		for ; addr < sym.Value; addr++ {
			Cput(0)
		}
		p = sym.P
		ep = p[len(sym.P):]
		// NOTE: the -cap comparison is a mechanical translation of the
		// original C pointer loop (p < ep); it copies sym.P one byte at
		// a time until p reaches the end of the slice.
		for -cap(p) < -cap(ep) {
			Cput(uint8(p[0]))
			p = p[1:]
		}
		addr += int64(len(sym.P))
		// Pad out to the symbol's declared size (len(sym.P) <= sym.Size).
		for ; addr < sym.Value+sym.Size; addr++ {
			Cput(0)
		}
		if addr != sym.Value+sym.Size {
			Diag("phase error: addr=%#x value+size=%#x", int64(addr), int64(sym.Value)+sym.Size)
			errorexit()
		}

		if sym.Value+sym.Size >= eaddr {
			break
		}
	}

	// Zero-fill to the end of the window.
	for ; addr < eaddr; addr++ {
		Cput(0)
	}
	Cflush()
}
+
// Codeblk writes the text segment bytes for [addr, addr+size) to the
// output, and, under -a, also prints a human-readable hex dump of the
// same range to Bso.
func Codeblk(addr int64, size int64) {
	if Debug['a'] != 0 {
		fmt.Fprintf(&Bso, "codeblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
	}

	blk(Ctxt.Textp, addr, size)

	/* again for printing */
	if Debug['a'] == 0 {
		return
	}

	// Skip symbols that end before the window starts.
	var sym *LSym
	for sym = Ctxt.Textp; sym != nil; sym = sym.Next {
		if !sym.Reachable {
			continue
		}
		if sym.Value >= addr {
			break
		}
	}

	eaddr := addr + size
	var q []byte
	for ; sym != nil; sym = sym.Next {
		if !sym.Reachable {
			continue
		}
		if sym.Value >= eaddr {
			break
		}

		// Print any zero-filled gap before the symbol.
		if addr < sym.Value {
			fmt.Fprintf(&Bso, "%-20s %.8x|", "_", uint64(int64(addr)))
			for ; addr < sym.Value; addr++ {
				fmt.Fprintf(&Bso, " %.2x", 0)
			}
			fmt.Fprintf(&Bso, "\n")
		}

		fmt.Fprintf(&Bso, "%.6x\t%-20s\n", uint64(int64(addr)), sym.Name)
		q = sym.P

		// Dump the symbol's bytes, 16 per line.
		for len(q) >= 16 {
			fmt.Fprintf(&Bso, "%.6x\t% x\n", uint64(addr), q[:16])
			addr += 16
			q = q[16:]
		}

		if len(q) > 0 {
			fmt.Fprintf(&Bso, "%.6x\t% x\n", uint64(addr), q)
			addr += int64(len(q))
		}
	}

	// Print the zero fill trailing the last symbol, if any.
	if addr < eaddr {
		fmt.Fprintf(&Bso, "%-20s %.8x|", "_", uint64(int64(addr)))
		for ; addr < eaddr; addr++ {
			fmt.Fprintf(&Bso, " %.2x", 0)
		}
	}

	Bso.Flush()
}
+
+func Datblk(addr int64, size int64) {
+ if Debug['a'] != 0 {
+ fmt.Fprintf(&Bso, "datblk [%#x,%#x) at offset %#x\n", addr, addr+size, Cpos())
+ }
+
+ blk(datap, addr, size)
+
+ /* again for printing */
+ if Debug['a'] == 0 {
+ return
+ }
+
+ var sym *LSym
+ for sym = datap; sym != nil; sym = sym.Next {
+ if sym.Value >= addr {
+ break
+ }
+ }
+
+ eaddr := addr + size
+ var ep []byte
+ var i int64
+ var p []byte
+ var r *Reloc
+ var rsname string
+ var typ string
+ for ; sym != nil; sym = sym.Next {
+ if sym.Value >= eaddr {
+ break
+ }
+ if addr < sym.Value {
+ fmt.Fprintf(&Bso, "\t%.8x| 00 ...\n", uint64(addr))
+ addr = sym.Value
+ }
+
+ fmt.Fprintf(&Bso, "%s\n\t%.8x|", sym.Name, uint(addr))
+ p = sym.P
+ ep = p[len(sym.P):]
+ for -cap(p) < -cap(ep) {
+ if -cap(p) > -cap(sym.P) && int(-cap(p)+cap(sym.P))%16 == 0 {
+ fmt.Fprintf(&Bso, "\n\t%.8x|", uint(addr+int64(-cap(p)+cap(sym.P))))
+ }
+ fmt.Fprintf(&Bso, " %.2x", p[0])
+ p = p[1:]
+ }
+
+ addr += int64(len(sym.P))
+ for ; addr < sym.Value+sym.Size; addr++ {
+ fmt.Fprintf(&Bso, " %.2x", 0)
+ }
+ fmt.Fprintf(&Bso, "\n")
+
+ if Linkmode == LinkExternal {
+ for i = 0; i < int64(len(sym.R)); i++ {
+ r = &sym.R[i]
+ rsname = ""
+ if r.Sym != nil {
+ rsname = r.Sym.Name
+ }
+ typ = "?"
+ switch r.Type {
+ case obj.R_ADDR:
+ typ = "addr"
+
+ case obj.R_PCREL:
+ typ = "pcrel"
+
+ case obj.R_CALL:
+ typ = "call"
+ }
+
+ fmt.Fprintf(&Bso, "\treloc %.8x/%d %s %s+%#x [%#x]\n", uint(sym.Value+int64(r.Off)), r.Siz, typ, rsname, int64(r.Add), int64(r.Sym.Value+r.Add))
+ }
+ }
+ }
+
+ if addr < eaddr {
+ fmt.Fprintf(&Bso, "\t%.8x| 00 ...\n", uint(addr))
+ }
+ fmt.Fprintf(&Bso, "\t%.8x|\n", uint(eaddr))
+}
+
+func strnput(s string, n int) {
+ for ; n > 0 && s != ""; s = s[1:] {
+ Cput(uint8(s[0]))
+ n--
+ }
+
+ for n > 0 {
+ Cput(0)
+ n--
+ }
+}
+
+var strdata []*LSym
+
+func addstrdata1(arg string) {
+ i := strings.Index(arg, "=")
+ if i < 0 {
+ Exitf("-X flag requires argument of the form importpath.name=value")
+ }
+ addstrdata(arg[:i], arg[i+1:])
+}
+
+func addstrdata(name string, value string) {
+ p := fmt.Sprintf("%s.str", name)
+ sp := Linklookup(Ctxt, p, 0)
+
+ Addstring(sp, value)
+ sp.Type = obj.SRODATA
+
+ s := Linklookup(Ctxt, name, 0)
+ s.Size = 0
+ s.Dupok = 1
+ reachable := s.Reachable
+ Addaddr(Ctxt, s, sp)
+ adduintxx(Ctxt, s, uint64(len(value)), Thearch.Ptrsize)
+
+ // addstring, addaddr, etc., mark the symbols as reachable.
+ // In this case that is not necessarily true, so stick to what
+ // we know before entering this function.
+ s.Reachable = reachable
+
+ strdata = append(strdata, s)
+
+ sp.Reachable = reachable
+}
+
+func checkstrdata() {
+ for _, s := range strdata {
+ if s.Type == obj.STEXT {
+ Diag("cannot use -X with text symbol %s", s.Name)
+ } else if s.Gotype != nil && s.Gotype.Name != "type.string" {
+ Diag("cannot use -X with non-string symbol %s", s.Name)
+ }
+ }
+}
+
+func Addstring(s *LSym, str string) int64 {
+ if s.Type == 0 {
+ s.Type = obj.SNOPTRDATA
+ }
+ s.Reachable = true
+ r := int32(s.Size)
+ n := len(str) + 1
+ if s.Name == ".shstrtab" {
+ elfsetstring(str, int(r))
+ }
+ Symgrow(Ctxt, s, int64(r)+int64(n))
+ copy(s.P[r:], str)
+ s.P[int(r)+len(str)] = 0
+ s.Size += int64(n)
+ return int64(r)
+}
+
+// addgostring adds str, as a Go string value, to s. symname is the name of the
+// symbol used to define the string data and must be unique per linked object.
+func addgostring(s *LSym, symname, str string) {
+ sym := Linklookup(Ctxt, symname, 0)
+ if sym.Type != obj.Sxxx {
+ Diag("duplicate symname in addgostring: %s", symname)
+ }
+ sym.Reachable = true
+ sym.Local = true
+ sym.Type = obj.SRODATA
+ sym.Size = int64(len(str))
+ sym.P = []byte(str)
+ Addaddr(Ctxt, s, sym)
+ adduint(Ctxt, s, uint64(len(str)))
+}
+
+func addinitarrdata(s *LSym) {
+ p := s.Name + ".ptr"
+ sp := Linklookup(Ctxt, p, 0)
+ sp.Type = obj.SINITARR
+ sp.Size = 0
+ sp.Dupok = 1
+ Addaddr(Ctxt, sp, s)
+}
+
+func dosymtype() {
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if len(s.P) > 0 {
+ if s.Type == obj.SBSS {
+ s.Type = obj.SDATA
+ }
+ if s.Type == obj.SNOPTRBSS {
+ s.Type = obj.SNOPTRDATA
+ }
+ }
+ // Create a new entry in the .init_array section that points to the
+ // library initializer function.
+ switch Buildmode {
+ case BuildmodeCArchive, BuildmodeCShared:
+ if s.Name == INITENTRY {
+ addinitarrdata(s)
+ }
+ }
+ }
+}
+
+func symalign(s *LSym) int32 {
+ if s.Align != 0 {
+ return s.Align
+ }
+
+ align := int32(Thearch.Maxalign)
+ for int64(align) > s.Size && align > 1 {
+ align >>= 1
+ }
+ if align < s.Align {
+ align = s.Align
+ }
+ return align
+}
+
// aligndatsize rounds datsize up to the alignment required by s.
func aligndatsize(datsize int64, s *LSym) int64 {
	return Rnd(datsize, int64(symalign(s)))
}
+
+// maxalign returns the maximum required alignment for
+// the list of symbols s; the list stops when s->type exceeds type.
+func maxalign(s *LSym, type_ int) int32 {
+ var align int32
+
+ max := int32(0)
+ for ; s != nil && int(s.Type) <= type_; s = s.Next {
+ align = symalign(s)
+ if max < align {
+ max = align
+ }
+ }
+
+ return max
+}
+
// debugGCProg enables verbose tracing of GC program generation to stderr.
const debugGCProg = false

// GCProg accumulates a garbage-collection pointer program into a linker
// symbol via a gcprog.Writer.
type GCProg struct {
	sym *LSym // destination symbol receiving the encoded program
	w   gcprog.Writer
}
+
+func (p *GCProg) Init(name string) {
+ p.sym = Linklookup(Ctxt, name, 0)
+ p.w.Init(p.writeByte)
+ if debugGCProg {
+ fmt.Fprintf(os.Stderr, "ld: start GCProg %s\n", name)
+ p.w.Debug(os.Stderr)
+ }
+}
+
// writeByte appends one byte of the encoded GC program to the symbol.
func (p *GCProg) writeByte(x byte) {
	Adduint8(Ctxt, p.sym, x)
}
+
+func (p *GCProg) End(size int64) {
+ p.w.ZeroUntil(size / int64(Thearch.Ptrsize))
+ p.w.End()
+ if debugGCProg {
+ fmt.Fprintf(os.Stderr, "ld: end GCProg\n")
+ }
+}
+
// AddSym records the pointer locations of the data symbol s in the GC
// program, using either the symbol's type's pointer mask or, for large
// types, its pre-encoded GC program.
func (p *GCProg) AddSym(s *LSym) {
	typ := s.Gotype
	// Things without pointers should be in SNOPTRDATA or SNOPTRBSS;
	// everything we see should have pointers and should therefore have a type.
	if typ == nil {
		Diag("missing Go type information for global symbol: %s size %d", s.Name, int(s.Size))
		return
	}

	ptrsize := int64(Thearch.Ptrsize)
	// Number of pointer words covered by the type's pointer data.
	nptr := decodetype_ptrdata(typ) / ptrsize

	if debugGCProg {
		fmt.Fprintf(os.Stderr, "gcprog sym: %s at %d (ptr=%d+%d)\n", s.Name, s.Value, s.Value/ptrsize, nptr)
	}

	if decodetype_usegcprog(typ) == 0 {
		// Copy pointers from mask into program.
		mask := decodetype_gcmask(typ)
		for i := int64(0); i < nptr; i++ {
			if (mask[i/8]>>uint(i%8))&1 != 0 {
				p.w.Ptr(s.Value/ptrsize + i)
			}
		}
		return
	}

	// Copy program.
	// The prog[4:] slicing skips what appears to be a 4-byte header on
	// the encoded program — TODO(review): confirm against decodetype_gcprog.
	prog := decodetype_gcprog(typ)
	p.w.ZeroUntil(s.Value / ptrsize)
	p.w.Append(prog[4:], nptr)
}
+
+func growdatsize(datsizep *int64, s *LSym) {
+ datsize := *datsizep
+ const cutoff int64 = 2e9 // 2 GB (or so; looks better in errors than 2^31)
+ switch {
+ case s.Size < 0:
+ Diag("%s: negative size (%d bytes)", s.Name, s.Size)
+ case s.Size > cutoff:
+ Diag("%s: symbol too large (%d bytes)", s.Name, s.Size)
+ case datsize <= cutoff && datsize+s.Size > cutoff:
+ Diag("%s: too much data (over %d bytes)", s.Name, cutoff)
+ }
+ *datsizep = datsize + s.Size
+}
+
+func dodata() {
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f dodata\n", obj.Cputime())
+ }
+ Bso.Flush()
+
+ var last *LSym
+ datap = nil
+
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if !s.Reachable || s.Special != 0 {
+ continue
+ }
+ if obj.STEXT < s.Type && s.Type < obj.SXREF {
+ if s.Onlist != 0 {
+ log.Fatalf("symbol %s listed multiple times", s.Name)
+ }
+ s.Onlist = 1
+ if last == nil {
+ datap = s
+ } else {
+ last.Next = s
+ }
+ s.Next = nil
+ last = s
+ }
+ }
+
+ for s := datap; s != nil; s = s.Next {
+ if int64(len(s.P)) > s.Size {
+ Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
+ }
+ }
+
+ /*
+ * now that we have the datap list, but before we start
+ * to assign addresses, record all the necessary
+ * dynamic relocations. these will grow the relocation
+ * symbol, which is itself data.
+ *
+ * on darwin, we need the symbol table numbers for dynreloc.
+ */
+ if HEADTYPE == obj.Hdarwin {
+ machosymorder()
+ }
+ dynreloc()
+
+ /* some symbols may no longer belong in datap (Mach-O) */
+ var l **LSym
+ var s *LSym
+ for l = &datap; ; {
+ s = *l
+ if s == nil {
+ break
+ }
+
+ if s.Type <= obj.STEXT || obj.SXREF <= s.Type {
+ *l = s.Next
+ } else {
+ l = &s.Next
+ }
+ }
+
+ *l = nil
+
+ if UseRelro() {
+ // "read only" data with relocations needs to go in its own section
+ // when building a shared library. We do this by boosting objects of
+ // type SXXX with relocations to type SXXXRELRO.
+ for s := datap; s != nil; s = s.Next {
+ if (s.Type >= obj.STYPE && s.Type <= obj.SFUNCTAB && len(s.R) > 0) || s.Type == obj.SGOSTRING {
+ s.Type += (obj.STYPERELRO - obj.STYPE)
+ if s.Outer != nil {
+ s.Outer.Type = s.Type
+ }
+ }
+ }
+ // Check that we haven't made two symbols with the same .Outer into
+ // different types (because references two symbols with non-nil Outer
+ // become references to the outer symbol + offset it's vital that the
+ // symbol and the outer end up in the same section).
+ for s := datap; s != nil; s = s.Next {
+ if s.Outer != nil && s.Outer.Type != s.Type {
+ Diag("inconsistent types for %s and its Outer %s (%d != %d)",
+ s.Name, s.Outer.Name, s.Type, s.Outer.Type)
+ }
+ }
+
+ }
+
+ datap = listsort(datap, datcmp, listnextp)
+
+ if Iself {
+ // Make .rela and .rela.plt contiguous, the ELF ABI requires this
+ // and Solaris actually cares.
+ var relplt *LSym
+ for l = &datap; *l != nil; l = &(*l).Next {
+ if (*l).Name == ".rel.plt" || (*l).Name == ".rela.plt" {
+ relplt = (*l)
+ *l = (*l).Next
+ break
+ }
+ }
+ if relplt != nil {
+ for s = datap; s != nil; s = s.Next {
+ if s.Name == ".rel" || s.Name == ".rela" {
+ relplt.Next = s.Next
+ s.Next = relplt
+ }
+ }
+ }
+ }
+
+ /*
+ * allocate sections. list is sorted by type,
+ * so we can just walk it for each piece we want to emit.
+ * segdata is processed before segtext, because we need
+ * to see all symbols in the .data and .bss sections in order
+ * to generate garbage collection information.
+ */
+
+ /* begin segdata */
+
+ /* skip symbols belonging to segtext */
+ s = datap
+
+ for ; s != nil && s.Type < obj.SELFSECT; s = s.Next {
+ }
+
+ /* writable ELF sections */
+ datsize := int64(0)
+
+ var sect *Section
+ for ; s != nil && s.Type < obj.SELFGOT; s = s.Next {
+ sect = addsection(&Segdata, s.Name, 06)
+ sect.Align = symalign(s)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ s.Sect = sect
+ s.Type = obj.SDATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ sect.Length = uint64(datsize) - sect.Vaddr
+ }
+
+ /* .got (and .toc on ppc64) */
+ if s.Type == obj.SELFGOT {
+ sect := addsection(&Segdata, ".got", 06)
+ sect.Align = maxalign(s, obj.SELFGOT)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ var toc *LSym
+ for ; s != nil && s.Type == obj.SELFGOT; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SDATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+
+ // Resolve .TOC. symbol for this object file (ppc64)
+ toc = Linkrlookup(Ctxt, ".TOC.", int(s.Version))
+
+ if toc != nil {
+ toc.Sect = sect
+ toc.Outer = s
+ toc.Sub = s.Sub
+ s.Sub = toc
+
+ toc.Value = 0x8000
+ }
+
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+ }
+
+ /* pointer-free data */
+ sect = addsection(&Segdata, ".noptrdata", 06)
+
+ sect.Align = maxalign(s, obj.SINITARR-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.noptrdata", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.enoptrdata", 0).Sect = sect
+ for ; s != nil && s.Type < obj.SINITARR; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SDATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ hasinitarr := Linkshared
+
+ /* shared library initializer */
+ switch Buildmode {
+ case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared:
+ hasinitarr = true
+ }
+
+ if hasinitarr {
+ sect := addsection(&Segdata, ".init_array", 06)
+ sect.Align = maxalign(s, obj.SINITARR)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ for ; s != nil && s.Type == obj.SINITARR; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+ }
+
+ /* data */
+ sect = addsection(&Segdata, ".data", 06)
+ sect.Align = maxalign(s, obj.SBSS-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.data", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.edata", 0).Sect = sect
+ var gc GCProg
+ gc.Init("runtime.gcdata")
+ for ; s != nil && s.Type < obj.SBSS; s = s.Next {
+ if s.Type == obj.SINITARR {
+ Ctxt.Cursym = s
+ Diag("unexpected symbol type %d", s.Type)
+ }
+
+ s.Sect = sect
+ s.Type = obj.SDATA
+ datsize = aligndatsize(datsize, s)
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ gc.AddSym(s)
+ growdatsize(&datsize, s)
+ }
+ sect.Length = uint64(datsize) - sect.Vaddr
+ gc.End(int64(sect.Length))
+
+ /* bss */
+ sect = addsection(&Segdata, ".bss", 06)
+ sect.Align = maxalign(s, obj.SNOPTRBSS-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.bss", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.ebss", 0).Sect = sect
+ gc = GCProg{}
+ gc.Init("runtime.gcbss")
+ for ; s != nil && s.Type < obj.SNOPTRBSS; s = s.Next {
+ s.Sect = sect
+ datsize = aligndatsize(datsize, s)
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ gc.AddSym(s)
+ growdatsize(&datsize, s)
+ }
+ sect.Length = uint64(datsize) - sect.Vaddr
+ gc.End(int64(sect.Length))
+
+ /* pointer-free bss */
+ sect = addsection(&Segdata, ".noptrbss", 06)
+
+ sect.Align = maxalign(s, obj.SNOPTRBSS)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.noptrbss", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.enoptrbss", 0).Sect = sect
+ for ; s != nil && s.Type == obj.SNOPTRBSS; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+ Linklookup(Ctxt, "runtime.end", 0).Sect = sect
+
+ // 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits.
+ if datsize != int64(uint32(datsize)) {
+ Diag("data or bss segment too large")
+ }
+
+ if s != nil && s.Type == obj.STLSBSS {
+ if Iself && (Linkmode == LinkExternal || Debug['d'] == 0) && HEADTYPE != obj.Hopenbsd {
+ sect = addsection(&Segdata, ".tbss", 06)
+ sect.Align = int32(Thearch.Ptrsize)
+ sect.Vaddr = 0
+ } else {
+ sect = nil
+ }
+ datsize = 0
+
+ for ; s != nil && s.Type == obj.STLSBSS; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Value = datsize
+ growdatsize(&datsize, s)
+ }
+
+ if sect != nil {
+ sect.Length = uint64(datsize)
+ }
+ }
+
+ if s != nil {
+ Ctxt.Cursym = nil
+ Diag("unexpected symbol type %d for %s", s.Type, s.Name)
+ }
+
+ /*
+ * We finished data, begin read-only data.
+ * Not all systems support a separate read-only non-executable data section.
+ * ELF systems do.
+ * OS X and Plan 9 do not.
+ * Windows PE may, but if so we have not implemented it.
+ * And if we're using external linking mode, the point is moot,
+ * since it's not our decision; that code expects the sections in
+ * segtext.
+ */
+ var segro *Segment
+ if Iself && Linkmode == LinkInternal {
+ segro = &Segrodata
+ } else {
+ segro = &Segtext
+ }
+
+ s = datap
+
+ datsize = 0
+
+ /* read-only executable ELF, Mach-O sections */
+ for ; s != nil && s.Type < obj.STYPE; s = s.Next {
+ sect = addsection(&Segtext, s.Name, 04)
+ sect.Align = symalign(s)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ sect.Length = uint64(datsize) - sect.Vaddr
+ }
+
+ /* read-only data */
+ sect = addsection(segro, ".rodata", 04)
+
+ sect.Align = maxalign(s, obj.STYPERELRO-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = 0
+ Linklookup(Ctxt, "runtime.rodata", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.erodata", 0).Sect = sect
+ for ; s != nil && s.Type < obj.STYPERELRO; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ // There is some data that are conceptually read-only but are written to by
+ // relocations. On GNU systems, we can arrange for the dynamic linker to
+ // mprotect sections after relocations are applied by giving them write
+ // permissions in the object file and calling them ".data.rel.ro.FOO". We
+ // divide the .rodata section between actual .rodata and .data.rel.ro.rodata,
+ // but for the other sections that this applies to, we just write a read-only
+ // .FOO section or a read-write .data.rel.ro.FOO section depending on the
+ // situation.
+ // TODO(mwhudson): It would make sense to do this more widely, but it makes
+ // the system linker segfault on darwin.
+ relro_perms := 04
+ relro_prefix := ""
+
+ if UseRelro() {
+ relro_perms = 06
+ relro_prefix = ".data.rel.ro"
+ /* data only written by relocations */
+ sect = addsection(segro, ".data.rel.ro", 06)
+
+ sect.Align = maxalign(s, obj.STYPELINK-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = 0
+ for ; s != nil && s.Type < obj.STYPELINK; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ if s.Outer != nil && s.Outer.Sect != nil && s.Outer.Sect != sect {
+ Diag("s.Outer (%s) in different section from s (%s)", s.Outer.Name, s.Name)
+ }
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ }
+
+ /* typelink */
+ sect = addsection(segro, relro_prefix+".typelink", relro_perms)
+
+ sect.Align = maxalign(s, obj.STYPELINK)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.typelink", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.etypelink", 0).Sect = sect
+ for ; s != nil && s.Type == obj.STYPELINK; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ /* gosymtab */
+ sect = addsection(segro, relro_prefix+".gosymtab", relro_perms)
+
+ sect.Align = maxalign(s, obj.SPCLNTAB-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.symtab", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.esymtab", 0).Sect = sect
+ for ; s != nil && s.Type < obj.SPCLNTAB; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ /* gopclntab */
+ sect = addsection(segro, relro_prefix+".gopclntab", relro_perms)
+
+ sect.Align = maxalign(s, obj.SELFROSECT-1)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ Linklookup(Ctxt, "runtime.pclntab", 0).Sect = sect
+ Linklookup(Ctxt, "runtime.epclntab", 0).Sect = sect
+ for ; s != nil && s.Type < obj.SELFROSECT; s = s.Next {
+ datsize = aligndatsize(datsize, s)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ }
+
+ sect.Length = uint64(datsize) - sect.Vaddr
+
+ /* read-only ELF, Mach-O sections */
+ for ; s != nil && s.Type < obj.SELFSECT; s = s.Next {
+ sect = addsection(segro, s.Name, 04)
+ sect.Align = symalign(s)
+ datsize = Rnd(datsize, int64(sect.Align))
+ sect.Vaddr = uint64(datsize)
+ s.Sect = sect
+ s.Type = obj.SRODATA
+ s.Value = int64(uint64(datsize) - sect.Vaddr)
+ growdatsize(&datsize, s)
+ sect.Length = uint64(datsize) - sect.Vaddr
+ }
+
+ // 6g uses 4-byte relocation offsets, so the entire segment must fit in 32 bits.
+ if datsize != int64(uint32(datsize)) {
+ Diag("read-only data segment too large")
+ }
+
+ /* number the sections */
+ n := int32(1)
+
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
+ sect.Extnum = int16(n)
+ n++
+ }
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
+ sect.Extnum = int16(n)
+ n++
+ }
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ sect.Extnum = int16(n)
+ n++
+ }
+}
+
+// Add buildid to beginning of text segment, on non-ELF systems.
+// Non-ELF binary formats are not always flexible enough to
+// give us a place to put the Go build ID. On those systems, we put it
+// at the very beginning of the text segment.
+// This ``header'' is read by cmd/go.
+func textbuildid() {
+ if Iself || buildid == "" {
+ return
+ }
+
+ sym := Linklookup(Ctxt, "go.buildid", 0)
+ sym.Reachable = true
+ // The \xff is invalid UTF-8, meant to make it less likely
+ // to find one of these accidentally.
+ data := "\xff Go build ID: " + strconv.Quote(buildid) + "\n \xff"
+ sym.Type = obj.STEXT
+ sym.P = []byte(data)
+ sym.Size = int64(len(sym.P))
+
+ sym.Next = Ctxt.Textp
+ Ctxt.Textp = sym
+}
+
// assign addresses to text
//
// textaddress creates the .text section and assigns a virtual address
// to every text symbol (and its sub-symbols), starting at INITTEXT and
// respecting per-symbol or default function alignment.
func textaddress() {
	var sub *LSym

	addsection(&Segtext, ".text", 05)

	// Assign PCs in text segment.
	// Could parallelize, by assigning to text
	// and then letting threads copy down, but probably not worth it.
	sect := Segtext.Sect

	sect.Align = int32(Funcalign)
	Linklookup(Ctxt, "runtime.text", 0).Sect = sect
	Linklookup(Ctxt, "runtime.etext", 0).Sect = sect
	va := uint64(INITTEXT)
	sect.Vaddr = va
	for sym := Ctxt.Textp; sym != nil; sym = sym.Next {
		sym.Sect = sect
		// Sub-symbols are placed by their outer symbol below.
		if sym.Type&obj.SSUB != 0 {
			continue
		}
		if sym.Align != 0 {
			va = uint64(Rnd(int64(va), int64(sym.Align)))
		} else {
			va = uint64(Rnd(int64(va), int64(Funcalign)))
		}
		// Sub-symbol values are offsets from the outer symbol; shift
		// the whole chain to the assigned address.
		sym.Value = 0
		for sub = sym; sub != nil; sub = sub.Sub {
			sub.Value += int64(va)
		}
		if sym.Size == 0 && sym.Sub != nil {
			Ctxt.Cursym = sym
		}
		if sym.Size < MINFUNC {
			va += MINFUNC // spacing required for findfunctab
		} else {
			va += uint64(sym.Size)
		}
	}

	sect.Length = va - sect.Vaddr
}
+
// assign addresses
//
// address lays out the segments (text, optional rodata, data) at their
// final virtual addresses and file offsets, shifts every data symbol by
// its section's address, and defines the runtime.* boundary symbols.
// It relies on the section order established by dodata: rodata,
// [.data.rel.ro,] typelink, symtab, pclntab.
func address() {
	va := uint64(INITTEXT)
	Segtext.Rwx = 05
	Segtext.Vaddr = va
	Segtext.Fileoff = uint64(HEADR)
	for s := Segtext.Sect; s != nil; s = s.Next {
		va = uint64(Rnd(int64(va), int64(s.Align)))
		s.Vaddr = va
		va += s.Length
	}

	Segtext.Length = va - uint64(INITTEXT)
	Segtext.Filelen = Segtext.Length
	if HEADTYPE == obj.Hnacl {
		va += 32 // room for the "halt sled"
	}

	if Segrodata.Sect != nil {
		// align to page boundary so as not to mix
		// rodata and executable text.
		va = uint64(Rnd(int64(va), int64(INITRND)))

		Segrodata.Rwx = 04
		Segrodata.Vaddr = va
		Segrodata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
		Segrodata.Filelen = 0
		for s := Segrodata.Sect; s != nil; s = s.Next {
			va = uint64(Rnd(int64(va), int64(s.Align)))
			s.Vaddr = va
			va += s.Length
		}

		Segrodata.Length = va - Segrodata.Vaddr
		Segrodata.Filelen = Segrodata.Length
	}

	va = uint64(Rnd(int64(va), int64(INITRND)))
	Segdata.Rwx = 06
	Segdata.Vaddr = va
	Segdata.Fileoff = va - Segtext.Vaddr + Segtext.Fileoff
	Segdata.Filelen = 0
	// PE and Plan 9 compute the data file offset differently.
	if HEADTYPE == obj.Hwindows {
		Segdata.Fileoff = Segtext.Fileoff + uint64(Rnd(int64(Segtext.Length), PEFILEALIGN))
	}
	if HEADTYPE == obj.Hplan9 {
		Segdata.Fileoff = Segtext.Fileoff + Segtext.Filelen
	}
	var data *Section
	var noptr *Section
	var bss *Section
	var noptrbss *Section
	var vlen int64
	for s := Segdata.Sect; s != nil; s = s.Next {
		// .tbss occupies no space in the ELF image; skip it.
		if Iself && s.Name == ".tbss" {
			continue
		}
		vlen = int64(s.Length)
		if s.Next != nil && !(Iself && s.Next.Name == ".tbss") {
			vlen = int64(s.Next.Vaddr - s.Vaddr)
		}
		s.Vaddr = va
		va += uint64(vlen)
		Segdata.Length = va - Segdata.Vaddr
		if s.Name == ".data" {
			data = s
		}
		if s.Name == ".noptrdata" {
			noptr = s
		}
		if s.Name == ".bss" {
			bss = s
		}
		if s.Name == ".noptrbss" {
			noptrbss = s
		}
	}

	// Only initialized data is stored in the file; it ends where .bss begins.
	Segdata.Filelen = bss.Vaddr - Segdata.Vaddr

	// Locate the read-only sections by position, mirroring the order
	// dodata emitted them in.
	text := Segtext.Sect
	var rodata *Section
	if Segrodata.Sect != nil {
		rodata = Segrodata.Sect
	} else {
		rodata = text.Next
	}
	typelink := rodata.Next
	if UseRelro() {
		// There is another section (.data.rel.ro) when building a shared
		// object on elf systems.
		typelink = typelink.Next
	}
	symtab := typelink.Next
	pclntab := symtab.Next

	// Shift every data symbol (and its sub-symbols) from its
	// section-relative value to its absolute address.
	var sub *LSym
	for sym := datap; sym != nil; sym = sym.Next {
		Ctxt.Cursym = sym
		if sym.Sect != nil {
			sym.Value += int64(sym.Sect.Vaddr)
		}
		for sub = sym.Sub; sub != nil; sub = sub.Sub {
			sub.Value += sym.Value
		}
	}

	if Buildmode == BuildmodeShared {
		s := Linklookup(Ctxt, "go.link.abihashbytes", 0)
		sectSym := Linklookup(Ctxt, ".note.go.abihash", 0)
		s.Sect = sectSym.Sect
		// The hash bytes start 16 bytes into the note section,
		// past the note header.
		s.Value = int64(sectSym.Sect.Vaddr + 16)
	}

	// Define the runtime boundary symbols at their final addresses.
	xdefine("runtime.text", obj.STEXT, int64(text.Vaddr))
	xdefine("runtime.etext", obj.STEXT, int64(text.Vaddr+text.Length))
	xdefine("runtime.rodata", obj.SRODATA, int64(rodata.Vaddr))
	xdefine("runtime.erodata", obj.SRODATA, int64(rodata.Vaddr+rodata.Length))
	xdefine("runtime.typelink", obj.SRODATA, int64(typelink.Vaddr))
	xdefine("runtime.etypelink", obj.SRODATA, int64(typelink.Vaddr+typelink.Length))

	sym := Linklookup(Ctxt, "runtime.gcdata", 0)
	sym.Local = true
	xdefine("runtime.egcdata", obj.SRODATA, Symaddr(sym)+sym.Size)
	Linklookup(Ctxt, "runtime.egcdata", 0).Sect = sym.Sect

	sym = Linklookup(Ctxt, "runtime.gcbss", 0)
	sym.Local = true
	xdefine("runtime.egcbss", obj.SRODATA, Symaddr(sym)+sym.Size)
	Linklookup(Ctxt, "runtime.egcbss", 0).Sect = sym.Sect

	xdefine("runtime.symtab", obj.SRODATA, int64(symtab.Vaddr))
	xdefine("runtime.esymtab", obj.SRODATA, int64(symtab.Vaddr+symtab.Length))
	xdefine("runtime.pclntab", obj.SRODATA, int64(pclntab.Vaddr))
	xdefine("runtime.epclntab", obj.SRODATA, int64(pclntab.Vaddr+pclntab.Length))
	xdefine("runtime.noptrdata", obj.SNOPTRDATA, int64(noptr.Vaddr))
	xdefine("runtime.enoptrdata", obj.SNOPTRDATA, int64(noptr.Vaddr+noptr.Length))
	xdefine("runtime.bss", obj.SBSS, int64(bss.Vaddr))
	xdefine("runtime.ebss", obj.SBSS, int64(bss.Vaddr+bss.Length))
	xdefine("runtime.data", obj.SDATA, int64(data.Vaddr))
	xdefine("runtime.edata", obj.SDATA, int64(data.Vaddr+data.Length))
	xdefine("runtime.noptrbss", obj.SNOPTRBSS, int64(noptrbss.Vaddr))
	xdefine("runtime.enoptrbss", obj.SNOPTRBSS, int64(noptrbss.Vaddr+noptrbss.Length))
	xdefine("runtime.end", obj.SBSS, int64(Segdata.Vaddr+Segdata.Length))
}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/dwarf.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/dwarf.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/dwarf.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/dwarf.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,2549 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO/NICETOHAVE:
+// - eliminate DW_CLS_ if not used
+// - package info in compilation units
+// - assign global variables and types to their packages
+// - gdb uses c syntax, meaning clumsy quoting is needed for go identifiers. eg
+// ptype struct '[]uint8' and qualifiers need to be quoted away
+// - lexical scoping is lost, so gdb gets confused as to which 'main.i' you mean.
+// - file:line info for variables
+// - make strings a typedef so prettyprinters can see the underlying string type
+
+package ld
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "os"
+ "strings"
+)
+
+/*
+ * Offsets and sizes of the debug_* sections in the cout file.
+ */
+var abbrevo int64
+
+var abbrevsize int64
+
+var abbrevsym *LSym
+
+var abbrevsympos int64
+
+var lineo int64
+
+var linesize int64
+
+var linesym *LSym
+
+var linesympos int64
+
+var infoo int64 // also the base for DWDie->offs and reference attributes.
+
+var infosize int64
+
+var infosym *LSym
+
+var infosympos int64
+
+var frameo int64
+
+var framesize int64
+
+var framesym *LSym
+
+var framesympos int64
+
+var pubnameso int64
+
+var pubnamessize int64
+
+var pubtypeso int64
+
+var pubtypessize int64
+
+var arangeso int64
+
+var arangessize int64
+
+var gdbscripto int64
+
+var gdbscriptsize int64
+
+var infosec *LSym
+
+var inforeloco int64
+
+var inforelocsize int64
+
+var arangessec *LSym
+
+var arangesreloco int64
+
+var arangesrelocsize int64
+
+var linesec *LSym
+
+var linereloco int64
+
+var linerelocsize int64
+
+var framesec *LSym
+
+var framereloco int64
+
+var framerelocsize int64
+
+var gdbscript string
+
+/*
+ * Basic I/O
+ */
+func addrput(addr int64) {
+ switch Thearch.Ptrsize {
+ case 4:
+ Thearch.Lput(uint32(addr))
+
+ case 8:
+ Thearch.Vput(uint64(addr))
+ }
+}
+
+func uleb128enc(v uint64, dst []byte) int {
+ var c uint8
+
+ length := uint8(0)
+ for {
+ c = uint8(v & 0x7f)
+ v >>= 7
+ if v != 0 {
+ c |= 0x80
+ }
+ if dst != nil {
+ dst[0] = byte(c)
+ dst = dst[1:]
+ }
+ length++
+ if c&0x80 == 0 {
+ break
+ }
+ }
+
+ return int(length)
+}
+
+func sleb128enc(v int64, dst []byte) int {
+ var c uint8
+ var s uint8
+
+ length := uint8(0)
+ for {
+ c = uint8(v & 0x7f)
+ s = uint8(v & 0x40)
+ v >>= 7
+ if (v != -1 || s == 0) && (v != 0 || s != 0) {
+ c |= 0x80
+ }
+ if dst != nil {
+ dst[0] = byte(c)
+ dst = dst[1:]
+ }
+ length++
+ if c&0x80 == 0 {
+ break
+ }
+ }
+
+ return int(length)
+}
+
+var encbuf [10]byte
+
+func uleb128put(v int64) {
+ n := uleb128enc(uint64(v), encbuf[:])
+ Cwrite(encbuf[:n])
+}
+
+func sleb128put(v int64) {
+ n := sleb128enc(v, encbuf[:])
+ Cwrite(encbuf[:n])
+}
+
+/*
+ * Defining Abbrevs. This is hardcoded, and there will be
+ * only a handful of them. The DWARF spec places no restriction on
+ * the ordering of attributes in the Abbrevs and DIEs, and we will
+ * always write them out in the order of declaration in the abbrev.
+ */
+type DWAttrForm struct {
+ attr uint16
+ form uint8
+}
+
+// Go-specific type attributes.
+const (
+ DW_AT_go_kind = 0x2900
+ DW_AT_go_key = 0x2901
+ DW_AT_go_elem = 0x2902
+
+ DW_AT_internal_location = 253 // params and locals; not emitted
+)
+
+// Index into the abbrevs table below.
+// Keep in sync with ispubname() and ispubtype() below.
+// ispubtype considers >= NULLTYPE public
+const (
+ DW_ABRV_NULL = iota
+ DW_ABRV_COMPUNIT
+ DW_ABRV_FUNCTION
+ DW_ABRV_VARIABLE
+ DW_ABRV_AUTO
+ DW_ABRV_PARAM
+ DW_ABRV_STRUCTFIELD
+ DW_ABRV_FUNCTYPEPARAM
+ DW_ABRV_DOTDOTDOT
+ DW_ABRV_ARRAYRANGE
+ DW_ABRV_NULLTYPE
+ DW_ABRV_BASETYPE
+ DW_ABRV_ARRAYTYPE
+ DW_ABRV_CHANTYPE
+ DW_ABRV_FUNCTYPE
+ DW_ABRV_IFACETYPE
+ DW_ABRV_MAPTYPE
+ DW_ABRV_PTRTYPE
+ DW_ABRV_BARE_PTRTYPE // only for void*, no DW_AT_type attr to please gdb 6.
+ DW_ABRV_SLICETYPE
+ DW_ABRV_STRINGTYPE
+ DW_ABRV_STRUCTTYPE
+ DW_ABRV_TYPEDECL
+ DW_NABRV
+)
+
+type DWAbbrev struct {
+ tag uint8
+ children uint8
+ attr []DWAttrForm
+}
+
+var abbrevs = [DW_NABRV]DWAbbrev{
+ /* The mandatory DW_ABRV_NULL entry. */
+ {0, 0, []DWAttrForm{}},
+
+ /* COMPUNIT */
+ {
+ DW_TAG_compile_unit,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_language, DW_FORM_data1},
+ {DW_AT_low_pc, DW_FORM_addr},
+ {DW_AT_high_pc, DW_FORM_addr},
+ {DW_AT_stmt_list, DW_FORM_data4},
+ {DW_AT_comp_dir, DW_FORM_string},
+ },
+ },
+
+ /* FUNCTION */
+ {
+ DW_TAG_subprogram,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_low_pc, DW_FORM_addr},
+ {DW_AT_high_pc, DW_FORM_addr},
+ {DW_AT_external, DW_FORM_flag},
+ },
+ },
+
+ /* VARIABLE */
+ {
+ DW_TAG_variable,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_location, DW_FORM_block1},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_external, DW_FORM_flag},
+ },
+ },
+
+ /* AUTO */
+ {
+ DW_TAG_variable,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_location, DW_FORM_block1},
+ {DW_AT_type, DW_FORM_ref_addr},
+ },
+ },
+
+ /* PARAM */
+ {
+ DW_TAG_formal_parameter,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_location, DW_FORM_block1},
+ {DW_AT_type, DW_FORM_ref_addr},
+ },
+ },
+
+ /* STRUCTFIELD */
+ {
+ DW_TAG_member,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_data_member_location, DW_FORM_block1},
+ {DW_AT_type, DW_FORM_ref_addr},
+ },
+ },
+
+ /* FUNCTYPEPARAM */
+ {
+ DW_TAG_formal_parameter,
+ DW_CHILDREN_no,
+
+ // No name!
+ []DWAttrForm{
+ {DW_AT_type, DW_FORM_ref_addr},
+ },
+ },
+
+ /* DOTDOTDOT */
+ {
+ DW_TAG_unspecified_parameters,
+ DW_CHILDREN_no,
+ []DWAttrForm{},
+ },
+
+ /* ARRAYRANGE */
+ {
+ DW_TAG_subrange_type,
+ DW_CHILDREN_no,
+
+ // No name!
+ []DWAttrForm{
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_count, DW_FORM_udata},
+ },
+ },
+
+ // Below here are the types considered public by ispubtype
+ /* NULLTYPE */
+ {
+ DW_TAG_unspecified_type,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ },
+ },
+
+ /* BASETYPE */
+ {
+ DW_TAG_base_type,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_encoding, DW_FORM_data1},
+ {DW_AT_byte_size, DW_FORM_data1},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* ARRAYTYPE */
+ // child is subrange with upper bound
+ {
+ DW_TAG_array_type,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_byte_size, DW_FORM_udata},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* CHANTYPE */
+ {
+ DW_TAG_typedef,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_kind, DW_FORM_data1},
+ {DW_AT_go_elem, DW_FORM_ref_addr},
+ },
+ },
+
+ /* FUNCTYPE */
+ {
+ DW_TAG_subroutine_type,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ // {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* IFACETYPE */
+ {
+ DW_TAG_typedef,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* MAPTYPE */
+ {
+ DW_TAG_typedef,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_kind, DW_FORM_data1},
+ {DW_AT_go_key, DW_FORM_ref_addr},
+ {DW_AT_go_elem, DW_FORM_ref_addr},
+ },
+ },
+
+ /* PTRTYPE */
+ {
+ DW_TAG_pointer_type,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* BARE_PTRTYPE */
+ {
+ DW_TAG_pointer_type,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ },
+ },
+
+ /* SLICETYPE */
+ {
+ DW_TAG_structure_type,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_byte_size, DW_FORM_udata},
+ {DW_AT_go_kind, DW_FORM_data1},
+ {DW_AT_go_elem, DW_FORM_ref_addr},
+ },
+ },
+
+ /* STRINGTYPE */
+ {
+ DW_TAG_structure_type,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_byte_size, DW_FORM_udata},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* STRUCTTYPE */
+ {
+ DW_TAG_structure_type,
+ DW_CHILDREN_yes,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_byte_size, DW_FORM_udata},
+ {DW_AT_go_kind, DW_FORM_data1},
+ },
+ },
+
+ /* TYPEDECL */
+ {
+ DW_TAG_typedef,
+ DW_CHILDREN_no,
+ []DWAttrForm{
+ {DW_AT_name, DW_FORM_string},
+ {DW_AT_type, DW_FORM_ref_addr},
+ },
+ },
+}
+
+func writeabbrev() {
+ abbrevo = Cpos()
+ for i := 1; i < DW_NABRV; i++ {
+ // See section 7.5.3
+ uleb128put(int64(i))
+
+ uleb128put(int64(abbrevs[i].tag))
+ Cput(abbrevs[i].children)
+ for _, f := range abbrevs[i].attr {
+ uleb128put(int64(f.attr))
+ uleb128put(int64(f.form))
+ }
+ uleb128put(0)
+ uleb128put(0)
+ }
+
+ Cput(0)
+ abbrevsize = Cpos() - abbrevo
+}
+
+/*
+ * Debugging Information Entries and their attributes.
+ */
+const (
+ HASHSIZE = 107
+)
+
+func dwarfhashstr(s string) uint32 {
+ h := uint32(0)
+ for s != "" {
+ h = h + h + h + uint32(s[0])
+ s = s[1:]
+ }
+ return h % HASHSIZE
+}
+
+// For DW_CLS_string and _block, value should contain the length, and
+// data the data, for _reference, value is 0 and data is a DWDie* to
+// the referenced instance, for all others, value is the whole thing
+// and data is null.
+
+type DWAttr struct {
+ link *DWAttr
+ atr uint16 // DW_AT_
+ cls uint8 // DW_CLS_
+ value int64
+ data interface{}
+}
+
+type DWDie struct {
+ abbrev int
+ link *DWDie
+ child *DWDie
+ attr *DWAttr
+ // offset into .debug_info section, i.e relative to
+ // infoo. only valid after call to putdie()
+ offs int64
+ hash []*DWDie // optional index of children by name, enabled by mkindex()
+ hlink *DWDie // bucket chain in parent's index
+}
+
+/*
+ * Root DIEs for compilation units, types and global variables.
+ */
+var dwroot DWDie
+
+var dwtypes DWDie
+
+var dwglobals DWDie
+
+func newattr(die *DWDie, attr uint16, cls int, value int64, data interface{}) *DWAttr {
+ a := new(DWAttr)
+ a.link = die.attr
+ die.attr = a
+ a.atr = attr
+ a.cls = uint8(cls)
+ a.value = value
+ a.data = data
+ return a
+}
+
+// Each DIE (except the root ones) has at least 1 attribute: its
+// name. getattr moves the desired one to the front so
+// frequently searched ones are found faster.
+func getattr(die *DWDie, attr uint16) *DWAttr {
+ if die.attr.atr == attr {
+ return die.attr
+ }
+
+ a := die.attr
+ b := a.link
+ for b != nil {
+ if b.atr == attr {
+ a.link = b.link
+ b.link = die.attr
+ die.attr = b
+ return b
+ }
+
+ a = b
+ b = b.link
+ }
+
+ return nil
+}
+
+// Every DIE has at least a DW_AT_name attribute (but it will only be
+// written out if it is listed in the abbrev). If its parent is
+// keeping an index, the new DIE will be inserted there.
+func newdie(parent *DWDie, abbrev int, name string) *DWDie {
+ die := new(DWDie)
+ die.abbrev = abbrev
+ die.link = parent.child
+ parent.child = die
+
+ newattr(die, DW_AT_name, DW_CLS_STRING, int64(len(name)), name)
+
+ if parent.hash != nil {
+ h := int(dwarfhashstr(name))
+ die.hlink = parent.hash[h]
+ parent.hash[h] = die
+ }
+
+ return die
+}
+
+func mkindex(die *DWDie) {
+ die.hash = make([]*DWDie, HASHSIZE)
+}
+
+func walktypedef(die *DWDie) *DWDie {
+ // Resolve typedef if present.
+ if die.abbrev == DW_ABRV_TYPEDECL {
+ for attr := die.attr; attr != nil; attr = attr.link {
+ if attr.atr == DW_AT_type && attr.cls == DW_CLS_REFERENCE && attr.data != nil {
+ return attr.data.(*DWDie)
+ }
+ }
+ }
+
+ return die
+}
+
+// Find child by AT_name using hashtable if available or linear scan
+// if not.
+func find(die *DWDie, name string) *DWDie {
+ var prev *DWDie
+ for ; die != prev; prev, die = die, walktypedef(die) {
+
+ if die.hash == nil {
+ for a := die.child; a != nil; a = a.link {
+ if name == getattr(a, DW_AT_name).data {
+ return a
+ }
+ }
+ continue
+ }
+
+ h := int(dwarfhashstr(name))
+ a := die.hash[h]
+
+ if a == nil {
+ continue
+ }
+
+ if name == getattr(a, DW_AT_name).data {
+ return a
+ }
+
+ // Move found ones to head of the list.
+ for b := a.hlink; b != nil; b = b.hlink {
+ if name == getattr(b, DW_AT_name).data {
+ a.hlink = b.hlink
+ b.hlink = die.hash[h]
+ die.hash[h] = b
+ return b
+ }
+ a = b
+ }
+ }
+ return nil
+}
+
+func mustFind(die *DWDie, name string) *DWDie {
+ r := find(die, name)
+ if r == nil {
+ Exitf("dwarf find: %s %p has no %s", getattr(die, DW_AT_name).data, die, name)
+ }
+ return r
+}
+
+func adddwarfrel(sec *LSym, sym *LSym, offsetbase int64, siz int, addend int64) {
+ r := Addrel(sec)
+ r.Sym = sym
+ r.Xsym = sym
+ r.Off = int32(Cpos() - offsetbase)
+ r.Siz = uint8(siz)
+ r.Type = obj.R_ADDR
+ r.Add = addend
+ r.Xadd = addend
+ if Iself && Thearch.Thechar == '6' {
+ addend = 0
+ }
+ if HEADTYPE == obj.Hdarwin {
+ addend += sym.Value
+ }
+ switch siz {
+ case 4:
+ Thearch.Lput(uint32(addend))
+
+ case 8:
+ Thearch.Vput(uint64(addend))
+
+ default:
+ Diag("bad size in adddwarfrel")
+ }
+}
+
+func newrefattr(die *DWDie, attr uint16, ref *DWDie) *DWAttr {
+ if ref == nil {
+ return nil
+ }
+ return newattr(die, attr, DW_CLS_REFERENCE, 0, ref)
+}
+
+var fwdcount int
+
+func putattr(abbrev int, form int, cls int, value int64, data interface{}) {
+ switch form {
+ case DW_FORM_addr: // address
+ if Linkmode == LinkExternal {
+ value -= (data.(*LSym)).Value
+ adddwarfrel(infosec, data.(*LSym), infoo, Thearch.Ptrsize, value)
+ break
+ }
+
+ addrput(value)
+
+ case DW_FORM_block1: // block
+ if cls == DW_CLS_ADDRESS {
+ Cput(uint8(1 + Thearch.Ptrsize))
+ Cput(DW_OP_addr)
+ if Linkmode == LinkExternal {
+ value -= (data.(*LSym)).Value
+ adddwarfrel(infosec, data.(*LSym), infoo, Thearch.Ptrsize, value)
+ break
+ }
+
+ addrput(value)
+ break
+ }
+
+ value &= 0xff
+ Cput(uint8(value))
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
+ Cput(uint8(p[i]))
+ }
+
+ case DW_FORM_block2: // block
+ value &= 0xffff
+
+ Thearch.Wput(uint16(value))
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
+ Cput(uint8(p[i]))
+ }
+
+ case DW_FORM_block4: // block
+ value &= 0xffffffff
+
+ Thearch.Lput(uint32(value))
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
+ Cput(uint8(p[i]))
+ }
+
+ case DW_FORM_block: // block
+ uleb128put(value)
+
+ p := data.([]byte)
+ for i := 0; int64(i) < value; i++ {
+ Cput(uint8(p[i]))
+ }
+
+ case DW_FORM_data1: // constant
+ Cput(uint8(value))
+
+ case DW_FORM_data2: // constant
+ Thearch.Wput(uint16(value))
+
+ case DW_FORM_data4: // constant, {line,loclist,mac,rangelist}ptr
+ if Linkmode == LinkExternal && cls == DW_CLS_PTR {
+ adddwarfrel(infosec, linesym, infoo, 4, value)
+ break
+ }
+
+ Thearch.Lput(uint32(value))
+
+ case DW_FORM_data8: // constant, {line,loclist,mac,rangelist}ptr
+ Thearch.Vput(uint64(value))
+
+ case DW_FORM_sdata: // constant
+ sleb128put(value)
+
+ case DW_FORM_udata: // constant
+ uleb128put(value)
+
+ case DW_FORM_string: // string
+ strnput(data.(string), int(value+1))
+
+ case DW_FORM_flag: // flag
+ if value != 0 {
+ Cput(1)
+ } else {
+ Cput(0)
+ }
+
+ // In DWARF 2 (which is what we claim to generate),
+ // the ref_addr is the same size as a normal address.
+ // In DWARF 3 it is always 32 bits, unless emitting a large
+ // (> 4 GB of debug info aka "64-bit") unit, which we don't implement.
+ case DW_FORM_ref_addr: // reference to a DIE in the .info section
+ if data == nil {
+ Diag("dwarf: null reference in %d", abbrev)
+ if Thearch.Ptrsize == 8 {
+ Thearch.Vput(0) // invalid dwarf, gdb will complain.
+ } else {
+ Thearch.Lput(0) // invalid dwarf, gdb will complain.
+ }
+ } else {
+ off := (data.(*DWDie)).offs
+ if off == 0 {
+ fwdcount++
+ }
+ if Linkmode == LinkExternal {
+ adddwarfrel(infosec, infosym, infoo, Thearch.Ptrsize, off)
+ break
+ }
+
+ addrput(off)
+ }
+
+ case DW_FORM_ref1, // reference within the compilation unit
+ DW_FORM_ref2, // reference
+ DW_FORM_ref4, // reference
+ DW_FORM_ref8, // reference
+ DW_FORM_ref_udata, // reference
+
+ DW_FORM_strp, // string
+ DW_FORM_indirect: // (see Section 7.5.3)
+ fallthrough
+ default:
+ Exitf("dwarf: unsupported attribute form %d / class %d", form, cls)
+ }
+}
+
+// Note that we can (and do) add arbitrary attributes to a DIE, but
+// only the ones actually listed in the Abbrev will be written out.
+func putattrs(abbrev int, attr *DWAttr) {
+Outer:
+ for _, f := range abbrevs[abbrev].attr {
+ for ap := attr; ap != nil; ap = ap.link {
+ if ap.atr == f.attr {
+ putattr(abbrev, int(f.form), int(ap.cls), ap.value, ap.data)
+ continue Outer
+ }
+ }
+
+ putattr(abbrev, int(f.form), 0, 0, nil)
+ }
+}
+
+func putdies(die *DWDie) {
+ for ; die != nil; die = die.link {
+ putdie(die)
+ }
+}
+
+func putdie(die *DWDie) {
+ die.offs = Cpos() - infoo
+ uleb128put(int64(die.abbrev))
+ putattrs(die.abbrev, die.attr)
+ if abbrevs[die.abbrev].children != 0 {
+ putdies(die.child)
+ Cput(0)
+ }
+}
+
+func reverselist(list **DWDie) {
+ curr := *list
+ var prev *DWDie
+ for curr != nil {
+ var next *DWDie = curr.link
+ curr.link = prev
+ prev = curr
+ curr = next
+ }
+
+ *list = prev
+}
+
+func reversetree(list **DWDie) {
+ reverselist(list)
+ for die := *list; die != nil; die = die.link {
+ if abbrevs[die.abbrev].children != 0 {
+ reversetree(&die.child)
+ }
+ }
+}
+
+func newmemberoffsetattr(die *DWDie, offs int32) {
+ var block [20]byte
+
+ i := 0
+ block[i] = DW_OP_plus_uconst
+ i++
+ i += uleb128enc(uint64(offs), block[i:])
+ newattr(die, DW_AT_data_member_location, DW_CLS_BLOCK, int64(i), block[:i])
+}
+
+// GDB doesn't like DW_FORM_addr for DW_AT_location, so emit a
+// location expression that evals to a const.
+func newabslocexprattr(die *DWDie, addr int64, sym *LSym) {
+ newattr(die, DW_AT_location, DW_CLS_ADDRESS, addr, sym)
+ // below
+}
+
+// Lookup predefined types
+func lookup_or_diag(n string) *LSym {
+ s := Linkrlookup(Ctxt, n, 0)
+ if s == nil || s.Size == 0 {
+ Exitf("dwarf: missing type: %s", n)
+ }
+
+ return s
+}
+
+func dotypedef(parent *DWDie, name string, def *DWDie) {
+ // Only emit typedefs for real names.
+ if strings.HasPrefix(name, "map[") {
+ return
+ }
+ if strings.HasPrefix(name, "struct {") {
+ return
+ }
+ if strings.HasPrefix(name, "chan ") {
+ return
+ }
+ if name[0] == '[' || name[0] == '*' {
+ return
+ }
+ if def == nil {
+ Diag("dwarf: bad def in dotypedef")
+ }
+
+ // The typedef entry must be created after the def,
+ // so that future lookups will find the typedef instead
+ // of the real definition. This hooks the typedef into any
+ // circular definition loops, so that gdb can understand them.
+ die := newdie(parent, DW_ABRV_TYPEDECL, name)
+
+ newrefattr(die, DW_AT_type, def)
+}
+
+// Define gotype, for composite ones recurse into constituents.
+func defgotype(gotype *LSym) *DWDie {
+ if gotype == nil {
+ return mustFind(&dwtypes, "")
+ }
+
+ if !strings.HasPrefix(gotype.Name, "type.") {
+ Diag("dwarf: type name doesn't start with \".type\": %s", gotype.Name)
+ return mustFind(&dwtypes, "")
+ }
+
+ name := gotype.Name[5:] // could also decode from Type.string
+
+ die := find(&dwtypes, name)
+
+ if die != nil {
+ return die
+ }
+
+ if false && Debug['v'] > 2 {
+ fmt.Printf("new type: %v\n", gotype)
+ }
+
+ kind := decodetype_kind(gotype)
+ bytesize := decodetype_size(gotype)
+
+ switch kind {
+ case obj.KindBool:
+ die = newdie(&dwtypes, DW_ABRV_BASETYPE, name)
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_boolean, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindInt,
+ obj.KindInt8,
+ obj.KindInt16,
+ obj.KindInt32,
+ obj.KindInt64:
+ die = newdie(&dwtypes, DW_ABRV_BASETYPE, name)
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_signed, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindUint,
+ obj.KindUint8,
+ obj.KindUint16,
+ obj.KindUint32,
+ obj.KindUint64,
+ obj.KindUintptr:
+ die = newdie(&dwtypes, DW_ABRV_BASETYPE, name)
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_unsigned, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindFloat32,
+ obj.KindFloat64:
+ die = newdie(&dwtypes, DW_ABRV_BASETYPE, name)
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_float, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindComplex64,
+ obj.KindComplex128:
+ die = newdie(&dwtypes, DW_ABRV_BASETYPE, name)
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_complex_float, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindArray:
+ die = newdie(&dwtypes, DW_ABRV_ARRAYTYPE, name)
+ dotypedef(&dwtypes, name, die)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+ s := decodetype_arrayelem(gotype)
+ newrefattr(die, DW_AT_type, defgotype(s))
+ fld := newdie(die, DW_ABRV_ARRAYRANGE, "range")
+
+ // use actual length not upper bound; correct for 0-length arrays.
+ newattr(fld, DW_AT_count, DW_CLS_CONSTANT, decodetype_arraylen(gotype), 0)
+
+ newrefattr(fld, DW_AT_type, mustFind(&dwtypes, "uintptr"))
+
+ case obj.KindChan:
+ die = newdie(&dwtypes, DW_ABRV_CHANTYPE, name)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+ s := decodetype_chanelem(gotype)
+ newrefattr(die, DW_AT_go_elem, defgotype(s))
+
+ case obj.KindFunc:
+ die = newdie(&dwtypes, DW_ABRV_FUNCTYPE, name)
+ dotypedef(&dwtypes, name, die)
+ newrefattr(die, DW_AT_type, mustFind(&dwtypes, "void"))
+ nfields := decodetype_funcincount(gotype)
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
+ s = decodetype_funcintype(gotype, i)
+ fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
+ newrefattr(fld, DW_AT_type, defgotype(s))
+ }
+
+ if decodetype_funcdotdotdot(gotype) != 0 {
+ newdie(die, DW_ABRV_DOTDOTDOT, "...")
+ }
+ nfields = decodetype_funcoutcount(gotype)
+ for i := 0; i < nfields; i++ {
+ s = decodetype_funcouttype(gotype, i)
+ fld = newdie(die, DW_ABRV_FUNCTYPEPARAM, s.Name[5:])
+ newrefattr(fld, DW_AT_type, defptrto(defgotype(s)))
+ }
+
+ case obj.KindInterface:
+ die = newdie(&dwtypes, DW_ABRV_IFACETYPE, name)
+ dotypedef(&dwtypes, name, die)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+ nfields := int(decodetype_ifacemethodcount(gotype))
+ var s *LSym
+ if nfields == 0 {
+ s = lookup_or_diag("type.runtime.eface")
+ } else {
+ s = lookup_or_diag("type.runtime.iface")
+ }
+ newrefattr(die, DW_AT_type, defgotype(s))
+
+ case obj.KindMap:
+ die = newdie(&dwtypes, DW_ABRV_MAPTYPE, name)
+ s := decodetype_mapkey(gotype)
+ newrefattr(die, DW_AT_go_key, defgotype(s))
+ s = decodetype_mapvalue(gotype)
+ newrefattr(die, DW_AT_go_elem, defgotype(s))
+
+ case obj.KindPtr:
+ die = newdie(&dwtypes, DW_ABRV_PTRTYPE, name)
+ dotypedef(&dwtypes, name, die)
+ s := decodetype_ptrelem(gotype)
+ newrefattr(die, DW_AT_type, defgotype(s))
+
+ case obj.KindSlice:
+ die = newdie(&dwtypes, DW_ABRV_SLICETYPE, name)
+ dotypedef(&dwtypes, name, die)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+ s := decodetype_arrayelem(gotype)
+ newrefattr(die, DW_AT_go_elem, defgotype(s))
+
+ case obj.KindString:
+ die = newdie(&dwtypes, DW_ABRV_STRINGTYPE, name)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+
+ case obj.KindStruct:
+ die = newdie(&dwtypes, DW_ABRV_STRUCTTYPE, name)
+ dotypedef(&dwtypes, name, die)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, bytesize, 0)
+ nfields := decodetype_structfieldcount(gotype)
+ var f string
+ var fld *DWDie
+ var s *LSym
+ for i := 0; i < nfields; i++ {
+ f = decodetype_structfieldname(gotype, i)
+ s = decodetype_structfieldtype(gotype, i)
+ if f == "" {
+ f = s.Name[5:] // skip "type."
+ }
+ fld = newdie(die, DW_ABRV_STRUCTFIELD, f)
+ newrefattr(fld, DW_AT_type, defgotype(s))
+ newmemberoffsetattr(fld, int32(decodetype_structfieldoffs(gotype, i)))
+ }
+
+ case obj.KindUnsafePointer:
+ die = newdie(&dwtypes, DW_ABRV_BARE_PTRTYPE, name)
+
+ default:
+ Diag("dwarf: definition of unknown kind %d: %s", kind, gotype.Name)
+ die = newdie(&dwtypes, DW_ABRV_TYPEDECL, name)
+ newrefattr(die, DW_AT_type, mustFind(&dwtypes, ""))
+ }
+
+ newattr(die, DW_AT_go_kind, DW_CLS_CONSTANT, int64(kind), 0)
+
+ return die
+}
+
+// Find or construct *T given T.
+func defptrto(dwtype *DWDie) *DWDie {
+ ptrname := fmt.Sprintf("*%s", getattr(dwtype, DW_AT_name).data)
+ die := find(&dwtypes, ptrname)
+ if die == nil {
+ die = newdie(&dwtypes, DW_ABRV_PTRTYPE, ptrname)
+ newrefattr(die, DW_AT_type, dwtype)
+ }
+
+ return die
+}
+
+// Copies src's children into dst. Copies attributes by value.
+// DWAttr.data is copied as pointer only. If except is one of
+// the top-level children, it will not be copied.
+func copychildrenexcept(dst *DWDie, src *DWDie, except *DWDie) {
+ for src = src.child; src != nil; src = src.link {
+ if src == except {
+ continue
+ }
+ c := newdie(dst, src.abbrev, getattr(src, DW_AT_name).data.(string))
+ for a := src.attr; a != nil; a = a.link {
+ newattr(c, a.atr, int(a.cls), a.value, a.data)
+ }
+ copychildrenexcept(c, src, nil)
+ }
+
+ reverselist(&dst.child)
+}
+
+func copychildren(dst *DWDie, src *DWDie) {
+ copychildrenexcept(dst, src, nil)
+}
+
+// Search children (assumed to have DW_TAG_member) for the one named
+// field and set its DW_AT_type to dwtype
+func substitutetype(structdie *DWDie, field string, dwtype *DWDie) {
+ child := mustFind(structdie, field)
+ if child == nil {
+ return
+ }
+
+ a := getattr(child, DW_AT_type)
+ if a != nil {
+ a.data = dwtype
+ } else {
+ newrefattr(child, DW_AT_type, dwtype)
+ }
+}
+
+func synthesizestringtypes(die *DWDie) {
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime.stringStructDWARF")))
+ if prototype == nil {
+ return
+ }
+
+ for ; die != nil; die = die.link {
+ if die.abbrev != DW_ABRV_STRINGTYPE {
+ continue
+ }
+ copychildren(die, prototype)
+ }
+}
+
+func synthesizeslicetypes(die *DWDie) {
+ prototype := walktypedef(defgotype(lookup_or_diag("type.runtime.slice")))
+ if prototype == nil {
+ return
+ }
+
+ for ; die != nil; die = die.link {
+ if die.abbrev != DW_ABRV_SLICETYPE {
+ continue
+ }
+ copychildren(die, prototype)
+ elem := getattr(die, DW_AT_go_elem).data.(*DWDie)
+ substitutetype(die, "array", defptrto(elem))
+ }
+}
+
+func mkinternaltypename(base string, arg1 string, arg2 string) string {
+ var buf string
+
+ if arg2 == "" {
+ buf = fmt.Sprintf("%s<%s>", base, arg1)
+ } else {
+ buf = fmt.Sprintf("%s<%s,%s>", base, arg1, arg2)
+ }
+ n := buf
+ return n
+}
+
+// synthesizemaptypes is way too closely married to runtime/hashmap.c
+const (
+ MaxKeySize = 128
+ MaxValSize = 128
+ BucketSize = 8
+)
+
+func synthesizemaptypes(die *DWDie) {
+ hash := walktypedef(defgotype(lookup_or_diag("type.runtime.hmap")))
+ bucket := walktypedef(defgotype(lookup_or_diag("type.runtime.bmap")))
+
+ if hash == nil {
+ return
+ }
+
+ for ; die != nil; die = die.link {
+ if die.abbrev != DW_ABRV_MAPTYPE {
+ continue
+ }
+
+ keytype := walktypedef(getattr(die, DW_AT_go_key).data.(*DWDie))
+ valtype := walktypedef(getattr(die, DW_AT_go_elem).data.(*DWDie))
+
+ // compute size info like hashmap.c does.
+ keysize, valsize := Thearch.Ptrsize, Thearch.Ptrsize
+ a := getattr(keytype, DW_AT_byte_size)
+ if a != nil {
+ keysize = int(a.value)
+ }
+ a = getattr(valtype, DW_AT_byte_size)
+ if a != nil {
+ valsize = int(a.value)
+ }
+ indirect_key, indirect_val := false, false
+ if keysize > MaxKeySize {
+ keysize = Thearch.Ptrsize
+ indirect_key = true
+ }
+ if valsize > MaxValSize {
+ valsize = Thearch.Ptrsize
+ indirect_val = true
+ }
+
+ // Construct type to represent an array of BucketSize keys
+ dwhk := newdie(&dwtypes, DW_ABRV_ARRAYTYPE, mkinternaltypename("[]key", getattr(keytype, DW_AT_name).data.(string), ""))
+
+ newattr(dwhk, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*int64(keysize), 0)
+ t := keytype
+ if indirect_key {
+ t = defptrto(keytype)
+ }
+ newrefattr(dwhk, DW_AT_type, t)
+ fld := newdie(dwhk, DW_ABRV_ARRAYRANGE, "size")
+ newattr(fld, DW_AT_count, DW_CLS_CONSTANT, BucketSize, 0)
+ newrefattr(fld, DW_AT_type, mustFind(&dwtypes, "uintptr"))
+
+ // Construct type to represent an array of BucketSize values
+ dwhv := newdie(&dwtypes, DW_ABRV_ARRAYTYPE, mkinternaltypename("[]val", getattr(valtype, DW_AT_name).data.(string), ""))
+
+ newattr(dwhv, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize*int64(valsize), 0)
+ t = valtype
+ if indirect_val {
+ t = defptrto(valtype)
+ }
+ newrefattr(dwhv, DW_AT_type, t)
+ fld = newdie(dwhv, DW_ABRV_ARRAYRANGE, "size")
+ newattr(fld, DW_AT_count, DW_CLS_CONSTANT, BucketSize, 0)
+ newrefattr(fld, DW_AT_type, mustFind(&dwtypes, "uintptr"))
+
+ // Construct bucket
+ dwhb := newdie(&dwtypes, DW_ABRV_STRUCTTYPE, mkinternaltypename("bucket", getattr(keytype, DW_AT_name).data.(string), getattr(valtype, DW_AT_name).data.(string)))
+
+ // Copy over all fields except the field "data" from the generic bucket.
+ // "data" will be replaced with keys/values below.
+ copychildrenexcept(dwhb, bucket, find(bucket, "data"))
+
+ fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "keys")
+ newrefattr(fld, DW_AT_type, dwhk)
+ newmemberoffsetattr(fld, BucketSize)
+ fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "values")
+ newrefattr(fld, DW_AT_type, dwhv)
+ newmemberoffsetattr(fld, BucketSize+BucketSize*int32(keysize))
+ fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "overflow")
+ newrefattr(fld, DW_AT_type, defptrto(dwhb))
+ newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize)))
+ if Thearch.Regsize > Thearch.Ptrsize {
+ fld = newdie(dwhb, DW_ABRV_STRUCTFIELD, "pad")
+ newrefattr(fld, DW_AT_type, mustFind(&dwtypes, "uintptr"))
+ newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(Thearch.Ptrsize))
+ }
+
+ newattr(dwhb, DW_AT_byte_size, DW_CLS_CONSTANT, BucketSize+BucketSize*int64(keysize)+BucketSize*int64(valsize)+int64(Thearch.Regsize), 0)
+
+ // Construct hash
+ dwh := newdie(&dwtypes, DW_ABRV_STRUCTTYPE, mkinternaltypename("hash", getattr(keytype, DW_AT_name).data.(string), getattr(valtype, DW_AT_name).data.(string)))
+
+ copychildren(dwh, hash)
+ substitutetype(dwh, "buckets", defptrto(dwhb))
+ substitutetype(dwh, "oldbuckets", defptrto(dwhb))
+ newattr(dwh, DW_AT_byte_size, DW_CLS_CONSTANT, getattr(hash, DW_AT_byte_size).value, nil)
+
+ // make map type a pointer to hash
+ newrefattr(die, DW_AT_type, defptrto(dwh))
+ }
+}
+
+func synthesizechantypes(die *DWDie) {
+ sudog := walktypedef(defgotype(lookup_or_diag("type.runtime.sudog")))
+ waitq := walktypedef(defgotype(lookup_or_diag("type.runtime.waitq")))
+ hchan := walktypedef(defgotype(lookup_or_diag("type.runtime.hchan")))
+ if sudog == nil || waitq == nil || hchan == nil {
+ return
+ }
+
+ sudogsize := int(getattr(sudog, DW_AT_byte_size).value)
+
+ for ; die != nil; die = die.link {
+ if die.abbrev != DW_ABRV_CHANTYPE {
+ continue
+ }
+ elemsize := Thearch.Ptrsize
+ elemtype := getattr(die, DW_AT_go_elem).data.(*DWDie)
+ a := getattr(elemtype, DW_AT_byte_size)
+ if a != nil {
+ elemsize = int(a.value)
+ }
+
+ // sudog
+ dws := newdie(&dwtypes, DW_ABRV_STRUCTTYPE, mkinternaltypename("sudog", getattr(elemtype, DW_AT_name).data.(string), ""))
+
+ copychildren(dws, sudog)
+ substitutetype(dws, "elem", elemtype)
+ if elemsize > 8 {
+ elemsize -= 8
+ } else {
+ elemsize = 0
+ }
+ newattr(dws, DW_AT_byte_size, DW_CLS_CONSTANT, int64(sudogsize)+int64(elemsize), nil)
+
+ // waitq
+ dww := newdie(&dwtypes, DW_ABRV_STRUCTTYPE, mkinternaltypename("waitq", getattr(elemtype, DW_AT_name).data.(string), ""))
+
+ copychildren(dww, waitq)
+ substitutetype(dww, "first", defptrto(dws))
+ substitutetype(dww, "last", defptrto(dws))
+ newattr(dww, DW_AT_byte_size, DW_CLS_CONSTANT, getattr(waitq, DW_AT_byte_size).value, nil)
+
+ // hchan
+ dwh := newdie(&dwtypes, DW_ABRV_STRUCTTYPE, mkinternaltypename("hchan", getattr(elemtype, DW_AT_name).data.(string), ""))
+
+ copychildren(dwh, hchan)
+ substitutetype(dwh, "recvq", dww)
+ substitutetype(dwh, "sendq", dww)
+ newattr(dwh, DW_AT_byte_size, DW_CLS_CONSTANT, getattr(hchan, DW_AT_byte_size).value, nil)
+
+ newrefattr(die, DW_AT_type, defptrto(dwh))
+ }
+}
+
+// For use with pass.c::genasmsym
+func defdwsymb(sym *LSym, s string, t int, v int64, size int64, ver int, gotype *LSym) {
+ if strings.HasPrefix(s, "go.string.") {
+ return
+ }
+ if strings.HasPrefix(s, "runtime.gcbits.") {
+ return
+ }
+
+ if strings.HasPrefix(s, "type.") && s != "type.*" && !strings.HasPrefix(s, "type..") {
+ defgotype(sym)
+ return
+ }
+
+ var dv *DWDie
+
+ var dt *DWDie
+ switch t {
+ default:
+ return
+
+ case 'd', 'b', 'D', 'B':
+ dv = newdie(&dwglobals, DW_ABRV_VARIABLE, s)
+ newabslocexprattr(dv, v, sym)
+ if ver == 0 {
+ newattr(dv, DW_AT_external, DW_CLS_FLAG, 1, 0)
+ }
+ fallthrough
+
+ case 'a', 'p':
+ dt = defgotype(gotype)
+ }
+
+ if dv != nil {
+ newrefattr(dv, DW_AT_type, dt)
+ }
+}
+
+func movetomodule(parent *DWDie) {
+ die := dwroot.child.child
+ for die.link != nil {
+ die = die.link
+ }
+ die.link = parent.child
+}
+
+// If the pcln table contains runtime/runtime.go, use that to set gdbscript path.
+func finddebugruntimepath(s *LSym) {
+ if gdbscript != "" {
+ return
+ }
+
+ for i := 0; i < s.Pcln.Nfile; i++ {
+ f := s.Pcln.File[i]
+ if i := strings.Index(f.Name, "runtime/runtime.go"); i >= 0 {
+ gdbscript = f.Name[:i] + "runtime/runtime-gdb.py"
+ break
+ }
+ }
+}
+
+/*
+ * Generate short opcodes when possible, long ones when necessary.
+ * See section 6.2.5
+ */
+const (
+ LINE_BASE = -1
+ LINE_RANGE = 4
+ OPCODE_BASE = 10
+)
+
+func putpclcdelta(delta_pc int64, delta_lc int64) {
+ if LINE_BASE <= delta_lc && delta_lc < LINE_BASE+LINE_RANGE {
+ var opcode int64 = OPCODE_BASE + (delta_lc - LINE_BASE) + (LINE_RANGE * delta_pc)
+ if OPCODE_BASE <= opcode && opcode < 256 {
+ Cput(uint8(opcode))
+ return
+ }
+ }
+
+ if delta_pc != 0 {
+ Cput(DW_LNS_advance_pc)
+ sleb128put(delta_pc)
+ }
+
+ Cput(DW_LNS_advance_line)
+ sleb128put(delta_lc)
+ Cput(DW_LNS_copy)
+}
+
+func newcfaoffsetattr(die *DWDie, offs int32) {
+ var block [20]byte
+
+ i := 0
+
+ block[i] = DW_OP_call_frame_cfa
+ i++
+ if offs != 0 {
+ block[i] = DW_OP_consts
+ i++
+ i += sleb128enc(int64(offs), block[i:])
+ block[i] = DW_OP_plus
+ i++
+ }
+
+ newattr(die, DW_AT_location, DW_CLS_BLOCK, int64(i), block[:i])
+}
+
+func mkvarname(name string, da int) string {
+ buf := fmt.Sprintf("%s#%d", name, da)
+ n := buf
+ return n
+}
+
+/*
+ * Walk prog table, emit line program and build DIE tree.
+ */
+
+// flush previous compilation unit.
+func flushunit(dwinfo *DWDie, pc int64, pcsym *LSym, unitstart int64, header_length int32) {
+ if dwinfo != nil && pc != 0 {
+ newattr(dwinfo, DW_AT_high_pc, DW_CLS_ADDRESS, pc+1, pcsym)
+ }
+
+ if unitstart >= 0 {
+ Cput(0) // start extended opcode
+ uleb128put(1)
+ Cput(DW_LNE_end_sequence)
+
+ here := Cpos()
+ Cseek(unitstart)
+ Thearch.Lput(uint32(here - unitstart - 4)) // unit_length
+ Thearch.Wput(2) // dwarf version
+ Thearch.Lput(uint32(header_length)) // header length starting here
+ Cseek(here)
+ }
+}
+
+func getCompilationDir() string {
+ if dir, err := os.Getwd(); err == nil {
+ return dir
+ }
+ return "/"
+}
+
+func writelines() {
+ if linesec == nil {
+ linesec = Linklookup(Ctxt, ".dwarfline", 0)
+ }
+ linesec.R = linesec.R[:0]
+
+ unitstart := int64(-1)
+ headerend := int64(-1)
+ epc := int64(0)
+ var epcs *LSym
+ lineo = Cpos()
+ var dwinfo *DWDie
+ flushunit(dwinfo, epc, epcs, unitstart, int32(headerend-unitstart-10))
+ unitstart = Cpos()
+
+ lang := DW_LANG_Go
+
+ s := Ctxt.Textp
+
+ dwinfo = newdie(&dwroot, DW_ABRV_COMPUNIT, "go")
+ newattr(dwinfo, DW_AT_language, DW_CLS_CONSTANT, int64(lang), 0)
+ newattr(dwinfo, DW_AT_stmt_list, DW_CLS_PTR, unitstart-lineo, 0)
+ newattr(dwinfo, DW_AT_low_pc, DW_CLS_ADDRESS, s.Value, s)
+ // OS X linker requires compilation dir or absolute path in comp unit name to output debug info.
+ compDir := getCompilationDir()
+ newattr(dwinfo, DW_AT_comp_dir, DW_CLS_STRING, int64(len(compDir)), compDir)
+
+ // Write .debug_line Line Number Program Header (sec 6.2.4)
+ // Fields marked with (*) must be changed for 64-bit dwarf
+ Thearch.Lput(0) // unit_length (*), will be filled in by flushunit.
+ Thearch.Wput(2) // dwarf version (appendix F)
+ Thearch.Lput(0) // header_length (*), filled in by flushunit.
+
+ // cpos == unitstart + 4 + 2 + 4
+ Cput(1) // minimum_instruction_length
+ Cput(1) // default_is_stmt
+ Cput(LINE_BASE & 0xFF) // line_base
+ Cput(LINE_RANGE) // line_range
+ Cput(OPCODE_BASE) // opcode_base
+ Cput(0) // standard_opcode_lengths[1]
+ Cput(1) // standard_opcode_lengths[2]
+ Cput(1) // standard_opcode_lengths[3]
+ Cput(1) // standard_opcode_lengths[4]
+ Cput(1) // standard_opcode_lengths[5]
+ Cput(0) // standard_opcode_lengths[6]
+ Cput(0) // standard_opcode_lengths[7]
+ Cput(0) // standard_opcode_lengths[8]
+ Cput(1) // standard_opcode_lengths[9]
+ Cput(0) // include_directories (empty)
+
+ files := make([]*LSym, Ctxt.Nhistfile)
+
+ for f := Ctxt.Filesyms; f != nil; f = f.Next {
+ files[f.Value-1] = f
+ }
+
+ for i := 0; int32(i) < Ctxt.Nhistfile; i++ {
+ strnput(files[i].Name, len(files[i].Name)+4)
+ }
+
+ // 4 zeros: the string termination + 3 fields.
+ Cput(0)
+ // terminate file_names.
+ headerend = Cpos()
+
+ Cput(0) // start extended opcode
+ uleb128put(1 + int64(Thearch.Ptrsize))
+ Cput(DW_LNE_set_address)
+
+ pc := s.Value
+ line := 1
+ file := 1
+ if Linkmode == LinkExternal {
+ adddwarfrel(linesec, s, lineo, Thearch.Ptrsize, 0)
+ } else {
+ addrput(pc)
+ }
+
+ var pcfile Pciter
+ var pcline Pciter
+ for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
+ s = Ctxt.Cursym
+
+ dwfunc := newdie(dwinfo, DW_ABRV_FUNCTION, s.Name)
+ newattr(dwfunc, DW_AT_low_pc, DW_CLS_ADDRESS, s.Value, s)
+ epc = s.Value + s.Size
+ epcs = s
+ newattr(dwfunc, DW_AT_high_pc, DW_CLS_ADDRESS, epc, s)
+ if s.Version == 0 {
+ newattr(dwfunc, DW_AT_external, DW_CLS_FLAG, 1, 0)
+ }
+
+ if s.Pcln == nil {
+ continue
+ }
+
+ finddebugruntimepath(s)
+
+ pciterinit(Ctxt, &pcfile, &s.Pcln.Pcfile)
+ pciterinit(Ctxt, &pcline, &s.Pcln.Pcline)
+ epc = pc
+ for pcfile.done == 0 && pcline.done == 0 {
+ if epc-s.Value >= int64(pcfile.nextpc) {
+ pciternext(&pcfile)
+ continue
+ }
+
+ if epc-s.Value >= int64(pcline.nextpc) {
+ pciternext(&pcline)
+ continue
+ }
+
+ if int32(file) != pcfile.value {
+ Cput(DW_LNS_set_file)
+ uleb128put(int64(pcfile.value))
+ file = int(pcfile.value)
+ }
+
+ putpclcdelta(s.Value+int64(pcline.pc)-pc, int64(pcline.value)-int64(line))
+
+ pc = s.Value + int64(pcline.pc)
+ line = int(pcline.value)
+ if pcfile.nextpc < pcline.nextpc {
+ epc = int64(pcfile.nextpc)
+ } else {
+ epc = int64(pcline.nextpc)
+ }
+ epc += s.Value
+ }
+
+ var (
+ dt int
+ offs int64
+ varhash [HASHSIZE]*DWDie
+ )
+ da := 0
+ dwfunc.hash = varhash[:] // enable indexing of children by name
+ for a := s.Autom; a != nil; a = a.Link {
+ switch a.Name {
+ case obj.A_AUTO:
+ dt = DW_ABRV_AUTO
+ offs = int64(a.Aoffset)
+ if !haslinkregister() {
+ offs -= int64(Thearch.Ptrsize)
+ }
+
+ case obj.A_PARAM:
+ dt = DW_ABRV_PARAM
+ offs = int64(a.Aoffset) + Ctxt.FixedFrameSize()
+
+ default:
+ continue
+ }
+
+ if strings.Contains(a.Asym.Name, ".autotmp_") {
+ continue
+ }
+ var n string
+ if find(dwfunc, a.Asym.Name) != nil {
+ n = mkvarname(a.Asym.Name, da)
+ } else {
+ n = a.Asym.Name
+ }
+
+ // Drop the package prefix from locals and arguments.
+ if i := strings.LastIndex(n, "."); i >= 0 {
+ n = n[i+1:]
+ }
+
+ dwvar := newdie(dwfunc, dt, n)
+ newcfaoffsetattr(dwvar, int32(offs))
+ newrefattr(dwvar, DW_AT_type, defgotype(a.Gotype))
+
+ // push dwvar down dwfunc->child to preserve order
+ newattr(dwvar, DW_AT_internal_location, DW_CLS_CONSTANT, offs, nil)
+
+ dwfunc.child = dwvar.link // take dwvar out from the top of the list
+ dws := &dwfunc.child
+ for ; *dws != nil; dws = &(*dws).link {
+ if offs > getattr(*dws, DW_AT_internal_location).value {
+ break
+ }
+ }
+ dwvar.link = *dws
+ *dws = dwvar
+
+ da++
+ }
+
+ dwfunc.hash = nil
+ }
+
+ flushunit(dwinfo, epc, epcs, unitstart, int32(headerend-unitstart-10))
+ linesize = Cpos() - lineo
+}
+
+/*
+ * Emit .debug_frame
+ */
+const (
+ CIERESERVE = 16
+ DATAALIGNMENTFACTOR = -4
+)
+
+func putpccfadelta(deltapc int64, cfa int64) {
+ Cput(DW_CFA_def_cfa_offset_sf)
+ sleb128put(cfa / DATAALIGNMENTFACTOR)
+
+ if deltapc < 0x40 {
+ Cput(uint8(DW_CFA_advance_loc + deltapc))
+ } else if deltapc < 0x100 {
+ Cput(DW_CFA_advance_loc1)
+ Cput(uint8(deltapc))
+ } else if deltapc < 0x10000 {
+ Cput(DW_CFA_advance_loc2)
+ Thearch.Wput(uint16(deltapc))
+ } else {
+ Cput(DW_CFA_advance_loc4)
+ Thearch.Lput(uint32(deltapc))
+ }
+}
+
+func writeframes() {
+ if framesec == nil {
+ framesec = Linklookup(Ctxt, ".dwarfframe", 0)
+ }
+ framesec.R = framesec.R[:0]
+ frameo = Cpos()
+
+ // Emit the CIE, Section 6.4.1
+ Thearch.Lput(CIERESERVE) // initial length, must be multiple of thearch.ptrsize
+ Thearch.Lput(0xffffffff) // cid.
+ Cput(3) // dwarf version (appendix F)
+ Cput(0) // augmentation ""
+ uleb128put(1) // code_alignment_factor
+ sleb128put(DATAALIGNMENTFACTOR) // guess
+ uleb128put(int64(Thearch.Dwarfreglr)) // return_address_register
+
+ Cput(DW_CFA_def_cfa)
+
+ uleb128put(int64(Thearch.Dwarfregsp)) // register SP (**ABI-dependent, defined in l.h)
+ if haslinkregister() {
+ uleb128put(int64(0)) // offset
+ } else {
+ uleb128put(int64(Thearch.Ptrsize)) // offset
+ }
+
+ Cput(DW_CFA_offset_extended)
+ uleb128put(int64(Thearch.Dwarfreglr)) // return address
+ if haslinkregister() {
+ uleb128put(int64(0) / DATAALIGNMENTFACTOR) // at cfa - 0
+ } else {
+ uleb128put(int64(-Thearch.Ptrsize) / DATAALIGNMENTFACTOR) // at cfa - x*4
+ }
+
+ // 4 is to exclude the length field.
+ pad := CIERESERVE + frameo + 4 - Cpos()
+
+ if pad < 0 {
+ Exitf("dwarf: CIERESERVE too small by %d bytes.", -pad)
+ }
+
+ strnput("", int(pad))
+
+ var pcsp Pciter
+ for Ctxt.Cursym = Ctxt.Textp; Ctxt.Cursym != nil; Ctxt.Cursym = Ctxt.Cursym.Next {
+ s := Ctxt.Cursym
+ if s.Pcln == nil {
+ continue
+ }
+
+ fdeo := Cpos()
+
+ // Emit a FDE, Section 6.4.1, starting with a placeholder.
+ Thearch.Lput(0) // length, must be multiple of thearch.ptrsize
+ Thearch.Lput(0) // Pointer to the CIE above, at offset 0
+ addrput(0) // initial location
+ addrput(0) // address range
+
+ for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
+ nextpc := pcsp.nextpc
+
+ // pciterinit goes up to the end of the function,
+ // but DWARF expects us to stop just before the end.
+ if int64(nextpc) == s.Size {
+ nextpc--
+ if nextpc < pcsp.pc {
+ continue
+ }
+ }
+
+ if haslinkregister() {
+ putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(pcsp.value))
+ } else {
+ putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(Thearch.Ptrsize)+int64(pcsp.value))
+ }
+ }
+
+ fdesize := Cpos() - fdeo - 4 // exclude the length field.
+ pad = Rnd(fdesize, int64(Thearch.Ptrsize)) - fdesize
+ strnput("", int(pad))
+ fdesize += pad
+
+ // Emit the FDE header for real, Section 6.4.1.
+ Cseek(fdeo)
+
+ Thearch.Lput(uint32(fdesize))
+ if Linkmode == LinkExternal {
+ adddwarfrel(framesec, framesym, frameo, 4, 0)
+ adddwarfrel(framesec, s, frameo, Thearch.Ptrsize, 0)
+ } else {
+ Thearch.Lput(0)
+ addrput(s.Value)
+ }
+
+ addrput(s.Size)
+ Cseek(fdeo + 4 + fdesize)
+ }
+
+ Cflush()
+ framesize = Cpos() - frameo
+}
+
+/*
+ * Walk DWarfDebugInfoEntries, and emit .debug_info
+ */
+const (
+ COMPUNITHEADERSIZE = 4 + 2 + 4 + 1
+)
+
+func writeinfo() {
+ fwdcount = 0
+ if infosec == nil {
+ infosec = Linklookup(Ctxt, ".dwarfinfo", 0)
+ }
+ infosec.R = infosec.R[:0]
+
+ if arangessec == nil {
+ arangessec = Linklookup(Ctxt, ".dwarfaranges", 0)
+ }
+ arangessec.R = arangessec.R[:0]
+
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
+ unitstart := Cpos()
+
+ // Write .debug_info Compilation Unit Header (sec 7.5.1)
+ // Fields marked with (*) must be changed for 64-bit dwarf
+ // This must match COMPUNITHEADERSIZE above.
+ Thearch.Lput(0) // unit_length (*), will be filled in later.
+ Thearch.Wput(2) // dwarf version (appendix F)
+
+ // debug_abbrev_offset (*)
+ if Linkmode == LinkExternal {
+ adddwarfrel(infosec, abbrevsym, infoo, 4, 0)
+ } else {
+ Thearch.Lput(0)
+ }
+
+ Cput(uint8(Thearch.Ptrsize)) // address_size
+
+ putdie(compunit)
+
+ here := Cpos()
+ Cseek(unitstart)
+ Thearch.Lput(uint32(here - unitstart - 4)) // exclude the length field.
+ Cseek(here)
+ }
+
+ Cflush()
+}
+
+/*
+ * Emit .debug_pubnames/_types. _info must have been written before,
+ * because we need die->offs and infoo/infosize.
+ */
+func ispubname(die *DWDie) bool {
+ switch die.abbrev {
+ case DW_ABRV_FUNCTION, DW_ABRV_VARIABLE:
+ a := getattr(die, DW_AT_external)
+ return a != nil && a.value != 0
+ }
+
+ return false
+}
+
+func ispubtype(die *DWDie) bool {
+ return die.abbrev >= DW_ABRV_NULLTYPE
+}
+
+func writepub(ispub func(*DWDie) bool) int64 {
+ sectionstart := Cpos()
+
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
+ unitend := infoo + infosize
+ unitstart := compunit.offs - COMPUNITHEADERSIZE
+ if compunit.link != nil {
+ unitend = compunit.link.offs - COMPUNITHEADERSIZE
+ }
+
+ // Write .debug_pubnames/types Header (sec 6.1.1)
+ Thearch.Lput(0) // unit_length (*), will be filled in later.
+ Thearch.Wput(2) // dwarf version (appendix F)
+ Thearch.Lput(uint32(unitstart)) // debug_info_offset (of the Comp unit Header)
+ Thearch.Lput(uint32(unitend - unitstart)) // debug_info_length
+
+ for die := compunit.child; die != nil; die = die.link {
+ if !ispub(die) {
+ continue
+ }
+ Thearch.Lput(uint32(die.offs - unitstart))
+ dwa := getattr(die, DW_AT_name)
+ strnput(dwa.data.(string), int(dwa.value+1))
+ }
+
+ Thearch.Lput(0)
+
+ here := Cpos()
+ Cseek(sectionstart)
+ Thearch.Lput(uint32(here - sectionstart - 4)) // exclude the length field.
+ Cseek(here)
+ }
+
+ return sectionstart
+}
+
+/*
+ * emit .debug_aranges. _info must have been written before,
+ * because we need die->offs of dw_globals.
+ */
+func writearanges() int64 {
+ sectionstart := Cpos()
+ // The first tuple is aligned to a multiple of the size of a single tuple
+ // (twice the size of an address)
+ headersize := int(Rnd(4+2+4+1+1, int64(Thearch.Ptrsize*2))) // don't count unit_length field itself
+
+ for compunit := dwroot.child; compunit != nil; compunit = compunit.link {
+ b := getattr(compunit, DW_AT_low_pc)
+ if b == nil {
+ continue
+ }
+ e := getattr(compunit, DW_AT_high_pc)
+ if e == nil {
+ continue
+ }
+
+ // Write .debug_aranges Header + entry (sec 6.1.2)
+ Thearch.Lput(uint32(headersize) + 4*uint32(Thearch.Ptrsize) - 4) // unit_length (*)
+ Thearch.Wput(2) // dwarf version (appendix F)
+
+ value := compunit.offs - COMPUNITHEADERSIZE // debug_info_offset
+ if Linkmode == LinkExternal {
+ adddwarfrel(arangessec, infosym, sectionstart, 4, value)
+ } else {
+ Thearch.Lput(uint32(value))
+ }
+
+ Cput(uint8(Thearch.Ptrsize)) // address_size
+ Cput(0) // segment_size
+ strnput("", headersize-(4+2+4+1+1)) // align to thearch.ptrsize
+
+ if Linkmode == LinkExternal {
+ adddwarfrel(arangessec, b.data.(*LSym), sectionstart, Thearch.Ptrsize, b.value-(b.data.(*LSym)).Value)
+ } else {
+ addrput(b.value)
+ }
+
+ addrput(e.value - b.value)
+ addrput(0)
+ addrput(0)
+ }
+
+ Cflush()
+ return sectionstart
+}
+
+func writegdbscript() int64 {
+ sectionstart := Cpos()
+
+ if gdbscript != "" {
+ Cput(1) // magic 1 byte?
+ strnput(gdbscript, len(gdbscript)+1)
+ Cflush()
+ }
+
+ return sectionstart
+}
+
+func align(size int64) {
+ if HEADTYPE == obj.Hwindows { // Only Windows PE need section align.
+ strnput("", int(Rnd(size, PEFILEALIGN)-size))
+ }
+}
+
+func writedwarfreloc(s *LSym) int64 {
+ start := Cpos()
+ for ri := 0; ri < len(s.R); ri++ {
+ r := &s.R[ri]
+ i := -1
+ if Iself {
+ i = Thearch.Elfreloc1(r, int64(r.Off))
+ } else if HEADTYPE == obj.Hdarwin {
+ i = Thearch.Machoreloc1(r, int64(r.Off))
+ }
+ if i < 0 {
+ Diag("unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name)
+ }
+ }
+
+ return start
+}
+
+func addmachodwarfsect(prev *Section, name string) *Section {
+ sect := addsection(&Segdwarf, name, 04)
+ sect.Extnum = prev.Extnum + 1
+ sym := Linklookup(Ctxt, name, 0)
+ sym.Sect = sect
+ return sect
+}
+
+/*
+ * This is the main entry point for generating dwarf. After emitting
+ * the mandatory debug_abbrev section, it calls writelines() to set up
+ * the per-compilation unit part of the DIE tree, while simultaneously
+ * emitting the debug_line section. When the final tree contains
+ * forward references, it will write the debug_info section in 2
+ * passes.
+ *
+ */
+func Dwarfemitdebugsections() {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+
+ if Linkmode == LinkExternal {
+ if !Iself && HEADTYPE != obj.Hdarwin {
+ return
+ }
+ if HEADTYPE == obj.Hdarwin {
+ sect := Segdata.Sect
+ // find the last section.
+ for sect.Next != nil {
+ sect = sect.Next
+ }
+ sect = addmachodwarfsect(sect, ".debug_abbrev")
+ sect = addmachodwarfsect(sect, ".debug_line")
+ sect = addmachodwarfsect(sect, ".debug_frame")
+ sect = addmachodwarfsect(sect, ".debug_info")
+
+ infosym = Linklookup(Ctxt, ".debug_info", 0)
+ infosym.Hide = 1
+
+ abbrevsym = Linklookup(Ctxt, ".debug_abbrev", 0)
+ abbrevsym.Hide = 1
+
+ linesym = Linklookup(Ctxt, ".debug_line", 0)
+ linesym.Hide = 1
+
+ framesym = Linklookup(Ctxt, ".debug_frame", 0)
+ framesym.Hide = 1
+ }
+ }
+
+ // For diagnostic messages.
+ newattr(&dwtypes, DW_AT_name, DW_CLS_STRING, int64(len("dwtypes")), "dwtypes")
+
+ mkindex(&dwroot)
+ mkindex(&dwtypes)
+ mkindex(&dwglobals)
+
+ // Some types that must exist to define other ones.
+ newdie(&dwtypes, DW_ABRV_NULLTYPE, "")
+
+ newdie(&dwtypes, DW_ABRV_NULLTYPE, "void")
+ newdie(&dwtypes, DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer")
+
+ die := newdie(&dwtypes, DW_ABRV_BASETYPE, "uintptr") // needed for array size
+ newattr(die, DW_AT_encoding, DW_CLS_CONSTANT, DW_ATE_unsigned, 0)
+ newattr(die, DW_AT_byte_size, DW_CLS_CONSTANT, int64(Thearch.Ptrsize), 0)
+ newattr(die, DW_AT_go_kind, DW_CLS_CONSTANT, obj.KindUintptr, 0)
+
+ // Needed by the prettyprinter code for interface inspection.
+ defgotype(lookup_or_diag("type.runtime._type"))
+
+ defgotype(lookup_or_diag("type.runtime.interfacetype"))
+ defgotype(lookup_or_diag("type.runtime.itab"))
+
+ genasmsym(defdwsymb)
+
+ writeabbrev()
+ align(abbrevsize)
+ writelines()
+ align(linesize)
+ writeframes()
+ align(framesize)
+
+ synthesizestringtypes(dwtypes.child)
+ synthesizeslicetypes(dwtypes.child)
+ synthesizemaptypes(dwtypes.child)
+ synthesizechantypes(dwtypes.child)
+
+ reversetree(&dwroot.child)
+ reversetree(&dwtypes.child)
+ reversetree(&dwglobals.child)
+
+ movetomodule(&dwtypes)
+ movetomodule(&dwglobals)
+
+ infoo = Cpos()
+ writeinfo()
+ infoe := Cpos()
+ pubnameso = infoe
+ pubtypeso = infoe
+ arangeso = infoe
+ gdbscripto = infoe
+
+ if fwdcount > 0 {
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f dwarf pass 2.\n", obj.Cputime())
+ }
+ Cseek(infoo)
+ writeinfo()
+ if fwdcount > 0 {
+ Exitf("dwarf: unresolved references after first dwarf info pass")
+ }
+
+ if infoe != Cpos() {
+ Exitf("dwarf: inconsistent second dwarf info pass")
+ }
+ }
+
+ infosize = infoe - infoo
+ align(infosize)
+
+ pubnameso = writepub(ispubname)
+ pubnamessize = Cpos() - pubnameso
+ align(pubnamessize)
+
+ pubtypeso = writepub(ispubtype)
+ pubtypessize = Cpos() - pubtypeso
+ align(pubtypessize)
+
+ arangeso = writearanges()
+ arangessize = Cpos() - arangeso
+ align(arangessize)
+
+ gdbscripto = writegdbscript()
+ gdbscriptsize = Cpos() - gdbscripto
+ align(gdbscriptsize)
+
+ for Cpos()&7 != 0 {
+ Cput(0)
+ }
+ if HEADTYPE != obj.Hdarwin {
+ dwarfemitreloc()
+ }
+}
+
+func dwarfemitreloc() {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+ inforeloco = writedwarfreloc(infosec)
+ inforelocsize = Cpos() - inforeloco
+ align(inforelocsize)
+
+ arangesreloco = writedwarfreloc(arangessec)
+ arangesrelocsize = Cpos() - arangesreloco
+ align(arangesrelocsize)
+
+ linereloco = writedwarfreloc(linesec)
+ linerelocsize = Cpos() - linereloco
+ align(linerelocsize)
+
+ framereloco = writedwarfreloc(framesec)
+ framerelocsize = Cpos() - framereloco
+ align(framerelocsize)
+}
+
+/*
+ * Elf.
+ */
+const (
+ ElfStrDebugAbbrev = iota
+ ElfStrDebugAranges
+ ElfStrDebugFrame
+ ElfStrDebugInfo
+ ElfStrDebugLine
+ ElfStrDebugLoc
+ ElfStrDebugMacinfo
+ ElfStrDebugPubNames
+ ElfStrDebugPubTypes
+ ElfStrDebugRanges
+ ElfStrDebugStr
+ ElfStrGDBScripts
+ ElfStrRelDebugInfo
+ ElfStrRelDebugAranges
+ ElfStrRelDebugLine
+ ElfStrRelDebugFrame
+ NElfStrDbg
+)
+
+var elfstrdbg [NElfStrDbg]int64
+
+func dwarfaddshstrings(shstrtab *LSym) {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+
+ elfstrdbg[ElfStrDebugAbbrev] = Addstring(shstrtab, ".debug_abbrev")
+ elfstrdbg[ElfStrDebugAranges] = Addstring(shstrtab, ".debug_aranges")
+ elfstrdbg[ElfStrDebugFrame] = Addstring(shstrtab, ".debug_frame")
+ elfstrdbg[ElfStrDebugInfo] = Addstring(shstrtab, ".debug_info")
+ elfstrdbg[ElfStrDebugLine] = Addstring(shstrtab, ".debug_line")
+ elfstrdbg[ElfStrDebugLoc] = Addstring(shstrtab, ".debug_loc")
+ elfstrdbg[ElfStrDebugMacinfo] = Addstring(shstrtab, ".debug_macinfo")
+ elfstrdbg[ElfStrDebugPubNames] = Addstring(shstrtab, ".debug_pubnames")
+ elfstrdbg[ElfStrDebugPubTypes] = Addstring(shstrtab, ".debug_pubtypes")
+ elfstrdbg[ElfStrDebugRanges] = Addstring(shstrtab, ".debug_ranges")
+ elfstrdbg[ElfStrDebugStr] = Addstring(shstrtab, ".debug_str")
+ elfstrdbg[ElfStrGDBScripts] = Addstring(shstrtab, ".debug_gdb_scripts")
+ if Linkmode == LinkExternal {
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ elfstrdbg[ElfStrRelDebugInfo] = Addstring(shstrtab, ".rela.debug_info")
+ elfstrdbg[ElfStrRelDebugAranges] = Addstring(shstrtab, ".rela.debug_aranges")
+ elfstrdbg[ElfStrRelDebugLine] = Addstring(shstrtab, ".rela.debug_line")
+ elfstrdbg[ElfStrRelDebugFrame] = Addstring(shstrtab, ".rela.debug_frame")
+ default:
+ elfstrdbg[ElfStrRelDebugInfo] = Addstring(shstrtab, ".rel.debug_info")
+ elfstrdbg[ElfStrRelDebugAranges] = Addstring(shstrtab, ".rel.debug_aranges")
+ elfstrdbg[ElfStrRelDebugLine] = Addstring(shstrtab, ".rel.debug_line")
+ elfstrdbg[ElfStrRelDebugFrame] = Addstring(shstrtab, ".rel.debug_frame")
+ }
+
+ infosym = Linklookup(Ctxt, ".debug_info", 0)
+ infosym.Hide = 1
+
+ abbrevsym = Linklookup(Ctxt, ".debug_abbrev", 0)
+ abbrevsym.Hide = 1
+
+ linesym = Linklookup(Ctxt, ".debug_line", 0)
+ linesym.Hide = 1
+
+ framesym = Linklookup(Ctxt, ".debug_frame", 0)
+ framesym.Hide = 1
+ }
+}
+
+// Add section symbols for DWARF debug info. This is called before
+// dwarfaddelfheaders.
+func dwarfaddelfsectionsyms() {
+ if infosym != nil {
+ infosympos = Cpos()
+ putelfsectionsym(infosym, 0)
+ }
+
+ if abbrevsym != nil {
+ abbrevsympos = Cpos()
+ putelfsectionsym(abbrevsym, 0)
+ }
+
+ if linesym != nil {
+ linesympos = Cpos()
+ putelfsectionsym(linesym, 0)
+ }
+
+ if framesym != nil {
+ framesympos = Cpos()
+ putelfsectionsym(framesym, 0)
+ }
+}
+
+func dwarfaddelfrelocheader(elfstr int, shdata *ElfShdr, off int64, size int64) {
+ sh := newElfShdr(elfstrdbg[elfstr])
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ sh.type_ = SHT_RELA
+ default:
+ sh.type_ = SHT_REL
+ }
+
+ sh.entsize = uint64(Thearch.Ptrsize) * 2
+ if sh.type_ == SHT_RELA {
+ sh.entsize += uint64(Thearch.Ptrsize)
+ }
+ sh.link = uint32(elfshname(".symtab").shnum)
+ sh.info = uint32(shdata.shnum)
+ sh.off = uint64(off)
+ sh.size = uint64(size)
+ sh.addralign = uint64(Thearch.Ptrsize)
+}
+
+func dwarfaddelfheaders() {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAbbrev])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(abbrevo)
+ sh.size = uint64(abbrevsize)
+ sh.addralign = 1
+ if abbrevsympos > 0 {
+ putelfsymshndx(abbrevsympos, sh.shnum)
+ }
+
+ sh = newElfShdr(elfstrdbg[ElfStrDebugLine])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(lineo)
+ sh.size = uint64(linesize)
+ sh.addralign = 1
+ if linesympos > 0 {
+ putelfsymshndx(linesympos, sh.shnum)
+ }
+ shline := sh
+
+ sh = newElfShdr(elfstrdbg[ElfStrDebugFrame])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(frameo)
+ sh.size = uint64(framesize)
+ sh.addralign = 1
+ if framesympos > 0 {
+ putelfsymshndx(framesympos, sh.shnum)
+ }
+ shframe := sh
+
+ sh = newElfShdr(elfstrdbg[ElfStrDebugInfo])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(infoo)
+ sh.size = uint64(infosize)
+ sh.addralign = 1
+ if infosympos > 0 {
+ putelfsymshndx(infosympos, sh.shnum)
+ }
+ shinfo := sh
+
+ if pubnamessize > 0 {
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubNames])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(pubnameso)
+ sh.size = uint64(pubnamessize)
+ sh.addralign = 1
+ }
+
+ if pubtypessize > 0 {
+ sh := newElfShdr(elfstrdbg[ElfStrDebugPubTypes])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(pubtypeso)
+ sh.size = uint64(pubtypessize)
+ sh.addralign = 1
+ }
+
+ var sharanges *ElfShdr
+ if arangessize != 0 {
+ sh := newElfShdr(elfstrdbg[ElfStrDebugAranges])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(arangeso)
+ sh.size = uint64(arangessize)
+ sh.addralign = 1
+ sharanges = sh
+ }
+
+ if gdbscriptsize != 0 {
+ sh := newElfShdr(elfstrdbg[ElfStrGDBScripts])
+ sh.type_ = SHT_PROGBITS
+ sh.off = uint64(gdbscripto)
+ sh.size = uint64(gdbscriptsize)
+ sh.addralign = 1
+ }
+
+ if inforelocsize != 0 {
+ dwarfaddelfrelocheader(ElfStrRelDebugInfo, shinfo, inforeloco, inforelocsize)
+ }
+
+ if arangesrelocsize != 0 {
+ dwarfaddelfrelocheader(ElfStrRelDebugAranges, sharanges, arangesreloco, arangesrelocsize)
+ }
+
+ if linerelocsize != 0 {
+ dwarfaddelfrelocheader(ElfStrRelDebugLine, shline, linereloco, linerelocsize)
+ }
+
+ if framerelocsize != 0 {
+ dwarfaddelfrelocheader(ElfStrRelDebugFrame, shframe, framereloco, framerelocsize)
+ }
+}
+
+/*
+ * Macho
+ */
+func dwarfaddmachoheaders(ms *MachoSeg) {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+
+ // Zero vsize segments won't be loaded in memory, even so they
+ // have to be page aligned in the file.
+ fakestart := Rnd(int64(Segdwarf.Fileoff), 0x1000)
+ addr := Segdata.Vaddr + Segdata.Length
+
+ nsect := 4
+ if pubnamessize > 0 {
+ nsect++
+ }
+ if pubtypessize > 0 {
+ nsect++
+ }
+ if arangessize > 0 {
+ nsect++
+ }
+ if gdbscriptsize > 0 {
+ nsect++
+ }
+
+ if Linkmode != LinkExternal {
+ ms = newMachoSeg("__DWARF", nsect)
+ ms.fileoffset = uint64(fakestart)
+ ms.filesize = Segdwarf.Filelen
+ ms.vaddr = addr
+ }
+
+ msect := newMachoSect(ms, "__debug_abbrev", "__DWARF")
+ msect.off = uint32(abbrevo)
+ msect.size = uint64(abbrevsize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ if abbrevsym != nil {
+ abbrevsym.Value = int64(msect.addr)
+ }
+
+ msect = newMachoSect(ms, "__debug_line", "__DWARF")
+ msect.off = uint32(lineo)
+ msect.size = uint64(linesize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ if linesym != nil {
+ linesym.Value = int64(msect.addr)
+ }
+ if linerelocsize > 0 {
+ msect.nreloc = uint32(len(linesec.R))
+ msect.reloc = uint32(linereloco)
+ }
+
+ msect = newMachoSect(ms, "__debug_frame", "__DWARF")
+ msect.off = uint32(frameo)
+ msect.size = uint64(framesize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ if framesym != nil {
+ framesym.Value = int64(msect.addr)
+ }
+ if framerelocsize > 0 {
+ msect.nreloc = uint32(len(framesec.R))
+ msect.reloc = uint32(framereloco)
+ }
+
+ msect = newMachoSect(ms, "__debug_info", "__DWARF")
+ msect.off = uint32(infoo)
+ msect.size = uint64(infosize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ if infosym != nil {
+ infosym.Value = int64(msect.addr)
+ }
+ if inforelocsize > 0 {
+ msect.nreloc = uint32(len(infosec.R))
+ msect.reloc = uint32(inforeloco)
+ }
+
+ if pubnamessize > 0 {
+ msect := newMachoSect(ms, "__debug_pubnames", "__DWARF")
+ msect.off = uint32(pubnameso)
+ msect.size = uint64(pubnamessize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ }
+
+ if pubtypessize > 0 {
+ msect := newMachoSect(ms, "__debug_pubtypes", "__DWARF")
+ msect.off = uint32(pubtypeso)
+ msect.size = uint64(pubtypessize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ }
+
+ if arangessize > 0 {
+ msect := newMachoSect(ms, "__debug_aranges", "__DWARF")
+ msect.off = uint32(arangeso)
+ msect.size = uint64(arangessize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ if arangesrelocsize > 0 {
+ msect.nreloc = uint32(len(arangessec.R))
+ msect.reloc = uint32(arangesreloco)
+ }
+ }
+
+ // TODO(lvd) fix gdb/python to load MachO (16 char section name limit)
+ if gdbscriptsize > 0 {
+ msect := newMachoSect(ms, "__debug_gdb_scripts", "__DWARF")
+ msect.off = uint32(gdbscripto)
+ msect.size = uint64(gdbscriptsize)
+ msect.addr = addr
+ addr += msect.size
+ msect.flag = 0x02000000
+ }
+}
+
+/*
+ * Windows PE
+ */
+func dwarfaddpeheaders() {
+ if Debug['w'] != 0 { // disable dwarf
+ return
+ }
+
+ newPEDWARFSection(".debug_abbrev", abbrevsize)
+ newPEDWARFSection(".debug_line", linesize)
+ newPEDWARFSection(".debug_frame", framesize)
+ newPEDWARFSection(".debug_info", infosize)
+ newPEDWARFSection(".debug_pubnames", pubnamessize)
+ newPEDWARFSection(".debug_pubtypes", pubtypessize)
+ newPEDWARFSection(".debug_aranges", arangessize)
+ newPEDWARFSection(".debug_gdb_scripts", gdbscriptsize)
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/elf.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/elf.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/elf.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/elf.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,2668 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ld
+
+import (
+ "cmd/internal/obj"
+ "crypto/sha1"
+ "encoding/binary"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+/*
+ * Derived from:
+ * $FreeBSD: src/sys/sys/elf32.h,v 1.8.14.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/sys/elf_common.h,v 1.15.8.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/alpha/include/elf.h,v 1.14 2003/09/25 01:10:22 peter Exp $
+ * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $
+ * $FreeBSD: src/sys/arm/include/elf.h,v 1.5.2.1 2006/06/30 21:42:52 cognet Exp $
+ * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $
+ * $FreeBSD: src/sys/powerpc/include/elf.h,v 1.7 2004/11/02 09:47:01 ssouhlal Exp $
+ * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $
+ *
+ * Copyright (c) 1996-1998 John D. Polstra. All rights reserved.
+ * Copyright (c) 2001 David E. O'Brien
+ * Portions Copyright 2009 The Go Authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * ELF definitions that are independent of architecture or word size.
+ */
+
+/*
+ * Note header. The ".note" section contains an array of notes. Each
+ * begins with this header, aligned to a word boundary. Immediately
+ * following the note header is n_namesz bytes of name, padded to the
+ * next word boundary. Then comes n_descsz bytes of descriptor, again
+ * padded to a word boundary. The values of n_namesz and n_descsz do
+ * not include the padding.
+ */
+type Elf_Note struct {
+ n_namesz uint32
+ n_descsz uint32
+ n_type uint32
+}
+
+const (
+ EI_MAG0 = 0
+ EI_MAG1 = 1
+ EI_MAG2 = 2
+ EI_MAG3 = 3
+ EI_CLASS = 4
+ EI_DATA = 5
+ EI_VERSION = 6
+ EI_OSABI = 7
+ EI_ABIVERSION = 8
+ OLD_EI_BRAND = 8
+ EI_PAD = 9
+ EI_NIDENT = 16
+ ELFMAG0 = 0x7f
+ ELFMAG1 = 'E'
+ ELFMAG2 = 'L'
+ ELFMAG3 = 'F'
+ SELFMAG = 4
+ EV_NONE = 0
+ EV_CURRENT = 1
+ ELFCLASSNONE = 0
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATANONE = 0
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ ELFOSABI_NONE = 0
+ ELFOSABI_HPUX = 1
+ ELFOSABI_NETBSD = 2
+ ELFOSABI_LINUX = 3
+ ELFOSABI_HURD = 4
+ ELFOSABI_86OPEN = 5
+ ELFOSABI_SOLARIS = 6
+ ELFOSABI_AIX = 7
+ ELFOSABI_IRIX = 8
+ ELFOSABI_FREEBSD = 9
+ ELFOSABI_TRU64 = 10
+ ELFOSABI_MODESTO = 11
+ ELFOSABI_OPENBSD = 12
+ ELFOSABI_OPENVMS = 13
+ ELFOSABI_NSK = 14
+ ELFOSABI_ARM = 97
+ ELFOSABI_STANDALONE = 255
+ ELFOSABI_SYSV = ELFOSABI_NONE
+ ELFOSABI_MONTEREY = ELFOSABI_AIX
+ ET_NONE = 0
+ ET_REL = 1
+ ET_EXEC = 2
+ ET_DYN = 3
+ ET_CORE = 4
+ ET_LOOS = 0xfe00
+ ET_HIOS = 0xfeff
+ ET_LOPROC = 0xff00
+ ET_HIPROC = 0xffff
+ EM_NONE = 0
+ EM_M32 = 1
+ EM_SPARC = 2
+ EM_386 = 3
+ EM_68K = 4
+ EM_88K = 5
+ EM_860 = 7
+ EM_MIPS = 8
+ EM_S370 = 9
+ EM_MIPS_RS3_LE = 10
+ EM_PARISC = 15
+ EM_VPP500 = 17
+ EM_SPARC32PLUS = 18
+ EM_960 = 19
+ EM_PPC = 20
+ EM_PPC64 = 21
+ EM_S390 = 22
+ EM_V800 = 36
+ EM_FR20 = 37
+ EM_RH32 = 38
+ EM_RCE = 39
+ EM_ARM = 40
+ EM_SH = 42
+ EM_SPARCV9 = 43
+ EM_TRICORE = 44
+ EM_ARC = 45
+ EM_H8_300 = 46
+ EM_H8_300H = 47
+ EM_H8S = 48
+ EM_H8_500 = 49
+ EM_IA_64 = 50
+ EM_MIPS_X = 51
+ EM_COLDFIRE = 52
+ EM_68HC12 = 53
+ EM_MMA = 54
+ EM_PCP = 55
+ EM_NCPU = 56
+ EM_NDR1 = 57
+ EM_STARCORE = 58
+ EM_ME16 = 59
+ EM_ST100 = 60
+ EM_TINYJ = 61
+ EM_X86_64 = 62
+ EM_AARCH64 = 183
+ EM_486 = 6
+ EM_MIPS_RS4_BE = 10
+ EM_ALPHA_STD = 41
+ EM_ALPHA = 0x9026
+ SHN_UNDEF = 0
+ SHN_LORESERVE = 0xff00
+ SHN_LOPROC = 0xff00
+ SHN_HIPROC = 0xff1f
+ SHN_LOOS = 0xff20
+ SHN_HIOS = 0xff3f
+ SHN_ABS = 0xfff1
+ SHN_COMMON = 0xfff2
+ SHN_XINDEX = 0xffff
+ SHN_HIRESERVE = 0xffff
+ SHT_NULL = 0
+ SHT_PROGBITS = 1
+ SHT_SYMTAB = 2
+ SHT_STRTAB = 3
+ SHT_RELA = 4
+ SHT_HASH = 5
+ SHT_DYNAMIC = 6
+ SHT_NOTE = 7
+ SHT_NOBITS = 8
+ SHT_REL = 9
+ SHT_SHLIB = 10
+ SHT_DYNSYM = 11
+ SHT_INIT_ARRAY = 14
+ SHT_FINI_ARRAY = 15
+ SHT_PREINIT_ARRAY = 16
+ SHT_GROUP = 17
+ SHT_SYMTAB_SHNDX = 18
+ SHT_LOOS = 0x60000000
+ SHT_HIOS = 0x6fffffff
+ SHT_GNU_VERDEF = 0x6ffffffd
+ SHT_GNU_VERNEED = 0x6ffffffe
+ SHT_GNU_VERSYM = 0x6fffffff
+ SHT_LOPROC = 0x70000000
+ SHT_ARM_ATTRIBUTES = 0x70000003
+ SHT_HIPROC = 0x7fffffff
+ SHT_LOUSER = 0x80000000
+ SHT_HIUSER = 0xffffffff
+ SHF_WRITE = 0x1
+ SHF_ALLOC = 0x2
+ SHF_EXECINSTR = 0x4
+ SHF_MERGE = 0x10
+ SHF_STRINGS = 0x20
+ SHF_INFO_LINK = 0x40
+ SHF_LINK_ORDER = 0x80
+ SHF_OS_NONCONFORMING = 0x100
+ SHF_GROUP = 0x200
+ SHF_TLS = 0x400
+ SHF_MASKOS = 0x0ff00000
+ SHF_MASKPROC = 0xf0000000
+ PT_NULL = 0
+ PT_LOAD = 1
+ PT_DYNAMIC = 2
+ PT_INTERP = 3
+ PT_NOTE = 4
+ PT_SHLIB = 5
+ PT_PHDR = 6
+ PT_TLS = 7
+ PT_LOOS = 0x60000000
+ PT_HIOS = 0x6fffffff
+ PT_LOPROC = 0x70000000
+ PT_HIPROC = 0x7fffffff
+ PT_GNU_STACK = 0x6474e551
+ PT_PAX_FLAGS = 0x65041580
+ PF_X = 0x1
+ PF_W = 0x2
+ PF_R = 0x4
+ PF_MASKOS = 0x0ff00000
+ PF_MASKPROC = 0xf0000000
+ DT_NULL = 0
+ DT_NEEDED = 1
+ DT_PLTRELSZ = 2
+ DT_PLTGOT = 3
+ DT_HASH = 4
+ DT_STRTAB = 5
+ DT_SYMTAB = 6
+ DT_RELA = 7
+ DT_RELASZ = 8
+ DT_RELAENT = 9
+ DT_STRSZ = 10
+ DT_SYMENT = 11
+ DT_INIT = 12
+ DT_FINI = 13
+ DT_SONAME = 14
+ DT_RPATH = 15
+ DT_SYMBOLIC = 16
+ DT_REL = 17
+ DT_RELSZ = 18
+ DT_RELENT = 19
+ DT_PLTREL = 20
+ DT_DEBUG = 21
+ DT_TEXTREL = 22
+ DT_JMPREL = 23
+ DT_BIND_NOW = 24
+ DT_INIT_ARRAY = 25
+ DT_FINI_ARRAY = 26
+ DT_INIT_ARRAYSZ = 27
+ DT_FINI_ARRAYSZ = 28
+ DT_RUNPATH = 29
+ DT_FLAGS = 30
+ DT_ENCODING = 32
+ DT_PREINIT_ARRAY = 32
+ DT_PREINIT_ARRAYSZ = 33
+ DT_LOOS = 0x6000000d
+ DT_HIOS = 0x6ffff000
+ DT_LOPROC = 0x70000000
+ DT_HIPROC = 0x7fffffff
+ DT_VERNEED = 0x6ffffffe
+ DT_VERNEEDNUM = 0x6fffffff
+ DT_VERSYM = 0x6ffffff0
+ DT_PPC64_GLINK = DT_LOPROC + 0
+ DT_PPC64_OPT = DT_LOPROC + 3
+ DF_ORIGIN = 0x0001
+ DF_SYMBOLIC = 0x0002
+ DF_TEXTREL = 0x0004
+ DF_BIND_NOW = 0x0008
+ DF_STATIC_TLS = 0x0010
+ NT_PRSTATUS = 1
+ NT_FPREGSET = 2
+ NT_PRPSINFO = 3
+ STB_LOCAL = 0
+ STB_GLOBAL = 1
+ STB_WEAK = 2
+ STB_LOOS = 10
+ STB_HIOS = 12
+ STB_LOPROC = 13
+ STB_HIPROC = 15
+ STT_NOTYPE = 0
+ STT_OBJECT = 1
+ STT_FUNC = 2
+ STT_SECTION = 3
+ STT_FILE = 4
+ STT_COMMON = 5
+ STT_TLS = 6
+ STT_LOOS = 10
+ STT_HIOS = 12
+ STT_LOPROC = 13
+ STT_HIPROC = 15
+ STV_DEFAULT = 0x0
+ STV_INTERNAL = 0x1
+ STV_HIDDEN = 0x2
+ STV_PROTECTED = 0x3
+ STN_UNDEF = 0
+)
+
+/* For accessing the fields of r_info. */
+
+/* For constructing r_info from field values. */
+
+/*
+ * Relocation types.
+ */
+const (
+ R_X86_64_NONE = 0
+ R_X86_64_64 = 1
+ R_X86_64_PC32 = 2
+ R_X86_64_GOT32 = 3
+ R_X86_64_PLT32 = 4
+ R_X86_64_COPY = 5
+ R_X86_64_GLOB_DAT = 6
+ R_X86_64_JMP_SLOT = 7
+ R_X86_64_RELATIVE = 8
+ R_X86_64_GOTPCREL = 9
+ R_X86_64_32 = 10
+ R_X86_64_32S = 11
+ R_X86_64_16 = 12
+ R_X86_64_PC16 = 13
+ R_X86_64_8 = 14
+ R_X86_64_PC8 = 15
+ R_X86_64_DTPMOD64 = 16
+ R_X86_64_DTPOFF64 = 17
+ R_X86_64_TPOFF64 = 18
+ R_X86_64_TLSGD = 19
+ R_X86_64_TLSLD = 20
+ R_X86_64_DTPOFF32 = 21
+ R_X86_64_GOTTPOFF = 22
+ R_X86_64_TPOFF32 = 23
+ R_X86_64_PC64 = 24
+ R_X86_64_GOTOFF64 = 25
+ R_X86_64_GOTPC32 = 26
+ R_X86_64_GOT64 = 27
+ R_X86_64_GOTPCREL64 = 28
+ R_X86_64_GOTPC64 = 29
+ R_X86_64_GOTPLT64 = 30
+ R_X86_64_PLTOFF64 = 31
+ R_X86_64_SIZE32 = 32
+ R_X86_64_SIZE64 = 33
+ R_X86_64_GOTPC32_TLSDEC = 34
+ R_X86_64_TLSDESC_CALL = 35
+ R_X86_64_TLSDESC = 36
+ R_X86_64_IRELATIVE = 37
+ R_X86_64_PC32_BND = 40
+ R_X86_64_GOTPCRELX = 41
+ R_X86_64_REX_GOTPCRELX = 42
+
+ R_AARCH64_ABS64 = 257
+ R_AARCH64_ABS32 = 258
+ R_AARCH64_CALL26 = 283
+ R_AARCH64_ADR_PREL_PG_HI21 = 275
+ R_AARCH64_ADD_ABS_LO12_NC = 277
+ R_AARCH64_LDST8_ABS_LO12_NC = 278
+ R_AARCH64_LDST16_ABS_LO12_NC = 284
+ R_AARCH64_LDST32_ABS_LO12_NC = 285
+ R_AARCH64_LDST64_ABS_LO12_NC = 286
+ R_AARCH64_ADR_GOT_PAGE = 311
+ R_AARCH64_LD64_GOT_LO12_NC = 312
+ R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 = 541
+ R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC = 542
+ R_AARCH64_TLSLE_MOVW_TPREL_G0 = 547
+
+ R_ALPHA_NONE = 0
+ R_ALPHA_REFLONG = 1
+ R_ALPHA_REFQUAD = 2
+ R_ALPHA_GPREL32 = 3
+ R_ALPHA_LITERAL = 4
+ R_ALPHA_LITUSE = 5
+ R_ALPHA_GPDISP = 6
+ R_ALPHA_BRADDR = 7
+ R_ALPHA_HINT = 8
+ R_ALPHA_SREL16 = 9
+ R_ALPHA_SREL32 = 10
+ R_ALPHA_SREL64 = 11
+ R_ALPHA_OP_PUSH = 12
+ R_ALPHA_OP_STORE = 13
+ R_ALPHA_OP_PSUB = 14
+ R_ALPHA_OP_PRSHIFT = 15
+ R_ALPHA_GPVALUE = 16
+ R_ALPHA_GPRELHIGH = 17
+ R_ALPHA_GPRELLOW = 18
+ R_ALPHA_IMMED_GP_16 = 19
+ R_ALPHA_IMMED_GP_HI32 = 20
+ R_ALPHA_IMMED_SCN_HI32 = 21
+ R_ALPHA_IMMED_BR_HI32 = 22
+ R_ALPHA_IMMED_LO32 = 23
+ R_ALPHA_COPY = 24
+ R_ALPHA_GLOB_DAT = 25
+ R_ALPHA_JMP_SLOT = 26
+ R_ALPHA_RELATIVE = 27
+
+ R_ARM_NONE = 0
+ R_ARM_PC24 = 1
+ R_ARM_ABS32 = 2
+ R_ARM_REL32 = 3
+ R_ARM_PC13 = 4
+ R_ARM_ABS16 = 5
+ R_ARM_ABS12 = 6
+ R_ARM_THM_ABS5 = 7
+ R_ARM_ABS8 = 8
+ R_ARM_SBREL32 = 9
+ R_ARM_THM_PC22 = 10
+ R_ARM_THM_PC8 = 11
+ R_ARM_AMP_VCALL9 = 12
+ R_ARM_SWI24 = 13
+ R_ARM_THM_SWI8 = 14
+ R_ARM_XPC25 = 15
+ R_ARM_THM_XPC22 = 16
+ R_ARM_COPY = 20
+ R_ARM_GLOB_DAT = 21
+ R_ARM_JUMP_SLOT = 22
+ R_ARM_RELATIVE = 23
+ R_ARM_GOTOFF = 24
+ R_ARM_GOTPC = 25
+ R_ARM_GOT32 = 26
+ R_ARM_PLT32 = 27
+ R_ARM_CALL = 28
+ R_ARM_JUMP24 = 29
+ R_ARM_V4BX = 40
+ R_ARM_GOT_PREL = 96
+ R_ARM_GNU_VTENTRY = 100
+ R_ARM_GNU_VTINHERIT = 101
+ R_ARM_TLS_IE32 = 107
+ R_ARM_TLS_LE32 = 108
+ R_ARM_RSBREL32 = 250
+ R_ARM_THM_RPC22 = 251
+ R_ARM_RREL32 = 252
+ R_ARM_RABS32 = 253
+ R_ARM_RPC24 = 254
+ R_ARM_RBASE = 255
+
+ R_386_NONE = 0
+ R_386_32 = 1
+ R_386_PC32 = 2
+ R_386_GOT32 = 3
+ R_386_PLT32 = 4
+ R_386_COPY = 5
+ R_386_GLOB_DAT = 6
+ R_386_JMP_SLOT = 7
+ R_386_RELATIVE = 8
+ R_386_GOTOFF = 9
+ R_386_GOTPC = 10
+ R_386_TLS_TPOFF = 14
+ R_386_TLS_IE = 15
+ R_386_TLS_GOTIE = 16
+ R_386_TLS_LE = 17
+ R_386_TLS_GD = 18
+ R_386_TLS_LDM = 19
+ R_386_TLS_GD_32 = 24
+ R_386_TLS_GD_PUSH = 25
+ R_386_TLS_GD_CALL = 26
+ R_386_TLS_GD_POP = 27
+ R_386_TLS_LDM_32 = 28
+ R_386_TLS_LDM_PUSH = 29
+ R_386_TLS_LDM_CALL = 30
+ R_386_TLS_LDM_POP = 31
+ R_386_TLS_LDO_32 = 32
+ R_386_TLS_IE_32 = 33
+ R_386_TLS_LE_32 = 34
+ R_386_TLS_DTPMOD32 = 35
+ R_386_TLS_DTPOFF32 = 36
+ R_386_TLS_TPOFF32 = 37
+ R_386_TLS_GOTDESC = 39
+ R_386_TLS_DESC_CALL = 40
+ R_386_TLS_DESC = 41
+ R_386_IRELATIVE = 42
+ R_386_GOT32X = 43
+
+ R_PPC_NONE = 0
+ R_PPC_ADDR32 = 1
+ R_PPC_ADDR24 = 2
+ R_PPC_ADDR16 = 3
+ R_PPC_ADDR16_LO = 4
+ R_PPC_ADDR16_HI = 5
+ R_PPC_ADDR16_HA = 6
+ R_PPC_ADDR14 = 7
+ R_PPC_ADDR14_BRTAKEN = 8
+ R_PPC_ADDR14_BRNTAKEN = 9
+ R_PPC_REL24 = 10
+ R_PPC_REL14 = 11
+ R_PPC_REL14_BRTAKEN = 12
+ R_PPC_REL14_BRNTAKEN = 13
+ R_PPC_GOT16 = 14
+ R_PPC_GOT16_LO = 15
+ R_PPC_GOT16_HI = 16
+ R_PPC_GOT16_HA = 17
+ R_PPC_PLTREL24 = 18
+ R_PPC_COPY = 19
+ R_PPC_GLOB_DAT = 20
+ R_PPC_JMP_SLOT = 21
+ R_PPC_RELATIVE = 22
+ R_PPC_LOCAL24PC = 23
+ R_PPC_UADDR32 = 24
+ R_PPC_UADDR16 = 25
+ R_PPC_REL32 = 26
+ R_PPC_PLT32 = 27
+ R_PPC_PLTREL32 = 28
+ R_PPC_PLT16_LO = 29
+ R_PPC_PLT16_HI = 30
+ R_PPC_PLT16_HA = 31
+ R_PPC_SDAREL16 = 32
+ R_PPC_SECTOFF = 33
+ R_PPC_SECTOFF_LO = 34
+ R_PPC_SECTOFF_HI = 35
+ R_PPC_SECTOFF_HA = 36
+ R_PPC_TLS = 67
+ R_PPC_DTPMOD32 = 68
+ R_PPC_TPREL16 = 69
+ R_PPC_TPREL16_LO = 70
+ R_PPC_TPREL16_HI = 71
+ R_PPC_TPREL16_HA = 72
+ R_PPC_TPREL32 = 73
+ R_PPC_DTPREL16 = 74
+ R_PPC_DTPREL16_LO = 75
+ R_PPC_DTPREL16_HI = 76
+ R_PPC_DTPREL16_HA = 77
+ R_PPC_DTPREL32 = 78
+ R_PPC_GOT_TLSGD16 = 79
+ R_PPC_GOT_TLSGD16_LO = 80
+ R_PPC_GOT_TLSGD16_HI = 81
+ R_PPC_GOT_TLSGD16_HA = 82
+ R_PPC_GOT_TLSLD16 = 83
+ R_PPC_GOT_TLSLD16_LO = 84
+ R_PPC_GOT_TLSLD16_HI = 85
+ R_PPC_GOT_TLSLD16_HA = 86
+ R_PPC_GOT_TPREL16 = 87
+ R_PPC_GOT_TPREL16_LO = 88
+ R_PPC_GOT_TPREL16_HI = 89
+ R_PPC_GOT_TPREL16_HA = 90
+ R_PPC_EMB_NADDR32 = 101
+ R_PPC_EMB_NADDR16 = 102
+ R_PPC_EMB_NADDR16_LO = 103
+ R_PPC_EMB_NADDR16_HI = 104
+ R_PPC_EMB_NADDR16_HA = 105
+ R_PPC_EMB_SDAI16 = 106
+ R_PPC_EMB_SDA2I16 = 107
+ R_PPC_EMB_SDA2REL = 108
+ R_PPC_EMB_SDA21 = 109
+ R_PPC_EMB_MRKREF = 110
+ R_PPC_EMB_RELSEC16 = 111
+ R_PPC_EMB_RELST_LO = 112
+ R_PPC_EMB_RELST_HI = 113
+ R_PPC_EMB_RELST_HA = 114
+ R_PPC_EMB_BIT_FLD = 115
+ R_PPC_EMB_RELSDA = 116
+
+ R_PPC64_ADDR32 = R_PPC_ADDR32
+ R_PPC64_ADDR16_LO = R_PPC_ADDR16_LO
+ R_PPC64_ADDR16_HA = R_PPC_ADDR16_HA
+ R_PPC64_REL24 = R_PPC_REL24
+ R_PPC64_GOT16_HA = R_PPC_GOT16_HA
+ R_PPC64_JMP_SLOT = R_PPC_JMP_SLOT
+ R_PPC64_TPREL16 = R_PPC_TPREL16
+ R_PPC64_ADDR64 = 38
+ R_PPC64_TOC16 = 47
+ R_PPC64_TOC16_LO = 48
+ R_PPC64_TOC16_HI = 49
+ R_PPC64_TOC16_HA = 50
+ R_PPC64_ADDR16_LO_DS = 57
+ R_PPC64_GOT16_LO_DS = 59
+ R_PPC64_TOC16_DS = 63
+ R_PPC64_TOC16_LO_DS = 64
+ R_PPC64_TLS = 67
+ R_PPC64_GOT_TPREL16_LO_DS = 88
+ R_PPC64_GOT_TPREL16_HA = 90
+ R_PPC64_REL16_LO = 250
+ R_PPC64_REL16_HI = 251
+ R_PPC64_REL16_HA = 252
+
+ R_SPARC_NONE = 0
+ R_SPARC_8 = 1
+ R_SPARC_16 = 2
+ R_SPARC_32 = 3
+ R_SPARC_DISP8 = 4
+ R_SPARC_DISP16 = 5
+ R_SPARC_DISP32 = 6
+ R_SPARC_WDISP30 = 7
+ R_SPARC_WDISP22 = 8
+ R_SPARC_HI22 = 9
+ R_SPARC_22 = 10
+ R_SPARC_13 = 11
+ R_SPARC_LO10 = 12
+ R_SPARC_GOT10 = 13
+ R_SPARC_GOT13 = 14
+ R_SPARC_GOT22 = 15
+ R_SPARC_PC10 = 16
+ R_SPARC_PC22 = 17
+ R_SPARC_WPLT30 = 18
+ R_SPARC_COPY = 19
+ R_SPARC_GLOB_DAT = 20
+ R_SPARC_JMP_SLOT = 21
+ R_SPARC_RELATIVE = 22
+ R_SPARC_UA32 = 23
+ R_SPARC_PLT32 = 24
+ R_SPARC_HIPLT22 = 25
+ R_SPARC_LOPLT10 = 26
+ R_SPARC_PCPLT32 = 27
+ R_SPARC_PCPLT22 = 28
+ R_SPARC_PCPLT10 = 29
+ R_SPARC_10 = 30
+ R_SPARC_11 = 31
+ R_SPARC_64 = 32
+ R_SPARC_OLO10 = 33
+ R_SPARC_HH22 = 34
+ R_SPARC_HM10 = 35
+ R_SPARC_LM22 = 36
+ R_SPARC_PC_HH22 = 37
+ R_SPARC_PC_HM10 = 38
+ R_SPARC_PC_LM22 = 39
+ R_SPARC_WDISP16 = 40
+ R_SPARC_WDISP19 = 41
+ R_SPARC_GLOB_JMP = 42
+ R_SPARC_7 = 43
+ R_SPARC_5 = 44
+ R_SPARC_6 = 45
+ R_SPARC_DISP64 = 46
+ R_SPARC_PLT64 = 47
+ R_SPARC_HIX22 = 48
+ R_SPARC_LOX10 = 49
+ R_SPARC_H44 = 50
+ R_SPARC_M44 = 51
+ R_SPARC_L44 = 52
+ R_SPARC_REGISTER = 53
+ R_SPARC_UA64 = 54
+ R_SPARC_UA16 = 55
+
+ ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
+)
+
+/*
+ * Symbol table entries.
+ */
+
+/* For accessing the fields of st_info. */
+
+/* For constructing st_info from field values. */
+
+/* For accessing the fields of st_other. */
+
+/*
+ * ELF header.
+ */
+type ElfEhdr struct {
+ ident [EI_NIDENT]uint8
+ type_ uint16
+ machine uint16
+ version uint32
+ entry uint64
+ phoff uint64
+ shoff uint64
+ flags uint32
+ ehsize uint16
+ phentsize uint16
+ phnum uint16
+ shentsize uint16
+ shnum uint16
+ shstrndx uint16
+}
+
+/*
+ * Section header.
+ */
+type ElfShdr struct {
+ name uint32
+ type_ uint32
+ flags uint64
+ addr uint64
+ off uint64
+ size uint64
+ link uint32
+ info uint32
+ addralign uint64
+ entsize uint64
+ shnum int
+ secsym *LSym
+}
+
+/*
+ * Program header.
+ */
+type ElfPhdr struct {
+ type_ uint32
+ flags uint32
+ off uint64
+ vaddr uint64
+ paddr uint64
+ filesz uint64
+ memsz uint64
+ align uint64
+}
+
+/* For accessing the fields of r_info. */
+
+/* For constructing r_info from field values. */
+
+/*
+ * Symbol table entries.
+ */
+
+/* For accessing the fields of st_info. */
+
+/* For constructing st_info from field values. */
+
+/* For accessing the fields of st_other. */
+
+/*
+ * Go linker interface
+ */
+const (
+ ELF64HDRSIZE = 64
+ ELF64PHDRSIZE = 56
+ ELF64SHDRSIZE = 64
+ ELF64RELSIZE = 16
+ ELF64RELASIZE = 24
+ ELF64SYMSIZE = 24
+ ELF32HDRSIZE = 52
+ ELF32PHDRSIZE = 32
+ ELF32SHDRSIZE = 40
+ ELF32SYMSIZE = 16
+ ELF32RELSIZE = 8
+)
+
+/*
+ * The interface uses the 64-bit structures always,
+ * to avoid code duplication. The writers know how to
+ * marshal a 32-bit representation from the 64-bit structure.
+ */
+
+var Elfstrdat []byte
+
+/*
+ * Total amount of space to reserve at the start of the file
+ * for Header, PHeaders, SHeaders, and interp.
+ * May waste some.
+ * On FreeBSD, cannot be larger than a page.
+ */
+const (
+ ELFRESERVE = 4096
+)
+
+/*
+ * We use the 64-bit data structures on both 32- and 64-bit machines
+ * in order to write the code just once. The 64-bit data structure is
+ * written in the 32-bit format on the 32-bit machines.
+ */
+const (
+ NSECT = 48
+)
+
+var Iself bool
+
+var Nelfsym int = 1
+
+var elf64 bool
+
+var ehdr ElfEhdr
+
+var phdr [NSECT]*ElfPhdr
+
+var shdr [NSECT]*ElfShdr
+
+var interp string
+
+type Elfstring struct {
+ s string
+ off int
+}
+
+var elfstr [100]Elfstring
+
+var nelfstr int
+
+var buildinfo []byte
+
+/*
+ Initialize the global variable that describes the ELF header. It will be updated as
+ we write section and prog headers.
+*/
+func Elfinit() {
+ Iself = true
+
+ switch Thearch.Thechar {
+ // 64-bit architectures
+ case '9':
+ if Ctxt.Arch.ByteOrder == binary.BigEndian {
+ ehdr.flags = 1 /* Version 1 ABI */
+ } else {
+ ehdr.flags = 2 /* Version 2 ABI */
+ }
+ fallthrough
+
+ case '0', '6', '7':
+ if Thearch.Thechar == '0' {
+ ehdr.flags = 0x20000000 /* MIPS 3 */
+ }
+ elf64 = true
+
+ ehdr.phoff = ELF64HDRSIZE /* Must be ELF64HDRSIZE: first PHdr must follow ELF header */
+ ehdr.shoff = ELF64HDRSIZE /* Will move as we add PHeaders */
+ ehdr.ehsize = ELF64HDRSIZE /* Must be ELF64HDRSIZE */
+ ehdr.phentsize = ELF64PHDRSIZE /* Must be ELF64PHDRSIZE */
+ ehdr.shentsize = ELF64SHDRSIZE /* Must be ELF64SHDRSIZE */
+
+ // we use EABI on both linux/arm and freebsd/arm.
+ // 32-bit architectures
+ case '5':
+ // we use EABI on both linux/arm and freebsd/arm.
+ if HEADTYPE == obj.Hlinux || HEADTYPE == obj.Hfreebsd {
+ // We set a value here that makes no indication of which
+ // float ABI the object uses, because this is information
+ // used by the dynamic linker to compare executables and
+ // shared libraries -- so it only matters for cgo calls, and
+ // the information properly comes from the object files
+ // produced by the host C compiler. parseArmAttributes in
+ // ldelf.go reads that information and updates this field as
+ // appropriate.
+ ehdr.flags = 0x5000002 // has entry point, Version5 EABI
+ }
+ fallthrough
+
+ default:
+ ehdr.phoff = ELF32HDRSIZE
+ /* Must be ELF32HDRSIZE: first PHdr must follow ELF header */
+ ehdr.shoff = ELF32HDRSIZE /* Will move as we add PHeaders */
+ ehdr.ehsize = ELF32HDRSIZE /* Must be ELF32HDRSIZE */
+ ehdr.phentsize = ELF32PHDRSIZE /* Must be ELF32PHDRSIZE */
+ ehdr.shentsize = ELF32SHDRSIZE /* Must be ELF32SHDRSIZE */
+ }
+}
+
+// Make sure PT_LOAD is aligned properly and
+// that there is no gap,
+// correct ELF loaders will do this implicitly,
+// but buggy ELF loaders like the one in some
+// versions of QEMU and UPX won't.
+func fixElfPhdr(e *ElfPhdr) {
+ frag := int(e.vaddr & (e.align - 1))
+
+ e.off -= uint64(frag)
+ e.vaddr -= uint64(frag)
+ e.paddr -= uint64(frag)
+ e.filesz += uint64(frag)
+ e.memsz += uint64(frag)
+}
+
+func elf64phdr(e *ElfPhdr) {
+ if e.type_ == PT_LOAD {
+ fixElfPhdr(e)
+ }
+
+ Thearch.Lput(e.type_)
+ Thearch.Lput(e.flags)
+ Thearch.Vput(e.off)
+ Thearch.Vput(e.vaddr)
+ Thearch.Vput(e.paddr)
+ Thearch.Vput(e.filesz)
+ Thearch.Vput(e.memsz)
+ Thearch.Vput(e.align)
+}
+
+func elf32phdr(e *ElfPhdr) {
+ if e.type_ == PT_LOAD {
+ fixElfPhdr(e)
+ }
+
+ Thearch.Lput(e.type_)
+ Thearch.Lput(uint32(e.off))
+ Thearch.Lput(uint32(e.vaddr))
+ Thearch.Lput(uint32(e.paddr))
+ Thearch.Lput(uint32(e.filesz))
+ Thearch.Lput(uint32(e.memsz))
+ Thearch.Lput(e.flags)
+ Thearch.Lput(uint32(e.align))
+}
+
+func elf64shdr(e *ElfShdr) {
+ Thearch.Lput(e.name)
+ Thearch.Lput(e.type_)
+ Thearch.Vput(e.flags)
+ Thearch.Vput(e.addr)
+ Thearch.Vput(e.off)
+ Thearch.Vput(e.size)
+ Thearch.Lput(e.link)
+ Thearch.Lput(e.info)
+ Thearch.Vput(e.addralign)
+ Thearch.Vput(e.entsize)
+}
+
+func elf32shdr(e *ElfShdr) {
+ Thearch.Lput(e.name)
+ Thearch.Lput(e.type_)
+ Thearch.Lput(uint32(e.flags))
+ Thearch.Lput(uint32(e.addr))
+ Thearch.Lput(uint32(e.off))
+ Thearch.Lput(uint32(e.size))
+ Thearch.Lput(e.link)
+ Thearch.Lput(e.info)
+ Thearch.Lput(uint32(e.addralign))
+ Thearch.Lput(uint32(e.entsize))
+}
+
+func elfwriteshdrs() uint32 {
+ if elf64 {
+ for i := 0; i < int(ehdr.shnum); i++ {
+ elf64shdr(shdr[i])
+ }
+ return uint32(ehdr.shnum) * ELF64SHDRSIZE
+ }
+
+ for i := 0; i < int(ehdr.shnum); i++ {
+ elf32shdr(shdr[i])
+ }
+ return uint32(ehdr.shnum) * ELF32SHDRSIZE
+}
+
+func elfsetstring(s string, off int) {
+ if nelfstr >= len(elfstr) {
+ Diag("too many elf strings")
+ errorexit()
+ }
+
+ elfstr[nelfstr].s = s
+ elfstr[nelfstr].off = off
+ nelfstr++
+}
+
+func elfwritephdrs() uint32 {
+ if elf64 {
+ for i := 0; i < int(ehdr.phnum); i++ {
+ elf64phdr(phdr[i])
+ }
+ return uint32(ehdr.phnum) * ELF64PHDRSIZE
+ }
+
+ for i := 0; i < int(ehdr.phnum); i++ {
+ elf32phdr(phdr[i])
+ }
+ return uint32(ehdr.phnum) * ELF32PHDRSIZE
+}
+
+func newElfPhdr() *ElfPhdr {
+ e := new(ElfPhdr)
+ if ehdr.phnum >= NSECT {
+ Diag("too many phdrs")
+ } else {
+ phdr[ehdr.phnum] = e
+ ehdr.phnum++
+ }
+ if elf64 {
+ ehdr.shoff += ELF64PHDRSIZE
+ } else {
+ ehdr.shoff += ELF32PHDRSIZE
+ }
+ return e
+}
+
+func newElfShdr(name int64) *ElfShdr {
+ e := new(ElfShdr)
+ e.name = uint32(name)
+ e.shnum = int(ehdr.shnum)
+ if ehdr.shnum >= NSECT {
+ Diag("too many shdrs")
+ } else {
+ shdr[ehdr.shnum] = e
+ ehdr.shnum++
+ }
+
+ return e
+}
+
+func getElfEhdr() *ElfEhdr {
+ return &ehdr
+}
+
+func elf64writehdr() uint32 {
+ for i := 0; i < EI_NIDENT; i++ {
+ Cput(ehdr.ident[i])
+ }
+ Thearch.Wput(ehdr.type_)
+ Thearch.Wput(ehdr.machine)
+ Thearch.Lput(ehdr.version)
+ Thearch.Vput(ehdr.entry)
+ Thearch.Vput(ehdr.phoff)
+ Thearch.Vput(ehdr.shoff)
+ Thearch.Lput(ehdr.flags)
+ Thearch.Wput(ehdr.ehsize)
+ Thearch.Wput(ehdr.phentsize)
+ Thearch.Wput(ehdr.phnum)
+ Thearch.Wput(ehdr.shentsize)
+ Thearch.Wput(ehdr.shnum)
+ Thearch.Wput(ehdr.shstrndx)
+ return ELF64HDRSIZE
+}
+
+func elf32writehdr() uint32 {
+ for i := 0; i < EI_NIDENT; i++ {
+ Cput(ehdr.ident[i])
+ }
+ Thearch.Wput(ehdr.type_)
+ Thearch.Wput(ehdr.machine)
+ Thearch.Lput(ehdr.version)
+ Thearch.Lput(uint32(ehdr.entry))
+ Thearch.Lput(uint32(ehdr.phoff))
+ Thearch.Lput(uint32(ehdr.shoff))
+ Thearch.Lput(ehdr.flags)
+ Thearch.Wput(ehdr.ehsize)
+ Thearch.Wput(ehdr.phentsize)
+ Thearch.Wput(ehdr.phnum)
+ Thearch.Wput(ehdr.shentsize)
+ Thearch.Wput(ehdr.shnum)
+ Thearch.Wput(ehdr.shstrndx)
+ return ELF32HDRSIZE
+}
+
+func elfwritehdr() uint32 {
+ if elf64 {
+ return elf64writehdr()
+ }
+ return elf32writehdr()
+}
+
+/* Taken directly from the definition document for ELF64 */
+func elfhash(name []byte) uint32 {
+ var h uint32 = 0
+ var g uint32
+ for len(name) != 0 {
+ h = (h << 4) + uint32(name[0])
+ name = name[1:]
+ g = h & 0xf0000000
+ if g != 0 {
+ h ^= g >> 24
+ }
+ h &= 0x0fffffff
+ }
+
+ return h
+}
+
+func Elfwritedynent(s *LSym, tag int, val uint64) {
+ if elf64 {
+ Adduint64(Ctxt, s, uint64(tag))
+ Adduint64(Ctxt, s, val)
+ } else {
+ Adduint32(Ctxt, s, uint32(tag))
+ Adduint32(Ctxt, s, uint32(val))
+ }
+}
+
+func elfwritedynentsym(s *LSym, tag int, t *LSym) {
+ Elfwritedynentsymplus(s, tag, t, 0)
+}
+
+func Elfwritedynentsymplus(s *LSym, tag int, t *LSym, add int64) {
+ if elf64 {
+ Adduint64(Ctxt, s, uint64(tag))
+ } else {
+ Adduint32(Ctxt, s, uint32(tag))
+ }
+ Addaddrplus(Ctxt, s, t, add)
+}
+
+func elfwritedynentsymsize(s *LSym, tag int, t *LSym) {
+ if elf64 {
+ Adduint64(Ctxt, s, uint64(tag))
+ } else {
+ Adduint32(Ctxt, s, uint32(tag))
+ }
+ addsize(Ctxt, s, t)
+}
+
+func elfinterp(sh *ElfShdr, startva uint64, resoff uint64, p string) int {
+ interp = p
+ n := len(interp) + 1
+ sh.addr = startva + resoff - uint64(n)
+ sh.off = resoff - uint64(n)
+ sh.size = uint64(n)
+
+ return n
+}
+
+func elfwriteinterp() int {
+ sh := elfshname(".interp")
+ Cseek(int64(sh.off))
+ coutbuf.WriteString(interp)
+ Cput(0)
+ return int(sh.size)
+}
+
+func elfnote(sh *ElfShdr, startva uint64, resoff uint64, sz int, alloc bool) int {
+ n := 3*4 + uint64(sz) + resoff%4
+
+ sh.type_ = SHT_NOTE
+ if alloc {
+ sh.flags = SHF_ALLOC
+ }
+ sh.addralign = 4
+ sh.addr = startva + resoff - n
+ sh.off = resoff - n
+ sh.size = n - resoff%4
+
+ return int(n)
+}
+
+func elfwritenotehdr(str string, namesz uint32, descsz uint32, tag uint32) *ElfShdr {
+ sh := elfshname(str)
+
+ // Write Elf_Note header.
+ Cseek(int64(sh.off))
+
+ Thearch.Lput(namesz)
+ Thearch.Lput(descsz)
+ Thearch.Lput(tag)
+
+ return sh
+}
+
+// NetBSD Signature (as per sys/exec_elf.h)
+const (
+ ELF_NOTE_NETBSD_NAMESZ = 7
+ ELF_NOTE_NETBSD_DESCSZ = 4
+ ELF_NOTE_NETBSD_TAG = 1
+ ELF_NOTE_NETBSD_VERSION = 599000000 /* NetBSD 5.99 */
+)
+
+var ELF_NOTE_NETBSD_NAME = []byte("NetBSD\x00")
+
+func elfnetbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
+ n := int(Rnd(ELF_NOTE_NETBSD_NAMESZ, 4) + Rnd(ELF_NOTE_NETBSD_DESCSZ, 4))
+ return elfnote(sh, startva, resoff, n, true)
+}
+
+func elfwritenetbsdsig() int {
+ // Write Elf_Note header.
+ sh := elfwritenotehdr(".note.netbsd.ident", ELF_NOTE_NETBSD_NAMESZ, ELF_NOTE_NETBSD_DESCSZ, ELF_NOTE_NETBSD_TAG)
+
+ if sh == nil {
+ return 0
+ }
+
+ // Followed by NetBSD string and version.
+ Cwrite(ELF_NOTE_NETBSD_NAME)
+ Cput(0)
+
+ Thearch.Lput(ELF_NOTE_NETBSD_VERSION)
+
+ return int(sh.size)
+}
+
+// OpenBSD Signature
+const (
+ ELF_NOTE_OPENBSD_NAMESZ = 8
+ ELF_NOTE_OPENBSD_DESCSZ = 4
+ ELF_NOTE_OPENBSD_TAG = 1
+ ELF_NOTE_OPENBSD_VERSION = 0
+)
+
+var ELF_NOTE_OPENBSD_NAME = []byte("OpenBSD\x00")
+
+func elfopenbsdsig(sh *ElfShdr, startva uint64, resoff uint64) int {
+ n := ELF_NOTE_OPENBSD_NAMESZ + ELF_NOTE_OPENBSD_DESCSZ
+ return elfnote(sh, startva, resoff, n, true)
+}
+
+func elfwriteopenbsdsig() int {
+ // Write Elf_Note header.
+ sh := elfwritenotehdr(".note.openbsd.ident", ELF_NOTE_OPENBSD_NAMESZ, ELF_NOTE_OPENBSD_DESCSZ, ELF_NOTE_OPENBSD_TAG)
+
+ if sh == nil {
+ return 0
+ }
+
+ // Followed by OpenBSD string and version.
+ Cwrite(ELF_NOTE_OPENBSD_NAME)
+
+ Thearch.Lput(ELF_NOTE_OPENBSD_VERSION)
+
+ return int(sh.size)
+}
+
+func addbuildinfo(val string) {
+ var j int
+
+ if val[0] != '0' || val[1] != 'x' {
+ Exitf("-B argument must start with 0x: %s", val)
+ }
+
+ ov := val
+ val = val[2:]
+ i := 0
+ var b int
+ for val != "" {
+ if len(val) == 1 {
+ Exitf("-B argument must have even number of digits: %s", ov)
+ }
+
+ b = 0
+ for j = 0; j < 2; j, val = j+1, val[1:] {
+ b *= 16
+ if val[0] >= '0' && val[0] <= '9' {
+ b += int(val[0]) - '0'
+ } else if val[0] >= 'a' && val[0] <= 'f' {
+ b += int(val[0]) - 'a' + 10
+ } else if val[0] >= 'A' && val[0] <= 'F' {
+ b += int(val[0]) - 'A' + 10
+ } else {
+ Exitf("-B argument contains invalid hex digit %c: %s", val[0], ov)
+ }
+ }
+
+ const maxLen = 32
+ if i >= maxLen {
+ Exitf("-B option too long (max %d digits): %s", maxLen, ov)
+ }
+
+ buildinfo = append(buildinfo, uint8(b))
+ i++
+ }
+
+ buildinfo = buildinfo[:i]
+}
+
+// Build info note
+const (
+ ELF_NOTE_BUILDINFO_NAMESZ = 4
+ ELF_NOTE_BUILDINFO_TAG = 3
+)
+
+var ELF_NOTE_BUILDINFO_NAME = []byte("GNU\x00")
+
+func elfbuildinfo(sh *ElfShdr, startva uint64, resoff uint64) int {
+ n := int(ELF_NOTE_BUILDINFO_NAMESZ + Rnd(int64(len(buildinfo)), 4))
+ return elfnote(sh, startva, resoff, n, true)
+}
+
+func elfgobuildid(sh *ElfShdr, startva uint64, resoff uint64) int {
+ n := len(ELF_NOTE_GO_NAME) + int(Rnd(int64(len(buildid)), 4))
+ return elfnote(sh, startva, resoff, n, true)
+}
+
+func elfwritebuildinfo() int {
+ sh := elfwritenotehdr(".note.gnu.build-id", ELF_NOTE_BUILDINFO_NAMESZ, uint32(len(buildinfo)), ELF_NOTE_BUILDINFO_TAG)
+ if sh == nil {
+ return 0
+ }
+
+ Cwrite(ELF_NOTE_BUILDINFO_NAME)
+ Cwrite(buildinfo)
+ var zero = make([]byte, 4)
+ Cwrite(zero[:int(Rnd(int64(len(buildinfo)), 4)-int64(len(buildinfo)))])
+
+ return int(sh.size)
+}
+
+func elfwritegobuildid() int {
+ sh := elfwritenotehdr(".note.go.buildid", uint32(len(ELF_NOTE_GO_NAME)), uint32(len(buildid)), ELF_NOTE_GOBUILDID_TAG)
+ if sh == nil {
+ return 0
+ }
+
+ Cwrite(ELF_NOTE_GO_NAME)
+ Cwrite([]byte(buildid))
+ var zero = make([]byte, 4)
+ Cwrite(zero[:int(Rnd(int64(len(buildid)), 4)-int64(len(buildid)))])
+
+ return int(sh.size)
+}
+
+// Go specific notes
+const (
+ ELF_NOTE_GOPKGLIST_TAG = 1
+ ELF_NOTE_GOABIHASH_TAG = 2
+ ELF_NOTE_GODEPS_TAG = 3
+ ELF_NOTE_GOBUILDID_TAG = 4
+)
+
+var ELF_NOTE_GO_NAME = []byte("Go\x00\x00")
+
+var elfverneed int
+
+type Elfaux struct {
+ next *Elfaux
+ num int
+ vers string
+}
+
+type Elflib struct {
+ next *Elflib
+ aux *Elfaux
+ file string
+}
+
+func addelflib(list **Elflib, file string, vers string) *Elfaux {
+ var lib *Elflib
+
+ for lib = *list; lib != nil; lib = lib.next {
+ if lib.file == file {
+ goto havelib
+ }
+ }
+ lib = new(Elflib)
+ lib.next = *list
+ lib.file = file
+ *list = lib
+
+havelib:
+ for aux := lib.aux; aux != nil; aux = aux.next {
+ if aux.vers == vers {
+ return aux
+ }
+ }
+ aux := new(Elfaux)
+ aux.next = lib.aux
+ aux.vers = vers
+ lib.aux = aux
+
+ return aux
+}
+
+func elfdynhash() {
+ if !Iself {
+ return
+ }
+
+ nsym := Nelfsym
+ s := Linklookup(Ctxt, ".hash", 0)
+ s.Type = obj.SELFROSECT
+ s.Reachable = true
+
+ i := nsym
+ nbucket := 1
+ for i > 0 {
+ nbucket++
+ i >>= 1
+ }
+
+ var needlib *Elflib
+ need := make([]*Elfaux, nsym)
+ chain := make([]uint32, nsym)
+ buckets := make([]uint32, nbucket)
+
+ var b int
+ var hc uint32
+ var name string
+ for sy := Ctxt.Allsym; sy != nil; sy = sy.Allsym {
+ if sy.Dynid <= 0 {
+ continue
+ }
+
+ if sy.Dynimpvers != "" {
+ need[sy.Dynid] = addelflib(&needlib, sy.Dynimplib, sy.Dynimpvers)
+ }
+
+ name = sy.Extname
+ hc = elfhash([]byte(name))
+
+ b = int(hc % uint32(nbucket))
+ chain[sy.Dynid] = buckets[b]
+ buckets[b] = uint32(sy.Dynid)
+ }
+
+ Adduint32(Ctxt, s, uint32(nbucket))
+ Adduint32(Ctxt, s, uint32(nsym))
+ for i := 0; i < nbucket; i++ {
+ Adduint32(Ctxt, s, buckets[i])
+ }
+ for i := 0; i < nsym; i++ {
+ Adduint32(Ctxt, s, chain[i])
+ }
+
+ // version symbols
+ dynstr := Linklookup(Ctxt, ".dynstr", 0)
+
+ s = Linklookup(Ctxt, ".gnu.version_r", 0)
+ i = 2
+ nfile := 0
+ var j int
+ var x *Elfaux
+ for l := needlib; l != nil; l = l.next {
+ nfile++
+
+ // header
+ Adduint16(Ctxt, s, 1) // table version
+ j = 0
+ for x = l.aux; x != nil; x = x.next {
+ j++
+ }
+ Adduint16(Ctxt, s, uint16(j)) // aux count
+ Adduint32(Ctxt, s, uint32(Addstring(dynstr, l.file))) // file string offset
+ Adduint32(Ctxt, s, 16) // offset from header to first aux
+ if l.next != nil {
+ Adduint32(Ctxt, s, 16+uint32(j)*16) // offset from this header to next
+ } else {
+ Adduint32(Ctxt, s, 0)
+ }
+
+ for x = l.aux; x != nil; x = x.next {
+ x.num = i
+ i++
+
+ // aux struct
+ Adduint32(Ctxt, s, elfhash([]byte(x.vers))) // hash
+ Adduint16(Ctxt, s, 0) // flags
+ Adduint16(Ctxt, s, uint16(x.num)) // other - index we refer to this by
+ Adduint32(Ctxt, s, uint32(Addstring(dynstr, x.vers))) // version string offset
+ if x.next != nil {
+ Adduint32(Ctxt, s, 16) // offset from this aux to next
+ } else {
+ Adduint32(Ctxt, s, 0)
+ }
+ }
+ }
+
+ // version references
+ s = Linklookup(Ctxt, ".gnu.version", 0)
+
+ for i := 0; i < nsym; i++ {
+ if i == 0 {
+ Adduint16(Ctxt, s, 0) // first entry - no symbol
+ } else if need[i] == nil {
+ Adduint16(Ctxt, s, 1) // global
+ } else {
+ Adduint16(Ctxt, s, uint16(need[i].num))
+ }
+ }
+
+ s = Linklookup(Ctxt, ".dynamic", 0)
+ elfverneed = nfile
+ if elfverneed != 0 {
+ elfwritedynentsym(s, DT_VERNEED, Linklookup(Ctxt, ".gnu.version_r", 0))
+ Elfwritedynent(s, DT_VERNEEDNUM, uint64(nfile))
+ elfwritedynentsym(s, DT_VERSYM, Linklookup(Ctxt, ".gnu.version", 0))
+ }
+
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ sy := Linklookup(Ctxt, ".rela.plt", 0)
+ if sy.Size > 0 {
+ Elfwritedynent(s, DT_PLTREL, DT_RELA)
+ elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
+ elfwritedynentsym(s, DT_JMPREL, sy)
+ }
+ default:
+ sy := Linklookup(Ctxt, ".rel.plt", 0)
+ if sy.Size > 0 {
+ Elfwritedynent(s, DT_PLTREL, DT_REL)
+ elfwritedynentsymsize(s, DT_PLTRELSZ, sy)
+ elfwritedynentsym(s, DT_JMPREL, sy)
+ }
+ }
+
+ Elfwritedynent(s, DT_NULL, 0)
+}
+
+func elfphload(seg *Segment) *ElfPhdr {
+ ph := newElfPhdr()
+ ph.type_ = PT_LOAD
+ if seg.Rwx&4 != 0 {
+ ph.flags |= PF_R
+ }
+ if seg.Rwx&2 != 0 {
+ ph.flags |= PF_W
+ }
+ if seg.Rwx&1 != 0 {
+ ph.flags |= PF_X
+ }
+ ph.vaddr = seg.Vaddr
+ ph.paddr = seg.Vaddr
+ ph.memsz = seg.Length
+ ph.off = seg.Fileoff
+ ph.filesz = seg.Filelen
+ ph.align = uint64(INITRND)
+
+ return ph
+}
+
+func elfshname(name string) *ElfShdr {
+ var off int
+ var sh *ElfShdr
+
+ for i := 0; i < nelfstr; i++ {
+ if name == elfstr[i].s {
+ off = elfstr[i].off
+ for i = 0; i < int(ehdr.shnum); i++ {
+ sh = shdr[i]
+ if sh.name == uint32(off) {
+ return sh
+ }
+ }
+
+ sh = newElfShdr(int64(off))
+ return sh
+ }
+ }
+
+ Diag("cannot find elf name %s", name)
+ errorexit()
+ return nil
+}
+
+func elfshalloc(sect *Section) *ElfShdr {
+ sh := elfshname(sect.Name)
+ sect.Elfsect = sh
+ return sh
+}
+
+func elfshbits(sect *Section) *ElfShdr {
+ sh := elfshalloc(sect)
+ // If this section has already been set up as a note, we assume type_ and
+ // flags are already correct, but the other fields still need filling in.
+ if sh.type_ == SHT_NOTE {
+ if Linkmode != LinkExternal {
+ // TODO(mwhudson): the approach here will work OK when
+ // linking internally for notes that we want to be included
+ // in a loadable segment (e.g. the abihash note) but not for
+ // notes that we do not want to be mapped (e.g. the package
+ // list note). The real fix is probably to define new values
+ // for LSym.Type corresponding to mapped and unmapped notes
+ // and handle them in dodata().
+ Diag("sh.type_ == SHT_NOTE in elfshbits when linking internally")
+ }
+ sh.addralign = uint64(sect.Align)
+ sh.size = sect.Length
+ sh.off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr
+ return sh
+ }
+ if sh.type_ > 0 {
+ return sh
+ }
+
+ if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen {
+ sh.type_ = SHT_PROGBITS
+ } else {
+ sh.type_ = SHT_NOBITS
+ }
+ sh.flags = SHF_ALLOC
+ if sect.Rwx&1 != 0 {
+ sh.flags |= SHF_EXECINSTR
+ }
+ if sect.Rwx&2 != 0 {
+ sh.flags |= SHF_WRITE
+ }
+ if sect.Name == ".tbss" {
+ sh.flags |= SHF_TLS
+ sh.type_ = SHT_NOBITS
+ }
+
+ if Linkmode != LinkExternal {
+ sh.addr = sect.Vaddr
+ }
+ sh.addralign = uint64(sect.Align)
+ sh.size = sect.Length
+ if sect.Name != ".tbss" {
+ sh.off = sect.Seg.Fileoff + sect.Vaddr - sect.Seg.Vaddr
+ }
+
+ return sh
+}
+
+func elfshreloc(sect *Section) *ElfShdr {
+ // If main section is SHT_NOBITS, nothing to relocate.
+ // Also nothing to relocate in .shstrtab or notes.
+ if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
+ return nil
+ }
+ if sect.Name == ".shstrtab" || sect.Name == ".tbss" {
+ return nil
+ }
+ if sect.Elfsect.type_ == SHT_NOTE {
+ return nil
+ }
+
+ var prefix string
+ var typ int
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ prefix = ".rela"
+ typ = SHT_RELA
+ default:
+ prefix = ".rel"
+ typ = SHT_REL
+ }
+
+ buf := fmt.Sprintf("%s%s", prefix, sect.Name)
+ sh := elfshname(buf)
+ sh.type_ = uint32(typ)
+ sh.entsize = uint64(Thearch.Regsize) * 2
+ if typ == SHT_RELA {
+ sh.entsize += uint64(Thearch.Regsize)
+ }
+ sh.link = uint32(elfshname(".symtab").shnum)
+ sh.info = uint32(sect.Elfsect.shnum)
+ sh.off = sect.Reloff
+ sh.size = sect.Rellen
+ sh.addralign = uint64(Thearch.Regsize)
+ return sh
+}
+
+func elfrelocsect(sect *Section, first *LSym) {
+ // If main section is SHT_NOBITS, nothing to relocate.
+ // Also nothing to relocate in .shstrtab.
+ if sect.Vaddr >= sect.Seg.Vaddr+sect.Seg.Filelen {
+ return
+ }
+ if sect.Name == ".shstrtab" {
+ return
+ }
+
+ sect.Reloff = uint64(Cpos())
+ var sym *LSym
+ for sym = first; sym != nil; sym = sym.Next {
+ if !sym.Reachable {
+ continue
+ }
+ if uint64(sym.Value) >= sect.Vaddr {
+ break
+ }
+ }
+
+ eaddr := int32(sect.Vaddr + sect.Length)
+ var r *Reloc
+ var ri int
+ for ; sym != nil; sym = sym.Next {
+ if !sym.Reachable {
+ continue
+ }
+ if sym.Value >= int64(eaddr) {
+ break
+ }
+ Ctxt.Cursym = sym
+
+ for ri = 0; ri < len(sym.R); ri++ {
+ r = &sym.R[ri]
+ if r.Done != 0 {
+ continue
+ }
+ if r.Xsym == nil {
+ Diag("missing xsym in relocation")
+ continue
+ }
+
+ if r.Xsym.ElfsymForReloc() == 0 {
+ Diag("reloc %d to non-elf symbol %s (outer=%s) %d", r.Type, r.Sym.Name, r.Xsym.Name, r.Sym.Type)
+ }
+ if Thearch.Elfreloc1(r, int64(uint64(sym.Value+int64(r.Off))-sect.Vaddr)) < 0 {
+ Diag("unsupported obj reloc %d/%d to %s", r.Type, r.Siz, r.Sym.Name)
+ }
+ }
+ }
+
+ sect.Rellen = uint64(Cpos()) - sect.Reloff
+}
+
+func Elfemitreloc() {
+ for Cpos()&7 != 0 {
+ Cput(0)
+ }
+
+ elfrelocsect(Segtext.Sect, Ctxt.Textp)
+ for sect := Segtext.Sect.Next; sect != nil; sect = sect.Next {
+ elfrelocsect(sect, datap)
+ }
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
+ elfrelocsect(sect, datap)
+ }
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ elfrelocsect(sect, datap)
+ }
+}
+
+func addgonote(sectionName string, tag uint32, desc []byte) {
+ s := Linklookup(Ctxt, sectionName, 0)
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+ // namesz
+ Adduint32(Ctxt, s, uint32(len(ELF_NOTE_GO_NAME)))
+ // descsz
+ Adduint32(Ctxt, s, uint32(len(desc)))
+ // tag
+ Adduint32(Ctxt, s, tag)
+ // name + padding
+ s.P = append(s.P, ELF_NOTE_GO_NAME...)
+ for len(s.P)%4 != 0 {
+ s.P = append(s.P, 0)
+ }
+ // desc + padding
+ s.P = append(s.P, desc...)
+ for len(s.P)%4 != 0 {
+ s.P = append(s.P, 0)
+ }
+ s.Size = int64(len(s.P))
+}
+
+func doelf() {
+ if !Iself {
+ return
+ }
+
+ /* predefine strings we need for section headers */
+ shstrtab := Linklookup(Ctxt, ".shstrtab", 0)
+
+ shstrtab.Type = obj.SELFROSECT
+ shstrtab.Reachable = true
+
+ Addstring(shstrtab, "")
+ Addstring(shstrtab, ".text")
+ Addstring(shstrtab, ".noptrdata")
+ Addstring(shstrtab, ".data")
+ Addstring(shstrtab, ".bss")
+ Addstring(shstrtab, ".noptrbss")
+
+ // generate .tbss section (except for OpenBSD where it's not supported)
+ // for dynamic internal linker or external linking, so that various
+ // binutils could correctly calculate PT_TLS size.
+ // see https://golang.org/issue/5200.
+ if HEADTYPE != obj.Hopenbsd {
+ if Debug['d'] == 0 || Linkmode == LinkExternal {
+ Addstring(shstrtab, ".tbss")
+ }
+ }
+ if HEADTYPE == obj.Hnetbsd {
+ Addstring(shstrtab, ".note.netbsd.ident")
+ }
+ if HEADTYPE == obj.Hopenbsd {
+ Addstring(shstrtab, ".note.openbsd.ident")
+ }
+ if len(buildinfo) > 0 {
+ Addstring(shstrtab, ".note.gnu.build-id")
+ }
+ if buildid != "" {
+ Addstring(shstrtab, ".note.go.buildid")
+ }
+ Addstring(shstrtab, ".elfdata")
+ Addstring(shstrtab, ".rodata")
+ // See the comment about data.rel.ro.FOO section names in data.go.
+ relro_prefix := ""
+ if UseRelro() {
+ Addstring(shstrtab, ".data.rel.ro")
+ relro_prefix = ".data.rel.ro"
+ }
+ Addstring(shstrtab, relro_prefix+".typelink")
+ Addstring(shstrtab, relro_prefix+".gosymtab")
+ Addstring(shstrtab, relro_prefix+".gopclntab")
+
+ if Linkmode == LinkExternal {
+ Debug['d'] = 1
+
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ Addstring(shstrtab, ".rela.text")
+ Addstring(shstrtab, ".rela.rodata")
+ Addstring(shstrtab, ".rela"+relro_prefix+".typelink")
+ Addstring(shstrtab, ".rela"+relro_prefix+".gosymtab")
+ Addstring(shstrtab, ".rela"+relro_prefix+".gopclntab")
+ Addstring(shstrtab, ".rela.noptrdata")
+ Addstring(shstrtab, ".rela.data")
+ if UseRelro() {
+ Addstring(shstrtab, ".rela.data.rel.ro")
+ }
+
+ default:
+ Addstring(shstrtab, ".rel.text")
+ Addstring(shstrtab, ".rel.rodata")
+ Addstring(shstrtab, ".rel"+relro_prefix+".typelink")
+ Addstring(shstrtab, ".rel"+relro_prefix+".gosymtab")
+ Addstring(shstrtab, ".rel"+relro_prefix+".gopclntab")
+ Addstring(shstrtab, ".rel.noptrdata")
+ Addstring(shstrtab, ".rel.data")
+ if UseRelro() {
+ Addstring(shstrtab, ".rel.data.rel.ro")
+ }
+ }
+
+ // add a .note.GNU-stack section to mark the stack as non-executable
+ Addstring(shstrtab, ".note.GNU-stack")
+
+ if Buildmode == BuildmodeShared {
+ Addstring(shstrtab, ".note.go.abihash")
+ Addstring(shstrtab, ".note.go.pkg-list")
+ Addstring(shstrtab, ".note.go.deps")
+ }
+ }
+
+ hasinitarr := Linkshared
+
+ /* shared library initializer */
+ switch Buildmode {
+ case BuildmodeCArchive, BuildmodeCShared, BuildmodeShared:
+ hasinitarr = true
+ }
+
+ if hasinitarr {
+ Addstring(shstrtab, ".init_array")
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ Addstring(shstrtab, ".rela.init_array")
+ default:
+ Addstring(shstrtab, ".rel.init_array")
+ }
+ }
+
+ if Debug['s'] == 0 {
+ Addstring(shstrtab, ".symtab")
+ Addstring(shstrtab, ".strtab")
+ dwarfaddshstrings(shstrtab)
+ }
+
+ Addstring(shstrtab, ".shstrtab")
+
+ if Debug['d'] == 0 { /* -d suppresses dynamic loader format */
+ Addstring(shstrtab, ".interp")
+ Addstring(shstrtab, ".hash")
+ Addstring(shstrtab, ".got")
+ if Thearch.Thechar == '9' {
+ Addstring(shstrtab, ".glink")
+ }
+ Addstring(shstrtab, ".got.plt")
+ Addstring(shstrtab, ".dynamic")
+ Addstring(shstrtab, ".dynsym")
+ Addstring(shstrtab, ".dynstr")
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ Addstring(shstrtab, ".rela")
+ Addstring(shstrtab, ".rela.plt")
+ default:
+ Addstring(shstrtab, ".rel")
+ Addstring(shstrtab, ".rel.plt")
+ }
+
+ Addstring(shstrtab, ".plt")
+ Addstring(shstrtab, ".gnu.version")
+ Addstring(shstrtab, ".gnu.version_r")
+
+ /* dynamic symbol table - first entry all zeros */
+ s := Linklookup(Ctxt, ".dynsym", 0)
+
+ s.Type = obj.SELFROSECT
+ s.Reachable = true
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ s.Size += ELF64SYMSIZE
+ default:
+ s.Size += ELF32SYMSIZE
+ }
+
+ /* dynamic string table */
+ s = Linklookup(Ctxt, ".dynstr", 0)
+
+ s.Type = obj.SELFROSECT
+ s.Reachable = true
+ if s.Size == 0 {
+ Addstring(s, "")
+ }
+ dynstr := s
+
+ /* relocation table */
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ s = Linklookup(Ctxt, ".rela", 0)
+ default:
+ s = Linklookup(Ctxt, ".rel", 0)
+ }
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+
+ /* global offset table */
+ s = Linklookup(Ctxt, ".got", 0)
+
+ s.Reachable = true
+ s.Type = obj.SELFGOT // writable
+
+ /* ppc64 glink resolver */
+ if Thearch.Thechar == '9' {
+ s := Linklookup(Ctxt, ".glink", 0)
+ s.Reachable = true
+ s.Type = obj.SELFRXSECT
+ }
+
+ /* hash */
+ s = Linklookup(Ctxt, ".hash", 0)
+
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+
+ s = Linklookup(Ctxt, ".got.plt", 0)
+ s.Reachable = true
+ s.Type = obj.SELFSECT // writable
+
+ s = Linklookup(Ctxt, ".plt", 0)
+
+ s.Reachable = true
+ if Thearch.Thechar == '9' {
+ // In the ppc64 ABI, .plt is a data section
+ // written by the dynamic linker.
+ s.Type = obj.SELFSECT
+ } else {
+ s.Type = obj.SELFRXSECT
+ }
+
+ Thearch.Elfsetupplt()
+
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ s = Linklookup(Ctxt, ".rela.plt", 0)
+ default:
+ s = Linklookup(Ctxt, ".rel.plt", 0)
+ }
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+
+ s = Linklookup(Ctxt, ".gnu.version", 0)
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+
+ s = Linklookup(Ctxt, ".gnu.version_r", 0)
+ s.Reachable = true
+ s.Type = obj.SELFROSECT
+
+ /* define dynamic elf table */
+ s = Linklookup(Ctxt, ".dynamic", 0)
+
+ s.Reachable = true
+ s.Type = obj.SELFSECT // writable
+
+ /*
+ * .dynamic table
+ */
+ elfwritedynentsym(s, DT_HASH, Linklookup(Ctxt, ".hash", 0))
+
+ elfwritedynentsym(s, DT_SYMTAB, Linklookup(Ctxt, ".dynsym", 0))
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ Elfwritedynent(s, DT_SYMENT, ELF64SYMSIZE)
+ default:
+ Elfwritedynent(s, DT_SYMENT, ELF32SYMSIZE)
+ }
+ elfwritedynentsym(s, DT_STRTAB, Linklookup(Ctxt, ".dynstr", 0))
+ elfwritedynentsymsize(s, DT_STRSZ, Linklookup(Ctxt, ".dynstr", 0))
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ elfwritedynentsym(s, DT_RELA, Linklookup(Ctxt, ".rela", 0))
+ elfwritedynentsymsize(s, DT_RELASZ, Linklookup(Ctxt, ".rela", 0))
+ Elfwritedynent(s, DT_RELAENT, ELF64RELASIZE)
+ default:
+ elfwritedynentsym(s, DT_REL, Linklookup(Ctxt, ".rel", 0))
+ elfwritedynentsymsize(s, DT_RELSZ, Linklookup(Ctxt, ".rel", 0))
+ Elfwritedynent(s, DT_RELENT, ELF32RELSIZE)
+ }
+
+ if rpath.val != "" {
+ Elfwritedynent(s, DT_RUNPATH, uint64(Addstring(dynstr, rpath.val)))
+ }
+
+ if Thearch.Thechar == '9' {
+ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".plt", 0))
+ } else {
+ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got.plt", 0))
+ }
+
+ if Thearch.Thechar == '9' {
+ Elfwritedynent(s, DT_PPC64_OPT, 0)
+ }
+
+ // Solaris dynamic linker can't handle an empty .rela.plt if
+ // DT_JMPREL is emitted so we have to defer generation of DT_PLTREL,
+ // DT_PLTRELSZ, and DT_JMPREL dynamic entries until after we know the
+ // size of .rel(a).plt section.
+ Elfwritedynent(s, DT_DEBUG, 0)
+ }
+
+ if Buildmode == BuildmodeShared {
+ // The go.link.abihashbytes symbol will be pointed at the appropriate
+ // part of the .note.go.abihash section in data.go:func address().
+ s := Linklookup(Ctxt, "go.link.abihashbytes", 0)
+ s.Local = true
+ s.Type = obj.SRODATA
+ s.Special = 1
+ s.Reachable = true
+ s.Size = int64(sha1.Size)
+
+ sort.Sort(byPkg(Ctxt.Library))
+ h := sha1.New()
+ for _, l := range Ctxt.Library {
+ h.Write(l.hash)
+ }
+ addgonote(".note.go.abihash", ELF_NOTE_GOABIHASH_TAG, h.Sum([]byte{}))
+ addgonote(".note.go.pkg-list", ELF_NOTE_GOPKGLIST_TAG, []byte(pkglistfornote))
+ var deplist []string
+ for _, shlib := range Ctxt.Shlibs {
+ deplist = append(deplist, filepath.Base(shlib.Path))
+ }
+ addgonote(".note.go.deps", ELF_NOTE_GODEPS_TAG, []byte(strings.Join(deplist, "\n")))
+ }
+
+ if Linkmode == LinkExternal && buildid != "" {
+ addgonote(".note.go.buildid", ELF_NOTE_GOBUILDID_TAG, []byte(buildid))
+ }
+}
+
+// Do not write DT_NULL. elfdynhash will finish it.
+func shsym(sh *ElfShdr, s *LSym) {
+ addr := Symaddr(s)
+ if sh.flags&SHF_ALLOC != 0 {
+ sh.addr = uint64(addr)
+ }
+ sh.off = uint64(datoff(addr))
+ sh.size = uint64(s.Size)
+}
+
+func phsh(ph *ElfPhdr, sh *ElfShdr) {
+ ph.vaddr = sh.addr
+ ph.paddr = ph.vaddr
+ ph.off = sh.off
+ ph.filesz = sh.size
+ ph.memsz = sh.size
+ ph.align = sh.addralign
+}
+
+func Asmbelfsetup() {
+ /* This null SHdr must appear before all others */
+ elfshname("")
+
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
+ elfshalloc(sect)
+ }
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
+ elfshalloc(sect)
+ }
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ elfshalloc(sect)
+ }
+}
+
+func Asmbelf(symo int64) {
+ eh := getElfEhdr()
+ switch Thearch.Thechar {
+ default:
+ Exitf("unknown architecture in asmbelf: %v", Thearch.Thechar)
+ case '0':
+ eh.machine = EM_MIPS
+ case '5':
+ eh.machine = EM_ARM
+ case '6':
+ eh.machine = EM_X86_64
+ case '7':
+ eh.machine = EM_AARCH64
+ case '8':
+ eh.machine = EM_386
+ case '9':
+ eh.machine = EM_PPC64
+ }
+
+ elfreserve := int64(ELFRESERVE)
+ startva := INITTEXT - int64(HEADR)
+ resoff := elfreserve
+
+ var pph *ElfPhdr
+ var pnote *ElfPhdr
+ if Linkmode == LinkExternal {
+ /* skip program headers */
+ eh.phoff = 0
+
+ eh.phentsize = 0
+
+ if Buildmode == BuildmodeShared {
+ sh := elfshname(".note.go.pkg-list")
+ sh.type_ = SHT_NOTE
+ sh = elfshname(".note.go.abihash")
+ sh.type_ = SHT_NOTE
+ sh.flags = SHF_ALLOC
+ sh = elfshname(".note.go.deps")
+ sh.type_ = SHT_NOTE
+ }
+
+ if buildid != "" {
+ sh := elfshname(".note.go.buildid")
+ sh.type_ = SHT_NOTE
+ sh.flags = SHF_ALLOC
+ }
+
+ goto elfobj
+ }
+
+ /* program header info */
+ pph = newElfPhdr()
+
+ pph.type_ = PT_PHDR
+ pph.flags = PF_R
+ pph.off = uint64(eh.ehsize)
+ pph.vaddr = uint64(INITTEXT) - uint64(HEADR) + pph.off
+ pph.paddr = uint64(INITTEXT) - uint64(HEADR) + pph.off
+ pph.align = uint64(INITRND)
+
+ /*
+ * PHDR must be in a loaded segment. Adjust the text
+ * segment boundaries downwards to include it.
+ * Except on NaCl where it must not be loaded.
+ */
+ if HEADTYPE != obj.Hnacl {
+ o := int64(Segtext.Vaddr - pph.vaddr)
+ Segtext.Vaddr -= uint64(o)
+ Segtext.Length += uint64(o)
+ o = int64(Segtext.Fileoff - pph.off)
+ Segtext.Fileoff -= uint64(o)
+ Segtext.Filelen += uint64(o)
+ }
+
+ if Debug['d'] == 0 { /* -d suppresses dynamic loader format */
+ /* interpreter */
+ sh := elfshname(".interp")
+
+ sh.type_ = SHT_PROGBITS
+ sh.flags = SHF_ALLOC
+ sh.addralign = 1
+ if interpreter == "" {
+ switch HEADTYPE {
+ case obj.Hlinux:
+ interpreter = Thearch.Linuxdynld
+
+ case obj.Hfreebsd:
+ interpreter = Thearch.Freebsddynld
+
+ case obj.Hnetbsd:
+ interpreter = Thearch.Netbsddynld
+
+ case obj.Hopenbsd:
+ interpreter = Thearch.Openbsddynld
+
+ case obj.Hdragonfly:
+ interpreter = Thearch.Dragonflydynld
+
+ case obj.Hsolaris:
+ interpreter = Thearch.Solarisdynld
+ }
+ }
+
+ resoff -= int64(elfinterp(sh, uint64(startva), uint64(resoff), interpreter))
+
+ ph := newElfPhdr()
+ ph.type_ = PT_INTERP
+ ph.flags = PF_R
+ phsh(ph, sh)
+ }
+
+ pnote = nil
+ if HEADTYPE == obj.Hnetbsd || HEADTYPE == obj.Hopenbsd {
+ var sh *ElfShdr
+ switch HEADTYPE {
+ case obj.Hnetbsd:
+ sh = elfshname(".note.netbsd.ident")
+ resoff -= int64(elfnetbsdsig(sh, uint64(startva), uint64(resoff)))
+
+ case obj.Hopenbsd:
+ sh = elfshname(".note.openbsd.ident")
+ resoff -= int64(elfopenbsdsig(sh, uint64(startva), uint64(resoff)))
+ }
+
+ pnote = newElfPhdr()
+ pnote.type_ = PT_NOTE
+ pnote.flags = PF_R
+ phsh(pnote, sh)
+ }
+
+ if len(buildinfo) > 0 {
+ sh := elfshname(".note.gnu.build-id")
+ resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
+
+ if pnote == nil {
+ pnote = newElfPhdr()
+ pnote.type_ = PT_NOTE
+ pnote.flags = PF_R
+ }
+
+ phsh(pnote, sh)
+ }
+
+ if buildid != "" {
+ sh := elfshname(".note.go.buildid")
+ resoff -= int64(elfgobuildid(sh, uint64(startva), uint64(resoff)))
+
+ pnote := newElfPhdr()
+ pnote.type_ = PT_NOTE
+ pnote.flags = PF_R
+ phsh(pnote, sh)
+ }
+
+ // Additions to the reserved area must be above this line.
+
+ elfphload(&Segtext)
+ if Segrodata.Sect != nil {
+ elfphload(&Segrodata)
+ }
+ elfphload(&Segdata)
+
+ /* Dynamic linking sections */
+ if Debug['d'] == 0 {
+ sh := elfshname(".dynsym")
+ sh.type_ = SHT_DYNSYM
+ sh.flags = SHF_ALLOC
+ if elf64 {
+ sh.entsize = ELF64SYMSIZE
+ } else {
+ sh.entsize = ELF32SYMSIZE
+ }
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.link = uint32(elfshname(".dynstr").shnum)
+
+ // sh->info = index of first non-local symbol (number of local symbols)
+ shsym(sh, Linklookup(Ctxt, ".dynsym", 0))
+
+ sh = elfshname(".dynstr")
+ sh.type_ = SHT_STRTAB
+ sh.flags = SHF_ALLOC
+ sh.addralign = 1
+ shsym(sh, Linklookup(Ctxt, ".dynstr", 0))
+
+ if elfverneed != 0 {
+ sh := elfshname(".gnu.version")
+ sh.type_ = SHT_GNU_VERSYM
+ sh.flags = SHF_ALLOC
+ sh.addralign = 2
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ sh.entsize = 2
+ shsym(sh, Linklookup(Ctxt, ".gnu.version", 0))
+
+ sh = elfshname(".gnu.version_r")
+ sh.type_ = SHT_GNU_VERNEED
+ sh.flags = SHF_ALLOC
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.info = uint32(elfverneed)
+ sh.link = uint32(elfshname(".dynstr").shnum)
+ shsym(sh, Linklookup(Ctxt, ".gnu.version_r", 0))
+ }
+
+ switch eh.machine {
+ case EM_X86_64, EM_PPC64, EM_AARCH64:
+ sh := elfshname(".rela.plt")
+ sh.type_ = SHT_RELA
+ sh.flags = SHF_ALLOC
+ sh.entsize = ELF64RELASIZE
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ sh.info = uint32(elfshname(".plt").shnum)
+ shsym(sh, Linklookup(Ctxt, ".rela.plt", 0))
+
+ sh = elfshname(".rela")
+ sh.type_ = SHT_RELA
+ sh.flags = SHF_ALLOC
+ sh.entsize = ELF64RELASIZE
+ sh.addralign = 8
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ shsym(sh, Linklookup(Ctxt, ".rela", 0))
+
+ default:
+ sh := elfshname(".rel.plt")
+ sh.type_ = SHT_REL
+ sh.flags = SHF_ALLOC
+ sh.entsize = ELF32RELSIZE
+ sh.addralign = 4
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ shsym(sh, Linklookup(Ctxt, ".rel.plt", 0))
+
+ sh = elfshname(".rel")
+ sh.type_ = SHT_REL
+ sh.flags = SHF_ALLOC
+ sh.entsize = ELF32RELSIZE
+ sh.addralign = 4
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ shsym(sh, Linklookup(Ctxt, ".rel", 0))
+ }
+
+ if eh.machine == EM_PPC64 {
+ sh := elfshname(".glink")
+ sh.type_ = SHT_PROGBITS
+ sh.flags = SHF_ALLOC + SHF_EXECINSTR
+ sh.addralign = 4
+ shsym(sh, Linklookup(Ctxt, ".glink", 0))
+ }
+
+ sh = elfshname(".plt")
+ sh.type_ = SHT_PROGBITS
+ sh.flags = SHF_ALLOC + SHF_EXECINSTR
+ if eh.machine == EM_X86_64 {
+ sh.entsize = 16
+ } else if eh.machine == EM_PPC64 {
+ // On ppc64, this is just a table of addresses
+ // filled by the dynamic linker
+ sh.type_ = SHT_NOBITS
+
+ sh.flags = SHF_ALLOC + SHF_WRITE
+ sh.entsize = 8
+ } else {
+ sh.entsize = 4
+ }
+ sh.addralign = sh.entsize
+ shsym(sh, Linklookup(Ctxt, ".plt", 0))
+
+ // On ppc64, .got comes from the input files, so don't
+ // create it here, and .got.plt is not used.
+ if eh.machine != EM_PPC64 {
+ sh := elfshname(".got")
+ sh.type_ = SHT_PROGBITS
+ sh.flags = SHF_ALLOC + SHF_WRITE
+ sh.entsize = uint64(Thearch.Regsize)
+ sh.addralign = uint64(Thearch.Regsize)
+ shsym(sh, Linklookup(Ctxt, ".got", 0))
+
+ sh = elfshname(".got.plt")
+ sh.type_ = SHT_PROGBITS
+ sh.flags = SHF_ALLOC + SHF_WRITE
+ sh.entsize = uint64(Thearch.Regsize)
+ sh.addralign = uint64(Thearch.Regsize)
+ shsym(sh, Linklookup(Ctxt, ".got.plt", 0))
+ }
+
+ sh = elfshname(".hash")
+ sh.type_ = SHT_HASH
+ sh.flags = SHF_ALLOC
+ sh.entsize = 4
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.link = uint32(elfshname(".dynsym").shnum)
+ shsym(sh, Linklookup(Ctxt, ".hash", 0))
+
+ /* sh and PT_DYNAMIC for .dynamic section */
+ sh = elfshname(".dynamic")
+
+ sh.type_ = SHT_DYNAMIC
+ sh.flags = SHF_ALLOC + SHF_WRITE
+ sh.entsize = 2 * uint64(Thearch.Regsize)
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.link = uint32(elfshname(".dynstr").shnum)
+ shsym(sh, Linklookup(Ctxt, ".dynamic", 0))
+ ph := newElfPhdr()
+ ph.type_ = PT_DYNAMIC
+ ph.flags = PF_R + PF_W
+ phsh(ph, sh)
+
+ /*
+ * Thread-local storage segment (really just size).
+ */
+ // Do not emit PT_TLS for OpenBSD since ld.so(1) does
+ // not currently support it. This is handled
+ // appropriately in runtime/cgo.
+ if HEADTYPE != obj.Hopenbsd {
+ tlssize := uint64(0)
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ if sect.Name == ".tbss" {
+ tlssize = sect.Length
+ }
+ }
+ if tlssize != 0 {
+ ph := newElfPhdr()
+ ph.type_ = PT_TLS
+ ph.flags = PF_R
+ ph.memsz = tlssize
+ ph.align = uint64(Thearch.Regsize)
+ }
+ }
+ }
+
+ if HEADTYPE == obj.Hlinux {
+ ph := newElfPhdr()
+ ph.type_ = PT_GNU_STACK
+ ph.flags = PF_W + PF_R
+ ph.align = uint64(Thearch.Regsize)
+
+ ph = newElfPhdr()
+ ph.type_ = PT_PAX_FLAGS
+ ph.flags = 0x2a00 // mprotect, randexec, emutramp disabled
+ ph.align = uint64(Thearch.Regsize)
+ }
+
+elfobj:
+ sh := elfshname(".shstrtab")
+ sh.type_ = SHT_STRTAB
+ sh.addralign = 1
+ shsym(sh, Linklookup(Ctxt, ".shstrtab", 0))
+ eh.shstrndx = uint16(sh.shnum)
+
+ // put these sections early in the list
+ if Debug['s'] == 0 {
+ elfshname(".symtab")
+ elfshname(".strtab")
+ }
+
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
+ elfshbits(sect)
+ }
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
+ elfshbits(sect)
+ }
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ elfshbits(sect)
+ }
+
+ if Linkmode == LinkExternal {
+ for sect := Segtext.Sect; sect != nil; sect = sect.Next {
+ elfshreloc(sect)
+ }
+ for sect := Segrodata.Sect; sect != nil; sect = sect.Next {
+ elfshreloc(sect)
+ }
+ for sect := Segdata.Sect; sect != nil; sect = sect.Next {
+ elfshreloc(sect)
+ }
+
+ // add a .note.GNU-stack section to mark the stack as non-executable
+ sh := elfshname(".note.GNU-stack")
+
+ sh.type_ = SHT_PROGBITS
+ sh.addralign = 1
+ sh.flags = 0
+ }
+
+ if Debug['s'] == 0 {
+ sh := elfshname(".symtab")
+ sh.type_ = SHT_SYMTAB
+ sh.off = uint64(symo)
+ sh.size = uint64(Symsize)
+ sh.addralign = uint64(Thearch.Regsize)
+ sh.entsize = 8 + 2*uint64(Thearch.Regsize)
+ sh.link = uint32(elfshname(".strtab").shnum)
+ sh.info = uint32(elfglobalsymndx)
+
+ sh = elfshname(".strtab")
+ sh.type_ = SHT_STRTAB
+ sh.off = uint64(symo) + uint64(Symsize)
+ sh.size = uint64(len(Elfstrdat))
+ sh.addralign = 1
+
+ dwarfaddelfheaders()
+ }
+
+ /* Main header */
+ eh.ident[EI_MAG0] = '\177'
+
+ eh.ident[EI_MAG1] = 'E'
+ eh.ident[EI_MAG2] = 'L'
+ eh.ident[EI_MAG3] = 'F'
+ if HEADTYPE == obj.Hfreebsd {
+ eh.ident[EI_OSABI] = ELFOSABI_FREEBSD
+ } else if HEADTYPE == obj.Hnetbsd {
+ eh.ident[EI_OSABI] = ELFOSABI_NETBSD
+ } else if HEADTYPE == obj.Hopenbsd {
+ eh.ident[EI_OSABI] = ELFOSABI_OPENBSD
+ } else if HEADTYPE == obj.Hdragonfly {
+ eh.ident[EI_OSABI] = ELFOSABI_NONE
+ }
+ if elf64 {
+ eh.ident[EI_CLASS] = ELFCLASS64
+ } else {
+ eh.ident[EI_CLASS] = ELFCLASS32
+ }
+ if Ctxt.Arch.ByteOrder == binary.BigEndian {
+ eh.ident[EI_DATA] = ELFDATA2MSB
+ } else {
+ eh.ident[EI_DATA] = ELFDATA2LSB
+ }
+ eh.ident[EI_VERSION] = EV_CURRENT
+
+ if Linkmode == LinkExternal {
+ eh.type_ = ET_REL
+ } else {
+ eh.type_ = ET_EXEC
+ }
+
+ if Linkmode != LinkExternal {
+ eh.entry = uint64(Entryvalue())
+ }
+
+ eh.version = EV_CURRENT
+
+ if pph != nil {
+ pph.filesz = uint64(eh.phnum) * uint64(eh.phentsize)
+ pph.memsz = pph.filesz
+ }
+
+ Cseek(0)
+ a := int64(0)
+ a += int64(elfwritehdr())
+ a += int64(elfwritephdrs())
+ a += int64(elfwriteshdrs())
+ if Debug['d'] == 0 {
+ a += int64(elfwriteinterp())
+ }
+ if Linkmode != LinkExternal {
+ if HEADTYPE == obj.Hnetbsd {
+ a += int64(elfwritenetbsdsig())
+ }
+ if HEADTYPE == obj.Hopenbsd {
+ a += int64(elfwriteopenbsdsig())
+ }
+ if len(buildinfo) > 0 {
+ a += int64(elfwritebuildinfo())
+ }
+ if buildid != "" {
+ a += int64(elfwritegobuildid())
+ }
+ }
+
+ if a > elfreserve {
+ Diag("ELFRESERVE too small: %d > %d", a, elfreserve)
+ }
+}
+
+func Elfadddynsym(ctxt *Link, s *LSym) {
+ if elf64 {
+ s.Dynid = int32(Nelfsym)
+ Nelfsym++
+
+ d := Linklookup(ctxt, ".dynsym", 0)
+
+ name := s.Extname
+ Adduint32(ctxt, d, uint32(Addstring(Linklookup(ctxt, ".dynstr", 0), name)))
+
+ /* type */
+ t := STB_GLOBAL << 4
+
+ if s.Cgoexport != 0 && s.Type&obj.SMASK == obj.STEXT {
+ t |= STT_FUNC
+ } else {
+ t |= STT_OBJECT
+ }
+ Adduint8(ctxt, d, uint8(t))
+
+ /* reserved */
+ Adduint8(ctxt, d, 0)
+
+ /* section where symbol is defined */
+ if s.Type == obj.SDYNIMPORT {
+ Adduint16(ctxt, d, SHN_UNDEF)
+ } else {
+ Adduint16(ctxt, d, 1)
+ }
+
+ /* value */
+ if s.Type == obj.SDYNIMPORT {
+ Adduint64(ctxt, d, 0)
+ } else {
+ Addaddr(ctxt, d, s)
+ }
+
+ /* size of object */
+ Adduint64(ctxt, d, uint64(s.Size))
+
+ if Thearch.Thechar == '6' && s.Cgoexport&CgoExportDynamic == 0 && s.Dynimplib != "" && !seenlib[s.Dynimplib] {
+ Elfwritedynent(Linklookup(ctxt, ".dynamic", 0), DT_NEEDED, uint64(Addstring(Linklookup(ctxt, ".dynstr", 0), s.Dynimplib)))
+ }
+ } else {
+ s.Dynid = int32(Nelfsym)
+ Nelfsym++
+
+ d := Linklookup(ctxt, ".dynsym", 0)
+
+ /* name */
+ name := s.Extname
+
+ Adduint32(ctxt, d, uint32(Addstring(Linklookup(ctxt, ".dynstr", 0), name)))
+
+ /* value */
+ if s.Type == obj.SDYNIMPORT {
+ Adduint32(ctxt, d, 0)
+ } else {
+ Addaddr(ctxt, d, s)
+ }
+
+ /* size */
+ Adduint32(ctxt, d, 0)
+
+ /* type */
+ t := STB_GLOBAL << 4
+
+ // TODO(mwhudson): presumably the behaviour should actually be the same on both arm and 386.
+ if Thearch.Thechar == '8' && s.Cgoexport != 0 && s.Type&obj.SMASK == obj.STEXT {
+ t |= STT_FUNC
+ } else if Thearch.Thechar == '5' && s.Cgoexport&CgoExportDynamic != 0 && s.Type&obj.SMASK == obj.STEXT {
+ t |= STT_FUNC
+ } else {
+ t |= STT_OBJECT
+ }
+ Adduint8(ctxt, d, uint8(t))
+ Adduint8(ctxt, d, 0)
+
+ /* shndx */
+ if s.Type == obj.SDYNIMPORT {
+ Adduint16(ctxt, d, SHN_UNDEF)
+ } else {
+ Adduint16(ctxt, d, 1)
+ }
+ }
+}
+
+func ELF32_R_SYM(info uint32) uint32 {
+ return info >> 8
+}
+
+func ELF32_R_TYPE(info uint32) uint32 {
+ return uint32(uint8(info))
+}
+
+func ELF32_R_INFO(sym uint32, type_ uint32) uint32 {
+ return sym<<8 | type_
+}
+
+func ELF32_ST_BIND(info uint8) uint8 {
+ return info >> 4
+}
+
+func ELF32_ST_TYPE(info uint8) uint8 {
+ return info & 0xf
+}
+
+func ELF32_ST_INFO(bind uint8, type_ uint8) uint8 {
+ return bind<<4 | type_&0xf
+}
+
+func ELF32_ST_VISIBILITY(oth uint8) uint8 {
+ return oth & 3
+}
+
+func ELF64_R_SYM(info uint64) uint32 {
+ return uint32(info >> 32)
+}
+
+func ELF64_R_TYPE(info uint64) uint32 {
+ return uint32(info)
+}
+
+func ELF64_R_INFO(sym uint32, type_ uint32) uint64 {
+ return uint64(sym)<<32 | uint64(type_)
+}
+
+func ELF64_ST_BIND(info uint8) uint8 {
+ return info >> 4
+}
+
+func ELF64_ST_TYPE(info uint8) uint8 {
+ return info & 0xf
+}
+
+func ELF64_ST_INFO(bind uint8, type_ uint8) uint8 {
+ return bind<<4 | type_&0xf
+}
+
+func ELF64_ST_VISIBILITY(oth uint8) uint8 {
+ return oth & 3
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/ldelf.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/ldelf.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/ldelf.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/ldelf.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,1172 @@
+package ld
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "sort"
+ "strings"
+)
+
+/*
+Derived from Plan 9 from User Space's src/libmach/elf.h, elf.c
+http://code.swtch.com/plan9port/src/tip/src/libmach/
+
+ Copyright © 2004 Russ Cox.
+ Portions Copyright © 2008-2010 Google Inc.
+ Portions Copyright © 2010 The Go Authors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+const (
+ ElfClassNone = 0
+ ElfClass32 = 1
+ ElfClass64 = 2
+)
+
+const (
+ ElfDataNone = 0
+ ElfDataLsb = 1
+ ElfDataMsb = 2
+)
+
+const (
+ ElfTypeNone = 0
+ ElfTypeRelocatable = 1
+ ElfTypeExecutable = 2
+ ElfTypeSharedObject = 3
+ ElfTypeCore = 4
+)
+
+const (
+ ElfMachNone = 0
+ ElfMach32100 = 1
+ ElfMachSparc = 2
+ ElfMach386 = 3
+ ElfMach68000 = 4
+ ElfMach88000 = 5
+ ElfMach486 = 6
+ ElfMach860 = 7
+ ElfMachMips = 8
+ ElfMachS370 = 9
+ ElfMachMipsLe = 10
+ ElfMachParisc = 15
+ ElfMachVpp500 = 17
+ ElfMachSparc32Plus = 18
+ ElfMach960 = 19
+ ElfMachPower = 20
+ ElfMachPower64 = 21
+ ElfMachS390 = 22
+ ElfMachV800 = 36
+ ElfMachFr20 = 37
+ ElfMachRh32 = 38
+ ElfMachRce = 39
+ ElfMachArm = 40
+ ElfMachAlpha = 41
+ ElfMachSH = 42
+ ElfMachSparc9 = 43
+ ElfMachAmd64 = 62
+ ElfMachArm64 = 183
+)
+
+const (
+ ElfAbiNone = 0
+ ElfAbiSystemV = 0
+ ElfAbiHPUX = 1
+ ElfAbiNetBSD = 2
+ ElfAbiLinux = 3
+ ElfAbiSolaris = 6
+ ElfAbiAix = 7
+ ElfAbiIrix = 8
+ ElfAbiFreeBSD = 9
+ ElfAbiTru64 = 10
+ ElfAbiModesto = 11
+ ElfAbiOpenBSD = 12
+ ElfAbiARM = 97
+ ElfAbiEmbedded = 255
+)
+
+const (
+ ElfSectNone = 0
+ ElfSectProgbits = 1
+ ElfSectSymtab = 2
+ ElfSectStrtab = 3
+ ElfSectRela = 4
+ ElfSectHash = 5
+ ElfSectDynamic = 6
+ ElfSectNote = 7
+ ElfSectNobits = 8
+ ElfSectRel = 9
+ ElfSectShlib = 10
+ ElfSectDynsym = 11
+ ElfSectFlagWrite = 0x1
+ ElfSectFlagAlloc = 0x2
+ ElfSectFlagExec = 0x4
+)
+
+const (
+ ElfSymBindLocal = 0
+ ElfSymBindGlobal = 1
+ ElfSymBindWeak = 2
+)
+
+const (
+ ElfSymTypeNone = 0
+ ElfSymTypeObject = 1
+ ElfSymTypeFunc = 2
+ ElfSymTypeSection = 3
+ ElfSymTypeFile = 4
+)
+
+const (
+ ElfSymShnNone = 0
+ ElfSymShnAbs = 0xFFF1
+ ElfSymShnCommon = 0xFFF2
+)
+
+const (
+ ElfProgNone = 0
+ ElfProgLoad = 1
+ ElfProgDynamic = 2
+ ElfProgInterp = 3
+ ElfProgNote = 4
+ ElfProgShlib = 5
+ ElfProgPhdr = 6
+ ElfProgFlagExec = 0x1
+ ElfProgFlagWrite = 0x2
+ ElfProgFlagRead = 0x4
+)
+
+const (
+ ElfNotePrStatus = 1
+ ElfNotePrFpreg = 2
+ ElfNotePrPsinfo = 3
+ ElfNotePrTaskstruct = 4
+ ElfNotePrAuxv = 6
+ ElfNotePrXfpreg = 0x46e62b7f
+)
+
+type ElfHdrBytes struct {
+ Ident [16]uint8
+ Type [2]uint8
+ Machine [2]uint8
+ Version [4]uint8
+ Entry [4]uint8
+ Phoff [4]uint8
+ Shoff [4]uint8
+ Flags [4]uint8
+ Ehsize [2]uint8
+ Phentsize [2]uint8
+ Phnum [2]uint8
+ Shentsize [2]uint8
+ Shnum [2]uint8
+ Shstrndx [2]uint8
+}
+
+type ElfSectBytes struct {
+ Name [4]uint8
+ Type [4]uint8
+ Flags [4]uint8
+ Addr [4]uint8
+ Off [4]uint8
+ Size [4]uint8
+ Link [4]uint8
+ Info [4]uint8
+ Align [4]uint8
+ Entsize [4]uint8
+}
+
+type ElfProgBytes struct {
+}
+
+type ElfSymBytes struct {
+ Name [4]uint8
+ Value [4]uint8
+ Size [4]uint8
+ Info uint8
+ Other uint8
+ Shndx [2]uint8
+}
+
+type ElfHdrBytes64 struct {
+ Ident [16]uint8
+ Type [2]uint8
+ Machine [2]uint8
+ Version [4]uint8
+ Entry [8]uint8
+ Phoff [8]uint8
+ Shoff [8]uint8
+ Flags [4]uint8
+ Ehsize [2]uint8
+ Phentsize [2]uint8
+ Phnum [2]uint8
+ Shentsize [2]uint8
+ Shnum [2]uint8
+ Shstrndx [2]uint8
+}
+
+type ElfSectBytes64 struct {
+ Name [4]uint8
+ Type [4]uint8
+ Flags [8]uint8
+ Addr [8]uint8
+ Off [8]uint8
+ Size [8]uint8
+ Link [4]uint8
+ Info [4]uint8
+ Align [8]uint8
+ Entsize [8]uint8
+}
+
+type ElfProgBytes64 struct {
+}
+
+type ElfSymBytes64 struct {
+ Name [4]uint8
+ Info uint8
+ Other uint8
+ Shndx [2]uint8
+ Value [8]uint8
+ Size [8]uint8
+}
+
+type ElfSect struct {
+ name string
+ nameoff uint32
+ type_ uint32
+ flags uint64
+ addr uint64
+ off uint64
+ size uint64
+ link uint32
+ info uint32
+ align uint64
+ entsize uint64
+ base []byte
+ sym *LSym
+}
+
+type ElfObj struct {
+ f *obj.Biobuf
+ base int64 // offset in f where ELF begins
+ length int64 // length of ELF
+ is64 int
+ name string
+ e binary.ByteOrder
+ sect []ElfSect
+ nsect uint
+ shstrtab string
+ nsymtab int
+ symtab *ElfSect
+ symstr *ElfSect
+ type_ uint32
+ machine uint32
+ version uint32
+ entry uint64
+ phoff uint64
+ shoff uint64
+ flags uint32
+ ehsize uint32
+ phentsize uint32
+ phnum uint32
+ shentsize uint32
+ shnum uint32
+ shstrndx uint32
+}
+
+type ElfSym struct {
+ name string
+ value uint64
+ size uint64
+ bind uint8
+ type_ uint8
+ other uint8
+ shndx uint16
+ sym *LSym
+}
+
+var ElfMagic = [4]uint8{0x7F, 'E', 'L', 'F'}
+
+func valuecmp(a *LSym, b *LSym) int {
+ if a.Value < b.Value {
+ return -1
+ }
+ if a.Value > b.Value {
+ return +1
+ }
+ return 0
+}
+
+const (
+ Tag_file = 1
+ Tag_CPU_name = 4
+ Tag_CPU_raw_name = 5
+ Tag_compatibility = 32
+ Tag_nodefaults = 64
+ Tag_also_compatible_with = 65
+ Tag_ABI_VFP_args = 28
+)
+
+type elfAttribute struct {
+ tag uint64
+ sval string
+ ival uint64
+}
+
+type elfAttributeList struct {
+ data []byte
+ err error
+}
+
+func (a *elfAttributeList) string() string {
+ if a.err != nil {
+ return ""
+ }
+ nul := bytes.IndexByte(a.data, 0)
+ if nul < 0 {
+ a.err = io.EOF
+ return ""
+ }
+ s := string(a.data[:nul])
+ a.data = a.data[nul+1:]
+ return s
+}
+
+func (a *elfAttributeList) uleb128() uint64 {
+ if a.err != nil {
+ return 0
+ }
+ v, size := binary.Uvarint(a.data)
+ a.data = a.data[size:]
+ return v
+}
+
+// Read an elfAttribute from the list following the rules used on ARM systems.
+func (a *elfAttributeList) armAttr() elfAttribute {
+ attr := elfAttribute{tag: a.uleb128()}
+ switch {
+ case attr.tag == Tag_compatibility:
+ attr.ival = a.uleb128()
+ attr.sval = a.string()
+
+ case attr.tag == 64: // Tag_nodefaults has no argument
+
+ case attr.tag == 65: // Tag_also_compatible_with
+ // Not really, but we don't actually care about this tag.
+ attr.sval = a.string()
+
+ // Tag with string argument
+ case attr.tag == Tag_CPU_name || attr.tag == Tag_CPU_raw_name || (attr.tag >= 32 && attr.tag&1 != 0):
+ attr.sval = a.string()
+
+ default: // Tag with integer argument
+ attr.ival = a.uleb128()
+ }
+ return attr
+}
+
+func (a *elfAttributeList) done() bool {
+ if a.err != nil || len(a.data) == 0 {
+ return true
+ }
+ return false
+}
+
+// Look for the attribute that indicates the object uses the hard-float ABI (a
+// file-level attribute with tag Tag_VFP_arch and value 1). Unfortunately the
+// format used means that we have to parse all of the file-level attributes to
+// find the one we are looking for. This format is slightly documented in "ELF
+// for the ARM Architecture" but mostly this is derived from reading the source
+// to gold and readelf.
+func parseArmAttributes(e binary.ByteOrder, data []byte) {
+ // We assume the soft-float ABI unless we see a tag indicating otherwise.
+ if ehdr.flags == 0x5000002 {
+ ehdr.flags = 0x5000202
+ }
+ if data[0] != 'A' {
+ fmt.Fprintf(&Bso, ".ARM.attributes has unexpected format %c\n", data[0])
+ return
+ }
+ data = data[1:]
+ for len(data) != 0 {
+ sectionlength := e.Uint32(data)
+ sectiondata := data[4:sectionlength]
+ data = data[sectionlength:]
+
+ nulIndex := bytes.IndexByte(sectiondata, 0)
+ if nulIndex < 0 {
+ fmt.Fprintf(&Bso, "corrupt .ARM.attributes (section name not NUL-terminated)\n")
+ return
+ }
+ name := string(sectiondata[:nulIndex])
+ sectiondata = sectiondata[nulIndex+1:]
+
+ if name != "aeabi" {
+ continue
+ }
+ for len(sectiondata) != 0 {
+ subsectiontag, sz := binary.Uvarint(sectiondata)
+ subsectionsize := e.Uint32(sectiondata[sz:])
+ subsectiondata := sectiondata[sz+4 : subsectionsize]
+ sectiondata = sectiondata[subsectionsize:]
+
+ if subsectiontag == Tag_file {
+ attrList := elfAttributeList{data: subsectiondata}
+ for !attrList.done() {
+ attr := attrList.armAttr()
+ if attr.tag == Tag_ABI_VFP_args && attr.ival == 1 {
+ ehdr.flags = 0x5000402 // has entry point, Version5 EABI, hard-float ABI
+ }
+ }
+ if attrList.err != nil {
+ fmt.Fprintf(&Bso, "could not parse .ARM.attributes\n")
+ }
+ }
+ }
+ }
+}
+
+func ldelf(f *obj.Biobuf, pkg string, length int64, pn string) {
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f ldelf %s\n", obj.Cputime(), pn)
+ }
+
+ Ctxt.Version++
+ base := int32(obj.Boffset(f))
+
+ var add uint64
+ var e binary.ByteOrder
+ var elfobj *ElfObj
+ var err error
+ var flag int
+ var hdr *ElfHdrBytes
+ var hdrbuf [64]uint8
+ var info uint64
+ var is64 int
+ var j int
+ var n int
+ var name string
+ var p []byte
+ var r []Reloc
+ var rela int
+ var rp *Reloc
+ var rsect *ElfSect
+ var s *LSym
+ var sect *ElfSect
+ var sym ElfSym
+ var symbols []*LSym
+ if obj.Bread(f, hdrbuf[:]) != len(hdrbuf) {
+ goto bad
+ }
+ hdr = new(ElfHdrBytes)
+ binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
+ if string(hdr.Ident[:4]) != "\x7FELF" {
+ goto bad
+ }
+ switch hdr.Ident[5] {
+ case ElfDataLsb:
+ e = binary.LittleEndian
+
+ case ElfDataMsb:
+ e = binary.BigEndian
+
+ default:
+ goto bad
+ }
+
+ // read header
+ elfobj = new(ElfObj)
+
+ elfobj.e = e
+ elfobj.f = f
+ elfobj.base = int64(base)
+ elfobj.length = length
+ elfobj.name = pn
+
+ is64 = 0
+ if hdr.Ident[4] == ElfClass64 {
+ is64 = 1
+ hdr := new(ElfHdrBytes64)
+ binary.Read(bytes.NewReader(hdrbuf[:]), binary.BigEndian, hdr) // only byte arrays; byte order doesn't matter
+ elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
+ elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
+ elfobj.version = e.Uint32(hdr.Version[:])
+ elfobj.phoff = e.Uint64(hdr.Phoff[:])
+ elfobj.shoff = e.Uint64(hdr.Shoff[:])
+ elfobj.flags = e.Uint32(hdr.Flags[:])
+ elfobj.ehsize = uint32(e.Uint16(hdr.Ehsize[:]))
+ elfobj.phentsize = uint32(e.Uint16(hdr.Phentsize[:]))
+ elfobj.phnum = uint32(e.Uint16(hdr.Phnum[:]))
+ elfobj.shentsize = uint32(e.Uint16(hdr.Shentsize[:]))
+ elfobj.shnum = uint32(e.Uint16(hdr.Shnum[:]))
+ elfobj.shstrndx = uint32(e.Uint16(hdr.Shstrndx[:]))
+ } else {
+ elfobj.type_ = uint32(e.Uint16(hdr.Type[:]))
+ elfobj.machine = uint32(e.Uint16(hdr.Machine[:]))
+ elfobj.version = e.Uint32(hdr.Version[:])
+ elfobj.entry = uint64(e.Uint32(hdr.Entry[:]))
+ elfobj.phoff = uint64(e.Uint32(hdr.Phoff[:]))
+ elfobj.shoff = uint64(e.Uint32(hdr.Shoff[:]))
+ elfobj.flags = e.Uint32(hdr.Flags[:])
+ elfobj.ehsize = uint32(e.Uint16(hdr.Ehsize[:]))
+ elfobj.phentsize = uint32(e.Uint16(hdr.Phentsize[:]))
+ elfobj.phnum = uint32(e.Uint16(hdr.Phnum[:]))
+ elfobj.shentsize = uint32(e.Uint16(hdr.Shentsize[:]))
+ elfobj.shnum = uint32(e.Uint16(hdr.Shnum[:]))
+ elfobj.shstrndx = uint32(e.Uint16(hdr.Shstrndx[:]))
+ }
+
+ elfobj.is64 = is64
+
+ if uint32(hdr.Ident[6]) != elfobj.version {
+ goto bad
+ }
+
+ if e.Uint16(hdr.Type[:]) != ElfTypeRelocatable {
+ Diag("%s: elf but not elf relocatable object", pn)
+ return
+ }
+
+ switch Thearch.Thechar {
+ default:
+ Diag("%s: elf %s unimplemented", pn, Thestring)
+ return
+
+ case '0':
+ if elfobj.machine != ElfMachMips || hdr.Ident[4] != ElfClass64 {
+ Diag("%s: elf object but not mips64", pn)
+ return
+ }
+
+ case '5':
+ if e != binary.LittleEndian || elfobj.machine != ElfMachArm || hdr.Ident[4] != ElfClass32 {
+ Diag("%s: elf object but not arm", pn)
+ return
+ }
+
+ case '6':
+ if e != binary.LittleEndian || elfobj.machine != ElfMachAmd64 || hdr.Ident[4] != ElfClass64 {
+ Diag("%s: elf object but not amd64", pn)
+ return
+ }
+
+ case '7':
+ if e != binary.LittleEndian || elfobj.machine != ElfMachArm64 || hdr.Ident[4] != ElfClass64 {
+ Diag("%s: elf object but not arm64", pn)
+ return
+ }
+
+ case '8':
+ if e != binary.LittleEndian || elfobj.machine != ElfMach386 || hdr.Ident[4] != ElfClass32 {
+ Diag("%s: elf object but not 386", pn)
+ return
+ }
+
+ case '9':
+ if elfobj.machine != ElfMachPower64 || hdr.Ident[4] != ElfClass64 {
+ Diag("%s: elf object but not ppc64", pn)
+ return
+ }
+ }
+
+ // load section list into memory.
+ elfobj.sect = make([]ElfSect, elfobj.shnum)
+
+ elfobj.nsect = uint(elfobj.shnum)
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ if obj.Bseek(f, int64(uint64(base)+elfobj.shoff+uint64(int64(i)*int64(elfobj.shentsize))), 0) < 0 {
+ goto bad
+ }
+ sect = &elfobj.sect[i]
+ if is64 != 0 {
+ var b ElfSectBytes64
+
+ if err = binary.Read(f, e, &b); err != nil {
+ goto bad
+ }
+
+ sect.nameoff = uint32(e.Uint32(b.Name[:]))
+ sect.type_ = e.Uint32(b.Type[:])
+ sect.flags = e.Uint64(b.Flags[:])
+ sect.addr = e.Uint64(b.Addr[:])
+ sect.off = e.Uint64(b.Off[:])
+ sect.size = e.Uint64(b.Size[:])
+ sect.link = e.Uint32(b.Link[:])
+ sect.info = e.Uint32(b.Info[:])
+ sect.align = e.Uint64(b.Align[:])
+ sect.entsize = e.Uint64(b.Entsize[:])
+ } else {
+ var b ElfSectBytes
+
+ if err = binary.Read(f, e, &b); err != nil {
+ goto bad
+ }
+
+ sect.nameoff = uint32(e.Uint32(b.Name[:]))
+ sect.type_ = e.Uint32(b.Type[:])
+ sect.flags = uint64(e.Uint32(b.Flags[:]))
+ sect.addr = uint64(e.Uint32(b.Addr[:]))
+ sect.off = uint64(e.Uint32(b.Off[:]))
+ sect.size = uint64(e.Uint32(b.Size[:]))
+ sect.link = e.Uint32(b.Link[:])
+ sect.info = e.Uint32(b.Info[:])
+ sect.align = uint64(e.Uint32(b.Align[:]))
+ sect.entsize = uint64(e.Uint32(b.Entsize[:]))
+ }
+ }
+
+ // read section string table and translate names
+ if elfobj.shstrndx >= uint32(elfobj.nsect) {
+ err = fmt.Errorf("shstrndx out of range %d >= %d", elfobj.shstrndx, elfobj.nsect)
+ goto bad
+ }
+
+ sect = &elfobj.sect[elfobj.shstrndx]
+ if err = elfmap(elfobj, sect); err != nil {
+ goto bad
+ }
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ if elfobj.sect[i].nameoff != 0 {
+ elfobj.sect[i].name = cstring(sect.base[elfobj.sect[i].nameoff:])
+ }
+ }
+
+ // load string table for symbols into memory.
+ elfobj.symtab = section(elfobj, ".symtab")
+
+ if elfobj.symtab == nil {
+ // our work is done here - no symbols means nothing can refer to this file
+ return
+ }
+
+ if elfobj.symtab.link <= 0 || elfobj.symtab.link >= uint32(elfobj.nsect) {
+ Diag("%s: elf object has symbol table with invalid string table link", pn)
+ return
+ }
+
+ elfobj.symstr = &elfobj.sect[elfobj.symtab.link]
+ if is64 != 0 {
+ elfobj.nsymtab = int(elfobj.symtab.size / ELF64SYMSIZE)
+ } else {
+ elfobj.nsymtab = int(elfobj.symtab.size / ELF32SYMSIZE)
+ }
+
+ if err = elfmap(elfobj, elfobj.symtab); err != nil {
+ goto bad
+ }
+ if err = elfmap(elfobj, elfobj.symstr); err != nil {
+ goto bad
+ }
+
+ // load text and data segments into memory.
+ // they are not as small as the section lists, but we'll need
+ // the memory anyway for the symbol images, so we might
+ // as well use one large chunk.
+
+ // create symbols for elfmapped sections
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ sect = &elfobj.sect[i]
+ if sect.type_ == SHT_ARM_ATTRIBUTES && sect.name == ".ARM.attributes" {
+ if err = elfmap(elfobj, sect); err != nil {
+ goto bad
+ }
+ parseArmAttributes(e, sect.base[:sect.size])
+ }
+ if (sect.type_ != ElfSectProgbits && sect.type_ != ElfSectNobits) || sect.flags&ElfSectFlagAlloc == 0 {
+ continue
+ }
+ if sect.type_ != ElfSectNobits {
+ if err = elfmap(elfobj, sect); err != nil {
+ goto bad
+ }
+ }
+
+ name = fmt.Sprintf("%s(%s)", pkg, sect.name)
+ s = Linklookup(Ctxt, name, Ctxt.Version)
+
+ switch int(sect.flags) & (ElfSectFlagAlloc | ElfSectFlagWrite | ElfSectFlagExec) {
+ default:
+ err = fmt.Errorf("unexpected flags for ELF section %s", sect.name)
+ goto bad
+
+ case ElfSectFlagAlloc:
+ s.Type = obj.SRODATA
+
+ case ElfSectFlagAlloc + ElfSectFlagWrite:
+ if sect.type_ == ElfSectNobits {
+ s.Type = obj.SNOPTRBSS
+ } else {
+ s.Type = obj.SNOPTRDATA
+ }
+
+ case ElfSectFlagAlloc + ElfSectFlagExec:
+ s.Type = obj.STEXT
+ }
+
+ if sect.name == ".got" || sect.name == ".toc" {
+ s.Type = obj.SELFGOT
+ }
+ if sect.type_ == ElfSectProgbits {
+ s.P = sect.base
+ s.P = s.P[:sect.size]
+ }
+
+ s.Size = int64(sect.size)
+ s.Align = int32(sect.align)
+ sect.sym = s
+ }
+
+ // enter sub-symbols into symbol table.
+ // symbol 0 is the null symbol.
+ symbols = make([]*LSym, elfobj.nsymtab)
+
+ for i := 1; i < elfobj.nsymtab; i++ {
+ if err = readelfsym(elfobj, i, &sym, 1); err != nil {
+ goto bad
+ }
+ symbols[i] = sym.sym
+ if sym.type_ != ElfSymTypeFunc && sym.type_ != ElfSymTypeObject && sym.type_ != ElfSymTypeNone {
+ continue
+ }
+ if sym.shndx == ElfSymShnCommon {
+ s = sym.sym
+ if uint64(s.Size) < sym.size {
+ s.Size = int64(sym.size)
+ }
+ if s.Type == 0 || s.Type == obj.SXREF {
+ s.Type = obj.SNOPTRBSS
+ }
+ continue
+ }
+
+ if uint(sym.shndx) >= elfobj.nsect || sym.shndx == 0 {
+ continue
+ }
+
+ // even when we pass needSym == 1 to readelfsym, it might still return nil to skip some unwanted symbols
+ if sym.sym == nil {
+ continue
+ }
+ sect = &elfobj.sect[sym.shndx:][0]
+ if sect.sym == nil {
+ if strings.HasPrefix(sym.name, ".Linfo_string") { // clang does this
+ continue
+ }
+
+ if sym.name == "" && sym.type_ == 0 && sect.name == ".debug_str" {
+ // This reportedly happens with clang 3.7 on ARM.
+ // See issue 13139.
+ continue
+ }
+
+ Diag("%s: sym#%d: ignoring %s in section %d (type %d)", pn, i, sym.name, sym.shndx, sym.type_)
+ continue
+ }
+
+ s = sym.sym
+ if s.Outer != nil {
+ if s.Dupok != 0 {
+ continue
+ }
+ Exitf("%s: duplicate symbol reference: %s in both %s and %s", pn, s.Name, s.Outer.Name, sect.sym.Name)
+ }
+
+ s.Sub = sect.sym.Sub
+ sect.sym.Sub = s
+ s.Type = sect.sym.Type | s.Type&^obj.SMASK | obj.SSUB
+ if s.Cgoexport&CgoExportDynamic == 0 {
+ s.Dynimplib = "" // satisfy dynimport
+ }
+ s.Value = int64(sym.value)
+ s.Size = int64(sym.size)
+ s.Outer = sect.sym
+ if sect.sym.Type == obj.STEXT {
+ if s.External != 0 && s.Dupok == 0 {
+ Diag("%s: duplicate definition of %s", pn, s.Name)
+ }
+ s.External = 1
+ }
+
+ if elfobj.machine == ElfMachPower64 {
+ flag = int(sym.other) >> 5
+ if 2 <= flag && flag <= 6 {
+ s.Localentry = 1 << uint(flag-2)
+ } else if flag == 7 {
+ Diag("%s: invalid sym.other 0x%x for %s", pn, sym.other, s.Name)
+ }
+ }
+ }
+
+ // Sort outer lists by address, adding to textp.
+ // This keeps textp in increasing address order.
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ s = elfobj.sect[i].sym
+ if s == nil {
+ continue
+ }
+ if s.Sub != nil {
+ s.Sub = listsort(s.Sub, valuecmp, listsubp)
+ }
+ if s.Type == obj.STEXT {
+ if s.Onlist != 0 {
+ log.Fatalf("symbol %s listed multiple times", s.Name)
+ }
+ s.Onlist = 1
+ if Ctxt.Etextp != nil {
+ Ctxt.Etextp.Next = s
+ } else {
+ Ctxt.Textp = s
+ }
+ Ctxt.Etextp = s
+ for s = s.Sub; s != nil; s = s.Sub {
+ if s.Onlist != 0 {
+ log.Fatalf("symbol %s listed multiple times", s.Name)
+ }
+ s.Onlist = 1
+ Ctxt.Etextp.Next = s
+ Ctxt.Etextp = s
+ }
+ }
+ }
+
+ // load relocations
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ rsect = &elfobj.sect[i]
+ if rsect.type_ != ElfSectRela && rsect.type_ != ElfSectRel {
+ continue
+ }
+ if rsect.info >= uint32(elfobj.nsect) || elfobj.sect[rsect.info].base == nil {
+ continue
+ }
+ sect = &elfobj.sect[rsect.info]
+ if err = elfmap(elfobj, rsect); err != nil {
+ goto bad
+ }
+ rela = 0
+ if rsect.type_ == ElfSectRela {
+ rela = 1
+ }
+ n = int(rsect.size / uint64(4+4*is64) / uint64(2+rela))
+ r = make([]Reloc, n)
+ p = rsect.base
+ for j = 0; j < n; j++ {
+ add = 0
+ rp = &r[j]
+ if is64 != 0 {
+ // 64-bit rel/rela
+ rp.Off = int32(e.Uint64(p))
+
+ p = p[8:]
+ info = e.Uint64(p)
+ p = p[8:]
+ if rela != 0 {
+ add = e.Uint64(p)
+ p = p[8:]
+ }
+ } else {
+ // 32-bit rel/rela
+ rp.Off = int32(e.Uint32(p))
+
+ p = p[4:]
+ info = uint64(e.Uint32(p))
+ info = info>>8<<32 | info&0xff // convert to 64-bit info
+ p = p[4:]
+ if rela != 0 {
+ add = uint64(e.Uint32(p))
+ p = p[4:]
+ }
+ }
+
+ if info&0xffffffff == 0 { // skip R_*_NONE relocation
+ j--
+ n--
+ continue
+ }
+
+ if info>>32 == 0 { // absolute relocation, don't bother reading the null symbol
+ rp.Sym = nil
+ } else {
+ if err = readelfsym(elfobj, int(info>>32), &sym, 0); err != nil {
+ goto bad
+ }
+ sym.sym = symbols[info>>32]
+ if sym.sym == nil {
+ err = fmt.Errorf("%s#%d: reloc of invalid sym #%d %s shndx=%d type=%d", sect.sym.Name, j, int(info>>32), sym.name, sym.shndx, sym.type_)
+ goto bad
+ }
+
+ rp.Sym = sym.sym
+ }
+
+ rp.Type = int32(reltype(pn, int(uint32(info)), &rp.Siz))
+ if rela != 0 {
+ rp.Add = int64(add)
+ } else {
+ // load addend from image
+ if rp.Siz == 4 {
+ rp.Add = int64(e.Uint32(sect.base[rp.Off:]))
+ } else if rp.Siz == 8 {
+ rp.Add = int64(e.Uint64(sect.base[rp.Off:]))
+ } else {
+ Diag("invalid rela size %d", rp.Siz)
+ }
+ }
+
+ if rp.Siz == 2 {
+ rp.Add = int64(int16(rp.Add))
+ }
+ if rp.Siz == 4 {
+ rp.Add = int64(int32(rp.Add))
+ }
+ }
+
+ //print("rel %s %d %d %s %#llx\n", sect->sym->name, rp->type, rp->siz, rp->sym->name, rp->add);
+ sort.Sort(rbyoff(r[:n]))
+ // just in case
+
+ s = sect.sym
+ s.R = r
+ s.R = s.R[:n]
+ }
+
+ return
+
+bad:
+ Diag("%s: malformed elf file: %v", pn, err)
+}
+
+func section(elfobj *ElfObj, name string) *ElfSect {
+ for i := 0; uint(i) < elfobj.nsect; i++ {
+ if elfobj.sect[i].name != "" && name != "" && elfobj.sect[i].name == name {
+ return &elfobj.sect[i]
+ }
+ }
+ return nil
+}
+
+func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) {
+ if sect.base != nil {
+ return nil
+ }
+
+ if sect.off+sect.size > uint64(elfobj.length) {
+ err = fmt.Errorf("elf section past end of file")
+ return err
+ }
+
+ sect.base = make([]byte, sect.size)
+ err = fmt.Errorf("short read")
+ if obj.Bseek(elfobj.f, int64(uint64(elfobj.base)+sect.off), 0) < 0 || obj.Bread(elfobj.f, sect.base) != len(sect.base) {
+ return err
+ }
+
+ return nil
+}
+
+func readelfsym(elfobj *ElfObj, i int, sym *ElfSym, needSym int) (err error) {
+ if i >= elfobj.nsymtab || i < 0 {
+ err = fmt.Errorf("invalid elf symbol index")
+ return err
+ }
+
+ if i == 0 {
+ Diag("readym: read null symbol!")
+ }
+
+ if elfobj.is64 != 0 {
+ b := new(ElfSymBytes64)
+ binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF64SYMSIZE:(i+1)*ELF64SYMSIZE]), elfobj.e, b)
+ sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):])
+ sym.value = elfobj.e.Uint64(b.Value[:])
+ sym.size = elfobj.e.Uint64(b.Size[:])
+ sym.shndx = elfobj.e.Uint16(b.Shndx[:])
+ sym.bind = b.Info >> 4
+ sym.type_ = b.Info & 0xf
+ sym.other = b.Other
+ } else {
+ b := new(ElfSymBytes)
+ binary.Read(bytes.NewReader(elfobj.symtab.base[i*ELF32SYMSIZE:(i+1)*ELF32SYMSIZE]), elfobj.e, b)
+ sym.name = cstring(elfobj.symstr.base[elfobj.e.Uint32(b.Name[:]):])
+ sym.value = uint64(elfobj.e.Uint32(b.Value[:]))
+ sym.size = uint64(elfobj.e.Uint32(b.Size[:]))
+ sym.shndx = elfobj.e.Uint16(b.Shndx[:])
+ sym.bind = b.Info >> 4
+ sym.type_ = b.Info & 0xf
+ sym.other = b.Other
+ }
+
+ var s *LSym
+ if sym.name == "_GLOBAL_OFFSET_TABLE_" {
+ sym.name = ".got"
+ }
+ if sym.name == ".TOC." {
+ // Magic symbol on ppc64. Will be set to this object
+ // file's .got+0x8000.
+ sym.bind = ElfSymBindLocal
+ }
+
+ switch sym.type_ {
+ case ElfSymTypeSection:
+ s = elfobj.sect[sym.shndx].sym
+
+ case ElfSymTypeObject, ElfSymTypeFunc, ElfSymTypeNone:
+ switch sym.bind {
+ case ElfSymBindGlobal:
+ if needSym != 0 {
+ s = Linklookup(Ctxt, sym.name, 0)
+
+ // for global scoped hidden symbols we should insert it into
+ // symbol hash table, but mark them as hidden.
+ // __i686.get_pc_thunk.bx is allowed to be duplicated, to
+ // workaround that we set dupok.
+ // TODO(minux): correctly handle __i686.get_pc_thunk.bx without
+ // set dupok generally. See http://codereview.appspot.com/5823055/
+ // comment #5 for details.
+ if s != nil && sym.other == 2 {
+ s.Type |= obj.SHIDDEN
+ s.Dupok = 1
+ }
+ }
+
+ case ElfSymBindLocal:
+ if Thearch.Thechar == '5' && (strings.HasPrefix(sym.name, "$a") || strings.HasPrefix(sym.name, "$d")) {
+ // binutils for arm generate these mapping
+ // symbols, ignore these
+ break
+ }
+
+ if sym.name == ".TOC." {
+ // We need to be able to look this up,
+ // so put it in the hash table.
+ if needSym != 0 {
+ s = Linklookup(Ctxt, sym.name, Ctxt.Version)
+ s.Type |= obj.SHIDDEN
+ }
+
+ break
+ }
+
+ if needSym != 0 {
+ // local names and hidden visibility global names are unique
+ // and should only reference by its index, not name, so we
+ // don't bother to add them into hash table
+ s = linknewsym(Ctxt, sym.name, Ctxt.Version)
+
+ s.Type |= obj.SHIDDEN
+ }
+
+ case ElfSymBindWeak:
+ if needSym != 0 {
+ s = Linklookup(Ctxt, sym.name, 0)
+ if sym.other == 2 {
+ s.Type |= obj.SHIDDEN
+ }
+ }
+
+ default:
+ err = fmt.Errorf("%s: invalid symbol binding %d", sym.name, sym.bind)
+ return err
+ }
+ }
+
+ if s != nil && s.Type == 0 && sym.type_ != ElfSymTypeSection {
+ s.Type = obj.SXREF
+ }
+ sym.sym = s
+
+ return nil
+}
+
+type rbyoff []Reloc
+
+func (x rbyoff) Len() int {
+ return len(x)
+}
+
+func (x rbyoff) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x rbyoff) Less(i, j int) bool {
+ a := &x[i]
+ b := &x[j]
+ if a.Off < b.Off {
+ return true
+ }
+ if a.Off > b.Off {
+ return false
+ }
+ return false
+}
+
+func reltype(pn string, elftype int, siz *uint8) int {
+ switch uint32(Thearch.Thechar) | uint32(elftype)<<24 {
+ default:
+ Diag("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)
+ fallthrough
+
+ case '9' | R_PPC64_TOC16<<24,
+ '9' | R_PPC64_TOC16_LO<<24,
+ '9' | R_PPC64_TOC16_HI<<24,
+ '9' | R_PPC64_TOC16_HA<<24,
+ '9' | R_PPC64_TOC16_DS<<24,
+ '9' | R_PPC64_TOC16_LO_DS<<24,
+ '9' | R_PPC64_REL16_LO<<24,
+ '9' | R_PPC64_REL16_HI<<24,
+ '9' | R_PPC64_REL16_HA<<24:
+ *siz = 2
+
+ case '5' | R_ARM_ABS32<<24,
+ '5' | R_ARM_GOT32<<24,
+ '5' | R_ARM_PLT32<<24,
+ '5' | R_ARM_GOTOFF<<24,
+ '5' | R_ARM_GOTPC<<24,
+ '5' | R_ARM_THM_PC22<<24,
+ '5' | R_ARM_REL32<<24,
+ '5' | R_ARM_CALL<<24,
+ '5' | R_ARM_V4BX<<24,
+ '5' | R_ARM_GOT_PREL<<24,
+ '5' | R_ARM_PC24<<24,
+ '5' | R_ARM_JUMP24<<24,
+ '6' | R_X86_64_PC32<<24,
+ '6' | R_X86_64_PLT32<<24,
+ '6' | R_X86_64_GOTPCREL<<24,
+ '6' | R_X86_64_GOTPCRELX<<24,
+ '6' | R_X86_64_REX_GOTPCRELX<<24,
+ '8' | R_386_32<<24,
+ '8' | R_386_PC32<<24,
+ '8' | R_386_GOT32<<24,
+ '8' | R_386_PLT32<<24,
+ '8' | R_386_GOTOFF<<24,
+ '8' | R_386_GOTPC<<24,
+ '8' | R_386_GOT32X<<24,
+ '9' | R_PPC64_REL24<<24,
+ '9' | R_PPC_REL32<<24:
+ *siz = 4
+
+ case '6' | R_X86_64_64<<24,
+ '9' | R_PPC64_ADDR64<<24:
+ *siz = 8
+ }
+
+ return 256 + elftype
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/lib.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/lib.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/lib.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/lib.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,2181 @@
+// Inferno utils/8l/asm.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/internal/obj"
+ "crypto/sha1"
+ "debug/elf"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// Data layout and relocation.
+
+// Derived from Inferno utils/6l/l.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Arch collects the per-architecture constants and hooks that the
+// machine-independent part of the linker calls through (set up in the
+// global Thearch by each architecture's Archinit-time code).
+type Arch struct {
+	Thechar          int // architecture character: '5' arm, '6' amd64, '7' arm64, '8' 386, '9' ppc64
+	Ptrsize          int
+	Intsize          int
+	Regsize          int
+	Funcalign        int
+	Maxalign         int
+	Minlc            int
+	Dwarfregsp       int
+	Dwarfreglr       int
+	// Paths to the dynamic linker on each supported OS.
+	Linuxdynld       string
+	Freebsddynld     string
+	Netbsddynld      string
+	Openbsddynld     string
+	Dragonflydynld   string
+	Solarisdynld     string
+	// Architecture-specific relocation and output hooks.
+	Adddynrel        func(*LSym, *Reloc)
+	Archinit         func()
+	Archreloc        func(*Reloc, *LSym, *int64) int
+	Archrelocvariant func(*Reloc, *LSym, int64) int64
+	Asmb             func()
+	Elfreloc1        func(*Reloc, int64) int
+	Elfsetupplt      func()
+	Gentext          func()
+	Machoreloc1      func(*Reloc, int64) int
+	PEreloc1         func(*Reloc, int64) bool
+	// Endian-aware emitters for 32-, 16- and 64-bit values.
+	Lput             func(uint32)
+	Wput             func(uint16)
+	Vput             func(uint64)
+}
+
+// Rpath records the -r (rpath) command-line flag. It implements
+// flag.Value; set remembers whether the flag was given explicitly so
+// that default rpath entries are only added when it was not.
+type Rpath struct {
+	set bool
+	val string
+}
+
+// Set records val as the rpath and marks the flag as explicitly set.
+// It never fails.
+func (r *Rpath) Set(val string) error {
+	r.set = true
+	r.val = val
+	return nil
+}
+
+// String returns the current rpath value (empty if unset).
+func (r *Rpath) String() string {
+	return r.val
+}
+
+var (
+	Thearch Arch     // hooks/constants for the target architecture
+	datap   *LSym    // head of the data symbol list
+	Debug   [128]int // debug flags, indexed by option character (e.g. Debug['v'])
+	Lcsize  int32
+	rpath   Rpath // value of the -r flag
+	Spsize  int32
+	Symsize int32
+)
+
+// Terrible but standard terminology.
+// A segment describes a block of file to load into memory.
+// A section further describes the pieces of that block for
+// use in debuggers and such.
+
+const (
+	MINFUNC = 16 // minimum size for a function
+)
+
+// Segment is a block of the output file that is loaded into memory as a
+// unit (e.g. text, data). It is subdivided into Sections.
+type Segment struct {
+	Rwx     uint8  // permission as usual unix bits (5 = r-x etc)
+	Vaddr   uint64 // virtual address
+	Length  uint64 // length in memory
+	Fileoff uint64 // file offset
+	Filelen uint64 // length on disk
+	Sect    *Section // head of this segment's section list
+}
+
+// Section describes one named piece of a Segment, for debuggers and
+// object-file headers.
+type Section struct {
+	Rwx     uint8
+	Extnum  int16
+	Align   int32
+	Name    string
+	Vaddr   uint64
+	Length  uint64
+	Next    *Section // next section in the same segment
+	Seg     *Segment // owning segment
+	Elfsect *ElfShdr // corresponding ELF section header, if any
+	Reloff  uint64   // file offset/length of relocations for this section
+	Rellen  uint64
+}
+
+// DynlinkingGo returns whether we are producing Go code that can live
+// in separate shared libraries linked together at runtime: either we
+// are building a shared library (-buildmode=shared) or linking against
+// one (-linkshared).
+func DynlinkingGo() bool {
+	return Buildmode == BuildmodeShared || Linkshared
+}
+
+// UseRelro returns whether to make use of "read only relocations" aka
+// relro. Only the ELF buildmodes that produce position-independent
+// output use it.
+func UseRelro() bool {
+	switch Buildmode {
+	case BuildmodeCShared, BuildmodeShared, BuildmodePIE:
+		return Iself
+	default:
+		return false
+	}
+}
+
+var (
+	Thestring        string
+	Thelinkarch      *LinkArch
+	outfile          string   // output file name (-o flag)
+	dynexp           []*LSym  // symbols to export dynamically
+	dynlib           []string
+	ldflag           []string // extra flags to pass to the external linker
+	havedynamic      int
+	Funcalign        int
+	iscgo            bool // any loaded package was runtime/cgo
+	elfglobalsymndx  int
+	flag_installsuffix string
+	flag_race        int
+	flag_msan        int
+	Buildmode        BuildMode
+	Linkshared       bool
+	tracksym         string
+	interpreter      string
+	tmpdir           string // scratch dir for external linking; removed at exit
+	extld            string // external linker command (default "gcc")
+	extldflags       string
+	extar            string // archiver for -buildmode=c-archive (default "ar")
+	libgccfile       string
+	debug_s          int // backup old value of debug['s']
+	Ctxt             *Link
+	HEADR            int32
+	HEADTYPE         int32
+	INITRND          int32
+	INITTEXT         int64
+	INITDAT          int64
+	INITENTRY        string /* entry point */
+	nerrors          int
+	Linkmode         int
+	liveness         int64
+)
+
+// for dynexport field of LSym
+const (
+	CgoExportDynamic = 1 << 0
+	CgoExportStatic  = 1 << 1
+)
+
+// The four output segments.
+var (
+	Segtext   Segment
+	Segrodata Segment
+	Segdata   Segment
+	Segdwarf  Segment
+)
+
+/* set by call to mywhatsys() */
+
+/* whence for ldpkg */
+const (
+	FileObj = 0 + iota
+	ArchiveObj
+	Pkgdef
+)
+
+var (
+	headstring string
+	// buffered output
+	Bso obj.Biobuf
+)
+
+// coutbuf is the buffered writer for the output file (go.o or the final
+// binary, depending on link mode); f is kept so it can be closed and the
+// file removed on error.
+var coutbuf struct {
+	*bufio.Writer
+	f *os.File
+}
+
+const (
+	symname = "__.GOSYMDEF" // archive symbol-table member name
+	pkgname = "__.PKGDEF"   // archive package-definition member name
+)
+
+var (
+	// Set if we see an object compiled by the host compiler that is not
+	// from a package that is known to support internal linking mode.
+	externalobj = false
+	goroot      string
+	goarch      string
+	goos        string
+	theline     string // extra object-header line that all objects must share
+)
+
+// Lflag implements the -L flag: append arg to the library search path.
+func Lflag(arg string) {
+	Ctxt.Libdir = append(Ctxt.Libdir, arg)
+}
+
+// A BuildMode indicates the sort of object we are building:
+// "exe": build a main package and everything it imports into an executable.
+// "c-shared": build a main package, plus all packages that it imports, into a
+// single C shared library. The only callable symbols will be those functions
+// marked as exported.
+// "shared": combine all packages passed on the command line, and their
+// dependencies, into a single shared library that will be used when
+// building with the -linkshared option.
+type BuildMode uint8
+
+const (
+	BuildmodeUnset BuildMode = iota
+	BuildmodeExe
+	BuildmodePIE
+	BuildmodeCArchive
+	BuildmodeCShared
+	BuildmodeShared
+)
+
+// Set implements flag.Value for -buildmode, validating that the requested
+// mode is supported on the current GOOS/GOARCH combination before
+// recording it.
+func (mode *BuildMode) Set(s string) error {
+	goos := obj.Getgoos()
+	goarch := obj.Getgoarch()
+	badmode := func() error {
+		return fmt.Errorf("buildmode %s not supported on %s/%s", s, goos, goarch)
+	}
+	switch s {
+	default:
+		return fmt.Errorf("invalid buildmode: %q", s)
+	case "exe":
+		*mode = BuildmodeExe
+	case "pie":
+		switch goos {
+		case "android", "linux":
+		default:
+			return badmode()
+		}
+		*mode = BuildmodePIE
+	case "c-archive":
+		switch goos {
+		case "darwin", "linux":
+		default:
+			return badmode()
+		}
+		*mode = BuildmodeCArchive
+	case "c-shared":
+		switch goarch {
+		case "386", "amd64", "arm", "arm64":
+		default:
+			return badmode()
+		}
+		*mode = BuildmodeCShared
+	case "shared":
+		// Only linux, and only on a whitelist of architectures.
+		switch goos {
+		case "linux":
+			switch goarch {
+			case "386", "amd64", "arm", "arm64", "ppc64le":
+			default:
+				return badmode()
+			}
+		default:
+			return badmode()
+		}
+		*mode = BuildmodeShared
+	}
+	return nil
+}
+
+// String returns the command-line spelling of the build mode,
+// completing the flag.Value implementation.
+func (mode *BuildMode) String() string {
+	switch *mode {
+	case BuildmodeUnset:
+		return "" // avoid showing a default in usage message
+	case BuildmodeExe:
+		return "exe"
+	case BuildmodePIE:
+		return "pie"
+	case BuildmodeCArchive:
+		return "c-archive"
+	case BuildmodeCShared:
+		return "c-shared"
+	case BuildmodeShared:
+		return "shared"
+	}
+	return fmt.Sprintf("BuildMode(%d)", uint8(*mode))
+}
+
+/*
+ * Unix doesn't like it when we write to a running (or, sometimes,
+ * recently run) binary, so remove the output file before writing it.
+ * On Windows 7, remove() can force a subsequent create() to fail.
+ * S_ISREG() does not exist on Plan 9.
+ */
+func mayberemoveoutfile() {
+	// Only remove regular files; leave devices, symlinks, etc. alone.
+	if fi, err := os.Lstat(outfile); err == nil && !fi.Mode().IsRegular() {
+		return
+	}
+	os.Remove(outfile)
+}
+
+// libinit performs early linker setup: determines the platform,
+// appends $GOROOT/pkg/... to the library search path (respecting
+// -installsuffix, -race and -msan suffixes), opens the output file,
+// and resolves the default entry-point symbol for the build mode.
+func libinit() {
+	Funcalign = Thearch.Funcalign
+	mywhatsys() // get goroot, goarch, goos
+
+	// add goroot to the end of the libdir list.
+	suffix := ""
+
+	suffixsep := ""
+	if flag_installsuffix != "" {
+		suffixsep = "_"
+		suffix = flag_installsuffix
+	} else if flag_race != 0 {
+		suffixsep = "_"
+		suffix = "race"
+	} else if flag_msan != 0 {
+		suffixsep = "_"
+		suffix = "msan"
+	}
+
+	Lflag(fmt.Sprintf("%s/pkg/%s_%s%s%s", goroot, goos, goarch, suffixsep, suffix))
+
+	mayberemoveoutfile()
+	f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
+	if err != nil {
+		Exitf("cannot create %s: %v", outfile, err)
+	}
+
+	coutbuf.Writer = bufio.NewWriter(f)
+	coutbuf.f = f
+
+	// Pick the runtime entry point for the build mode unless one was
+	// given with -E.
+	if INITENTRY == "" {
+		switch Buildmode {
+		case BuildmodeCShared, BuildmodeCArchive:
+			INITENTRY = fmt.Sprintf("_rt0_%s_%s_lib", goarch, goos)
+		case BuildmodeExe, BuildmodePIE:
+			INITENTRY = fmt.Sprintf("_rt0_%s_%s", goarch, goos)
+		case BuildmodeShared:
+			// No INITENTRY for -buildmode=shared
+		default:
+			Diag("unknown INITENTRY for buildmode %v", Buildmode)
+		}
+	}
+
+	// Mark the entry point as an external reference so it is resolved.
+	if !DynlinkingGo() {
+		Linklookup(Ctxt, INITENTRY, 0).Type = obj.SXREF
+	}
+}
+
+// Exitf prints an error message prefixed with the program name, removes
+// the (partial) output file if one was opened, and exits with status 2.
+func Exitf(format string, a ...interface{}) {
+	fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...)
+	if coutbuf.f != nil {
+		coutbuf.f.Close()
+		mayberemoveoutfile()
+	}
+	Exit(2)
+}
+
+// errorexit closes the output file and exits, with status 2 (after
+// removing the broken output) if any errors were diagnosed, 0 otherwise.
+func errorexit() {
+	if coutbuf.f != nil {
+		if nerrors != 0 {
+			Cflush()
+		}
+		// For rmtemp run at atexit time on Windows.
+		if err := coutbuf.f.Close(); err != nil {
+			Exitf("close: %v", err)
+		}
+	}
+
+	if nerrors != 0 {
+		if coutbuf.f != nil {
+			mayberemoveoutfile()
+		}
+		Exit(2)
+	}
+
+	Exit(0)
+}
+
+// loadinternal searches the library path for the named standard package
+// and queues it for loading. With -linkshared it first looks for a
+// .shlibname file describing a shared-library copy; otherwise (or as a
+// fallback) it looks for the package archive name.a. A miss only warns.
+func loadinternal(name string) {
+	found := 0
+	for i := 0; i < len(Ctxt.Libdir); i++ {
+		if Linkshared {
+			shlibname := fmt.Sprintf("%s/%s.shlibname", Ctxt.Libdir[i], name)
+			if Debug['v'] != 0 {
+				fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, shlibname)
+			}
+			if _, err := os.Stat(shlibname); err == nil {
+				addlibpath(Ctxt, "internal", "internal", "", name, shlibname)
+				found = 1
+				break
+			}
+		}
+		pname := fmt.Sprintf("%s/%s.a", Ctxt.Libdir[i], name)
+		if Debug['v'] != 0 {
+			fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, pname)
+		}
+		if _, err := os.Stat(pname); err == nil {
+			addlibpath(Ctxt, "internal", "internal", pname, name, "")
+			found = 1
+			break
+		}
+	}
+
+	if found == 0 {
+		fmt.Fprintf(&Bso, "warning: unable to find %s.a\n", name)
+	}
+}
+
+// loadlib loads all referenced libraries: the implicit runtime (and
+// friends), then every archive and shared library queued in
+// Ctxt.Library. It then decides the final link mode, trims the dynamic
+// export list, reads host objects (internal mode) and prepares the
+// runtime.tlsg and moduledata symbols.
+func loadlib() {
+	// Record the build mode for the runtime where it needs to know.
+	switch Buildmode {
+	case BuildmodeCShared:
+		s := Linklookup(Ctxt, "runtime.islibrary", 0)
+		s.Dupok = 1
+		Adduint8(Ctxt, s, 1)
+	case BuildmodeCArchive:
+		s := Linklookup(Ctxt, "runtime.isarchive", 0)
+		s.Dupok = 1
+		Adduint8(Ctxt, s, 1)
+	}
+
+	loadinternal("runtime")
+	if Thearch.Thechar == '5' {
+		// ARM needs the soft-float math package.
+		loadinternal("math")
+	}
+	if flag_race != 0 {
+		loadinternal("runtime/race")
+	}
+	if flag_msan != 0 {
+		loadinternal("runtime/msan")
+	}
+
+	// First pass: load archive files (this can append more libraries).
+	var i int
+	for i = 0; i < len(Ctxt.Library); i++ {
+		iscgo = iscgo || Ctxt.Library[i].Pkg == "runtime/cgo"
+		if Ctxt.Library[i].Shlib == "" {
+			if Debug['v'] > 1 {
+				fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].File, Ctxt.Library[i].Objref)
+			}
+			objfile(Ctxt.Library[i])
+		}
+	}
+
+	// Second pass: load symbols from shared libraries.
+	for i = 0; i < len(Ctxt.Library); i++ {
+		if Ctxt.Library[i].Shlib != "" {
+			if Debug['v'] > 1 {
+				fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].Shlib, Ctxt.Library[i].Objref)
+			}
+			ldshlibsyms(Ctxt.Library[i].Shlib)
+		}
+	}
+
+	// Resolve LinkAuto into internal or external linking.
+	if Linkmode == LinkAuto {
+		if iscgo && externalobj {
+			Linkmode = LinkExternal
+		} else {
+			Linkmode = LinkInternal
+		}
+
+		// Force external linking for android.
+		if goos == "android" {
+			Linkmode = LinkExternal
+		}
+
+		// Force external linking for PIE executables, as
+		// internal linking does not support TLS_IE.
+		if Buildmode == BuildmodePIE {
+			Linkmode = LinkExternal
+		}
+
+		// cgo on Darwin must use external linking
+		// we can always use external linking, but then there will be circular
+		// dependency problems when compiling natively (external linking requires
+		// runtime/cgo, runtime/cgo requires cmd/cgo, but cmd/cgo needs to be
+		// compiled using external linking.)
+		if (Thearch.Thechar == '5' || Thearch.Thechar == '7') && HEADTYPE == obj.Hdarwin && iscgo {
+			Linkmode = LinkExternal
+		}
+
+		// Force external linking for msan.
+		if flag_msan != 0 {
+			Linkmode = LinkExternal
+		}
+	}
+
+	// cmd/7l doesn't support cgo internal linking
+	// This is https://golang.org/issue/10373.
+	if iscgo && goarch == "arm64" {
+		Linkmode = LinkExternal
+	}
+
+	if Linkmode == LinkExternal && !iscgo {
+		// This indicates a user requested -linkmode=external.
+		// The startup code uses an import of runtime/cgo to decide
+		// whether to initialize the TLS. So give it one. This could
+		// be handled differently but it's an unusual case.
+		loadinternal("runtime/cgo")
+
+		// i still holds len(Ctxt.Library) from the loops above, so if
+		// loadinternal appended runtime/cgo, i now indexes it.
+		if i < len(Ctxt.Library) {
+			if Ctxt.Library[i].Shlib != "" {
+				ldshlibsyms(Ctxt.Library[i].Shlib)
+			} else {
+				if DynlinkingGo() {
+					Exitf("cannot implicitly include runtime/cgo in a shared library")
+				}
+				objfile(Ctxt.Library[i])
+			}
+		}
+	}
+
+	if Linkmode == LinkInternal {
+		// Drop all the cgo_import_static declarations.
+		// Turns out we won't be needing them.
+		for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+			if s.Type == obj.SHOSTOBJ {
+				// If a symbol was marked both
+				// cgo_import_static and cgo_import_dynamic,
+				// then we want to make it cgo_import_dynamic
+				// now.
+				if s.Extname != "" && s.Dynimplib != "" && s.Cgoexport == 0 {
+					s.Type = obj.SDYNIMPORT
+				} else {
+					s.Type = 0
+				}
+			}
+		}
+	}
+
+	tlsg := Linklookup(Ctxt, "runtime.tlsg", 0)
+
+	// runtime.tlsg is used for external linking on platforms that do not define
+	// a variable to hold g in assembly (currently only intel).
+	if tlsg.Type == 0 {
+		tlsg.Type = obj.STLSBSS
+		tlsg.Size = int64(Thearch.Ptrsize)
+	} else if tlsg.Type != obj.SDYNIMPORT {
+		Diag("internal error: runtime declared tlsg variable %d", tlsg.Type)
+	}
+	tlsg.Reachable = true
+	Ctxt.Tlsg = tlsg
+
+	moduledata := Linklookup(Ctxt, "runtime.firstmoduledata", 0)
+	if moduledata.Type != 0 && moduledata.Type != obj.SDYNIMPORT {
+		// If the module (toolchain-speak for "executable or shared
+		// library") we are linking contains the runtime package, it
+		// will define the runtime.firstmoduledata symbol and we
+		// truncate it back to 0 bytes so we can define its entire
+		// contents in symtab.go:symtab().
+		moduledata.Size = 0
+
+		// In addition, on ARM, the runtime depends on the linker
+		// recording the value of GOARM.
+		if Thearch.Thechar == '5' {
+			s := Linklookup(Ctxt, "runtime.goarm", 0)
+
+			s.Type = obj.SRODATA
+			s.Size = 0
+			Adduint8(Ctxt, s, uint8(Ctxt.Goarm))
+		}
+	} else {
+		// If OTOH the module does not contain the runtime package,
+		// create a local symbol for the moduledata.
+		moduledata = Linklookup(Ctxt, "local.moduledata", 0)
+		moduledata.Local = true
+	}
+	// Either way, we mark the moduledata as noptrdata to hide it from
+	// the GC.
+	moduledata.Type = obj.SNOPTRDATA
+	moduledata.Reachable = true
+	Ctxt.Moduledata = moduledata
+
+	// Now that we know the link mode, trim the dynexp list.
+	x := CgoExportDynamic
+
+	if Linkmode == LinkExternal {
+		x = CgoExportStatic
+	}
+	// Compact dynexp in place, keeping only symbols exported for the
+	// chosen link mode.
+	w := 0
+	for i := 0; i < len(dynexp); i++ {
+		if int(dynexp[i].Cgoexport)&x != 0 {
+			dynexp[w] = dynexp[i]
+			w++
+		}
+	}
+	dynexp = dynexp[:w]
+
+	// In internal link mode, read the host object files.
+	if Linkmode == LinkInternal {
+		hostobjs()
+
+		// If we have any undefined symbols in external
+		// objects, try to read them from the libgcc file.
+		any := false
+		for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+			for _, r := range s.R {
+				if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF && r.Sym.Name != ".got" {
+					any = true
+					break
+				}
+			}
+		}
+		if any {
+			if libgccfile == "" {
+				if extld == "" {
+					extld = "gcc"
+				}
+				// Ask the host compiler where libgcc lives.
+				args := hostlinkArchArgs()
+				args = append(args, "--print-libgcc-file-name")
+				if Debug['v'] != 0 {
+					fmt.Fprintf(&Bso, "%s %v\n", extld, args)
+				}
+				out, err := exec.Command(extld, args...).Output()
+				if err != nil {
+					if Debug['v'] != 0 {
+						fmt.Fprintln(&Bso, "not using a libgcc file because compiler failed")
+						fmt.Fprintf(&Bso, "%v\n%s\n", err, out)
+					}
+					libgccfile = "none"
+				} else {
+					libgccfile = strings.TrimSpace(string(out))
+				}
+			}
+
+			if libgccfile != "none" {
+				hostArchive(libgccfile)
+			}
+		}
+	} else {
+		hostlinksetup()
+	}
+
+	// We've loaded all the code now.
+	// If there are no dynamic libraries needed, gcc disables dynamic linking.
+	// Because of this, glibc's dynamic ELF loader occasionally (like in version 2.13)
+	// assumes that a dynamic binary always refers to at least one dynamic library.
+	// Rather than be a source of test cases for glibc, disable dynamic linking
+	// the same way that gcc would.
+	//
+	// Exception: on OS X, programs such as Shark only work with dynamic
+	// binaries, so leave it enabled on OS X (Mach-O) binaries.
+	// Also leave it enabled on Solaris which doesn't support
+	// statically linked binaries.
+	switch Buildmode {
+	case BuildmodeExe, BuildmodePIE:
+		if havedynamic == 0 && HEADTYPE != obj.Hdarwin && HEADTYPE != obj.Hsolaris {
+			Debug['d'] = 1
+		}
+	}
+
+	importcycles()
+}
+
+/*
+ * look for the next file in an archive.
+ * adapted from libmach.
+ * Seeks to off (rounded up to even), reads the ar header into *a, and
+ * returns the total size of the member (data + header, padded to even),
+ * 0 at end of archive, or -1 on read error.
+ */
+func nextar(bp *obj.Biobuf, off int64, a *ArHdr) int64 {
+	// Archive members are 2-byte aligned.
+	if off&1 != 0 {
+		off++
+	}
+	obj.Bseek(bp, off, 0)
+	buf := make([]byte, SAR_HDR)
+	if n := obj.Bread(bp, buf); n < len(buf) {
+		if n >= 0 {
+			return 0 // clean end of archive
+		}
+		return -1
+	}
+
+	// Unpack the fixed-width ar header fields.
+	a.name = artrim(buf[0:16])
+	a.date = artrim(buf[16:28])
+	a.uid = artrim(buf[28:34])
+	a.gid = artrim(buf[34:40])
+	a.mode = artrim(buf[40:48])
+	a.size = artrim(buf[48:58])
+	a.fmag = artrim(buf[58:60])
+
+	arsize := atolwhex(a.size)
+	if arsize&1 != 0 {
+		arsize++
+	}
+	return int64(arsize) + SAR_HDR
+}
+
+// objfile loads a single library file. A non-archive file is loaded
+// directly as one object; for an archive it skips the optional
+// __.GOSYMDEF member, processes __.PKGDEF (hashing it for
+// -buildmode=shared), then loads every remaining member in sequence.
+func objfile(lib *Library) {
+	pkg := pathtoprefix(lib.Pkg)
+
+	if Debug['v'] > 1 {
+		fmt.Fprintf(&Bso, "%5.2f ldobj: %s (%s)\n", obj.Cputime(), lib.File, pkg)
+	}
+	Bso.Flush()
+	var err error
+	var f *obj.Biobuf
+	f, err = obj.Bopenr(lib.File)
+	if err != nil {
+		Exitf("cannot open file %s: %v", lib.File, err)
+	}
+
+	magbuf := make([]byte, len(ARMAG))
+	if obj.Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) {
+		/* load it as a regular file */
+		l := obj.Bseek(f, 0, 2)
+
+		obj.Bseek(f, 0, 0)
+		ldobj(f, pkg, l, lib.File, lib.File, FileObj)
+		obj.Bterm(f)
+
+		return
+	}
+
+	/* skip over optional __.GOSYMDEF and process __.PKGDEF */
+	off := obj.Boffset(f)
+
+	var arhdr ArHdr
+	l := nextar(f, off, &arhdr)
+	var pname string
+	if l <= 0 {
+		Diag("%s: short read on archive file symbol header", lib.File)
+		goto out
+	}
+
+	if strings.HasPrefix(arhdr.name, symname) {
+		// Skip the symbol table member.
+		off += l
+		l = nextar(f, off, &arhdr)
+		if l <= 0 {
+			Diag("%s: short read on archive file symbol header", lib.File)
+			goto out
+		}
+	}
+
+	if !strings.HasPrefix(arhdr.name, pkgname) {
+		Diag("%s: cannot find package header", lib.File)
+		goto out
+	}
+
+	if Buildmode == BuildmodeShared {
+		// Hash the package definition so shared libraries can detect
+		// stale dependencies; restore the read position afterwards.
+		before := obj.Boffset(f)
+		pkgdefBytes := make([]byte, atolwhex(arhdr.size))
+		obj.Bread(f, pkgdefBytes)
+		hash := sha1.Sum(pkgdefBytes)
+		lib.hash = hash[:]
+		obj.Bseek(f, before, 0)
+	}
+
+	off += l
+
+	ldpkg(f, pkg, atolwhex(arhdr.size), lib.File, Pkgdef)
+
+	/*
+	 * load all the object files from the archive now.
+	 * this gives us sequential file access and keeps us
+	 * from needing to come back later to pick up more
+	 * objects. it breaks the usual C archive model, but
+	 * this is Go, not C. the common case in Go is that
+	 * we need to load all the objects, and then we throw away
+	 * the individual symbols that are unused.
+	 *
+	 * loading every object will also make it possible to
+	 * load foreign objects not referenced by __.GOSYMDEF.
+	 */
+	for {
+		l = nextar(f, off, &arhdr)
+		if l == 0 {
+			break
+		}
+		if l < 0 {
+			Exitf("%s: malformed archive", lib.File)
+		}
+
+		off += l
+
+		pname = fmt.Sprintf("%s(%s)", lib.File, arhdr.name)
+		l = atolwhex(arhdr.size)
+		ldobj(f, pkg, l, pname, lib.File, ArchiveObj)
+	}
+
+out:
+	obj.Bterm(f)
+}
+
+// Hostobj records a host (non-Go) object file found inside an archive,
+// along with the loader function to parse it later.
+type Hostobj struct {
+	ld     func(*obj.Biobuf, string, int64, string) // format-specific loader (ldelf, ldmacho, ldpe)
+	pkg    string
+	pn     string // printable name for diagnostics
+	file   string
+	off    int64 // offset of the object within file
+	length int64
+}
+
+var hostobj []Hostobj
+
+// These packages can use internal linking mode.
+// Others trigger external mode.
+var internalpkg = []string{
+	"crypto/x509",
+	"net",
+	"os/user",
+	"runtime/cgo",
+	"runtime/race",
+	"runtime/msan",
+}
+
+// ldhostobj registers a host object for deferred loading and returns its
+// record. It also flips the global externalobj flag when the object comes
+// from a package not known to support internal linking.
+func ldhostobj(ld func(*obj.Biobuf, string, int64, string), f *obj.Biobuf, pkg string, length int64, pn string, file string) *Hostobj {
+	isinternal := false
+	for i := 0; i < len(internalpkg); i++ {
+		if pkg == internalpkg[i] {
+			isinternal = true
+			break
+		}
+	}
+
+	// DragonFly declares errno with __thread, which results in a symbol
+	// type of R_386_TLS_GD or R_X86_64_TLSGD. The Go linker does not
+	// currently know how to handle TLS relocations, hence we have to
+	// force external linking for any libraries that link in code that
+	// uses errno. This can be removed if the Go linker ever supports
+	// these relocation types.
+	if HEADTYPE == obj.Hdragonfly {
+		if pkg == "net" || pkg == "os/user" {
+			isinternal = false
+		}
+	}
+
+	if !isinternal {
+		externalobj = true
+	}
+
+	hostobj = append(hostobj, Hostobj{})
+	h := &hostobj[len(hostobj)-1]
+	h.ld = ld
+	h.pkg = pkg
+	h.pn = pn
+	h.file = file
+	h.off = obj.Boffset(f)
+	h.length = length
+	return h
+}
+
+// hostobjs reopens each registered host object file, seeks to the
+// recorded offset, and runs its format-specific loader (internal link
+// mode only).
+func hostobjs() {
+	var f *obj.Biobuf
+	var h *Hostobj
+
+	for i := 0; i < len(hostobj); i++ {
+		h = &hostobj[i]
+		var err error
+		f, err = obj.Bopenr(h.file)
+		if f == nil {
+			Exitf("cannot reopen %s: %v", h.pn, err)
+		}
+
+		obj.Bseek(f, h.off, 0)
+		h.ld(f, h.pkg, h.length, h.pn)
+		obj.Bterm(f)
+	}
+}
+
+// provided by lib9
+
+// rmtemp removes the temporary directory used for external linking;
+// registered with AtExit by hostlinksetup.
+func rmtemp() {
+	os.RemoveAll(tmpdir)
+}
+
+// hostlinksetup prepares for external linking: it preserves the -s flag
+// for the final host link while disabling it internally, creates the
+// temporary work directory, and redirects linker output to go.o inside
+// it. No-op in internal link mode.
+func hostlinksetup() {
+	if Linkmode != LinkExternal {
+		return
+	}
+
+	// For external link, record that we need to tell the external linker -s,
+	// and turn off -s internally: the external linker needs the symbol
+	// information for its final link.
+	debug_s = Debug['s']
+	Debug['s'] = 0
+
+	// create temporary directory and arrange cleanup
+	if tmpdir == "" {
+		dir, err := ioutil.TempDir("", "go-link-")
+		if err != nil {
+			log.Fatal(err)
+		}
+		tmpdir = dir
+		AtExit(rmtemp)
+	}
+
+	// change our output to temporary object file
+	coutbuf.f.Close()
+	mayberemoveoutfile()
+
+	p := fmt.Sprintf("%s/go.o", tmpdir)
+	var err error
+	f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
+	if err != nil {
+		Exitf("cannot create %s: %v", p, err)
+	}
+
+	coutbuf.Writer = bufio.NewWriter(f)
+	coutbuf.f = f
+}
+
+// hostobjCopy creates a copy of the object files in hostobj in a
+// temporary directory, extracting each from its archive concurrently
+// (bounded by NumCPU open files). It returns the paths of the copies,
+// in hostobj order, for handing to the external linker.
+func hostobjCopy() (paths []string) {
+	var wg sync.WaitGroup
+	sema := make(chan struct{}, runtime.NumCPU()) // limit open file descriptors
+	for i, h := range hostobj {
+		h := h // capture per-iteration copy for the goroutine below
+		dst := fmt.Sprintf("%s/%06d.o", tmpdir, i)
+		paths = append(paths, dst)
+
+		wg.Add(1)
+		go func() {
+			sema <- struct{}{}
+			defer func() {
+				<-sema
+				wg.Done()
+			}()
+			f, err := os.Open(h.file)
+			if err != nil {
+				Exitf("cannot reopen %s: %v", h.pn, err)
+			}
+			if _, err := f.Seek(h.off, 0); err != nil {
+				Exitf("cannot seek %s: %v", h.pn, err)
+			}
+
+			w, err := os.Create(dst)
+			if err != nil {
+				Exitf("cannot create %s: %v", dst, err)
+			}
+			// Copy exactly the object's bytes out of the archive.
+			if _, err := io.CopyN(w, f, h.length); err != nil {
+				Exitf("cannot write %s: %v", dst, err)
+			}
+			if err := w.Close(); err != nil {
+				Exitf("cannot close %s: %v", dst, err)
+			}
+		}()
+	}
+	wg.Wait()
+	return paths
+}
+
+// archive builds a .a archive from the hostobj object files, for
+// -buildmode=c-archive, by invoking the external "ar" tool on go.o plus
+// the copied host objects.
+func archive() {
+	if Buildmode != BuildmodeCArchive {
+		return
+	}
+
+	if extar == "" {
+		extar = "ar"
+	}
+
+	mayberemoveoutfile()
+	argv := []string{extar, "-q", "-c", "-s", outfile}
+	argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+	argv = append(argv, hostobjCopy()...)
+
+	if Debug['v'] != 0 {
+		fmt.Fprintf(&Bso, "archive: %s\n", strings.Join(argv, " "))
+		Bso.Flush()
+	}
+
+	if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
+		Exitf("running %s failed: %v\n%s", argv[0], err, out)
+	}
+}
+
+// hostlink runs the external (host) linker over go.o and the host
+// objects to produce the final output. It assembles the command line
+// from the build mode, head type, rpath, shared-library dependencies,
+// -ldflags and -extldflags, then on Darwin optionally recombines DWARF
+// debug info via dsymutil. No-op unless external link mode was chosen.
+func hostlink() {
+	if Linkmode != LinkExternal || nerrors > 0 {
+		return
+	}
+	if Buildmode == BuildmodeCArchive {
+		// c-archive output is produced by archive(), not the linker.
+		return
+	}
+
+	if extld == "" {
+		extld = "gcc"
+	}
+
+	var argv []string
+	argv = append(argv, extld)
+	argv = append(argv, hostlinkArchArgs()...)
+
+	if Debug['s'] == 0 && debug_s == 0 {
+		argv = append(argv, "-gdwarf-2")
+	} else {
+		argv = append(argv, "-s")
+	}
+
+	if HEADTYPE == obj.Hdarwin {
+		argv = append(argv, "-Wl,-no_pie,-headerpad,1144")
+	}
+	if HEADTYPE == obj.Hopenbsd {
+		argv = append(argv, "-Wl,-nopie")
+	}
+	if HEADTYPE == obj.Hwindows {
+		if headstring == "windowsgui" {
+			argv = append(argv, "-mwindows")
+		} else {
+			argv = append(argv, "-mconsole")
+		}
+	}
+
+	// Build-mode-specific linker flags.
+	switch Buildmode {
+	case BuildmodeExe:
+		if HEADTYPE == obj.Hdarwin {
+			argv = append(argv, "-Wl,-pagezero_size,4000000")
+		}
+	case BuildmodePIE:
+		argv = append(argv, "-pie")
+	case BuildmodeCShared:
+		if HEADTYPE == obj.Hdarwin {
+			argv = append(argv, "-dynamiclib", "-Wl,-read_only_relocs,suppress")
+		} else {
+			// ELF.
+			argv = append(argv, "-Wl,-Bsymbolic")
+			if UseRelro() {
+				argv = append(argv, "-Wl,-z,relro")
+			}
+			// Pass -z nodelete to mark the shared library as
+			// non-closeable: a dlclose will do nothing.
+			argv = append(argv, "-shared", "-Wl,-z,nodelete")
+		}
+	case BuildmodeShared:
+		if UseRelro() {
+			argv = append(argv, "-Wl,-z,relro")
+		}
+		argv = append(argv, "-shared")
+	}
+
+	if Iself && DynlinkingGo() {
+		// We force all symbol resolution to be done at program startup
+		// because lazy PLT resolution can use large amounts of stack at
+		// times we cannot allow it to do so.
+		argv = append(argv, "-Wl,-znow")
+	}
+
+	if Iself && len(buildinfo) > 0 {
+		argv = append(argv, fmt.Sprintf("-Wl,--build-id=0x%x", buildinfo))
+	}
+
+	// On Windows, given -o foo, GCC will append ".exe" to produce
+	// "foo.exe". We have decided that we want to honor the -o
+	// option. To make this work, we append a '.' so that GCC
+	// will decide that the file already has an extension. We
+	// only want to do this when producing a Windows output file
+	// on a Windows host.
+	outopt := outfile
+	if goos == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" {
+		outopt += "."
+	}
+	argv = append(argv, "-o")
+	argv = append(argv, outopt)
+
+	if rpath.val != "" {
+		argv = append(argv, fmt.Sprintf("-Wl,-rpath,%s", rpath.val))
+	}
+
+	// Force global symbols to be exported for dlopen, etc.
+	if Iself {
+		argv = append(argv, "-rdynamic")
+	}
+
+	if strings.Contains(argv[0], "clang") {
+		argv = append(argv, "-Qunused-arguments")
+	}
+
+	argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+	argv = append(argv, hostobjCopy()...)
+
+	if Linkshared {
+		// Add -L/-rpath/-l flags for each Go shared library and its
+		// dependencies, deduplicating directories and library names.
+		seenDirs := make(map[string]bool)
+		seenLibs := make(map[string]bool)
+		addshlib := func(path string) {
+			dir, base := filepath.Split(path)
+			if !seenDirs[dir] {
+				argv = append(argv, "-L"+dir)
+				if !rpath.set {
+					argv = append(argv, "-Wl,-rpath="+dir)
+				}
+				seenDirs[dir] = true
+			}
+			base = strings.TrimSuffix(base, ".so")
+			base = strings.TrimPrefix(base, "lib")
+			if !seenLibs[base] {
+				argv = append(argv, "-l"+base)
+				seenLibs[base] = true
+			}
+		}
+		for _, shlib := range Ctxt.Shlibs {
+			addshlib(shlib.Path)
+			for _, dep := range shlib.Deps {
+				if dep == "" {
+					continue
+				}
+				libpath := findshlib(dep)
+				if libpath != "" {
+					addshlib(libpath)
+				}
+			}
+		}
+	}
+
+	argv = append(argv, ldflag...)
+
+	for _, p := range strings.Fields(extldflags) {
+		argv = append(argv, p)
+
+		// clang, unlike GCC, passes -rdynamic to the linker
+		// even when linking with -static, causing a linker
+		// error when using GNU ld. So take out -rdynamic if
+		// we added it. We do it in this order, rather than
+		// only adding -rdynamic later, so that -extldflags
+		// can override -rdynamic without using -static.
+		if Iself && p == "-static" {
+			for i := range argv {
+				if argv[i] == "-rdynamic" {
+					argv[i] = "-static"
+				}
+			}
+		}
+	}
+	if HEADTYPE == obj.Hwindows {
+		argv = append(argv, peimporteddlls()...)
+	}
+
+	if Debug['v'] != 0 {
+		fmt.Fprintf(&Bso, "host link:")
+		for _, v := range argv {
+			fmt.Fprintf(&Bso, " %q", v)
+		}
+		fmt.Fprintf(&Bso, "\n")
+		Bso.Flush()
+	}
+
+	if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
+		Exitf("running %s failed: %v\n%s", argv[0], err, out)
+	} else if Debug['v'] != 0 && len(out) > 0 {
+		fmt.Fprintf(&Bso, "%s", out)
+		Bso.Flush()
+	}
+
+	if Debug['s'] == 0 && debug_s == 0 && HEADTYPE == obj.Hdarwin {
+		// Skip combining dwarf on arm.
+		if Thearch.Thechar != '5' && Thearch.Thechar != '7' {
+			dsym := fmt.Sprintf("%s/go.dwarf", tmpdir)
+			if out, err := exec.Command("dsymutil", "-f", outfile, "-o", dsym).CombinedOutput(); err != nil {
+				Ctxt.Cursym = nil
+				Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
+			}
+			// Skip combining if `dsymutil` didn't generate a file. See #11994.
+			if _, err := os.Stat(dsym); os.IsNotExist(err) {
+				return
+			}
+			// For os.Rename to work reliably, must be in same directory as outfile.
+			combinedOutput := outfile + "~"
+			if err := machoCombineDwarf(outfile, dsym, combinedOutput); err != nil {
+				Ctxt.Cursym = nil
+				Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
+			}
+			os.Remove(outfile)
+			if err := os.Rename(combinedOutput, outfile); err != nil {
+				Ctxt.Cursym = nil
+				Exitf("%s: %v", os.Args[0], err)
+			}
+		}
+	}
+}
+
+// hostlinkArchArgs returns arguments to pass to the external linker
+// based on the architecture: -m32 for 386, -m64 for amd64/ppc64,
+// -marm for arm, and nothing for arm64.
+func hostlinkArchArgs() []string {
+	switch Thearch.Thechar {
+	case '8':
+		return []string{"-m32"}
+	case '6', '9':
+		return []string{"-m64"}
+	case '5':
+		return []string{"-marm"}
+	case '7':
+		// nothing needed
+	}
+	return nil
+}
+
+// ldobj loads an input object. If it is a host object (an object
+// compiled by a non-Go compiler) it returns the Hostobj pointer. If
+// it is a Go object, it returns nil.
+func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, whence int) *Hostobj {
+ eof := obj.Boffset(f) + length
+
+ start := obj.Boffset(f)
+ c1 := obj.Bgetc(f)
+ c2 := obj.Bgetc(f)
+ c3 := obj.Bgetc(f)
+ c4 := obj.Bgetc(f)
+ obj.Bseek(f, start, 0)
+
+ magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
+ if magic == 0x7f454c46 { // \x7F E L F
+ return ldhostobj(ldelf, f, pkg, length, pn, file)
+ }
+
+ if magic&^1 == 0xfeedface || magic&^0x01000000 == 0xcefaedfe {
+ return ldhostobj(ldmacho, f, pkg, length, pn, file)
+ }
+
+ if c1 == 0x4c && c2 == 0x01 || c1 == 0x64 && c2 == 0x86 {
+ return ldhostobj(ldpe, f, pkg, length, pn, file)
+ }
+
+ /* check the header */
+ line := obj.Brdline(f, '\n')
+ if line == "" {
+ if obj.Blinelen(f) > 0 {
+ Diag("%s: not an object file", pn)
+ return nil
+ }
+ Diag("truncated object file: %s", pn)
+ return nil
+ }
+
+ if !strings.HasPrefix(line, "go object ") {
+ if strings.HasSuffix(pn, ".go") {
+ Exitf("%cl: input %s is not .%c file (use %cg to compile .go files)", Thearch.Thechar, pn, Thearch.Thechar, Thearch.Thechar)
+ }
+
+ if line == Thestring {
+ // old header format: just $GOOS
+ Diag("%s: stale object file", pn)
+ return nil
+ }
+
+ Diag("%s: not an object file", pn)
+ return nil
+ }
+
+ // First, check that the basic goos, goarch, and version match.
+ t := fmt.Sprintf("%s %s %s ", goos, obj.Getgoarch(), obj.Getgoversion())
+
+ line = strings.TrimRight(line, "\n")
+ if !strings.HasPrefix(line[10:]+" ", t) && Debug['f'] == 0 {
+ Diag("%s: object is [%s] expected [%s]", pn, line[10:], t)
+ return nil
+ }
+
+ // Second, check that longer lines match each other exactly,
+ // so that the Go compiler and write additional information
+ // that must be the same from run to run.
+ if len(line) >= len(t)+10 {
+ if theline == "" {
+ theline = line[10:]
+ } else if theline != line[10:] {
+ Diag("%s: object is [%s] expected [%s]", pn, line[10:], theline)
+ return nil
+ }
+ }
+
+ /* skip over exports and other info -- ends with \n!\n */
+ import0 := obj.Boffset(f)
+
+ c1 = '\n' // the last line ended in \n
+ c2 = obj.Bgetc(f)
+ c3 = obj.Bgetc(f)
+ for c1 != '\n' || c2 != '!' || c3 != '\n' {
+ c1 = c2
+ c2 = c3
+ c3 = obj.Bgetc(f)
+ if c3 == obj.Beof {
+ Diag("truncated object file: %s", pn)
+ return nil
+ }
+ }
+
+ import1 := obj.Boffset(f)
+
+ obj.Bseek(f, import0, 0)
+ ldpkg(f, pkg, import1-import0-2, pn, whence) // -2 for !\n
+ obj.Bseek(f, import1, 0)
+
+ ldobjfile(Ctxt, f, pkg, eof-obj.Boffset(f), pn)
+ return nil
+}
+
+func readelfsymboldata(f *elf.File, sym *elf.Symbol) []byte {
+ data := make([]byte, sym.Size)
+ sect := f.Sections[sym.Section]
+ if sect.Type != elf.SHT_PROGBITS && sect.Type != elf.SHT_NOTE {
+ Diag("reading %s from non-data section", sym.Name)
+ }
+ n, err := sect.ReadAt(data, int64(sym.Value-sect.Addr))
+ if uint64(n) != sym.Size {
+ Diag("reading contents of %s: %v", sym.Name, err)
+ }
+ return data
+}
+
+func readwithpad(r io.Reader, sz int32) ([]byte, error) {
+ data := make([]byte, Rnd(int64(sz), 4))
+ _, err := io.ReadFull(r, data)
+ if err != nil {
+ return nil, err
+ }
+ data = data[:sz]
+ return data, nil
+}
+
+func readnote(f *elf.File, name []byte, typ int32) ([]byte, error) {
+ for _, sect := range f.Sections {
+ if sect.Type != elf.SHT_NOTE {
+ continue
+ }
+ r := sect.Open()
+ for {
+ var namesize, descsize, noteType int32
+ err := binary.Read(r, f.ByteOrder, &namesize)
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("read namesize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, &descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read descsize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, ¬eType)
+ if err != nil {
+ return nil, fmt.Errorf("read type failed: %v", err)
+ }
+ noteName, err := readwithpad(r, namesize)
+ if err != nil {
+ return nil, fmt.Errorf("read name failed: %v", err)
+ }
+ desc, err := readwithpad(r, descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read desc failed: %v", err)
+ }
+ if string(name) == string(noteName) && typ == noteType {
+ return desc, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+func findshlib(shlib string) string {
+ for _, libdir := range Ctxt.Libdir {
+ libpath := filepath.Join(libdir, shlib)
+ if _, err := os.Stat(libpath); err == nil {
+ return libpath
+ }
+ }
+ Diag("cannot find shared library: %s", shlib)
+ return ""
+}
+
+func ldshlibsyms(shlib string) {
+ libpath := findshlib(shlib)
+ if libpath == "" {
+ return
+ }
+ for _, processedlib := range Ctxt.Shlibs {
+ if processedlib.Path == libpath {
+ return
+ }
+ }
+ if Ctxt.Debugvlog > 1 && Ctxt.Bso != nil {
+ fmt.Fprintf(Ctxt.Bso, "%5.2f ldshlibsyms: found library with name %s at %s\n", obj.Cputime(), shlib, libpath)
+ Ctxt.Bso.Flush()
+ }
+
+ f, err := elf.Open(libpath)
+ if err != nil {
+ Diag("cannot open shared library: %s", libpath)
+ return
+ }
+
+ hash, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GOABIHASH_TAG)
+ if err != nil {
+ Diag("cannot read ABI hash from shared library %s: %v", libpath, err)
+ return
+ }
+
+ depsbytes, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GODEPS_TAG)
+ if err != nil {
+ Diag("cannot read dep list from shared library %s: %v", libpath, err)
+ return
+ }
+ deps := strings.Split(string(depsbytes), "\n")
+
+ syms, err := f.DynamicSymbols()
+ if err != nil {
+ Diag("cannot read symbols from shared library: %s", libpath)
+ return
+ }
+ gcdata_locations := make(map[uint64]*LSym)
+ for _, elfsym := range syms {
+ if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION {
+ continue
+ }
+ lsym := Linklookup(Ctxt, elfsym.Name, 0)
+ // Because loadlib above loads all .a files before loading any shared
+ // libraries, any symbols we find that duplicate symbols already
+ // loaded should be ignored (the symbols from the .a files "win").
+ if lsym.Type != 0 {
+ continue
+ }
+ lsym.Type = obj.SDYNIMPORT
+ lsym.ElfType = elf.ST_TYPE(elfsym.Info)
+ lsym.Size = int64(elfsym.Size)
+ if elfsym.Section != elf.SHN_UNDEF {
+ // Set .File for the library that actually defines the symbol.
+ lsym.File = libpath
+ // The decodetype_* functions in decodetype.go need access to
+ // the type data.
+ if strings.HasPrefix(lsym.Name, "type.") && !strings.HasPrefix(lsym.Name, "type..") {
+ lsym.P = readelfsymboldata(f, &elfsym)
+ gcdata_locations[elfsym.Value+2*uint64(Thearch.Ptrsize)+8+1*uint64(Thearch.Ptrsize)] = lsym
+ }
+ }
+ }
+ gcdata_addresses := make(map[*LSym]uint64)
+ if Thearch.Thechar == '7' {
+ for _, sect := range f.Sections {
+ if sect.Type == elf.SHT_RELA {
+ var rela elf.Rela64
+ rdr := sect.Open()
+ for {
+ err := binary.Read(rdr, f.ByteOrder, &rela)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ Diag("reading relocation failed %v", err)
+ return
+ }
+ t := elf.R_AARCH64(rela.Info & 0xffff)
+ if t != elf.R_AARCH64_RELATIVE {
+ continue
+ }
+ if lsym, ok := gcdata_locations[rela.Off]; ok {
+ gcdata_addresses[lsym] = uint64(rela.Addend)
+ }
+ }
+ }
+ }
+ }
+
+ // We might have overwritten some functions above (this tends to happen for the
+ // autogenerated type equality/hashing functions) and we don't want to generated
+ // pcln table entries for these any more so unstitch them from the Textp linked
+ // list.
+ var last *LSym
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Type == obj.SDYNIMPORT {
+ continue
+ }
+
+ if last == nil {
+ Ctxt.Textp = s
+ } else {
+ last.Next = s
+ }
+ last = s
+ }
+
+ if last == nil {
+ Ctxt.Textp = nil
+ Ctxt.Etextp = nil
+ } else {
+ last.Next = nil
+ Ctxt.Etextp = last
+ }
+
+ Ctxt.Shlibs = append(Ctxt.Shlibs, Shlib{Path: libpath, Hash: hash, Deps: deps, File: f, gcdata_addresses: gcdata_addresses})
+}
+
+func mywhatsys() {
+ goroot = obj.Getgoroot()
+ goos = obj.Getgoos()
+ goarch = obj.Getgoarch()
+
+ if !strings.HasPrefix(goarch, Thestring) {
+ log.Fatalf("cannot use %cc with GOARCH=%s", Thearch.Thechar, goarch)
+ }
+}
+
+// Copied from ../gc/subr.c:/^pathtoprefix; must stay in sync.
+/*
+ * Convert raw string to the prefix that will be used in the symbol table.
+ * Invalid bytes turn into %xx. Right now the only bytes that need
+ * escaping are %, ., and ", but we escape all control characters too.
+ *
+ * If you edit this, edit ../gc/subr.c:/^pathtoprefix too.
+ * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
+ */
+func pathtoprefix(s string) string {
+ slash := strings.LastIndex(s, "/")
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
+ }
+ }
+ return s
+}
+
+func addsection(seg *Segment, name string, rwx int) *Section {
+ var l **Section
+
+ for l = &seg.Sect; *l != nil; l = &(*l).Next {
+ }
+ sect := new(Section)
+ sect.Rwx = uint8(rwx)
+ sect.Name = name
+ sect.Seg = seg
+ sect.Align = int32(Thearch.Ptrsize) // everything is at least pointer-aligned
+ *l = sect
+ return sect
+}
+
+func Le16(b []byte) uint16 {
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func Le32(b []byte) uint32 {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func Le64(b []byte) uint64 {
+ return uint64(Le32(b)) | uint64(Le32(b[4:]))<<32
+}
+
+func Be16(b []byte) uint16 {
+ return uint16(b[0])<<8 | uint16(b[1])
+}
+
+func Be32(b []byte) uint32 {
+ return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
+}
+
+type Chain struct {
+ sym *LSym
+ up *Chain
+ limit int // limit on entry to sym
+}
+
+var morestack *LSym
+
+// TODO: Record enough information in new object files to
+// allow stack checks here.
+
+func haslinkregister() bool {
+ return Ctxt.FixedFrameSize() != 0
+}
+
+func callsize() int {
+ if haslinkregister() {
+ return 0
+ }
+ return Thearch.Regsize
+}
+
+func dostkcheck() {
+ var ch Chain
+
+ morestack = Linklookup(Ctxt, "runtime.morestack", 0)
+
+ // Every splitting function ensures that there are at least StackLimit
+ // bytes available below SP when the splitting prologue finishes.
+ // If the splitting function calls F, then F begins execution with
+ // at least StackLimit - callsize() bytes available.
+ // Check that every function behaves correctly with this amount
+ // of stack, following direct calls in order to piece together chains
+ // of non-splitting functions.
+ ch.up = nil
+
+ ch.limit = obj.StackLimit - callsize()
+
+ // Check every function, but do the nosplit functions in a first pass,
+ // to make the printed failure chains as short as possible.
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ // runtime.racesymbolizethunk is called from gcc-compiled C
+ // code running on the operating system thread stack.
+ // It uses more than the usual amount of stack but that's okay.
+ if s.Name == "runtime.racesymbolizethunk" {
+ continue
+ }
+
+ if s.Nosplit != 0 {
+ Ctxt.Cursym = s
+ ch.sym = s
+ stkcheck(&ch, 0)
+ }
+ }
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Nosplit == 0 {
+ Ctxt.Cursym = s
+ ch.sym = s
+ stkcheck(&ch, 0)
+ }
+ }
+}
+
+func stkcheck(up *Chain, depth int) int {
+ limit := up.limit
+ s := up.sym
+
+ // Don't duplicate work: only need to consider each
+ // function at top of safe zone once.
+ top := limit == obj.StackLimit-callsize()
+ if top {
+ if s.Stkcheck != 0 {
+ return 0
+ }
+ s.Stkcheck = 1
+ }
+
+ if depth > 100 {
+ Diag("nosplit stack check too deep")
+ stkbroke(up, 0)
+ return -1
+ }
+
+ if s.External != 0 || s.Pcln == nil {
+ // external function.
+ // should never be called directly.
+ // only diagnose the direct caller.
+ // TODO(mwhudson): actually think about this.
+ if depth == 1 && s.Type != obj.SXREF && !DynlinkingGo() &&
+ Buildmode != BuildmodePIE && Buildmode != BuildmodeCShared {
+ Diag("call to external function %s", s.Name)
+ }
+ return -1
+ }
+
+ if limit < 0 {
+ stkbroke(up, limit)
+ return -1
+ }
+
+ // morestack looks like it calls functions,
+ // but it switches the stack pointer first.
+ if s == morestack {
+ return 0
+ }
+
+ var ch Chain
+ ch.up = up
+
+ if s.Nosplit == 0 {
+ // Ensure we have enough stack to call morestack.
+ ch.limit = limit - callsize()
+ ch.sym = morestack
+ if stkcheck(&ch, depth+1) < 0 {
+ return -1
+ }
+ if !top {
+ return 0
+ }
+ // Raise limit to allow frame.
+ limit = int(obj.StackLimit+s.Locals) + int(Ctxt.FixedFrameSize())
+ }
+
+ // Walk through sp adjustments in function, consuming relocs.
+ ri := 0
+
+ endr := len(s.R)
+ var ch1 Chain
+ var pcsp Pciter
+ var r *Reloc
+ for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
+ // pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
+
+ // Check stack size in effect for this span.
+ if int32(limit)-pcsp.value < 0 {
+ stkbroke(up, int(int32(limit)-pcsp.value))
+ return -1
+ }
+
+ // Process calls in this span.
+ for ; ri < endr && uint32(s.R[ri].Off) < pcsp.nextpc; ri++ {
+ r = &s.R[ri]
+ switch r.Type {
+ // Direct call.
+ case obj.R_CALL, obj.R_CALLARM, obj.R_CALLARM64, obj.R_CALLPOWER, obj.R_CALLMIPS:
+ ch.limit = int(int32(limit) - pcsp.value - int32(callsize()))
+ ch.sym = r.Sym
+ if stkcheck(&ch, depth+1) < 0 {
+ return -1
+ }
+
+ // Indirect call. Assume it is a call to a splitting function,
+ // so we have to make sure it can call morestack.
+ // Arrange the data structures to report both calls, so that
+ // if there is an error, stkprint shows all the steps involved.
+ case obj.R_CALLIND:
+ ch.limit = int(int32(limit) - pcsp.value - int32(callsize()))
+
+ ch.sym = nil
+ ch1.limit = ch.limit - callsize() // for morestack in called prologue
+ ch1.up = &ch
+ ch1.sym = morestack
+ if stkcheck(&ch1, depth+2) < 0 {
+ return -1
+ }
+ }
+ }
+ }
+
+ return 0
+}
+
+func stkbroke(ch *Chain, limit int) {
+ Diag("nosplit stack overflow")
+ stkprint(ch, limit)
+}
+
+func stkprint(ch *Chain, limit int) {
+ var name string
+
+ if ch.sym != nil {
+ name = ch.sym.Name
+ if ch.sym.Nosplit != 0 {
+ name += " (nosplit)"
+ }
+ } else {
+ name = "function pointer"
+ }
+
+ if ch.up == nil {
+ // top of chain. ch->sym != nil.
+ if ch.sym.Nosplit != 0 {
+ fmt.Printf("\t%d\tassumed on entry to %s\n", ch.limit, name)
+ } else {
+ fmt.Printf("\t%d\tguaranteed after split check in %s\n", ch.limit, name)
+ }
+ } else {
+ stkprint(ch.up, ch.limit+callsize())
+ if !haslinkregister() {
+ fmt.Printf("\t%d\ton entry to %s\n", ch.limit, name)
+ }
+ }
+
+ if ch.limit != limit {
+ fmt.Printf("\t%d\tafter %s uses %d\n", limit, name, ch.limit-limit)
+ }
+}
+
+func Cflush() {
+ if err := coutbuf.Writer.Flush(); err != nil {
+ Exitf("flushing %s: %v", coutbuf.f.Name(), err)
+ }
+}
+
+func Cpos() int64 {
+ off, err := coutbuf.f.Seek(0, 1)
+ if err != nil {
+ Exitf("seeking in output [0, 1]: %v", err)
+ }
+ return off + int64(coutbuf.Buffered())
+}
+
+func Cseek(p int64) {
+ Cflush()
+ if _, err := coutbuf.f.Seek(p, 0); err != nil {
+ Exitf("seeking in output [0, 1]: %v", err)
+ }
+}
+
+func Cwrite(p []byte) {
+ coutbuf.Write(p)
+}
+
+func Cput(c uint8) {
+ coutbuf.WriteByte(c)
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n")
+ obj.Flagprint(2)
+ Exit(2)
+}
+
+func setheadtype(s string) {
+ h := headtype(s)
+ if h < 0 {
+ Exitf("unknown header type -H %s", s)
+ }
+
+ headstring = s
+ HEADTYPE = int32(headtype(s))
+}
+
+func setinterp(s string) {
+ Debug['I'] = 1 // denote cmdline interpreter override
+ interpreter = s
+}
+
+func doversion() {
+ Exitf("version %s", obj.Getgoversion())
+}
+
+func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
+ // These symbols won't show up in the first loop below because we
+ // skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
+ s := Linklookup(Ctxt, "runtime.text", 0)
+
+ if s.Type == obj.STEXT {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
+ }
+ s = Linklookup(Ctxt, "runtime.etext", 0)
+ if s.Type == obj.STEXT {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
+ }
+
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if s.Hide != 0 || ((s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC.") {
+ continue
+ }
+ switch s.Type & obj.SMASK {
+ case obj.SCONST,
+ obj.SRODATA,
+ obj.SSYMTAB,
+ obj.SPCLNTAB,
+ obj.SINITARR,
+ obj.SDATA,
+ obj.SNOPTRDATA,
+ obj.SELFROSECT,
+ obj.SMACHOGOT,
+ obj.STYPE,
+ obj.SSTRING,
+ obj.SGOSTRING,
+ obj.SGOFUNC,
+ obj.SGCBITS,
+ obj.STYPERELRO,
+ obj.SSTRINGRELRO,
+ obj.SGOSTRINGRELRO,
+ obj.SGOFUNCRELRO,
+ obj.SGCBITSRELRO,
+ obj.SRODATARELRO,
+ obj.STYPELINK,
+ obj.SWINDOWS:
+ if !s.Reachable {
+ continue
+ }
+ put(s, s.Name, 'D', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+
+ case obj.SBSS, obj.SNOPTRBSS:
+ if !s.Reachable {
+ continue
+ }
+ if len(s.P) > 0 {
+ Diag("%s should not be bss (size=%d type=%d special=%d)", s.Name, int(len(s.P)), s.Type, s.Special)
+ }
+ put(s, s.Name, 'B', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+
+ case obj.SFILE:
+ put(nil, s.Name, 'f', s.Value, 0, int(s.Version), nil)
+
+ case obj.SHOSTOBJ:
+ if HEADTYPE == obj.Hwindows || Iself {
+ put(s, s.Name, 'U', s.Value, 0, int(s.Version), nil)
+ }
+
+ case obj.SDYNIMPORT:
+ if !s.Reachable {
+ continue
+ }
+ put(s, s.Extname, 'U', 0, 0, int(s.Version), nil)
+
+ case obj.STLSBSS:
+ if Linkmode == LinkExternal && HEADTYPE != obj.Hopenbsd {
+ put(s, s.Name, 't', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+ }
+ }
+ }
+
+ var a *Auto
+ var off int32
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
+
+ // NOTE(ality): acid can't produce a stack trace without .frame symbols
+ put(nil, ".frame", 'm', int64(s.Locals)+int64(Thearch.Ptrsize), 0, 0, nil)
+
+ for a = s.Autom; a != nil; a = a.Link {
+ // Emit a or p according to actual offset, even if label is wrong.
+ // This avoids negative offsets, which cannot be encoded.
+ if a.Name != obj.A_AUTO && a.Name != obj.A_PARAM {
+ continue
+ }
+
+ // compute offset relative to FP
+ if a.Name == obj.A_PARAM {
+ off = a.Aoffset
+ } else {
+ off = a.Aoffset - int32(Thearch.Ptrsize)
+ }
+
+ // FP
+ if off >= 0 {
+ put(nil, a.Asym.Name, 'p', int64(off), 0, 0, a.Gotype)
+ continue
+ }
+
+ // SP
+ if off <= int32(-Thearch.Ptrsize) {
+ put(nil, a.Asym.Name, 'a', -(int64(off) + int64(Thearch.Ptrsize)), 0, 0, a.Gotype)
+ continue
+ }
+ }
+ }
+
+ // Otherwise, off is addressing the saved program counter.
+ // Something underhanded is going on. Say nothing.
+ if Debug['v'] != 0 || Debug['n'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f symsize = %d\n", obj.Cputime(), uint32(Symsize))
+ }
+ Bso.Flush()
+}
+
+func Symaddr(s *LSym) int64 {
+ if !s.Reachable {
+ Diag("unreachable symbol in symaddr - %s", s.Name)
+ }
+ return s.Value
+}
+
+func xdefine(p string, t int, v int64) {
+ s := Linklookup(Ctxt, p, 0)
+ s.Type = int16(t)
+ s.Value = v
+ s.Reachable = true
+ s.Special = 1
+ s.Local = true
+}
+
+func datoff(addr int64) int64 {
+ if uint64(addr) >= Segdata.Vaddr {
+ return int64(uint64(addr) - Segdata.Vaddr + Segdata.Fileoff)
+ }
+ if uint64(addr) >= Segtext.Vaddr {
+ return int64(uint64(addr) - Segtext.Vaddr + Segtext.Fileoff)
+ }
+ Diag("datoff %#x", addr)
+ return 0
+}
+
+func Entryvalue() int64 {
+ a := INITENTRY
+ if a[0] >= '0' && a[0] <= '9' {
+ return atolwhex(a)
+ }
+ s := Linklookup(Ctxt, a, 0)
+ if s.Type == 0 {
+ return INITTEXT
+ }
+ if s.Type != obj.STEXT {
+ Diag("entry not text: %s", s.Name)
+ }
+ return s.Value
+}
+
+func undefsym(s *LSym) {
+ var r *Reloc
+
+ Ctxt.Cursym = s
+ for i := 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil { // happens for some external ARM relocs
+ continue
+ }
+ if r.Sym.Type == obj.Sxxx || r.Sym.Type == obj.SXREF {
+ Diag("undefined: %s", r.Sym.Name)
+ }
+ if !r.Sym.Reachable {
+ Diag("use of unreachable symbol: %s", r.Sym.Name)
+ }
+ }
+}
+
+func undef() {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ undefsym(s)
+ }
+ for s := datap; s != nil; s = s.Next {
+ undefsym(s)
+ }
+ if nerrors > 0 {
+ errorexit()
+ }
+}
+
+func callgraph() {
+ if Debug['c'] == 0 {
+ return
+ }
+
+ var i int
+ var r *Reloc
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM || r.Type == obj.R_CALLPOWER || r.Type == obj.R_CALLMIPS) && r.Sym.Type == obj.STEXT {
+ fmt.Fprintf(&Bso, "%s calls %s\n", s.Name, r.Sym.Name)
+ }
+ }
+ }
+}
+
+func Diag(format string, args ...interface{}) {
+ tn := ""
+ sep := ""
+ if Ctxt.Cursym != nil {
+ tn = Ctxt.Cursym.Name
+ sep = ": "
+ }
+ fmt.Printf("%s%s%s\n", tn, sep, fmt.Sprintf(format, args...))
+ nerrors++
+ if Debug['h'] != 0 {
+ panic("error")
+ }
+ if nerrors > 20 {
+ Exitf("too many errors")
+ }
+}
+
+func checkgo() {
+ if Debug['C'] == 0 {
+ return
+ }
+
+ // TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
+ // which would simplify this logic quite a bit.
+
+ // Mark every Go-called C function with cfunc=2, recursively.
+ var changed int
+ var i int
+ var r *Reloc
+ var s *LSym
+ for {
+ changed = 0
+ for s = Ctxt.Textp; s != nil; s = s.Next {
+ if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
+ if r.Sym.Cfunc == 1 {
+ changed = 1
+ r.Sym.Cfunc = 2
+ }
+ }
+ }
+ }
+ }
+ if changed == 0 {
+ break
+ }
+ }
+
+ // Complain about Go-called C functions that can split the stack
+ // (that can be preempted for garbage collection or trigger a stack copy).
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
+ if s.Cfunc == 0 && r.Sym.Cfunc == 2 && r.Sym.Nosplit == 0 {
+ fmt.Printf("Go %s calls C %s\n", s.Name, r.Sym.Name)
+ } else if s.Cfunc == 2 && s.Nosplit != 0 && r.Sym.Nosplit == 0 {
+ fmt.Printf("Go calls C %s calls %s\n", s.Name, r.Sym.Name)
+ }
+ }
+ }
+ }
+ }
+}
+
+func Rnd(v int64, r int64) int64 {
+ if r <= 0 {
+ return v
+ }
+ v += r - 1
+ c := v % r
+ if c < 0 {
+ c += r
+ }
+ v -= c
+ return v
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/link.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/link.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/link.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/link.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,252 @@
+// Derived from Inferno utils/6l/l.h and related files.
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+ "cmd/internal/obj"
+ "debug/elf"
+ "encoding/binary"
+ "fmt"
+)
+
+type LSym struct {
+ Name string
+ Extname string
+ Type int16
+ Version int16
+ Dupok uint8
+ Cfunc uint8
+ External uint8
+ Nosplit uint8
+ Reachable bool
+ Cgoexport uint8
+ Special uint8
+ Stkcheck uint8
+ Hide uint8
+ Leaf uint8
+ Localentry uint8
+ Onlist uint8
+ // ElfType is set for symbols read from shared libraries by ldshlibsyms. It
+ // is not set for symbols defined by the packages being linked or by symbols
+ // read by ldelf (and so is left as elf.STT_NOTYPE).
+ ElfType elf.SymType
+ Dynid int32
+ Plt int32
+ Got int32
+ Align int32
+ Elfsym int32
+ LocalElfsym int32
+ Args int32
+ Locals int32
+ Value int64
+ Size int64
+ Allsym *LSym
+ Next *LSym
+ Sub *LSym
+ Outer *LSym
+ Gotype *LSym
+ Reachparent *LSym
+ Queue *LSym
+ File string
+ Dynimplib string
+ Dynimpvers string
+ Sect *Section
+ Autom *Auto
+ Pcln *Pcln
+ P []byte
+ R []Reloc
+ Local bool
+}
+
+func (s *LSym) String() string {
+ if s.Version == 0 {
+ return s.Name
+ }
+ return fmt.Sprintf("%s<%d>", s.Name, s.Version)
+}
+
+func (s *LSym) ElfsymForReloc() int32 {
+ // If putelfsym created a local version of this symbol, use that in all
+ // relocations.
+ if s.LocalElfsym != 0 {
+ return s.LocalElfsym
+ } else {
+ return s.Elfsym
+ }
+}
+
+type Reloc struct {
+ Off int32
+ Siz uint8
+ Done uint8
+ Type int32
+ Variant int32
+ Add int64
+ Xadd int64
+ Sym *LSym
+ Xsym *LSym
+}
+
+type Auto struct {
+ Asym *LSym
+ Link *Auto
+ Aoffset int32
+ Name int16
+ Gotype *LSym
+}
+
+type Shlib struct {
+ Path string
+ Hash []byte
+ Deps []string
+ File *elf.File
+ gcdata_addresses map[*LSym]uint64
+}
+
+type Link struct {
+ Thechar int32
+ Thestring string
+ Goarm int32
+ Headtype int
+ Arch *LinkArch
+ Debugasm int32
+ Debugvlog int32
+ Bso *obj.Biobuf
+ Windows int32
+ Goroot string
+ Hash map[symVer]*LSym
+ Allsym *LSym
+ Nsymbol int32
+ Tlsg *LSym
+ Libdir []string
+ Library []*Library
+ Shlibs []Shlib
+ Tlsoffset int
+ Diag func(string, ...interface{})
+ Cursym *LSym
+ Version int
+ Textp *LSym
+ Etextp *LSym
+ Nhistfile int32
+ Filesyms *LSym
+ Moduledata *LSym
+}
+
+// The smallest possible offset from the hardware stack pointer to a local
+// variable on the stack. Architectures that use a link register save its value
+// on the stack in the function prologue and so always have a pointer between
+// the hardware stack pointer and the local variable area.
+func (ctxt *Link) FixedFrameSize() int64 {
+ switch ctxt.Arch.Thechar {
+ case '6', '8':
+ return 0
+ case '9':
+ // PIC code on ppc64le requires 32 bytes of stack, and it's easier to
+ // just use that much stack always on ppc64x.
+ return int64(4 * ctxt.Arch.Ptrsize)
+ default:
+ return int64(ctxt.Arch.Ptrsize)
+ }
+}
+
+type LinkArch struct {
+ ByteOrder binary.ByteOrder
+ Name string
+ Thechar int
+ Minlc int
+ Ptrsize int
+ Regsize int
+}
+
+type Library struct {
+ Objref string
+ Srcref string
+ File string
+ Pkg string
+ Shlib string
+ hash []byte
+}
+
+type Pcln struct {
+ Pcsp Pcdata
+ Pcfile Pcdata
+ Pcline Pcdata
+ Pcdata []Pcdata
+ Npcdata int
+ Funcdata []*LSym
+ Funcdataoff []int64
+ Nfuncdata int
+ File []*LSym
+ Nfile int
+ Mfile int
+ Lastfile *LSym
+ Lastindex int
+}
+
+type Pcdata struct {
+ P []byte
+}
+
+type Pciter struct {
+ d Pcdata
+ p []byte
+ pc uint32
+ nextpc uint32
+ pcscale uint32
+ value int32
+ start int
+ done int
+}
+
+// Reloc.variant
+const (
+ RV_NONE = iota
+ RV_POWER_LO
+ RV_POWER_HI
+ RV_POWER_HA
+ RV_POWER_DS
+ RV_CHECK_OVERFLOW = 1 << 8
+ RV_TYPE_MASK = RV_CHECK_OVERFLOW - 1
+)
+
+// Pcdata iterator.
+// for(pciterinit(ctxt, &it, &pcd); !it.done; pciternext(&it)) { it.value holds in [it.pc, it.nextpc) }
+
+// Link holds the context for writing object code from a compiler
+// to be linker input or for reading that input into the linker.
+
+// LinkArch is the definition of a single architecture.
+
+const (
+ LinkAuto = 0 + iota
+ LinkInternal
+ LinkExternal
+)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/symtab.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/symtab.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/symtab.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/internal/ld/symtab.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,579 @@
+// Inferno utils/6l/span.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/span.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+ "path/filepath"
+ "strings"
+)
+
+// Symbol table.
+
+func putelfstr(s string) int {
+ if len(Elfstrdat) == 0 && s != "" {
+ // first entry must be empty string
+ putelfstr("")
+ }
+
+ // When dynamically linking, we create LSym's by reading the names from
+ // the symbol tables of the shared libraries and so the names need to
+ // match exactly. Tools like DTrace will have to wait for now.
+ if !DynlinkingGo() {
+ // Rewrite · to . for ASCII-only tools like DTrace (sigh)
+ s = strings.Replace(s, "·", ".", -1)
+ }
+
+ n := len(s) + 1
+ for len(Elfstrdat)+n > cap(Elfstrdat) {
+ Elfstrdat = append(Elfstrdat[:cap(Elfstrdat)], 0)[:len(Elfstrdat)]
+ }
+
+ off := len(Elfstrdat)
+ Elfstrdat = Elfstrdat[:off+n]
+ copy(Elfstrdat[off:], s)
+
+ return off
+}
+
+func putelfsyment(off int, addr int64, size int64, info int, shndx int, other int) {
+ switch Thearch.Thechar {
+ case '0', '6', '7', '9':
+ Thearch.Lput(uint32(off))
+ Cput(uint8(info))
+ Cput(uint8(other))
+ Thearch.Wput(uint16(shndx))
+ Thearch.Vput(uint64(addr))
+ Thearch.Vput(uint64(size))
+ Symsize += ELF64SYMSIZE
+
+ default:
+ Thearch.Lput(uint32(off))
+ Thearch.Lput(uint32(addr))
+ Thearch.Lput(uint32(size))
+ Cput(uint8(info))
+ Cput(uint8(other))
+ Thearch.Wput(uint16(shndx))
+ Symsize += ELF32SYMSIZE
+ }
+}
+
+var numelfsym int = 1 // 0 is reserved
+
+var elfbind int
+
+func putelfsym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
+ var type_ int
+
+ switch t {
+ default:
+ return
+
+ case 'T':
+ type_ = STT_FUNC
+
+ case 'D':
+ type_ = STT_OBJECT
+
+ case 'B':
+ type_ = STT_OBJECT
+
+ case 'U':
+ // ElfType is only set for symbols read from Go shared libraries, but
+ // for other symbols it is left as STT_NOTYPE which is fine.
+ type_ = int(x.ElfType)
+
+ case 't':
+ type_ = STT_TLS
+ }
+
+ xo := x
+ for xo.Outer != nil {
+ xo = xo.Outer
+ }
+
+ var elfshnum int
+ if xo.Type == obj.SDYNIMPORT || xo.Type == obj.SHOSTOBJ {
+ elfshnum = SHN_UNDEF
+ } else {
+ if xo.Sect == nil {
+ Ctxt.Cursym = x
+ Diag("missing section in putelfsym")
+ return
+ }
+ if xo.Sect.Elfsect == nil {
+ Ctxt.Cursym = x
+ Diag("missing ELF section in putelfsym")
+ return
+ }
+ elfshnum = xo.Sect.Elfsect.shnum
+ }
+
+ // One pass for each binding: STB_LOCAL, STB_GLOBAL,
+ // maybe one day STB_WEAK.
+ bind := STB_GLOBAL
+
+ if ver != 0 || (x.Type&obj.SHIDDEN != 0) || x.Local {
+ bind = STB_LOCAL
+ }
+
+ // In external linking mode, we have to invoke gcc with -rdynamic
+ // to get the exported symbols put into the dynamic symbol table.
+ // To avoid filling the dynamic table with lots of unnecessary symbols,
+ // mark all Go symbols local (not global) in the final executable.
+ // But when we're dynamically linking, we need all those global symbols.
+ if !DynlinkingGo() && Linkmode == LinkExternal && x.Cgoexport&CgoExportStatic == 0 && elfshnum != SHN_UNDEF {
+ bind = STB_LOCAL
+ }
+
+ if Linkmode == LinkExternal && elfshnum != SHN_UNDEF {
+ addr -= int64(xo.Sect.Vaddr)
+ }
+ other := STV_DEFAULT
+ if x.Type&obj.SHIDDEN != 0 {
+ other = STV_HIDDEN
+ }
+ if (Buildmode == BuildmodePIE || DynlinkingGo()) && Thearch.Thechar == '9' && type_ == STT_FUNC && x.Name != "runtime.duffzero" && x.Name != "runtime.duffcopy" {
+ // On ppc64 the top three bits of the st_other field indicate how
+ // many instructions separate the global and local entry points. In
+ // our case it is two instructions, indicated by the value 3.
+ other |= 3 << 5
+ }
+
+ if DynlinkingGo() && bind == STB_GLOBAL && elfbind == STB_LOCAL && x.Type == obj.STEXT {
+ // When dynamically linking, we want references to functions defined
+ // in this module to always be to the function object, not to the
+ // PLT. We force this by writing an additional local symbol for every
+ // global function symbol and making all relocations against the
+ // global symbol refer to this local symbol instead (see
+ // (*LSym).ElfsymForReloc). This is approximately equivalent to the
+ // ELF linker -Bsymbolic-functions option, but that is buggy on
+ // several platforms.
+ putelfsyment(putelfstr("local."+s), addr, size, STB_LOCAL<<4|type_&0xf, elfshnum, other)
+ x.LocalElfsym = int32(numelfsym)
+ numelfsym++
+ return
+ } else if bind != elfbind {
+ return
+ }
+
+ putelfsyment(putelfstr(s), addr, size, bind<<4|type_&0xf, elfshnum, other)
+ x.Elfsym = int32(numelfsym)
+ numelfsym++
+}
+
+func putelfsectionsym(s *LSym, shndx int) {
+ putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_SECTION, shndx, 0)
+ s.Elfsym = int32(numelfsym)
+ numelfsym++
+}
+
+func putelfsymshndx(sympos int64, shndx int) {
+ here := Cpos()
+ if elf64 {
+ Cseek(sympos + 6)
+ } else {
+ Cseek(sympos + 14)
+ }
+
+ Thearch.Wput(uint16(shndx))
+ Cseek(here)
+}
+
+func Asmelfsym() {
+ // the first symbol entry is reserved
+ putelfsyment(0, 0, 0, STB_LOCAL<<4|STT_NOTYPE, 0, 0)
+
+ dwarfaddelfsectionsyms()
+
+ elfbind = STB_LOCAL
+ genasmsym(putelfsym)
+
+ elfbind = STB_GLOBAL
+ elfglobalsymndx = numelfsym
+ genasmsym(putelfsym)
+}
+
+func putplan9sym(x *LSym, s string, t int, addr int64, size int64, ver int, go_ *LSym) {
+ switch t {
+ case 'T', 'L', 'D', 'B':
+ if ver != 0 {
+ t += 'a' - 'A'
+ }
+ fallthrough
+
+ case 'a',
+ 'p',
+ 'f',
+ 'z',
+ 'Z',
+ 'm':
+ l := 4
+ if HEADTYPE == obj.Hplan9 && Thearch.Thechar == '6' && Debug['8'] == 0 {
+ Lputb(uint32(addr >> 32))
+ l = 8
+ }
+
+ Lputb(uint32(addr))
+ Cput(uint8(t + 0x80)) /* 0x80 is variable length */
+
+ var i int
+ if t == 'z' || t == 'Z' {
+ Cput(uint8(s[0]))
+ for i = 1; s[i] != 0 || s[i+1] != 0; i += 2 {
+ Cput(uint8(s[i]))
+ Cput(uint8(s[i+1]))
+ }
+
+ Cput(0)
+ Cput(0)
+ i++
+ } else {
+ /* skip the '<' in filenames */
+ if t == 'f' {
+ s = s[1:]
+ }
+ for i = 0; i < len(s); i++ {
+ Cput(uint8(s[i]))
+ }
+ Cput(0)
+ }
+
+ Symsize += int32(l) + 1 + int32(i) + 1
+
+ default:
+ return
+ }
+}
+
+func Asmplan9sym() {
+ genasmsym(putplan9sym)
+}
+
+var symt *LSym
+
+func Wputl(w uint16) {
+ Cput(uint8(w))
+ Cput(uint8(w >> 8))
+}
+
+func Wputb(w uint16) {
+ Cput(uint8(w >> 8))
+ Cput(uint8(w))
+}
+
+func Lputb(l uint32) {
+ Cput(uint8(l >> 24))
+ Cput(uint8(l >> 16))
+ Cput(uint8(l >> 8))
+ Cput(uint8(l))
+}
+
+func Lputl(l uint32) {
+ Cput(uint8(l))
+ Cput(uint8(l >> 8))
+ Cput(uint8(l >> 16))
+ Cput(uint8(l >> 24))
+}
+
+func Vputb(v uint64) {
+ Lputb(uint32(v >> 32))
+ Lputb(uint32(v))
+}
+
+func Vputl(v uint64) {
+ Lputl(uint32(v))
+ Lputl(uint32(v >> 32))
+}
+
+type byPkg []*Library
+
+func (libs byPkg) Len() int {
+ return len(libs)
+}
+
+func (libs byPkg) Less(a, b int) bool {
+ return libs[a].Pkg < libs[b].Pkg
+}
+
+func (libs byPkg) Swap(a, b int) {
+ libs[a], libs[b] = libs[b], libs[a]
+}
+
+func symtab() {
+ dosymtype()
+
+ // Define these so that they'll get put into the symbol table.
+ // data.c:/^address will provide the actual values.
+ xdefine("runtime.text", obj.STEXT, 0)
+
+ xdefine("runtime.etext", obj.STEXT, 0)
+ xdefine("runtime.typelink", obj.SRODATA, 0)
+ xdefine("runtime.etypelink", obj.SRODATA, 0)
+ xdefine("runtime.rodata", obj.SRODATA, 0)
+ xdefine("runtime.erodata", obj.SRODATA, 0)
+ xdefine("runtime.noptrdata", obj.SNOPTRDATA, 0)
+ xdefine("runtime.enoptrdata", obj.SNOPTRDATA, 0)
+ xdefine("runtime.data", obj.SDATA, 0)
+ xdefine("runtime.edata", obj.SDATA, 0)
+ xdefine("runtime.bss", obj.SBSS, 0)
+ xdefine("runtime.ebss", obj.SBSS, 0)
+ xdefine("runtime.noptrbss", obj.SNOPTRBSS, 0)
+ xdefine("runtime.enoptrbss", obj.SNOPTRBSS, 0)
+ xdefine("runtime.end", obj.SBSS, 0)
+ xdefine("runtime.epclntab", obj.SRODATA, 0)
+ xdefine("runtime.esymtab", obj.SRODATA, 0)
+
+ // garbage collection symbols
+ s := Linklookup(Ctxt, "runtime.gcdata", 0)
+
+ s.Type = obj.SRODATA
+ s.Size = 0
+ s.Reachable = true
+ xdefine("runtime.egcdata", obj.SRODATA, 0)
+
+ s = Linklookup(Ctxt, "runtime.gcbss", 0)
+ s.Type = obj.SRODATA
+ s.Size = 0
+ s.Reachable = true
+ xdefine("runtime.egcbss", obj.SRODATA, 0)
+
+ // pseudo-symbols to mark locations of type, string, and go string data.
+ var symtype *LSym
+ var symtyperel *LSym
+ if UseRelro() && (Buildmode == BuildmodeCShared || Buildmode == BuildmodePIE) {
+ s = Linklookup(Ctxt, "type.*", 0)
+
+ s.Type = obj.STYPE
+ s.Size = 0
+ s.Reachable = true
+ symtype = s
+
+ s = Linklookup(Ctxt, "typerel.*", 0)
+
+ s.Type = obj.STYPERELRO
+ s.Size = 0
+ s.Reachable = true
+ symtyperel = s
+ } else if !DynlinkingGo() {
+ s = Linklookup(Ctxt, "type.*", 0)
+
+ s.Type = obj.STYPE
+ s.Size = 0
+ s.Reachable = true
+ symtype = s
+ symtyperel = s
+ }
+
+ s = Linklookup(Ctxt, "go.string.*", 0)
+ s.Type = obj.SGOSTRING
+ s.Local = true
+ s.Size = 0
+ s.Reachable = true
+ symgostring := s
+
+ s = Linklookup(Ctxt, "go.func.*", 0)
+ s.Type = obj.SGOFUNC
+ s.Local = true
+ s.Size = 0
+ s.Reachable = true
+ symgofunc := s
+
+ s = Linklookup(Ctxt, "runtime.gcbits.*", 0)
+ s.Type = obj.SGCBITS
+ s.Local = true
+ s.Size = 0
+ s.Reachable = true
+ symgcbits := s
+
+ symtypelink := Linklookup(Ctxt, "runtime.typelink", 0)
+ symtypelink.Type = obj.STYPELINK
+
+ symt = Linklookup(Ctxt, "runtime.symtab", 0)
+ symt.Local = true
+ symt.Type = obj.SSYMTAB
+ symt.Size = 0
+ symt.Reachable = true
+
+ ntypelinks := 0
+
+ // assign specific types so that they sort together.
+ // within a type they sort by size, so the .* symbols
+ // just defined above will be first.
+ // hide the specific symbols.
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if !s.Reachable || s.Special != 0 || s.Type != obj.SRODATA {
+ continue
+ }
+
+ if strings.HasPrefix(s.Name, "type.") && !DynlinkingGo() {
+ s.Hide = 1
+ if UseRelro() && len(s.R) > 0 {
+ s.Type = obj.STYPERELRO
+ s.Outer = symtyperel
+ } else {
+ s.Type = obj.STYPE
+ s.Outer = symtype
+ }
+ }
+
+ if strings.HasPrefix(s.Name, "go.typelink.") {
+ ntypelinks++
+ s.Type = obj.STYPELINK
+ s.Hide = 1
+ s.Outer = symtypelink
+ }
+
+ if strings.HasPrefix(s.Name, "go.string.") {
+ s.Type = obj.SGOSTRING
+ s.Hide = 1
+ s.Outer = symgostring
+ }
+
+ if strings.HasPrefix(s.Name, "runtime.gcbits.") {
+ s.Type = obj.SGCBITS
+ s.Hide = 1
+ s.Outer = symgcbits
+ }
+
+ if strings.HasPrefix(s.Name, "go.func.") {
+ s.Type = obj.SGOFUNC
+ s.Hide = 1
+ s.Outer = symgofunc
+ }
+
+ if strings.HasPrefix(s.Name, "gcargs.") || strings.HasPrefix(s.Name, "gclocals.") || strings.HasPrefix(s.Name, "gclocals·") {
+ s.Type = obj.SGOFUNC
+ s.Hide = 1
+ s.Outer = symgofunc
+ s.Align = 4
+ liveness += (s.Size + int64(s.Align) - 1) &^ (int64(s.Align) - 1)
+ }
+ }
+
+ if Buildmode == BuildmodeShared {
+ abihashgostr := Linklookup(Ctxt, "go.link.abihash."+filepath.Base(outfile), 0)
+ abihashgostr.Reachable = true
+ abihashgostr.Type = obj.SRODATA
+ hashsym := Linklookup(Ctxt, "go.link.abihashbytes", 0)
+ Addaddr(Ctxt, abihashgostr, hashsym)
+ adduint(Ctxt, abihashgostr, uint64(hashsym.Size))
+ }
+
+ // Information about the layout of the executable image for the
+ // runtime to use. Any changes here must be matched by changes to
+ // the definition of moduledata in runtime/symtab.go.
+ // This code uses several global variables that are set by pcln.go:pclntab.
+ moduledata := Ctxt.Moduledata
+ // The pclntab slice
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.pclntab", 0))
+ adduint(Ctxt, moduledata, uint64(Linklookup(Ctxt, "runtime.pclntab", 0).Size))
+ adduint(Ctxt, moduledata, uint64(Linklookup(Ctxt, "runtime.pclntab", 0).Size))
+ // The ftab slice
+ Addaddrplus(Ctxt, moduledata, Linklookup(Ctxt, "runtime.pclntab", 0), int64(pclntabPclntabOffset))
+ adduint(Ctxt, moduledata, uint64(pclntabNfunc+1))
+ adduint(Ctxt, moduledata, uint64(pclntabNfunc+1))
+ // The filetab slice
+ Addaddrplus(Ctxt, moduledata, Linklookup(Ctxt, "runtime.pclntab", 0), int64(pclntabFiletabOffset))
+ adduint(Ctxt, moduledata, uint64(Ctxt.Nhistfile)+1)
+ adduint(Ctxt, moduledata, uint64(Ctxt.Nhistfile)+1)
+ // findfunctab
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.findfunctab", 0))
+ // minpc, maxpc
+ Addaddr(Ctxt, moduledata, pclntabFirstFunc)
+ Addaddrplus(Ctxt, moduledata, pclntabLastFunc, pclntabLastFunc.Size)
+ // pointers to specific parts of the module
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.text", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.etext", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.noptrdata", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.enoptrdata", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.data", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.edata", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.bss", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.ebss", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.noptrbss", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.enoptrbss", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.end", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.gcdata", 0))
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.gcbss", 0))
+ // The typelinks slice
+ Addaddr(Ctxt, moduledata, Linklookup(Ctxt, "runtime.typelink", 0))
+ adduint(Ctxt, moduledata, uint64(ntypelinks))
+ adduint(Ctxt, moduledata, uint64(ntypelinks))
+ if len(Ctxt.Shlibs) > 0 {
+ thismodulename := filepath.Base(outfile)
+ switch Buildmode {
+ case BuildmodeExe, BuildmodePIE:
+ // When linking an executable, outfile is just "a.out". Make
+ // it something slightly more comprehensible.
+ thismodulename = "the executable"
+ }
+ addgostring(moduledata, "go.link.thismodulename", thismodulename)
+
+ modulehashes := Linklookup(Ctxt, "go.link.abihashes", 0)
+ modulehashes.Reachable = true
+ modulehashes.Local = true
+ modulehashes.Type = obj.SRODATA
+
+ for i, shlib := range Ctxt.Shlibs {
+ // modulehashes[i].modulename
+ modulename := filepath.Base(shlib.Path)
+ addgostring(modulehashes, fmt.Sprintf("go.link.libname.%d", i), modulename)
+
+ // modulehashes[i].linktimehash
+ addgostring(modulehashes, fmt.Sprintf("go.link.linkhash.%d", i), string(shlib.Hash))
+
+ // modulehashes[i].runtimehash
+ abihash := Linklookup(Ctxt, "go.link.abihash."+modulename, 0)
+ abihash.Reachable = true
+ Addaddr(Ctxt, modulehashes, abihash)
+ }
+
+ Addaddr(Ctxt, moduledata, modulehashes)
+ adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
+ adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
+ }
+ // The rest of moduledata is zero initialized.
+ // When linking an object that does not contain the runtime we are
+ // creating the moduledata from scratch and it does not have a
+ // compiler-provided size, so read it from the type data.
+ moduledatatype := Linkrlookup(Ctxt, "type.runtime.moduledata", 0)
+ moduledata.Size = decodetype_size(moduledatatype)
+ Symgrow(Ctxt, moduledata, moduledata.Size)
+
+ lastmoduledatap := Linklookup(Ctxt, "runtime.lastmoduledatap", 0)
+ if lastmoduledatap.Type != obj.SDYNIMPORT {
+ lastmoduledatap.Type = obj.SNOPTRDATA
+ lastmoduledatap.Size = 0 // overwrite existing value
+ Addaddr(Ctxt, lastmoduledatap, moduledata)
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/main.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/main.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/link/main.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/link/main.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/internal/obj"
+ "cmd/link/internal/amd64"
+ "cmd/link/internal/arm"
+ "cmd/link/internal/arm64"
+ "cmd/link/internal/mips64"
+ "cmd/link/internal/ppc64"
+ "cmd/link/internal/x86"
+ "fmt"
+ "os"
+)
+
+func main() {
+ switch obj.Getgoarch() {
+ default:
+ fmt.Fprintf(os.Stderr, "link: unknown architecture %q\n", obj.Getgoarch())
+ os.Exit(2)
+ case "386":
+ x86.Main()
+ case "amd64", "amd64p32":
+ amd64.Main()
+ case "arm":
+ arm.Main()
+ case "arm64":
+ arm64.Main()
+ case "mips64", "mips64le":
+ mips64.Main()
+ case "ppc64", "ppc64le":
+ ppc64.Main()
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/objdump/objdump_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/objdump/objdump_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/objdump/objdump_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/objdump/objdump_test.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,135 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "go/build"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+func buildObjdump(t *testing.T) (tmp, exe string) {
+ testenv.MustHaveGoBuild(t)
+
+ tmp, err := ioutil.TempDir("", "TestObjDump")
+ if err != nil {
+ t.Fatal("TempDir failed: ", err)
+ }
+
+ exe = filepath.Join(tmp, "testobjdump.exe")
+ out, err := exec.Command("go", "build", "-o", exe, "cmd/objdump").CombinedOutput()
+ if err != nil {
+ os.RemoveAll(tmp)
+ t.Fatalf("go build -o %v cmd/objdump: %v\n%s", exe, err, string(out))
+ }
+ return
+}
+
+var x86Need = []string{
+ "fmthello.go:6",
+ "TEXT main.main(SB)",
+ "JMP main.main(SB)",
+ "CALL fmt.Println(SB)",
+ "RET",
+}
+
+var armNeed = []string{
+ "fmthello.go:6",
+ "TEXT main.main(SB)",
+ //"B.LS main.main(SB)", // TODO(rsc): restore; golang.org/issue/9021
+ "BL fmt.Println(SB)",
+ "RET",
+}
+
+// objdump is fully cross platform: it can handle binaries
+// from any known operating system and architecture.
+// We could in principle add binaries to testdata and check
+// all the supported systems during this test. However, the
+// binaries would be about 1 MB each, and we don't want to
+// add that much junk to the hg repository. Instead, build a
+// binary for the current system (only) and test that objdump
+// can handle that one.
+
+func testDisasm(t *testing.T, flags ...string) {
+ tmp, exe := buildObjdump(t)
+ defer os.RemoveAll(tmp)
+
+ hello := filepath.Join(tmp, "hello.exe")
+ args := []string{"build", "-o", hello}
+ args = append(args, flags...)
+ args = append(args, "testdata/fmthello.go")
+ out, err := exec.Command("go", args...).CombinedOutput()
+ if err != nil {
+ t.Fatalf("go build fmthello.go: %v\n%s", err, out)
+ }
+ need := []string{
+ "fmthello.go:6",
+ "TEXT main.main(SB)",
+ }
+ switch runtime.GOARCH {
+ case "amd64", "386":
+ need = append(need, x86Need...)
+ case "arm":
+ need = append(need, armNeed...)
+ }
+
+ out, err = exec.Command(exe, "-s", "main.main", hello).CombinedOutput()
+ if err != nil {
+ t.Fatalf("objdump fmthello.exe: %v\n%s", err, out)
+ }
+
+ text := string(out)
+ ok := true
+ for _, s := range need {
+ if !strings.Contains(text, s) {
+ t.Errorf("disassembly missing '%s'", s)
+ ok = false
+ }
+ }
+ if !ok {
+ t.Logf("full disassembly:\n%s", text)
+ }
+}
+
+func TestDisasm(t *testing.T) {
+ switch runtime.GOARCH {
+ case "ppc64", "ppc64le":
+ t.Skipf("skipping on %s, issue 9039", runtime.GOARCH)
+ case "arm64":
+ t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
+ case "mips64", "mips64le":
+ t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
+ }
+ testDisasm(t)
+}
+
+func TestDisasmExtld(t *testing.T) {
+ switch runtime.GOOS {
+ case "plan9", "windows":
+ t.Skipf("skipping on %s", runtime.GOOS)
+ }
+ switch runtime.GOARCH {
+ case "ppc64", "ppc64le":
+ t.Skipf("skipping on %s, no support for external linking, issue 9038", runtime.GOARCH)
+ case "arm64":
+ t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
+ case "mips64", "mips64le":
+ t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH)
+ }
+ // TODO(jsing): Reenable once openbsd/arm has external linking support.
+ if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" {
+ t.Skip("skipping on openbsd/arm, no support for external linking, issue 10619")
+ }
+ if !build.Default.CgoEnabled {
+ t.Skip("skipping because cgo is not enabled")
+ }
+ testDisasm(t, "-ldflags=-linkmode=external")
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/vet/asmdecl.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/vet/asmdecl.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/cmd/vet/asmdecl.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/cmd/vet/asmdecl.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,662 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Identify mismatches between assembly files and Go func declarations.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// 'kind' is a kind of assembly variable.
+// The kinds 1, 2, 4, 8 stand for values of that size.
+type asmKind int
+
+// These special kinds are not valid sizes.
+const (
+ asmString asmKind = 100 + iota
+ asmSlice
+ asmInterface
+ asmEmptyInterface
+)
+
+// An asmArch describes assembly parameters for an architecture
+type asmArch struct {
+ name string
+ ptrSize int
+ intSize int
+ maxAlign int
+ bigEndian bool
+ stack string
+ lr bool
+}
+
+// An asmFunc describes the expected variables for a function on a given architecture.
+type asmFunc struct {
+ arch *asmArch
+ size int // size of all arguments
+ vars map[string]*asmVar
+ varByOffset map[int]*asmVar
+}
+
+// An asmVar describes a single assembly variable.
+type asmVar struct {
+ name string
+ kind asmKind
+ typ string
+ off int
+ size int
+ inner []*asmVar
+}
+
+var (
+ asmArch386 = asmArch{"386", 4, 4, 4, false, "SP", false}
+ asmArchArm = asmArch{"arm", 4, 4, 4, false, "R13", true}
+ asmArchArm64 = asmArch{"arm64", 8, 8, 8, false, "RSP", true}
+ asmArchAmd64 = asmArch{"amd64", 8, 8, 8, false, "SP", false}
+ asmArchAmd64p32 = asmArch{"amd64p32", 4, 4, 8, false, "SP", false}
+ asmArchPpc64 = asmArch{"ppc64", 8, 8, 8, true, "R1", true}
+ asmArchPpc64LE = asmArch{"ppc64le", 8, 8, 8, false, "R1", true}
+
+ arches = []*asmArch{
+ &asmArch386,
+ &asmArchArm,
+ &asmArchArm64,
+ &asmArchAmd64,
+ &asmArchAmd64p32,
+ &asmArchPpc64,
+ &asmArchPpc64LE,
+ }
+)
+
+var (
+ re = regexp.MustCompile
+ asmPlusBuild = re(`//\s+\+build\s+([^\n]+)`)
+ asmTEXT = re(`\bTEXT\b.*·([^\(]+)\(SB\)(?:\s*,\s*([0-9A-Z|+]+))?(?:\s*,\s*\$(-?[0-9]+)(?:-([0-9]+))?)?`)
+ asmDATA = re(`\b(DATA|GLOBL)\b`)
+ asmNamedFP = re(`([a-zA-Z0-9_\xFF-\x{10FFFF}]+)(?:\+([0-9]+))\(FP\)`)
+ asmUnnamedFP = re(`[^+\-0-9](([0-9]+)\(FP\))`)
+ asmSP = re(`[^+\-0-9](([0-9]+)\(([A-Z0-9]+)\))`)
+ asmOpcode = re(`^\s*(?:[A-Z0-9a-z_]+:)?\s*([A-Z]+)\s*([^,]*)(?:,\s*(.*))?`)
+ ppc64Suff = re(`([BHWD])(ZU|Z|U|BR)?$`)
+)
+
+func asmCheck(pkg *Package) {
+ if !vet("asmdecl") {
+ return
+ }
+
+ // No work if no assembly files.
+ if !pkg.hasFileWithSuffix(".s") {
+ return
+ }
+
+ // Gather declarations. knownFunc[name][arch] is func description.
+ knownFunc := make(map[string]map[string]*asmFunc)
+
+ for _, f := range pkg.files {
+ if f.file != nil {
+ for _, decl := range f.file.Decls {
+ if decl, ok := decl.(*ast.FuncDecl); ok && decl.Body == nil {
+ knownFunc[decl.Name.Name] = f.asmParseDecl(decl)
+ }
+ }
+ }
+ }
+
+Files:
+ for _, f := range pkg.files {
+ if !strings.HasSuffix(f.name, ".s") {
+ continue
+ }
+ Println("Checking file", f.name)
+
+ // Determine architecture from file name if possible.
+ var arch string
+ var archDef *asmArch
+ for _, a := range arches {
+ if strings.HasSuffix(f.name, "_"+a.name+".s") {
+ arch = a.name
+ archDef = a
+ break
+ }
+ }
+
+ lines := strings.SplitAfter(string(f.content), "\n")
+ var (
+ fn *asmFunc
+ fnName string
+ localSize, argSize int
+ wroteSP bool
+ haveRetArg bool
+ retLine []int
+ )
+
+ flushRet := func() {
+ if fn != nil && fn.vars["ret"] != nil && !haveRetArg && len(retLine) > 0 {
+ v := fn.vars["ret"]
+ for _, line := range retLine {
+ f.Badf(token.NoPos, "%s:%d: [%s] %s: RET without writing to %d-byte ret+%d(FP)", f.name, line, arch, fnName, v.size, v.off)
+ }
+ }
+ retLine = nil
+ }
+ for lineno, line := range lines {
+ lineno++
+
+ badf := func(format string, args ...interface{}) {
+ f.Badf(token.NoPos, "%s:%d: [%s] %s: %s", f.name, lineno, arch, fnName, fmt.Sprintf(format, args...))
+ }
+
+ if arch == "" {
+ // Determine architecture from +build line if possible.
+ if m := asmPlusBuild.FindStringSubmatch(line); m != nil {
+ Fields:
+ for _, fld := range strings.Fields(m[1]) {
+ for _, a := range arches {
+ if a.name == fld {
+ arch = a.name
+ archDef = a
+ break Fields
+ }
+ }
+ }
+ }
+ }
+
+ if m := asmTEXT.FindStringSubmatch(line); m != nil {
+ flushRet()
+ if arch == "" {
+ f.Warnf(token.NoPos, "%s: cannot determine architecture for assembly file", f.name)
+ continue Files
+ }
+ fnName = m[1]
+ fn = knownFunc[m[1]][arch]
+ if fn != nil {
+ size, _ := strconv.Atoi(m[4])
+ if size != fn.size && (m[2] != "7" && !strings.Contains(m[2], "NOSPLIT") || size != 0) {
+ badf("wrong argument size %d; expected $...-%d", size, fn.size)
+ }
+ }
+ localSize, _ = strconv.Atoi(m[3])
+ localSize += archDef.intSize
+ if archDef.lr {
+ // Account for caller's saved LR
+ localSize += archDef.intSize
+ }
+ argSize, _ = strconv.Atoi(m[4])
+ if fn == nil && !strings.Contains(fnName, "<>") {
+ badf("function %s missing Go declaration", fnName)
+ }
+ wroteSP = false
+ haveRetArg = false
+ continue
+ } else if strings.Contains(line, "TEXT") && strings.Contains(line, "SB") {
+ // function, but not visible from Go (didn't match asmTEXT), so stop checking
+ flushRet()
+ fn = nil
+ fnName = ""
+ continue
+ }
+
+ if strings.Contains(line, "RET") {
+ retLine = append(retLine, lineno)
+ }
+
+ if fnName == "" {
+ continue
+ }
+
+ if asmDATA.FindStringSubmatch(line) != nil {
+ fn = nil
+ }
+
+ if archDef == nil {
+ continue
+ }
+
+ if strings.Contains(line, ", "+archDef.stack) || strings.Contains(line, ",\t"+archDef.stack) {
+ wroteSP = true
+ continue
+ }
+
+ for _, m := range asmSP.FindAllStringSubmatch(line, -1) {
+ if m[3] != archDef.stack || wroteSP {
+ continue
+ }
+ off := 0
+ if m[1] != "" {
+ off, _ = strconv.Atoi(m[2])
+ }
+ if off >= localSize {
+ if fn != nil {
+ v := fn.varByOffset[off-localSize]
+ if v != nil {
+ badf("%s should be %s+%d(FP)", m[1], v.name, off-localSize)
+ continue
+ }
+ }
+ if off >= localSize+argSize {
+ badf("use of %s points beyond argument frame", m[1])
+ continue
+ }
+ badf("use of %s to access argument frame", m[1])
+ }
+ }
+
+ if fn == nil {
+ continue
+ }
+
+ for _, m := range asmUnnamedFP.FindAllStringSubmatch(line, -1) {
+ off, _ := strconv.Atoi(m[2])
+ v := fn.varByOffset[off]
+ if v != nil {
+ badf("use of unnamed argument %s; offset %d is %s+%d(FP)", m[1], off, v.name, v.off)
+ } else {
+ badf("use of unnamed argument %s", m[1])
+ }
+ }
+
+ for _, m := range asmNamedFP.FindAllStringSubmatch(line, -1) {
+ name := m[1]
+ off := 0
+ if m[2] != "" {
+ off, _ = strconv.Atoi(m[2])
+ }
+ if name == "ret" || strings.HasPrefix(name, "ret_") {
+ haveRetArg = true
+ }
+ v := fn.vars[name]
+ if v == nil {
+ // Allow argframe+0(FP).
+ if name == "argframe" && off == 0 {
+ continue
+ }
+ v = fn.varByOffset[off]
+ if v != nil {
+ badf("unknown variable %s; offset %d is %s+%d(FP)", name, off, v.name, v.off)
+ } else {
+ badf("unknown variable %s", name)
+ }
+ continue
+ }
+ asmCheckVar(badf, fn, line, m[0], off, v)
+ }
+ }
+ flushRet()
+ }
+}
+
+// asmParseDecl parses a function decl for expected assembly variables.
+func (f *File) asmParseDecl(decl *ast.FuncDecl) map[string]*asmFunc {
+ var (
+ arch *asmArch
+ fn *asmFunc
+ offset int
+ failed bool
+ )
+
+ addVar := func(outer string, v asmVar) {
+ if vo := fn.vars[outer]; vo != nil {
+ vo.inner = append(vo.inner, &v)
+ }
+ fn.vars[v.name] = &v
+ for i := 0; i < v.size; i++ {
+ fn.varByOffset[v.off+i] = &v
+ }
+ }
+
+ addParams := func(list []*ast.Field) {
+ for i, fld := range list {
+ // Determine alignment, size, and kind of type in declaration.
+ var align, size int
+ var kind asmKind
+ names := fld.Names
+ typ := f.gofmt(fld.Type)
+ switch t := fld.Type.(type) {
+ default:
+ switch typ {
+ default:
+ f.Warnf(fld.Type.Pos(), "unknown assembly argument type %s", typ)
+ failed = true
+ return
+ case "int8", "uint8", "byte", "bool":
+ size = 1
+ case "int16", "uint16":
+ size = 2
+ case "int32", "uint32", "float32":
+ size = 4
+ case "int64", "uint64", "float64":
+ align = arch.maxAlign
+ size = 8
+ case "int", "uint":
+ size = arch.intSize
+ case "uintptr", "iword", "Word", "Errno", "unsafe.Pointer":
+ size = arch.ptrSize
+ case "string", "ErrorString":
+ size = arch.ptrSize * 2
+ align = arch.ptrSize
+ kind = asmString
+ }
+ case *ast.ChanType, *ast.FuncType, *ast.MapType, *ast.StarExpr:
+ size = arch.ptrSize
+ case *ast.InterfaceType:
+ align = arch.ptrSize
+ size = 2 * arch.ptrSize
+ if len(t.Methods.List) > 0 {
+ kind = asmInterface
+ } else {
+ kind = asmEmptyInterface
+ }
+ case *ast.ArrayType:
+ if t.Len == nil {
+ size = arch.ptrSize + 2*arch.intSize
+ align = arch.ptrSize
+ kind = asmSlice
+ break
+ }
+ f.Warnf(fld.Type.Pos(), "unsupported assembly argument type %s", typ)
+ failed = true
+ case *ast.StructType:
+ f.Warnf(fld.Type.Pos(), "unsupported assembly argument type %s", typ)
+ failed = true
+ }
+ if align == 0 {
+ align = size
+ }
+ if kind == 0 {
+ kind = asmKind(size)
+ }
+ offset += -offset & (align - 1)
+
+ // Create variable for each name being declared with this type.
+ if len(names) == 0 {
+ name := "unnamed"
+ if decl.Type.Results != nil && len(decl.Type.Results.List) > 0 && &list[0] == &decl.Type.Results.List[0] && i == 0 {
+ // Assume assembly will refer to single unnamed result as r.
+ name = "ret"
+ }
+ names = []*ast.Ident{{Name: name}}
+ }
+ for _, id := range names {
+ name := id.Name
+ addVar("", asmVar{
+ name: name,
+ kind: kind,
+ typ: typ,
+ off: offset,
+ size: size,
+ })
+ switch kind {
+ case 8:
+ if arch.ptrSize == 4 {
+ w1, w2 := "lo", "hi"
+ if arch.bigEndian {
+ w1, w2 = w2, w1
+ }
+ addVar(name, asmVar{
+ name: name + "_" + w1,
+ kind: 4,
+ typ: "half " + typ,
+ off: offset,
+ size: 4,
+ })
+ addVar(name, asmVar{
+ name: name + "_" + w2,
+ kind: 4,
+ typ: "half " + typ,
+ off: offset + 4,
+ size: 4,
+ })
+ }
+
+ case asmEmptyInterface:
+ addVar(name, asmVar{
+ name: name + "_type",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface type",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_data",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface data",
+ off: offset + arch.ptrSize,
+ size: arch.ptrSize,
+ })
+
+ case asmInterface:
+ addVar(name, asmVar{
+ name: name + "_itable",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface itable",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_data",
+ kind: asmKind(arch.ptrSize),
+ typ: "interface data",
+ off: offset + arch.ptrSize,
+ size: arch.ptrSize,
+ })
+
+ case asmSlice:
+ addVar(name, asmVar{
+ name: name + "_base",
+ kind: asmKind(arch.ptrSize),
+ typ: "slice base",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_len",
+ kind: asmKind(arch.intSize),
+ typ: "slice len",
+ off: offset + arch.ptrSize,
+ size: arch.intSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_cap",
+ kind: asmKind(arch.intSize),
+ typ: "slice cap",
+ off: offset + arch.ptrSize + arch.intSize,
+ size: arch.intSize,
+ })
+
+ case asmString:
+ addVar(name, asmVar{
+ name: name + "_base",
+ kind: asmKind(arch.ptrSize),
+ typ: "string base",
+ off: offset,
+ size: arch.ptrSize,
+ })
+ addVar(name, asmVar{
+ name: name + "_len",
+ kind: asmKind(arch.intSize),
+ typ: "string len",
+ off: offset + arch.ptrSize,
+ size: arch.intSize,
+ })
+ }
+ offset += size
+ }
+ }
+ }
+
+ m := make(map[string]*asmFunc)
+ for _, arch = range arches {
+ fn = &asmFunc{
+ arch: arch,
+ vars: make(map[string]*asmVar),
+ varByOffset: make(map[int]*asmVar),
+ }
+ offset = 0
+ addParams(decl.Type.Params.List)
+ if decl.Type.Results != nil && len(decl.Type.Results.List) > 0 {
+ offset += -offset & (arch.maxAlign - 1)
+ addParams(decl.Type.Results.List)
+ }
+ fn.size = offset
+ m[arch.name] = fn
+ }
+
+ if failed {
+ return nil
+ }
+ return m
+}
+
+// asmCheckVar checks a single variable reference.
+func asmCheckVar(badf func(string, ...interface{}), fn *asmFunc, line, expr string, off int, v *asmVar) {
+ m := asmOpcode.FindStringSubmatch(line)
+ if m == nil {
+ if !strings.HasPrefix(strings.TrimSpace(line), "//") {
+ badf("cannot find assembly opcode")
+ }
+ return
+ }
+
+ // Determine operand sizes from instruction.
+ // Typically the suffix suffices, but there are exceptions.
+ var src, dst, kind asmKind
+ op := m[1]
+ switch fn.arch.name + "." + op {
+ case "386.FMOVLP":
+ src, dst = 8, 4
+ case "arm.MOVD":
+ src = 8
+ case "arm.MOVW":
+ src = 4
+ case "arm.MOVH", "arm.MOVHU":
+ src = 2
+ case "arm.MOVB", "arm.MOVBU":
+ src = 1
+ // LEA* opcodes don't really read the second arg.
+ // They just take the address of it.
+ case "386.LEAL":
+ dst = 4
+ case "amd64.LEAQ":
+ dst = 8
+ case "amd64p32.LEAL":
+ dst = 4
+ default:
+ switch fn.arch.name {
+ case "386", "amd64":
+ if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "D") || strings.HasSuffix(op, "DP")) {
+ // FMOVDP, FXCHD, etc
+ src = 8
+ break
+ }
+ if strings.HasPrefix(op, "F") && (strings.HasSuffix(op, "F") || strings.HasSuffix(op, "FP")) {
+ // FMOVFP, FXCHF, etc
+ src = 4
+ break
+ }
+ if strings.HasSuffix(op, "SD") {
+ // MOVSD, SQRTSD, etc
+ src = 8
+ break
+ }
+ if strings.HasSuffix(op, "SS") {
+ // MOVSS, SQRTSS, etc
+ src = 4
+ break
+ }
+ if strings.HasPrefix(op, "SET") {
+ // SETEQ, etc
+ src = 1
+ break
+ }
+ switch op[len(op)-1] {
+ case 'B':
+ src = 1
+ case 'W':
+ src = 2
+ case 'L':
+ src = 4
+ case 'D', 'Q':
+ src = 8
+ }
+ case "ppc64", "ppc64le":
+ // Strip standard suffixes to reveal size letter.
+ m := ppc64Suff.FindStringSubmatch(op)
+ if m != nil {
+ switch m[1][0] {
+ case 'B':
+ src = 1
+ case 'H':
+ src = 2
+ case 'W':
+ src = 4
+ case 'D':
+ src = 8
+ }
+ }
+ }
+ }
+ if dst == 0 {
+ dst = src
+ }
+
+ // Determine whether the match we're holding
+ // is the first or second argument.
+ if strings.Index(line, expr) > strings.Index(line, ",") {
+ kind = dst
+ } else {
+ kind = src
+ }
+
+ vk := v.kind
+ vt := v.typ
+ switch vk {
+ case asmInterface, asmEmptyInterface, asmString, asmSlice:
+ // allow reference to first word (pointer)
+ vk = v.inner[0].kind
+ vt = v.inner[0].typ
+ }
+
+ if off != v.off {
+ var inner bytes.Buffer
+ for i, vi := range v.inner {
+ if len(v.inner) > 1 {
+ fmt.Fprintf(&inner, ",")
+ }
+ fmt.Fprintf(&inner, " ")
+ if i == len(v.inner)-1 {
+ fmt.Fprintf(&inner, "or ")
+ }
+ fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off)
+ }
+ badf("invalid offset %s; expected %s+%d(FP)%s", expr, v.name, v.off, inner.String())
+ return
+ }
+ if kind != 0 && kind != vk {
+ var inner bytes.Buffer
+ if len(v.inner) > 0 {
+ fmt.Fprintf(&inner, " containing")
+ for i, vi := range v.inner {
+ if i > 0 && len(v.inner) > 2 {
+ fmt.Fprintf(&inner, ",")
+ }
+ fmt.Fprintf(&inner, " ")
+ if i > 0 && i == len(v.inner)-1 {
+ fmt.Fprintf(&inner, "and ")
+ }
+ fmt.Fprintf(&inner, "%s+%d(FP)", vi.name, vi.off)
+ }
+ }
+ badf("invalid %s of %s; %s is %d-byte value%s", op, expr, vt, vk, inner.String())
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_asm.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_asm.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_asm.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_asm.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,48 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+package aes
+
+// defined in asm_$GOARCH.s
+func hasAsm() bool
+func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
+
+var useAsm = hasAsm()
+
+func encryptBlock(xk []uint32, dst, src []byte) {
+ if useAsm {
+ encryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
+ } else {
+ encryptBlockGo(xk, dst, src)
+ }
+}
+
+func decryptBlock(xk []uint32, dst, src []byte) {
+ if useAsm {
+ decryptBlockAsm(len(xk)/4-1, &xk[0], &dst[0], &src[0])
+ } else {
+ decryptBlockGo(xk, dst, src)
+ }
+}
+
+func expandKey(key []byte, enc, dec []uint32) {
+ if useAsm {
+ rounds := 10
+ switch len(key) {
+ case 128 / 8:
+ rounds = 10
+ case 192 / 8:
+ rounds = 12
+ case 256 / 8:
+ rounds = 14
+ }
+ expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
+ } else {
+ expandKeyGo(key, enc, dec)
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/aes/cipher_generic.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,27 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64
+
+package aes
+
+func encryptBlock(xk []uint32, dst, src []byte) {
+ encryptBlockGo(xk, dst, src)
+}
+
+func decryptBlock(xk []uint32, dst, src []byte) {
+ decryptBlockGo(xk, dst, src)
+}
+
+func expandKey(key []byte, enc, dec []uint32) {
+ expandKeyGo(key, enc, dec)
+}
+
+func hasGCMAsm() bool {
+ return false
+}
+
+type aesCipherGCM struct {
+ aesCipher
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/cipher/xor.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/cipher/xor.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/cipher/xor.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/cipher/xor.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,84 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cipher
+
+import (
+ "runtime"
+ "unsafe"
+)
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64"
+
+// fastXORBytes xors in bulk. It only works on architectures that
+// support unaligned read/writes.
+func fastXORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+
+ w := n / wordSize
+ if w > 0 {
+ dw := *(*[]uintptr)(unsafe.Pointer(&dst))
+ aw := *(*[]uintptr)(unsafe.Pointer(&a))
+ bw := *(*[]uintptr)(unsafe.Pointer(&b))
+ for i := 0; i < w; i++ {
+ dw[i] = aw[i] ^ bw[i]
+ }
+ }
+
+ for i := (n - n%wordSize); i < n; i++ {
+ dst[i] = a[i] ^ b[i]
+ }
+
+ return n
+}
+
+func safeXORBytes(dst, a, b []byte) int {
+ n := len(a)
+ if len(b) < n {
+ n = len(b)
+ }
+ for i := 0; i < n; i++ {
+ dst[i] = a[i] ^ b[i]
+ }
+ return n
+}
+
+// xorBytes xors the bytes in a and b. The destination is assumed to have enough
+// space. Returns the number of bytes xor'd.
+func xorBytes(dst, a, b []byte) int {
+ if supportsUnaligned {
+ return fastXORBytes(dst, a, b)
+ } else {
+ // TODO(hanwen): if (dst, a, b) have common alignment
+ // we could still try fastXORBytes. It is not clear
+ // how often this happens, and it's only worth it if
+ // the block encryption itself is hardware
+ // accelerated.
+ return safeXORBytes(dst, a, b)
+ }
+}
+
+// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
+// The arguments are assumed to be of equal length.
+func fastXORWords(dst, a, b []byte) {
+ dw := *(*[]uintptr)(unsafe.Pointer(&dst))
+ aw := *(*[]uintptr)(unsafe.Pointer(&a))
+ bw := *(*[]uintptr)(unsafe.Pointer(&b))
+ n := len(b) / wordSize
+ for i := 0; i < n; i++ {
+ dw[i] = aw[i] ^ bw[i]
+ }
+}
+
+func xorWords(dst, a, b []byte) {
+ if supportsUnaligned {
+ fastXORWords(dst, a, b)
+ } else {
+ safeXORBytes(dst, a, b)
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_decl.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_decl.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_decl.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_decl.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 amd64p32 386 arm
+
+package md5
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/md5/md5block_generic.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,9 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!amd64p32,!386,!arm
+
+package md5
+
+var block = blockGeneric
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_decl.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_decl.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_decl.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_decl.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 amd64p32 arm 386
+
+package sha1
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha1/sha1block_generic.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,9 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!amd64p32,!386,!arm
+
+package sha1
+
+var block = blockGeneric
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block_decl.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block_decl.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block_decl.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block_decl.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64
+
+package sha256
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha256/sha256block.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,128 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !386,!amd64
+
+// SHA256 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package sha256
+
+var _K = []uint32{
+ 0x428a2f98,
+ 0x71374491,
+ 0xb5c0fbcf,
+ 0xe9b5dba5,
+ 0x3956c25b,
+ 0x59f111f1,
+ 0x923f82a4,
+ 0xab1c5ed5,
+ 0xd807aa98,
+ 0x12835b01,
+ 0x243185be,
+ 0x550c7dc3,
+ 0x72be5d74,
+ 0x80deb1fe,
+ 0x9bdc06a7,
+ 0xc19bf174,
+ 0xe49b69c1,
+ 0xefbe4786,
+ 0x0fc19dc6,
+ 0x240ca1cc,
+ 0x2de92c6f,
+ 0x4a7484aa,
+ 0x5cb0a9dc,
+ 0x76f988da,
+ 0x983e5152,
+ 0xa831c66d,
+ 0xb00327c8,
+ 0xbf597fc7,
+ 0xc6e00bf3,
+ 0xd5a79147,
+ 0x06ca6351,
+ 0x14292967,
+ 0x27b70a85,
+ 0x2e1b2138,
+ 0x4d2c6dfc,
+ 0x53380d13,
+ 0x650a7354,
+ 0x766a0abb,
+ 0x81c2c92e,
+ 0x92722c85,
+ 0xa2bfe8a1,
+ 0xa81a664b,
+ 0xc24b8b70,
+ 0xc76c51a3,
+ 0xd192e819,
+ 0xd6990624,
+ 0xf40e3585,
+ 0x106aa070,
+ 0x19a4c116,
+ 0x1e376c08,
+ 0x2748774c,
+ 0x34b0bcb5,
+ 0x391c0cb3,
+ 0x4ed8aa4a,
+ 0x5b9cca4f,
+ 0x682e6ff3,
+ 0x748f82ee,
+ 0x78a5636f,
+ 0x84c87814,
+ 0x8cc70208,
+ 0x90befffa,
+ 0xa4506ceb,
+ 0xbef9a3f7,
+ 0xc67178f2,
+}
+
+func block(dig *digest, p []byte) {
+ var w [64]uint32
+ h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
+ for len(p) >= chunk {
+ // Can interlace the computation of w with the
+ // rounds below if needed for speed.
+ for i := 0; i < 16; i++ {
+ j := i * 4
+ w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
+ }
+ for i := 16; i < 64; i++ {
+ v1 := w[i-2]
+ t1 := (v1>>17 | v1<<(32-17)) ^ (v1>>19 | v1<<(32-19)) ^ (v1 >> 10)
+ v2 := w[i-15]
+ t2 := (v2>>7 | v2<<(32-7)) ^ (v2>>18 | v2<<(32-18)) ^ (v2 >> 3)
+ w[i] = t1 + w[i-7] + t2 + w[i-16]
+ }
+
+ a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
+
+ for i := 0; i < 64; i++ {
+ t1 := h + ((e>>6 | e<<(32-6)) ^ (e>>11 | e<<(32-11)) ^ (e>>25 | e<<(32-25))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
+
+ t2 := ((a>>2 | a<<(32-2)) ^ (a>>13 | a<<(32-13)) ^ (a>>22 | a<<(32-22))) + ((a & b) ^ (a & c) ^ (b & c))
+
+ h = g
+ g = f
+ f = e
+ e = d + t1
+ d = c
+ c = b
+ b = a
+ a = t1 + t2
+ }
+
+ h0 += a
+ h1 += b
+ h2 += c
+ h3 += d
+ h4 += e
+ h5 += f
+ h6 += g
+ h7 += h
+
+ p = p[chunk:]
+ }
+
+ dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block_decl.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block_decl.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block_decl.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block_decl.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,11 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64
+
+package sha512
+
+//go:noescape
+
+func block(dig *digest, p []byte)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/sha512/sha512block.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,144 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64
+
+// SHA512 block step.
+// In its own file so that a faster assembly or C version
+// can be substituted easily.
+
+package sha512
+
+var _K = []uint64{
+ 0x428a2f98d728ae22,
+ 0x7137449123ef65cd,
+ 0xb5c0fbcfec4d3b2f,
+ 0xe9b5dba58189dbbc,
+ 0x3956c25bf348b538,
+ 0x59f111f1b605d019,
+ 0x923f82a4af194f9b,
+ 0xab1c5ed5da6d8118,
+ 0xd807aa98a3030242,
+ 0x12835b0145706fbe,
+ 0x243185be4ee4b28c,
+ 0x550c7dc3d5ffb4e2,
+ 0x72be5d74f27b896f,
+ 0x80deb1fe3b1696b1,
+ 0x9bdc06a725c71235,
+ 0xc19bf174cf692694,
+ 0xe49b69c19ef14ad2,
+ 0xefbe4786384f25e3,
+ 0x0fc19dc68b8cd5b5,
+ 0x240ca1cc77ac9c65,
+ 0x2de92c6f592b0275,
+ 0x4a7484aa6ea6e483,
+ 0x5cb0a9dcbd41fbd4,
+ 0x76f988da831153b5,
+ 0x983e5152ee66dfab,
+ 0xa831c66d2db43210,
+ 0xb00327c898fb213f,
+ 0xbf597fc7beef0ee4,
+ 0xc6e00bf33da88fc2,
+ 0xd5a79147930aa725,
+ 0x06ca6351e003826f,
+ 0x142929670a0e6e70,
+ 0x27b70a8546d22ffc,
+ 0x2e1b21385c26c926,
+ 0x4d2c6dfc5ac42aed,
+ 0x53380d139d95b3df,
+ 0x650a73548baf63de,
+ 0x766a0abb3c77b2a8,
+ 0x81c2c92e47edaee6,
+ 0x92722c851482353b,
+ 0xa2bfe8a14cf10364,
+ 0xa81a664bbc423001,
+ 0xc24b8b70d0f89791,
+ 0xc76c51a30654be30,
+ 0xd192e819d6ef5218,
+ 0xd69906245565a910,
+ 0xf40e35855771202a,
+ 0x106aa07032bbd1b8,
+ 0x19a4c116b8d2d0c8,
+ 0x1e376c085141ab53,
+ 0x2748774cdf8eeb99,
+ 0x34b0bcb5e19b48a8,
+ 0x391c0cb3c5c95a63,
+ 0x4ed8aa4ae3418acb,
+ 0x5b9cca4f7763e373,
+ 0x682e6ff3d6b2b8a3,
+ 0x748f82ee5defb2fc,
+ 0x78a5636f43172f60,
+ 0x84c87814a1f0ab72,
+ 0x8cc702081a6439ec,
+ 0x90befffa23631e28,
+ 0xa4506cebde82bde9,
+ 0xbef9a3f7b2c67915,
+ 0xc67178f2e372532b,
+ 0xca273eceea26619c,
+ 0xd186b8c721c0c207,
+ 0xeada7dd6cde0eb1e,
+ 0xf57d4f7fee6ed178,
+ 0x06f067aa72176fba,
+ 0x0a637dc5a2c898a6,
+ 0x113f9804bef90dae,
+ 0x1b710b35131c471b,
+ 0x28db77f523047d84,
+ 0x32caab7b40c72493,
+ 0x3c9ebe0a15c9bebc,
+ 0x431d67c49c100d4c,
+ 0x4cc5d4becb3e42b6,
+ 0x597f299cfc657e2a,
+ 0x5fcb6fab3ad6faec,
+ 0x6c44198c4a475817,
+}
+
+func block(dig *digest, p []byte) {
+ var w [80]uint64
+ h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
+ for len(p) >= chunk {
+ for i := 0; i < 16; i++ {
+ j := i * 8
+ w[i] = uint64(p[j])<<56 | uint64(p[j+1])<<48 | uint64(p[j+2])<<40 | uint64(p[j+3])<<32 |
+ uint64(p[j+4])<<24 | uint64(p[j+5])<<16 | uint64(p[j+6])<<8 | uint64(p[j+7])
+ }
+ for i := 16; i < 80; i++ {
+ v1 := w[i-2]
+ t1 := (v1>>19 | v1<<(64-19)) ^ (v1>>61 | v1<<(64-61)) ^ (v1 >> 6)
+ v2 := w[i-15]
+ t2 := (v2>>1 | v2<<(64-1)) ^ (v2>>8 | v2<<(64-8)) ^ (v2 >> 7)
+
+ w[i] = t1 + w[i-7] + t2 + w[i-16]
+ }
+
+ a, b, c, d, e, f, g, h := h0, h1, h2, h3, h4, h5, h6, h7
+
+ for i := 0; i < 80; i++ {
+ t1 := h + ((e>>14 | e<<(64-14)) ^ (e>>18 | e<<(64-18)) ^ (e>>41 | e<<(64-41))) + ((e & f) ^ (^e & g)) + _K[i] + w[i]
+
+ t2 := ((a>>28 | a<<(64-28)) ^ (a>>34 | a<<(64-34)) ^ (a>>39 | a<<(64-39))) + ((a & b) ^ (a & c) ^ (b & c))
+
+ h = g
+ g = f
+ f = e
+ e = d + t1
+ d = c
+ c = b
+ b = a
+ a = t1 + t2
+ }
+
+ h0 += a
+ h1 += b
+ h2 += c
+ h3 += d
+ h4 += e
+ h5 += f
+ h6 += g
+ h7 += h
+
+ p = p[chunk:]
+ }
+
+ dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7] = h0, h1, h2, h3, h4, h5, h6, h7
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,105 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "encoding/asn1"
+ "errors"
+ "fmt"
+ "math/big"
+)
+
+const ecPrivKeyVersion = 1
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+// RFC5915
+// SEC1 - http://www.secg.org/sec1-v2.pdf
+// Per RFC5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
+// most cases it is not.
+type ecPrivateKey struct {
+ Version int
+ PrivateKey []byte
+ NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+ PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"`
+}
+
+// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+func ParseECPrivateKey(der []byte) (key *ecdsa.PrivateKey, err error) {
+ return parseECPrivateKey(nil, der)
+}
+
+// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
+func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+ oid, ok := oidFromNamedCurve(key.Curve)
+ if !ok {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ privateKeyBytes := key.D.Bytes()
+ paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen() + 7) / 8)
+ copy(paddedPrivateKey[len(paddedPrivateKey) - len(privateKeyBytes):], privateKeyBytes)
+
+ return asn1.Marshal(ecPrivateKey{
+ Version: 1,
+ PrivateKey: paddedPrivateKey,
+ NamedCurveOID: oid,
+ PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)},
+ })
+}
+
+// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+// The OID for the named curve may be provided from another source (such as
+// the PKCS8 container) - if it is provided then use this instead of the OID
+// that may exist in the EC private key structure.
+func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
+ var privKey ecPrivateKey
+ if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+ return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
+ }
+ if privKey.Version != ecPrivKeyVersion {
+ return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
+ }
+
+ var curve elliptic.Curve
+ if namedCurveOID != nil {
+ curve = namedCurveFromOID(*namedCurveOID)
+ } else {
+ curve = namedCurveFromOID(privKey.NamedCurveOID)
+ }
+ if curve == nil {
+ return nil, errors.New("x509: unknown elliptic curve")
+ }
+
+ k := new(big.Int).SetBytes(privKey.PrivateKey)
+ curveOrder := curve.Params().N
+ if k.Cmp(curveOrder) >= 0 {
+ return nil, errors.New("x509: invalid elliptic curve private key value")
+ }
+ priv := new(ecdsa.PrivateKey)
+ priv.Curve = curve
+ priv.D = k
+
+ privateKey := make([]byte, (curveOrder.BitLen() + 7) / 8)
+
+ // Some private keys have leading zero padding. This is invalid
+ // according to [SEC1], but this code will ignore it.
+ for len(privKey.PrivateKey) > len(privateKey) {
+ if privKey.PrivateKey[0] != 0 {
+ return nil, errors.New("x509: invalid private key length")
+ }
+ privKey.PrivateKey = privKey.PrivateKey[1:]
+ }
+
+ // Some private keys remove all leading zeros, this is also invalid
+ // according to [SEC1] but since OpenSSL used to do this, we ignore
+ // this too.
+ copy(privateKey[len(privateKey) - len(privKey.PrivateKey):], privKey.PrivateKey)
+ priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
+
+ return priv, nil
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/crypto/x509/sec1_test.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,44 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+ "bytes"
+ "encoding/hex"
+ "testing"
+)
+
+var ecKeyTests = []struct{
+ derHex string
+ shouldReserialize bool
+}{
+ // Generated using:
+ // openssl ecparam -genkey -name secp384r1 -outform PEM
+ {"3081a40201010430bdb9839c08ee793d1157886a7a758a3c8b2a17a4df48f17ace57c72c56b4723cf21dcda21d4e1ad57ff034f19fcfd98ea00706052b81040022a16403620004feea808b5ee2429cfcce13c32160e1c960990bd050bb0fdf7222f3decd0a55008e32a6aa3c9062051c4cba92a7a3b178b24567412d43cdd2f882fa5addddd726fe3e208d2c26d733a773a597abb749714df7256ead5105fa6e7b3650de236b50", true},
+ // This key was generated by GnuTLS and has illegal zero-padding of the
+ // private key. See https://github.com/golang/go/issues/13699.
+ {"3078020101042100f9f43a04b9bdc3ab01f53be6df80e7a7bc3eaf7b87fc24e630a4a0aa97633645a00a06082a8648ce3d030107a1440342000441a51bc318461b4c39a45048a16d4fc2a935b1ea7fe86e8c1fa219d6f2438f7c7fd62957d3442efb94b6a23eb0ea66dda663dc42f379cda6630b21b7888a5d3d", false},
+ // This was generated using an old version of OpenSSL and is missing a
+ // leading zero byte in the private key that should be present.
+ {"3081db0201010441607b4f985774ac21e633999794542e09312073480baa69550914d6d43d8414441e61b36650567901da714f94dffb3ce0e2575c31928a0997d51df5c440e983ca17a00706052b81040023a181890381860004001661557afedd7ac8d6b70e038e576558c626eb62edda36d29c3a1310277c11f67a8c6f949e5430a37dcfb95d902c1b5b5379c389873b9dd17be3bdb088a4774a7401072f830fb9a08d93bfa50a03dd3292ea07928724ddb915d831917a338f6b0aecfbc3cf5352c4a1295d356890c41c34116d29eeb93779aab9d9d78e2613437740f6", false},
+}
+
+func TestParseECPrivateKey(t *testing.T) {
+ for i, test := range ecKeyTests {
+ derBytes, _ := hex.DecodeString(test.derHex)
+ key, err := ParseECPrivateKey(derBytes)
+ if err != nil {
+ t.Fatalf("#%d: failed to decode EC private key: %s", i, err)
+ }
+ serialized, err := MarshalECPrivateKey(key)
+ if err != nil {
+ t.Fatalf("#%d: failed to encode EC private key: %s", i, err)
+ }
+ matches := bytes.Equal(serialized, derBytes)
+ if matches != test.shouldReserialize {
+ t.Fatalf("#%d: when serializing key: matches=%t, should match=%t: original %x, reserialized %x", i, matches, test.shouldReserialize, serialized, derBytes)
+ }
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/elf.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/elf.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/elf.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/elf.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,2099 @@
+/*
+ * ELF constants and data structures
+ *
+ * Derived from:
+ * $FreeBSD: src/sys/sys/elf32.h,v 1.8.14.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/sys/elf_common.h,v 1.15.8.1 2005/12/30 22:13:58 marcel Exp $
+ * $FreeBSD: src/sys/alpha/include/elf.h,v 1.14 2003/09/25 01:10:22 peter Exp $
+ * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $
+ * $FreeBSD: src/sys/arm/include/elf.h,v 1.5.2.1 2006/06/30 21:42:52 cognet Exp $
+ * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $
+ * $FreeBSD: src/sys/powerpc/include/elf.h,v 1.7 2004/11/02 09:47:01 ssouhlal Exp $
+ * $FreeBSD: src/sys/sparc64/include/elf.h,v 1.12 2003/09/25 01:10:26 peter Exp $
+ * "ELF for the ARM® 64-bit Architecture (AArch64)" (ARM IHI 0056B)
+ *
+ * Copyright (c) 1996-1998 John D. Polstra. All rights reserved.
+ * Copyright (c) 2001 David E. O'Brien
+ * Portions Copyright 2009 The Go Authors. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+package elf
+
+import "strconv"
+
+/*
+ * Constants
+ */
+
+// Indexes into the Header.Ident array.
+const (
+ EI_CLASS = 4 /* Class of machine. */
+ EI_DATA = 5 /* Data format. */
+ EI_VERSION = 6 /* ELF format version. */
+ EI_OSABI = 7 /* Operating system / ABI identification */
+ EI_ABIVERSION = 8 /* ABI version */
+ EI_PAD = 9 /* Start of padding (per SVR4 ABI). */
+ EI_NIDENT = 16 /* Size of e_ident array. */
+)
+
+// Initial magic number for ELF files.
+const ELFMAG = "\177ELF"
+
+// Version is found in Header.Ident[EI_VERSION] and Header.Version.
+type Version byte
+
+const (
+ EV_NONE Version = 0
+ EV_CURRENT Version = 1
+)
+
+var versionStrings = []intName{
+ {0, "EV_NONE"},
+ {1, "EV_CURRENT"},
+}
+
+func (i Version) String() string { return stringName(uint32(i), versionStrings, false) }
+func (i Version) GoString() string { return stringName(uint32(i), versionStrings, true) }
+
+// Class is found in Header.Ident[EI_CLASS] and Header.Class.
+type Class byte
+
+const (
+ ELFCLASSNONE Class = 0 /* Unknown class. */
+ ELFCLASS32 Class = 1 /* 32-bit architecture. */
+ ELFCLASS64 Class = 2 /* 64-bit architecture. */
+)
+
+var classStrings = []intName{
+ {0, "ELFCLASSNONE"},
+ {1, "ELFCLASS32"},
+ {2, "ELFCLASS64"},
+}
+
+func (i Class) String() string { return stringName(uint32(i), classStrings, false) }
+func (i Class) GoString() string { return stringName(uint32(i), classStrings, true) }
+
+// Data is found in Header.Ident[EI_DATA] and Header.Data.
+type Data byte
+
+const (
+ ELFDATANONE Data = 0 /* Unknown data format. */
+ ELFDATA2LSB Data = 1 /* 2's complement little-endian. */
+ ELFDATA2MSB Data = 2 /* 2's complement big-endian. */
+)
+
+var dataStrings = []intName{
+ {0, "ELFDATANONE"},
+ {1, "ELFDATA2LSB"},
+ {2, "ELFDATA2MSB"},
+}
+
+func (i Data) String() string { return stringName(uint32(i), dataStrings, false) }
+func (i Data) GoString() string { return stringName(uint32(i), dataStrings, true) }
+
+// OSABI is found in Header.Ident[EI_OSABI] and Header.OSABI.
+type OSABI byte
+
+const (
+ ELFOSABI_NONE OSABI = 0 /* UNIX System V ABI */
+ ELFOSABI_HPUX OSABI = 1 /* HP-UX operating system */
+ ELFOSABI_NETBSD OSABI = 2 /* NetBSD */
+ ELFOSABI_LINUX OSABI = 3 /* GNU/Linux */
+ ELFOSABI_HURD OSABI = 4 /* GNU/Hurd */
+ ELFOSABI_86OPEN OSABI = 5 /* 86Open common IA32 ABI */
+ ELFOSABI_SOLARIS OSABI = 6 /* Solaris */
+ ELFOSABI_AIX OSABI = 7 /* AIX */
+ ELFOSABI_IRIX OSABI = 8 /* IRIX */
+ ELFOSABI_FREEBSD OSABI = 9 /* FreeBSD */
+ ELFOSABI_TRU64 OSABI = 10 /* TRU64 UNIX */
+ ELFOSABI_MODESTO OSABI = 11 /* Novell Modesto */
+ ELFOSABI_OPENBSD OSABI = 12 /* OpenBSD */
+ ELFOSABI_OPENVMS OSABI = 13 /* Open VMS */
+ ELFOSABI_NSK OSABI = 14 /* HP Non-Stop Kernel */
+ ELFOSABI_ARM OSABI = 97 /* ARM */
+ ELFOSABI_STANDALONE OSABI = 255 /* Standalone (embedded) application */
+)
+
+var osabiStrings = []intName{
+ {0, "ELFOSABI_NONE"},
+ {1, "ELFOSABI_HPUX"},
+ {2, "ELFOSABI_NETBSD"},
+ {3, "ELFOSABI_LINUX"},
+ {4, "ELFOSABI_HURD"},
+ {5, "ELFOSABI_86OPEN"},
+ {6, "ELFOSABI_SOLARIS"},
+ {7, "ELFOSABI_AIX"},
+ {8, "ELFOSABI_IRIX"},
+ {9, "ELFOSABI_FREEBSD"},
+ {10, "ELFOSABI_TRU64"},
+ {11, "ELFOSABI_MODESTO"},
+ {12, "ELFOSABI_OPENBSD"},
+ {13, "ELFOSABI_OPENVMS"},
+ {14, "ELFOSABI_NSK"},
+ {97, "ELFOSABI_ARM"},
+ {255, "ELFOSABI_STANDALONE"},
+}
+
+func (i OSABI) String() string { return stringName(uint32(i), osabiStrings, false) }
+func (i OSABI) GoString() string { return stringName(uint32(i), osabiStrings, true) }
+
+// Type is found in Header.Type.
+type Type uint16
+
+const (
+ ET_NONE Type = 0 /* Unknown type. */
+ ET_REL Type = 1 /* Relocatable. */
+ ET_EXEC Type = 2 /* Executable. */
+ ET_DYN Type = 3 /* Shared object. */
+ ET_CORE Type = 4 /* Core file. */
+ ET_LOOS Type = 0xfe00 /* First operating system specific. */
+ ET_HIOS Type = 0xfeff /* Last operating system-specific. */
+ ET_LOPROC Type = 0xff00 /* First processor-specific. */
+ ET_HIPROC Type = 0xffff /* Last processor-specific. */
+)
+
+var typeStrings = []intName{
+ {0, "ET_NONE"},
+ {1, "ET_REL"},
+ {2, "ET_EXEC"},
+ {3, "ET_DYN"},
+ {4, "ET_CORE"},
+ {0xfe00, "ET_LOOS"},
+ {0xfeff, "ET_HIOS"},
+ {0xff00, "ET_LOPROC"},
+ {0xffff, "ET_HIPROC"},
+}
+
+func (i Type) String() string { return stringName(uint32(i), typeStrings, false) }
+func (i Type) GoString() string { return stringName(uint32(i), typeStrings, true) }
+
+// Machine is found in Header.Machine.
+type Machine uint16
+
+const (
+ EM_NONE Machine = 0 /* Unknown machine. */
+ EM_M32 Machine = 1 /* AT&T WE32100. */
+ EM_SPARC Machine = 2 /* Sun SPARC. */
+ EM_386 Machine = 3 /* Intel i386. */
+ EM_68K Machine = 4 /* Motorola 68000. */
+ EM_88K Machine = 5 /* Motorola 88000. */
+ EM_860 Machine = 7 /* Intel i860. */
+ EM_MIPS Machine = 8 /* MIPS R3000 Big-Endian only. */
+ EM_S370 Machine = 9 /* IBM System/370. */
+ EM_MIPS_RS3_LE Machine = 10 /* MIPS R3000 Little-Endian. */
+ EM_PARISC Machine = 15 /* HP PA-RISC. */
+ EM_VPP500 Machine = 17 /* Fujitsu VPP500. */
+ EM_SPARC32PLUS Machine = 18 /* SPARC v8plus. */
+ EM_960 Machine = 19 /* Intel 80960. */
+ EM_PPC Machine = 20 /* PowerPC 32-bit. */
+ EM_PPC64 Machine = 21 /* PowerPC 64-bit. */
+ EM_S390 Machine = 22 /* IBM System/390. */
+ EM_V800 Machine = 36 /* NEC V800. */
+ EM_FR20 Machine = 37 /* Fujitsu FR20. */
+ EM_RH32 Machine = 38 /* TRW RH-32. */
+ EM_RCE Machine = 39 /* Motorola RCE. */
+ EM_ARM Machine = 40 /* ARM. */
+ EM_SH Machine = 42 /* Hitachi SH. */
+ EM_SPARCV9 Machine = 43 /* SPARC v9 64-bit. */
+ EM_TRICORE Machine = 44 /* Siemens TriCore embedded processor. */
+ EM_ARC Machine = 45 /* Argonaut RISC Core. */
+ EM_H8_300 Machine = 46 /* Hitachi H8/300. */
+ EM_H8_300H Machine = 47 /* Hitachi H8/300H. */
+ EM_H8S Machine = 48 /* Hitachi H8S. */
+ EM_H8_500 Machine = 49 /* Hitachi H8/500. */
+ EM_IA_64 Machine = 50 /* Intel IA-64 Processor. */
+ EM_MIPS_X Machine = 51 /* Stanford MIPS-X. */
+ EM_COLDFIRE Machine = 52 /* Motorola ColdFire. */
+ EM_68HC12 Machine = 53 /* Motorola M68HC12. */
+ EM_MMA Machine = 54 /* Fujitsu MMA. */
+ EM_PCP Machine = 55 /* Siemens PCP. */
+ EM_NCPU Machine = 56 /* Sony nCPU. */
+ EM_NDR1 Machine = 57 /* Denso NDR1 microprocessor. */
+ EM_STARCORE Machine = 58 /* Motorola Star*Core processor. */
+ EM_ME16 Machine = 59 /* Toyota ME16 processor. */
+ EM_ST100 Machine = 60 /* STMicroelectronics ST100 processor. */
+ EM_TINYJ Machine = 61 /* Advanced Logic Corp. TinyJ processor. */
+ EM_X86_64 Machine = 62 /* Advanced Micro Devices x86-64 */
+ EM_AARCH64 Machine = 183 /* ARM 64-bit Architecture (AArch64) */
+
+ /* Non-standard or deprecated. */
+ EM_486 Machine = 6 /* Intel i486. */
+ EM_MIPS_RS4_BE Machine = 10 /* MIPS R4000 Big-Endian */
+ EM_ALPHA_STD Machine = 41 /* Digital Alpha (standard value). */
+ EM_ALPHA Machine = 0x9026 /* Alpha (written in the absence of an ABI) */
+)
+
+var machineStrings = []intName{
+ {0, "EM_NONE"},
+ {1, "EM_M32"},
+ {2, "EM_SPARC"},
+ {3, "EM_386"},
+ {4, "EM_68K"},
+ {5, "EM_88K"},
+ {7, "EM_860"},
+ {8, "EM_MIPS"},
+ {9, "EM_S370"},
+ {10, "EM_MIPS_RS3_LE"},
+ {15, "EM_PARISC"},
+ {17, "EM_VPP500"},
+ {18, "EM_SPARC32PLUS"},
+ {19, "EM_960"},
+ {20, "EM_PPC"},
+ {21, "EM_PPC64"},
+ {22, "EM_S390"},
+ {36, "EM_V800"},
+ {37, "EM_FR20"},
+ {38, "EM_RH32"},
+ {39, "EM_RCE"},
+ {40, "EM_ARM"},
+ {42, "EM_SH"},
+ {43, "EM_SPARCV9"},
+ {44, "EM_TRICORE"},
+ {45, "EM_ARC"},
+ {46, "EM_H8_300"},
+ {47, "EM_H8_300H"},
+ {48, "EM_H8S"},
+ {49, "EM_H8_500"},
+ {50, "EM_IA_64"},
+ {51, "EM_MIPS_X"},
+ {52, "EM_COLDFIRE"},
+ {53, "EM_68HC12"},
+ {54, "EM_MMA"},
+ {55, "EM_PCP"},
+ {56, "EM_NCPU"},
+ {57, "EM_NDR1"},
+ {58, "EM_STARCORE"},
+ {59, "EM_ME16"},
+ {60, "EM_ST100"},
+ {61, "EM_TINYJ"},
+ {62, "EM_X86_64"},
+
+ /* Non-standard or deprecated. */
+ {6, "EM_486"},
+ {10, "EM_MIPS_RS4_BE"},
+ {41, "EM_ALPHA_STD"},
+ {0x9026, "EM_ALPHA"},
+}
+
+func (i Machine) String() string { return stringName(uint32(i), machineStrings, false) }
+func (i Machine) GoString() string { return stringName(uint32(i), machineStrings, true) }
+
+// Special section indices.
+type SectionIndex int
+
+const (
+ SHN_UNDEF SectionIndex = 0 /* Undefined, missing, irrelevant. */
+ SHN_LORESERVE SectionIndex = 0xff00 /* First of reserved range. */
+ SHN_LOPROC SectionIndex = 0xff00 /* First processor-specific. */
+ SHN_HIPROC SectionIndex = 0xff1f /* Last processor-specific. */
+ SHN_LOOS SectionIndex = 0xff20 /* First operating system-specific. */
+ SHN_HIOS SectionIndex = 0xff3f /* Last operating system-specific. */
+ SHN_ABS SectionIndex = 0xfff1 /* Absolute values. */
+ SHN_COMMON SectionIndex = 0xfff2 /* Common data. */
+ SHN_XINDEX SectionIndex = 0xffff /* Escape; index stored elsewhere. */
+ SHN_HIRESERVE SectionIndex = 0xffff /* Last of reserved range. */
+)
+
+var shnStrings = []intName{
+ {0, "SHN_UNDEF"},
+ {0xff00, "SHN_LOPROC"},
+ {0xff20, "SHN_LOOS"},
+ {0xfff1, "SHN_ABS"},
+ {0xfff2, "SHN_COMMON"},
+ {0xffff, "SHN_XINDEX"},
+}
+
+func (i SectionIndex) String() string { return stringName(uint32(i), shnStrings, false) }
+func (i SectionIndex) GoString() string { return stringName(uint32(i), shnStrings, true) }
+
+// Section type.
+type SectionType uint32
+
+const (
+ SHT_NULL SectionType = 0 /* inactive */
+ SHT_PROGBITS SectionType = 1 /* program defined information */
+ SHT_SYMTAB SectionType = 2 /* symbol table section */
+ SHT_STRTAB SectionType = 3 /* string table section */
+ SHT_RELA SectionType = 4 /* relocation section with addends */
+ SHT_HASH SectionType = 5 /* symbol hash table section */
+ SHT_DYNAMIC SectionType = 6 /* dynamic section */
+ SHT_NOTE SectionType = 7 /* note section */
+ SHT_NOBITS SectionType = 8 /* no space section */
+ SHT_REL SectionType = 9 /* relocation section - no addends */
+ SHT_SHLIB SectionType = 10 /* reserved - purpose unknown */
+ SHT_DYNSYM SectionType = 11 /* dynamic symbol table section */
+ SHT_INIT_ARRAY SectionType = 14 /* Initialization function pointers. */
+ SHT_FINI_ARRAY SectionType = 15 /* Termination function pointers. */
+ SHT_PREINIT_ARRAY SectionType = 16 /* Pre-initialization function ptrs. */
+ SHT_GROUP SectionType = 17 /* Section group. */
+ SHT_SYMTAB_SHNDX SectionType = 18 /* Section indexes (see SHN_XINDEX). */
+ SHT_LOOS SectionType = 0x60000000 /* First of OS specific semantics */
+ SHT_GNU_ATTRIBUTES SectionType = 0x6ffffff5 /* GNU object attributes */
+ SHT_GNU_HASH SectionType = 0x6ffffff6 /* GNU hash table */
+ SHT_GNU_LIBLIST SectionType = 0x6ffffff7 /* GNU prelink library list */
+ SHT_GNU_VERDEF SectionType = 0x6ffffffd /* GNU version definition section */
+ SHT_GNU_VERNEED SectionType = 0x6ffffffe /* GNU version needs section */
+ SHT_GNU_VERSYM SectionType = 0x6fffffff /* GNU version symbol table */
+ SHT_HIOS SectionType = 0x6fffffff /* Last of OS specific semantics */
+ SHT_LOPROC SectionType = 0x70000000 /* reserved range for processor */
+ SHT_HIPROC SectionType = 0x7fffffff /* specific section header types */
+ SHT_LOUSER SectionType = 0x80000000 /* reserved range for application */
+ SHT_HIUSER SectionType = 0xffffffff /* specific indexes */
+)
+
+var shtStrings = []intName{
+ {0, "SHT_NULL"},
+ {1, "SHT_PROGBITS"},
+ {2, "SHT_SYMTAB"},
+ {3, "SHT_STRTAB"},
+ {4, "SHT_RELA"},
+ {5, "SHT_HASH"},
+ {6, "SHT_DYNAMIC"},
+ {7, "SHT_NOTE"},
+ {8, "SHT_NOBITS"},
+ {9, "SHT_REL"},
+ {10, "SHT_SHLIB"},
+ {11, "SHT_DYNSYM"},
+ {14, "SHT_INIT_ARRAY"},
+ {15, "SHT_FINI_ARRAY"},
+ {16, "SHT_PREINIT_ARRAY"},
+ {17, "SHT_GROUP"},
+ {18, "SHT_SYMTAB_SHNDX"},
+ {0x60000000, "SHT_LOOS"},
+ {0x6ffffff5, "SHT_GNU_ATTRIBUTES"},
+ {0x6ffffff6, "SHT_GNU_HASH"},
+ {0x6ffffff7, "SHT_GNU_LIBLIST"},
+ {0x6ffffffd, "SHT_GNU_VERDEF"},
+ {0x6ffffffe, "SHT_GNU_VERNEED"},
+ {0x6fffffff, "SHT_GNU_VERSYM"},
+ {0x70000000, "SHT_LOPROC"},
+ {0x7fffffff, "SHT_HIPROC"},
+ {0x80000000, "SHT_LOUSER"},
+ {0xffffffff, "SHT_HIUSER"},
+}
+
+func (i SectionType) String() string { return stringName(uint32(i), shtStrings, false) }
+func (i SectionType) GoString() string { return stringName(uint32(i), shtStrings, true) }
+
+// Section flags.
+type SectionFlag uint32
+
+const (
+ SHF_WRITE SectionFlag = 0x1 /* Section contains writable data. */
+ SHF_ALLOC SectionFlag = 0x2 /* Section occupies memory. */
+ SHF_EXECINSTR SectionFlag = 0x4 /* Section contains instructions. */
+ SHF_MERGE SectionFlag = 0x10 /* Section may be merged. */
+ SHF_STRINGS SectionFlag = 0x20 /* Section contains strings. */
+ SHF_INFO_LINK SectionFlag = 0x40 /* sh_info holds section index. */
+ SHF_LINK_ORDER SectionFlag = 0x80 /* Special ordering requirements. */
+ SHF_OS_NONCONFORMING SectionFlag = 0x100 /* OS-specific processing required. */
+ SHF_GROUP SectionFlag = 0x200 /* Member of section group. */
+ SHF_TLS SectionFlag = 0x400 /* Section contains TLS data. */
+ SHF_COMPRESSED SectionFlag = 0x800 /* Section is compressed. */
+ SHF_MASKOS SectionFlag = 0x0ff00000 /* OS-specific semantics. */
+ SHF_MASKPROC SectionFlag = 0xf0000000 /* Processor-specific semantics. */
+)
+
+var shfStrings = []intName{
+ {0x1, "SHF_WRITE"},
+ {0x2, "SHF_ALLOC"},
+ {0x4, "SHF_EXECINSTR"},
+ {0x10, "SHF_MERGE"},
+ {0x20, "SHF_STRINGS"},
+ {0x40, "SHF_INFO_LINK"},
+ {0x80, "SHF_LINK_ORDER"},
+ {0x100, "SHF_OS_NONCONFORMING"},
+ {0x200, "SHF_GROUP"},
+ {0x400, "SHF_TLS"},
+ {0x800, "SHF_COMPRESSED"},
+}
+
+func (i SectionFlag) String() string { return flagName(uint32(i), shfStrings, false) }
+func (i SectionFlag) GoString() string { return flagName(uint32(i), shfStrings, true) }
+
+// Section compression type.
+type CompressionType int
+
+const (
+ COMPRESS_ZLIB CompressionType = 1 /* ZLIB compression. */
+ COMPRESS_LOOS CompressionType = 0x60000000 /* First OS-specific. */
+ COMPRESS_HIOS CompressionType = 0x6fffffff /* Last OS-specific. */
+ COMPRESS_LOPROC CompressionType = 0x70000000 /* First processor-specific type. */
+ COMPRESS_HIPROC CompressionType = 0x7fffffff /* Last processor-specific type. */
+)
+
+var compressionStrings = []intName{
+ {0, "COMPRESS_ZLIB"},
+ {0x60000000, "COMPRESS_LOOS"},
+ {0x6fffffff, "COMPRESS_HIOS"},
+ {0x70000000, "COMPRESS_LOPROC"},
+ {0x7fffffff, "COMPRESS_HIPROC"},
+}
+
+func (i CompressionType) String() string { return stringName(uint32(i), compressionStrings, false) }
+func (i CompressionType) GoString() string { return stringName(uint32(i), compressionStrings, true) }
+
+// Prog.Type
+type ProgType int
+
+const (
+ PT_NULL ProgType = 0 /* Unused entry. */
+ PT_LOAD ProgType = 1 /* Loadable segment. */
+ PT_DYNAMIC ProgType = 2 /* Dynamic linking information segment. */
+ PT_INTERP ProgType = 3 /* Pathname of interpreter. */
+ PT_NOTE ProgType = 4 /* Auxiliary information. */
+ PT_SHLIB ProgType = 5 /* Reserved (not used). */
+ PT_PHDR ProgType = 6 /* Location of program header itself. */
+ PT_TLS ProgType = 7 /* Thread local storage segment */
+ PT_LOOS ProgType = 0x60000000 /* First OS-specific. */
+ PT_HIOS ProgType = 0x6fffffff /* Last OS-specific. */
+ PT_LOPROC ProgType = 0x70000000 /* First processor-specific type. */
+ PT_HIPROC ProgType = 0x7fffffff /* Last processor-specific type. */
+)
+
+var ptStrings = []intName{
+ {0, "PT_NULL"},
+ {1, "PT_LOAD"},
+ {2, "PT_DYNAMIC"},
+ {3, "PT_INTERP"},
+ {4, "PT_NOTE"},
+ {5, "PT_SHLIB"},
+ {6, "PT_PHDR"},
+ {7, "PT_TLS"},
+ {0x60000000, "PT_LOOS"},
+ {0x6fffffff, "PT_HIOS"},
+ {0x70000000, "PT_LOPROC"},
+ {0x7fffffff, "PT_HIPROC"},
+}
+
+func (i ProgType) String() string { return stringName(uint32(i), ptStrings, false) }
+func (i ProgType) GoString() string { return stringName(uint32(i), ptStrings, true) }
+
+// Prog.Flag
+type ProgFlag uint32
+
+const (
+ PF_X ProgFlag = 0x1 /* Executable. */
+ PF_W ProgFlag = 0x2 /* Writable. */
+ PF_R ProgFlag = 0x4 /* Readable. */
+ PF_MASKOS ProgFlag = 0x0ff00000 /* Operating system-specific. */
+ PF_MASKPROC ProgFlag = 0xf0000000 /* Processor-specific. */
+)
+
+var pfStrings = []intName{
+ {0x1, "PF_X"},
+ {0x2, "PF_W"},
+ {0x4, "PF_R"},
+}
+
+func (i ProgFlag) String() string { return flagName(uint32(i), pfStrings, false) }
+func (i ProgFlag) GoString() string { return flagName(uint32(i), pfStrings, true) }
+
+// Dyn.Tag
+type DynTag int
+
+const (
+ DT_NULL DynTag = 0 /* Terminating entry. */
+ DT_NEEDED DynTag = 1 /* String table offset of a needed shared library. */
+ DT_PLTRELSZ DynTag = 2 /* Total size in bytes of PLT relocations. */
+ DT_PLTGOT DynTag = 3 /* Processor-dependent address. */
+ DT_HASH DynTag = 4 /* Address of symbol hash table. */
+ DT_STRTAB DynTag = 5 /* Address of string table. */
+ DT_SYMTAB DynTag = 6 /* Address of symbol table. */
+ DT_RELA DynTag = 7 /* Address of ElfNN_Rela relocations. */
+ DT_RELASZ DynTag = 8 /* Total size of ElfNN_Rela relocations. */
+ DT_RELAENT DynTag = 9 /* Size of each ElfNN_Rela relocation entry. */
+ DT_STRSZ DynTag = 10 /* Size of string table. */
+ DT_SYMENT DynTag = 11 /* Size of each symbol table entry. */
+ DT_INIT DynTag = 12 /* Address of initialization function. */
+ DT_FINI DynTag = 13 /* Address of finalization function. */
+ DT_SONAME DynTag = 14 /* String table offset of shared object name. */
+ DT_RPATH DynTag = 15 /* String table offset of library path. [sup] */
+ DT_SYMBOLIC DynTag = 16 /* Indicates "symbolic" linking. [sup] */
+ DT_REL DynTag = 17 /* Address of ElfNN_Rel relocations. */
+ DT_RELSZ DynTag = 18 /* Total size of ElfNN_Rel relocations. */
+ DT_RELENT DynTag = 19 /* Size of each ElfNN_Rel relocation. */
+ DT_PLTREL DynTag = 20 /* Type of relocation used for PLT. */
+ DT_DEBUG DynTag = 21 /* Reserved (not used). */
+ DT_TEXTREL DynTag = 22 /* Indicates there may be relocations in non-writable segments. [sup] */
+ DT_JMPREL DynTag = 23 /* Address of PLT relocations. */
+ DT_BIND_NOW DynTag = 24 /* [sup] */
+ DT_INIT_ARRAY DynTag = 25 /* Address of the array of pointers to initialization functions */
+ DT_FINI_ARRAY DynTag = 26 /* Address of the array of pointers to termination functions */
+ DT_INIT_ARRAYSZ DynTag = 27 /* Size in bytes of the array of initialization functions. */
+ DT_FINI_ARRAYSZ DynTag = 28 /* Size in bytes of the array of termination functions. */
+ DT_RUNPATH DynTag = 29 /* String table offset of a null-terminated library search path string. */
+ DT_FLAGS DynTag = 30 /* Object specific flag values. */
+ DT_ENCODING DynTag = 32 /* Values greater than or equal to DT_ENCODING
+ and less than DT_LOOS follow the rules for
+ the interpretation of the d_un union
+ as follows: even == 'd_ptr', even == 'd_val'
+ or none */
+ DT_PREINIT_ARRAY DynTag = 32 /* Address of the array of pointers to pre-initialization functions. */
+ DT_PREINIT_ARRAYSZ DynTag = 33 /* Size in bytes of the array of pre-initialization functions. */
+ DT_LOOS DynTag = 0x6000000d /* First OS-specific */
+ DT_HIOS DynTag = 0x6ffff000 /* Last OS-specific */
+ DT_VERSYM DynTag = 0x6ffffff0
+ DT_VERNEED DynTag = 0x6ffffffe
+ DT_VERNEEDNUM DynTag = 0x6fffffff
+ DT_LOPROC DynTag = 0x70000000 /* First processor-specific type. */
+ DT_HIPROC DynTag = 0x7fffffff /* Last processor-specific type. */
+)
+
+var dtStrings = []intName{
+ {0, "DT_NULL"},
+ {1, "DT_NEEDED"},
+ {2, "DT_PLTRELSZ"},
+ {3, "DT_PLTGOT"},
+ {4, "DT_HASH"},
+ {5, "DT_STRTAB"},
+ {6, "DT_SYMTAB"},
+ {7, "DT_RELA"},
+ {8, "DT_RELASZ"},
+ {9, "DT_RELAENT"},
+ {10, "DT_STRSZ"},
+ {11, "DT_SYMENT"},
+ {12, "DT_INIT"},
+ {13, "DT_FINI"},
+ {14, "DT_SONAME"},
+ {15, "DT_RPATH"},
+ {16, "DT_SYMBOLIC"},
+ {17, "DT_REL"},
+ {18, "DT_RELSZ"},
+ {19, "DT_RELENT"},
+ {20, "DT_PLTREL"},
+ {21, "DT_DEBUG"},
+ {22, "DT_TEXTREL"},
+ {23, "DT_JMPREL"},
+ {24, "DT_BIND_NOW"},
+ {25, "DT_INIT_ARRAY"},
+ {26, "DT_FINI_ARRAY"},
+ {27, "DT_INIT_ARRAYSZ"},
+ {28, "DT_FINI_ARRAYSZ"},
+ {29, "DT_RUNPATH"},
+ {30, "DT_FLAGS"},
+ {32, "DT_ENCODING"},
+ {32, "DT_PREINIT_ARRAY"},
+ {33, "DT_PREINIT_ARRAYSZ"},
+ {0x6000000d, "DT_LOOS"},
+ {0x6ffff000, "DT_HIOS"},
+ {0x6ffffff0, "DT_VERSYM"},
+ {0x6ffffffe, "DT_VERNEED"},
+ {0x6fffffff, "DT_VERNEEDNUM"},
+ {0x70000000, "DT_LOPROC"},
+ {0x7fffffff, "DT_HIPROC"},
+}
+
+func (i DynTag) String() string { return stringName(uint32(i), dtStrings, false) }
+func (i DynTag) GoString() string { return stringName(uint32(i), dtStrings, true) }
+
+// DT_FLAGS values.
+type DynFlag int
+
+const (
+ DF_ORIGIN DynFlag = 0x0001 /* Indicates that the object being loaded may
+ make reference to the
+ $ORIGIN substitution string */
+ DF_SYMBOLIC DynFlag = 0x0002 /* Indicates "symbolic" linking. */
+ DF_TEXTREL DynFlag = 0x0004 /* Indicates there may be relocations in non-writable segments. */
+ DF_BIND_NOW DynFlag = 0x0008 /* Indicates that the dynamic linker should
+ process all relocations for the object
+ containing this entry before transferring
+ control to the program. */
+ DF_STATIC_TLS DynFlag = 0x0010 /* Indicates that the shared object or
+ executable contains code using a static
+ thread-local storage scheme. */
+)
+
+var dflagStrings = []intName{
+ {0x0001, "DF_ORIGIN"},
+ {0x0002, "DF_SYMBOLIC"},
+ {0x0004, "DF_TEXTREL"},
+ {0x0008, "DF_BIND_NOW"},
+ {0x0010, "DF_STATIC_TLS"},
+}
+
+func (i DynFlag) String() string { return flagName(uint32(i), dflagStrings, false) }
+func (i DynFlag) GoString() string { return flagName(uint32(i), dflagStrings, true) }
+
+// NType values; used in core files.
+type NType int
+
+const (
+ NT_PRSTATUS NType = 1 /* Process status. */
+ NT_FPREGSET NType = 2 /* Floating point registers. */
+ NT_PRPSINFO NType = 3 /* Process state info. */
+)
+
+var ntypeStrings = []intName{
+ {1, "NT_PRSTATUS"},
+ {2, "NT_FPREGSET"},
+ {3, "NT_PRPSINFO"},
+}
+
+func (i NType) String() string { return stringName(uint32(i), ntypeStrings, false) }
+func (i NType) GoString() string { return stringName(uint32(i), ntypeStrings, true) }
+
+/* Symbol Binding - ELFNN_ST_BIND - st_info */
+type SymBind int
+
+const (
+ STB_LOCAL SymBind = 0 /* Local symbol */
+ STB_GLOBAL SymBind = 1 /* Global symbol */
+ STB_WEAK SymBind = 2 /* like global - lower precedence */
+ STB_LOOS SymBind = 10 /* Reserved range for operating system */
+ STB_HIOS SymBind = 12 /* specific semantics. */
+ STB_LOPROC SymBind = 13 /* reserved range for processor */
+ STB_HIPROC SymBind = 15 /* specific semantics. */
+)
+
+var stbStrings = []intName{
+ {0, "STB_LOCAL"},
+ {1, "STB_GLOBAL"},
+ {2, "STB_WEAK"},
+ {10, "STB_LOOS"},
+ {12, "STB_HIOS"},
+ {13, "STB_LOPROC"},
+ {15, "STB_HIPROC"},
+}
+
+func (i SymBind) String() string { return stringName(uint32(i), stbStrings, false) }
+func (i SymBind) GoString() string { return stringName(uint32(i), stbStrings, true) }
+
+/* Symbol type - ELFNN_ST_TYPE - st_info */
+type SymType int
+
+const (
+ STT_NOTYPE SymType = 0 /* Unspecified type. */
+ STT_OBJECT SymType = 1 /* Data object. */
+ STT_FUNC SymType = 2 /* Function. */
+ STT_SECTION SymType = 3 /* Section. */
+ STT_FILE SymType = 4 /* Source file. */
+ STT_COMMON SymType = 5 /* Uninitialized common block. */
+ STT_TLS SymType = 6 /* TLS object. */
+ STT_LOOS SymType = 10 /* Reserved range for operating system */
+ STT_HIOS SymType = 12 /* specific semantics. */
+ STT_LOPROC SymType = 13 /* reserved range for processor */
+ STT_HIPROC SymType = 15 /* specific semantics. */
+)
+
+var sttStrings = []intName{
+ {0, "STT_NOTYPE"},
+ {1, "STT_OBJECT"},
+ {2, "STT_FUNC"},
+ {3, "STT_SECTION"},
+ {4, "STT_FILE"},
+ {5, "STT_COMMON"},
+ {6, "STT_TLS"},
+ {10, "STT_LOOS"},
+ {12, "STT_HIOS"},
+ {13, "STT_LOPROC"},
+ {15, "STT_HIPROC"},
+}
+
+func (i SymType) String() string { return stringName(uint32(i), sttStrings, false) }
+func (i SymType) GoString() string { return stringName(uint32(i), sttStrings, true) }
+
+/* Symbol visibility - ELFNN_ST_VISIBILITY - st_other */
+type SymVis int
+
+const (
+ STV_DEFAULT SymVis = 0x0 /* Default visibility (see binding). */
+ STV_INTERNAL SymVis = 0x1 /* Special meaning in relocatable objects. */
+ STV_HIDDEN SymVis = 0x2 /* Not visible. */
+ STV_PROTECTED SymVis = 0x3 /* Visible but not preemptible. */
+)
+
+var stvStrings = []intName{
+ {0x0, "STV_DEFAULT"},
+ {0x1, "STV_INTERNAL"},
+ {0x2, "STV_HIDDEN"},
+ {0x3, "STV_PROTECTED"},
+}
+
+func (i SymVis) String() string { return stringName(uint32(i), stvStrings, false) }
+func (i SymVis) GoString() string { return stringName(uint32(i), stvStrings, true) }
+
+/*
+ * Relocation types.
+ */
+
+// Relocation types for x86-64.
+type R_X86_64 int
+
+const (
+ R_X86_64_NONE R_X86_64 = 0 /* No relocation. */
+ R_X86_64_64 R_X86_64 = 1 /* Add 64 bit symbol value. */
+ R_X86_64_PC32 R_X86_64 = 2 /* PC-relative 32 bit signed sym value. */
+ R_X86_64_GOT32 R_X86_64 = 3 /* PC-relative 32 bit GOT offset. */
+ R_X86_64_PLT32 R_X86_64 = 4 /* PC-relative 32 bit PLT offset. */
+ R_X86_64_COPY R_X86_64 = 5 /* Copy data from shared object. */
+ R_X86_64_GLOB_DAT R_X86_64 = 6 /* Set GOT entry to data address. */
+ R_X86_64_JMP_SLOT R_X86_64 = 7 /* Set GOT entry to code address. */
+ R_X86_64_RELATIVE R_X86_64 = 8 /* Add load address of shared object. */
+ R_X86_64_GOTPCREL R_X86_64 = 9 /* Add 32 bit signed pcrel offset to GOT. */
+ R_X86_64_32 R_X86_64 = 10 /* Add 32 bit zero extended symbol value */
+ R_X86_64_32S R_X86_64 = 11 /* Add 32 bit sign extended symbol value */
+ R_X86_64_16 R_X86_64 = 12 /* Add 16 bit zero extended symbol value */
+ R_X86_64_PC16 R_X86_64 = 13 /* Add 16 bit signed extended pc relative symbol value */
+ R_X86_64_8 R_X86_64 = 14 /* Add 8 bit zero extended symbol value */
+ R_X86_64_PC8 R_X86_64 = 15 /* Add 8 bit signed extended pc relative symbol value */
+ R_X86_64_DTPMOD64 R_X86_64 = 16 /* ID of module containing symbol */
+ R_X86_64_DTPOFF64 R_X86_64 = 17 /* Offset in TLS block */
+ R_X86_64_TPOFF64 R_X86_64 = 18 /* Offset in static TLS block */
+ R_X86_64_TLSGD R_X86_64 = 19 /* PC relative offset to GD GOT entry */
+ R_X86_64_TLSLD R_X86_64 = 20 /* PC relative offset to LD GOT entry */
+ R_X86_64_DTPOFF32 R_X86_64 = 21 /* Offset in TLS block */
+ R_X86_64_GOTTPOFF R_X86_64 = 22 /* PC relative offset to IE GOT entry */
+ R_X86_64_TPOFF32 R_X86_64 = 23 /* Offset in static TLS block */
+)
+
+var rx86_64Strings = []intName{
+ {0, "R_X86_64_NONE"},
+ {1, "R_X86_64_64"},
+ {2, "R_X86_64_PC32"},
+ {3, "R_X86_64_GOT32"},
+ {4, "R_X86_64_PLT32"},
+ {5, "R_X86_64_COPY"},
+ {6, "R_X86_64_GLOB_DAT"},
+ {7, "R_X86_64_JMP_SLOT"},
+ {8, "R_X86_64_RELATIVE"},
+ {9, "R_X86_64_GOTPCREL"},
+ {10, "R_X86_64_32"},
+ {11, "R_X86_64_32S"},
+ {12, "R_X86_64_16"},
+ {13, "R_X86_64_PC16"},
+ {14, "R_X86_64_8"},
+ {15, "R_X86_64_PC8"},
+ {16, "R_X86_64_DTPMOD64"},
+ {17, "R_X86_64_DTPOFF64"},
+ {18, "R_X86_64_TPOFF64"},
+ {19, "R_X86_64_TLSGD"},
+ {20, "R_X86_64_TLSLD"},
+ {21, "R_X86_64_DTPOFF32"},
+ {22, "R_X86_64_GOTTPOFF"},
+ {23, "R_X86_64_TPOFF32"},
+}
+
+func (i R_X86_64) String() string { return stringName(uint32(i), rx86_64Strings, false) }
+func (i R_X86_64) GoString() string { return stringName(uint32(i), rx86_64Strings, true) }
+
+// Relocation types for AArch64 (aka arm64)
+type R_AARCH64 int
+
+const (
+ R_AARCH64_NONE R_AARCH64 = 0
+ R_AARCH64_P32_ABS32 R_AARCH64 = 1
+ R_AARCH64_P32_ABS16 R_AARCH64 = 2
+ R_AARCH64_P32_PREL32 R_AARCH64 = 3
+ R_AARCH64_P32_PREL16 R_AARCH64 = 4
+ R_AARCH64_P32_MOVW_UABS_G0 R_AARCH64 = 5
+ R_AARCH64_P32_MOVW_UABS_G0_NC R_AARCH64 = 6
+ R_AARCH64_P32_MOVW_UABS_G1 R_AARCH64 = 7
+ R_AARCH64_P32_MOVW_SABS_G0 R_AARCH64 = 8
+ R_AARCH64_P32_LD_PREL_LO19 R_AARCH64 = 9
+ R_AARCH64_P32_ADR_PREL_LO21 R_AARCH64 = 10
+ R_AARCH64_P32_ADR_PREL_PG_HI21 R_AARCH64 = 11
+ R_AARCH64_P32_ADD_ABS_LO12_NC R_AARCH64 = 12
+ R_AARCH64_P32_LDST8_ABS_LO12_NC R_AARCH64 = 13
+ R_AARCH64_P32_LDST16_ABS_LO12_NC R_AARCH64 = 14
+ R_AARCH64_P32_LDST32_ABS_LO12_NC R_AARCH64 = 15
+ R_AARCH64_P32_LDST64_ABS_LO12_NC R_AARCH64 = 16
+ R_AARCH64_P32_LDST128_ABS_LO12_NC R_AARCH64 = 17
+ R_AARCH64_P32_TSTBR14 R_AARCH64 = 18
+ R_AARCH64_P32_CONDBR19 R_AARCH64 = 19
+ R_AARCH64_P32_JUMP26 R_AARCH64 = 20
+ R_AARCH64_P32_CALL26 R_AARCH64 = 21
+ R_AARCH64_P32_GOT_LD_PREL19 R_AARCH64 = 25
+ R_AARCH64_P32_ADR_GOT_PAGE R_AARCH64 = 26
+ R_AARCH64_P32_LD32_GOT_LO12_NC R_AARCH64 = 27
+ R_AARCH64_P32_TLSGD_ADR_PAGE21 R_AARCH64 = 81
+ R_AARCH64_P32_TLSGD_ADD_LO12_NC R_AARCH64 = 82
+ R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21 R_AARCH64 = 103
+ R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC R_AARCH64 = 104
+ R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19 R_AARCH64 = 105
+ R_AARCH64_P32_TLSLE_MOVW_TPREL_G1 R_AARCH64 = 106
+ R_AARCH64_P32_TLSLE_MOVW_TPREL_G0 R_AARCH64 = 107
+ R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC R_AARCH64 = 108
+ R_AARCH64_P32_TLSLE_ADD_TPREL_HI12 R_AARCH64 = 109
+ R_AARCH64_P32_TLSLE_ADD_TPREL_LO12 R_AARCH64 = 110
+ R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC R_AARCH64 = 111
+ R_AARCH64_P32_TLSDESC_LD_PREL19 R_AARCH64 = 122
+ R_AARCH64_P32_TLSDESC_ADR_PREL21 R_AARCH64 = 123
+ R_AARCH64_P32_TLSDESC_ADR_PAGE21 R_AARCH64 = 124
+ R_AARCH64_P32_TLSDESC_LD32_LO12_NC R_AARCH64 = 125
+ R_AARCH64_P32_TLSDESC_ADD_LO12_NC R_AARCH64 = 126
+ R_AARCH64_P32_TLSDESC_CALL R_AARCH64 = 127
+ R_AARCH64_P32_COPY R_AARCH64 = 180
+ R_AARCH64_P32_GLOB_DAT R_AARCH64 = 181
+ R_AARCH64_P32_JUMP_SLOT R_AARCH64 = 182
+ R_AARCH64_P32_RELATIVE R_AARCH64 = 183
+ R_AARCH64_P32_TLS_DTPMOD R_AARCH64 = 184
+ R_AARCH64_P32_TLS_DTPREL R_AARCH64 = 185
+ R_AARCH64_P32_TLS_TPREL R_AARCH64 = 186
+ R_AARCH64_P32_TLSDESC R_AARCH64 = 187
+ R_AARCH64_P32_IRELATIVE R_AARCH64 = 188
+ R_AARCH64_NULL R_AARCH64 = 256
+ R_AARCH64_ABS64 R_AARCH64 = 257
+ R_AARCH64_ABS32 R_AARCH64 = 258
+ R_AARCH64_ABS16 R_AARCH64 = 259
+ R_AARCH64_PREL64 R_AARCH64 = 260
+ R_AARCH64_PREL32 R_AARCH64 = 261
+ R_AARCH64_PREL16 R_AARCH64 = 262
+ R_AARCH64_MOVW_UABS_G0 R_AARCH64 = 263
+ R_AARCH64_MOVW_UABS_G0_NC R_AARCH64 = 264
+ R_AARCH64_MOVW_UABS_G1 R_AARCH64 = 265
+ R_AARCH64_MOVW_UABS_G1_NC R_AARCH64 = 266
+ R_AARCH64_MOVW_UABS_G2 R_AARCH64 = 267
+ R_AARCH64_MOVW_UABS_G2_NC R_AARCH64 = 268
+ R_AARCH64_MOVW_UABS_G3 R_AARCH64 = 269
+ R_AARCH64_MOVW_SABS_G0 R_AARCH64 = 270
+ R_AARCH64_MOVW_SABS_G1 R_AARCH64 = 271
+ R_AARCH64_MOVW_SABS_G2 R_AARCH64 = 272
+ R_AARCH64_LD_PREL_LO19 R_AARCH64 = 273
+ R_AARCH64_ADR_PREL_LO21 R_AARCH64 = 274
+ R_AARCH64_ADR_PREL_PG_HI21 R_AARCH64 = 275
+ R_AARCH64_ADR_PREL_PG_HI21_NC R_AARCH64 = 276
+ R_AARCH64_ADD_ABS_LO12_NC R_AARCH64 = 277
+ R_AARCH64_LDST8_ABS_LO12_NC R_AARCH64 = 278
+ R_AARCH64_TSTBR14 R_AARCH64 = 279
+ R_AARCH64_CONDBR19 R_AARCH64 = 280
+ R_AARCH64_JUMP26 R_AARCH64 = 282
+ R_AARCH64_CALL26 R_AARCH64 = 283
+ R_AARCH64_LDST16_ABS_LO12_NC R_AARCH64 = 284
+ R_AARCH64_LDST32_ABS_LO12_NC R_AARCH64 = 285
+ R_AARCH64_LDST64_ABS_LO12_NC R_AARCH64 = 286
+ R_AARCH64_LDST128_ABS_LO12_NC R_AARCH64 = 299
+ R_AARCH64_GOT_LD_PREL19 R_AARCH64 = 309
+ R_AARCH64_ADR_GOT_PAGE R_AARCH64 = 311
+ R_AARCH64_LD64_GOT_LO12_NC R_AARCH64 = 312
+ R_AARCH64_TLSGD_ADR_PAGE21 R_AARCH64 = 513
+ R_AARCH64_TLSGD_ADD_LO12_NC R_AARCH64 = 514
+ R_AARCH64_TLSIE_MOVW_GOTTPREL_G1 R_AARCH64 = 539
+ R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC R_AARCH64 = 540
+ R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21 R_AARCH64 = 541
+ R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC R_AARCH64 = 542
+ R_AARCH64_TLSIE_LD_GOTTPREL_PREL19 R_AARCH64 = 543
+ R_AARCH64_TLSLE_MOVW_TPREL_G2 R_AARCH64 = 544
+ R_AARCH64_TLSLE_MOVW_TPREL_G1 R_AARCH64 = 545
+ R_AARCH64_TLSLE_MOVW_TPREL_G1_NC R_AARCH64 = 546
+ R_AARCH64_TLSLE_MOVW_TPREL_G0 R_AARCH64 = 547
+ R_AARCH64_TLSLE_MOVW_TPREL_G0_NC R_AARCH64 = 548
+ R_AARCH64_TLSLE_ADD_TPREL_HI12 R_AARCH64 = 549
+ R_AARCH64_TLSLE_ADD_TPREL_LO12 R_AARCH64 = 550
+ R_AARCH64_TLSLE_ADD_TPREL_LO12_NC R_AARCH64 = 551
+ R_AARCH64_TLSDESC_LD_PREL19 R_AARCH64 = 560
+ R_AARCH64_TLSDESC_ADR_PREL21 R_AARCH64 = 561
+ R_AARCH64_TLSDESC_ADR_PAGE21 R_AARCH64 = 562
+ R_AARCH64_TLSDESC_LD64_LO12_NC R_AARCH64 = 563
+ R_AARCH64_TLSDESC_ADD_LO12_NC R_AARCH64 = 564
+ R_AARCH64_TLSDESC_OFF_G1 R_AARCH64 = 565
+ R_AARCH64_TLSDESC_OFF_G0_NC R_AARCH64 = 566
+ R_AARCH64_TLSDESC_LDR R_AARCH64 = 567
+ R_AARCH64_TLSDESC_ADD R_AARCH64 = 568
+ R_AARCH64_TLSDESC_CALL R_AARCH64 = 569
+ R_AARCH64_COPY R_AARCH64 = 1024
+ R_AARCH64_GLOB_DAT R_AARCH64 = 1025
+ R_AARCH64_JUMP_SLOT R_AARCH64 = 1026
+ R_AARCH64_RELATIVE R_AARCH64 = 1027
+ R_AARCH64_TLS_DTPMOD64 R_AARCH64 = 1028
+ R_AARCH64_TLS_DTPREL64 R_AARCH64 = 1029
+ R_AARCH64_TLS_TPREL64 R_AARCH64 = 1030
+ R_AARCH64_TLSDESC R_AARCH64 = 1031
+ R_AARCH64_IRELATIVE R_AARCH64 = 1032
+)
+
+var raarch64Strings = []intName{
+ {0, "R_AARCH64_NONE"},
+ {1, "R_AARCH64_P32_ABS32"},
+ {2, "R_AARCH64_P32_ABS16"},
+ {3, "R_AARCH64_P32_PREL32"},
+ {4, "R_AARCH64_P32_PREL16"},
+ {5, "R_AARCH64_P32_MOVW_UABS_G0"},
+ {6, "R_AARCH64_P32_MOVW_UABS_G0_NC"},
+ {7, "R_AARCH64_P32_MOVW_UABS_G1"},
+ {8, "R_AARCH64_P32_MOVW_SABS_G0"},
+ {9, "R_AARCH64_P32_LD_PREL_LO19"},
+ {10, "R_AARCH64_P32_ADR_PREL_LO21"},
+ {11, "R_AARCH64_P32_ADR_PREL_PG_HI21"},
+ {12, "R_AARCH64_P32_ADD_ABS_LO12_NC"},
+ {13, "R_AARCH64_P32_LDST8_ABS_LO12_NC"},
+ {14, "R_AARCH64_P32_LDST16_ABS_LO12_NC"},
+ {15, "R_AARCH64_P32_LDST32_ABS_LO12_NC"},
+ {16, "R_AARCH64_P32_LDST64_ABS_LO12_NC"},
+ {17, "R_AARCH64_P32_LDST128_ABS_LO12_NC"},
+ {18, "R_AARCH64_P32_TSTBR14"},
+ {19, "R_AARCH64_P32_CONDBR19"},
+ {20, "R_AARCH64_P32_JUMP26"},
+ {21, "R_AARCH64_P32_CALL26"},
+ {25, "R_AARCH64_P32_GOT_LD_PREL19"},
+ {26, "R_AARCH64_P32_ADR_GOT_PAGE"},
+ {27, "R_AARCH64_P32_LD32_GOT_LO12_NC"},
+ {81, "R_AARCH64_P32_TLSGD_ADR_PAGE21"},
+ {82, "R_AARCH64_P32_TLSGD_ADD_LO12_NC"},
+ {103, "R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21"},
+ {104, "R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC"},
+ {105, "R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19"},
+ {106, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G1"},
+ {107, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0"},
+ {108, "R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC"},
+ {109, "R_AARCH64_P32_TLSLE_ADD_TPREL_HI12"},
+ {110, "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12"},
+ {111, "R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC"},
+ {122, "R_AARCH64_P32_TLSDESC_LD_PREL19"},
+ {123, "R_AARCH64_P32_TLSDESC_ADR_PREL21"},
+ {124, "R_AARCH64_P32_TLSDESC_ADR_PAGE21"},
+ {125, "R_AARCH64_P32_TLSDESC_LD32_LO12_NC"},
+ {126, "R_AARCH64_P32_TLSDESC_ADD_LO12_NC"},
+ {127, "R_AARCH64_P32_TLSDESC_CALL"},
+ {180, "R_AARCH64_P32_COPY"},
+ {181, "R_AARCH64_P32_GLOB_DAT"},
+ {182, "R_AARCH64_P32_JUMP_SLOT"},
+ {183, "R_AARCH64_P32_RELATIVE"},
+ {184, "R_AARCH64_P32_TLS_DTPMOD"},
+ {185, "R_AARCH64_P32_TLS_DTPREL"},
+ {186, "R_AARCH64_P32_TLS_TPREL"},
+ {187, "R_AARCH64_P32_TLSDESC"},
+ {188, "R_AARCH64_P32_IRELATIVE"},
+ {256, "R_AARCH64_NULL"},
+ {257, "R_AARCH64_ABS64"},
+ {258, "R_AARCH64_ABS32"},
+ {259, "R_AARCH64_ABS16"},
+ {260, "R_AARCH64_PREL64"},
+ {261, "R_AARCH64_PREL32"},
+ {262, "R_AARCH64_PREL16"},
+ {263, "R_AARCH64_MOVW_UABS_G0"},
+ {264, "R_AARCH64_MOVW_UABS_G0_NC"},
+ {265, "R_AARCH64_MOVW_UABS_G1"},
+ {266, "R_AARCH64_MOVW_UABS_G1_NC"},
+ {267, "R_AARCH64_MOVW_UABS_G2"},
+ {268, "R_AARCH64_MOVW_UABS_G2_NC"},
+ {269, "R_AARCH64_MOVW_UABS_G3"},
+ {270, "R_AARCH64_MOVW_SABS_G0"},
+ {271, "R_AARCH64_MOVW_SABS_G1"},
+ {272, "R_AARCH64_MOVW_SABS_G2"},
+ {273, "R_AARCH64_LD_PREL_LO19"},
+ {274, "R_AARCH64_ADR_PREL_LO21"},
+ {275, "R_AARCH64_ADR_PREL_PG_HI21"},
+ {276, "R_AARCH64_ADR_PREL_PG_HI21_NC"},
+ {277, "R_AARCH64_ADD_ABS_LO12_NC"},
+ {278, "R_AARCH64_LDST8_ABS_LO12_NC"},
+ {279, "R_AARCH64_TSTBR14"},
+ {280, "R_AARCH64_CONDBR19"},
+ {282, "R_AARCH64_JUMP26"},
+ {283, "R_AARCH64_CALL26"},
+ {284, "R_AARCH64_LDST16_ABS_LO12_NC"},
+ {285, "R_AARCH64_LDST32_ABS_LO12_NC"},
+ {286, "R_AARCH64_LDST64_ABS_LO12_NC"},
+ {299, "R_AARCH64_LDST128_ABS_LO12_NC"},
+ {309, "R_AARCH64_GOT_LD_PREL19"},
+ {311, "R_AARCH64_ADR_GOT_PAGE"},
+ {312, "R_AARCH64_LD64_GOT_LO12_NC"},
+ {513, "R_AARCH64_TLSGD_ADR_PAGE21"},
+ {514, "R_AARCH64_TLSGD_ADD_LO12_NC"},
+ {539, "R_AARCH64_TLSIE_MOVW_GOTTPREL_G1"},
+ {540, "R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC"},
+ {541, "R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21"},
+ {542, "R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC"},
+ {543, "R_AARCH64_TLSIE_LD_GOTTPREL_PREL19"},
+ {544, "R_AARCH64_TLSLE_MOVW_TPREL_G2"},
+ {545, "R_AARCH64_TLSLE_MOVW_TPREL_G1"},
+ {546, "R_AARCH64_TLSLE_MOVW_TPREL_G1_NC"},
+ {547, "R_AARCH64_TLSLE_MOVW_TPREL_G0"},
+ {548, "R_AARCH64_TLSLE_MOVW_TPREL_G0_NC"},
+ {549, "R_AARCH64_TLSLE_ADD_TPREL_HI12"},
+ {550, "R_AARCH64_TLSLE_ADD_TPREL_LO12"},
+ {551, "R_AARCH64_TLSLE_ADD_TPREL_LO12_NC"},
+ {560, "R_AARCH64_TLSDESC_LD_PREL19"},
+ {561, "R_AARCH64_TLSDESC_ADR_PREL21"},
+ {562, "R_AARCH64_TLSDESC_ADR_PAGE21"},
+ {563, "R_AARCH64_TLSDESC_LD64_LO12_NC"},
+ {564, "R_AARCH64_TLSDESC_ADD_LO12_NC"},
+ {565, "R_AARCH64_TLSDESC_OFF_G1"},
+ {566, "R_AARCH64_TLSDESC_OFF_G0_NC"},
+ {567, "R_AARCH64_TLSDESC_LDR"},
+ {568, "R_AARCH64_TLSDESC_ADD"},
+ {569, "R_AARCH64_TLSDESC_CALL"},
+ {1024, "R_AARCH64_COPY"},
+ {1025, "R_AARCH64_GLOB_DAT"},
+ {1026, "R_AARCH64_JUMP_SLOT"},
+ {1027, "R_AARCH64_RELATIVE"},
+ {1028, "R_AARCH64_TLS_DTPMOD64"},
+ {1029, "R_AARCH64_TLS_DTPREL64"},
+ {1030, "R_AARCH64_TLS_TPREL64"},
+ {1031, "R_AARCH64_TLSDESC"},
+ {1032, "R_AARCH64_IRELATIVE"},
+}
+
+func (i R_AARCH64) String() string { return stringName(uint32(i), raarch64Strings, false) }
+func (i R_AARCH64) GoString() string { return stringName(uint32(i), raarch64Strings, true) }
+
+// Relocation types for Alpha.
+type R_ALPHA int
+
+const (
+ R_ALPHA_NONE R_ALPHA = 0 /* No reloc */
+ R_ALPHA_REFLONG R_ALPHA = 1 /* Direct 32 bit */
+ R_ALPHA_REFQUAD R_ALPHA = 2 /* Direct 64 bit */
+ R_ALPHA_GPREL32 R_ALPHA = 3 /* GP relative 32 bit */
+ R_ALPHA_LITERAL R_ALPHA = 4 /* GP relative 16 bit w/optimization */
+ R_ALPHA_LITUSE R_ALPHA = 5 /* Optimization hint for LITERAL */
+ R_ALPHA_GPDISP R_ALPHA = 6 /* Add displacement to GP */
+ R_ALPHA_BRADDR R_ALPHA = 7 /* PC+4 relative 23 bit shifted */
+ R_ALPHA_HINT R_ALPHA = 8 /* PC+4 relative 16 bit shifted */
+ R_ALPHA_SREL16 R_ALPHA = 9 /* PC relative 16 bit */
+ R_ALPHA_SREL32 R_ALPHA = 10 /* PC relative 32 bit */
+ R_ALPHA_SREL64 R_ALPHA = 11 /* PC relative 64 bit */
+ R_ALPHA_OP_PUSH R_ALPHA = 12 /* OP stack push */
+ R_ALPHA_OP_STORE R_ALPHA = 13 /* OP stack pop and store */
+ R_ALPHA_OP_PSUB R_ALPHA = 14 /* OP stack subtract */
+ R_ALPHA_OP_PRSHIFT R_ALPHA = 15 /* OP stack right shift */
+ R_ALPHA_GPVALUE R_ALPHA = 16
+ R_ALPHA_GPRELHIGH R_ALPHA = 17
+ R_ALPHA_GPRELLOW R_ALPHA = 18
+ R_ALPHA_IMMED_GP_16 R_ALPHA = 19
+ R_ALPHA_IMMED_GP_HI32 R_ALPHA = 20
+ R_ALPHA_IMMED_SCN_HI32 R_ALPHA = 21
+ R_ALPHA_IMMED_BR_HI32 R_ALPHA = 22
+ R_ALPHA_IMMED_LO32 R_ALPHA = 23
+ R_ALPHA_COPY R_ALPHA = 24 /* Copy symbol at runtime */
+ R_ALPHA_GLOB_DAT R_ALPHA = 25 /* Create GOT entry */
+ R_ALPHA_JMP_SLOT R_ALPHA = 26 /* Create PLT entry */
+ R_ALPHA_RELATIVE R_ALPHA = 27 /* Adjust by program base */
+)
+
+var ralphaStrings = []intName{
+ {0, "R_ALPHA_NONE"},
+ {1, "R_ALPHA_REFLONG"},
+ {2, "R_ALPHA_REFQUAD"},
+ {3, "R_ALPHA_GPREL32"},
+ {4, "R_ALPHA_LITERAL"},
+ {5, "R_ALPHA_LITUSE"},
+ {6, "R_ALPHA_GPDISP"},
+ {7, "R_ALPHA_BRADDR"},
+ {8, "R_ALPHA_HINT"},
+ {9, "R_ALPHA_SREL16"},
+ {10, "R_ALPHA_SREL32"},
+ {11, "R_ALPHA_SREL64"},
+ {12, "R_ALPHA_OP_PUSH"},
+ {13, "R_ALPHA_OP_STORE"},
+ {14, "R_ALPHA_OP_PSUB"},
+ {15, "R_ALPHA_OP_PRSHIFT"},
+ {16, "R_ALPHA_GPVALUE"},
+ {17, "R_ALPHA_GPRELHIGH"},
+ {18, "R_ALPHA_GPRELLOW"},
+ {19, "R_ALPHA_IMMED_GP_16"},
+ {20, "R_ALPHA_IMMED_GP_HI32"},
+ {21, "R_ALPHA_IMMED_SCN_HI32"},
+ {22, "R_ALPHA_IMMED_BR_HI32"},
+ {23, "R_ALPHA_IMMED_LO32"},
+ {24, "R_ALPHA_COPY"},
+ {25, "R_ALPHA_GLOB_DAT"},
+ {26, "R_ALPHA_JMP_SLOT"},
+ {27, "R_ALPHA_RELATIVE"},
+}
+
+func (i R_ALPHA) String() string { return stringName(uint32(i), ralphaStrings, false) }
+func (i R_ALPHA) GoString() string { return stringName(uint32(i), ralphaStrings, true) }
+
+// Relocation types for ARM.
+type R_ARM int
+
+const (
+ R_ARM_NONE R_ARM = 0 /* No relocation. */
+ R_ARM_PC24 R_ARM = 1
+ R_ARM_ABS32 R_ARM = 2
+ R_ARM_REL32 R_ARM = 3
+ R_ARM_PC13 R_ARM = 4
+ R_ARM_ABS16 R_ARM = 5
+ R_ARM_ABS12 R_ARM = 6
+ R_ARM_THM_ABS5 R_ARM = 7
+ R_ARM_ABS8 R_ARM = 8
+ R_ARM_SBREL32 R_ARM = 9
+ R_ARM_THM_PC22 R_ARM = 10
+ R_ARM_THM_PC8 R_ARM = 11
+ R_ARM_AMP_VCALL9 R_ARM = 12
+ R_ARM_SWI24 R_ARM = 13
+ R_ARM_THM_SWI8 R_ARM = 14
+ R_ARM_XPC25 R_ARM = 15
+ R_ARM_THM_XPC22 R_ARM = 16
+ R_ARM_COPY R_ARM = 20 /* Copy data from shared object. */
+ R_ARM_GLOB_DAT R_ARM = 21 /* Set GOT entry to data address. */
+ R_ARM_JUMP_SLOT R_ARM = 22 /* Set GOT entry to code address. */
+ R_ARM_RELATIVE R_ARM = 23 /* Add load address of shared object. */
+ R_ARM_GOTOFF R_ARM = 24 /* Add GOT-relative symbol address. */
+ R_ARM_GOTPC R_ARM = 25 /* Add PC-relative GOT table address. */
+ R_ARM_GOT32 R_ARM = 26 /* Add PC-relative GOT offset. */
+ R_ARM_PLT32 R_ARM = 27 /* Add PC-relative PLT offset. */
+ R_ARM_GNU_VTENTRY R_ARM = 100
+ R_ARM_GNU_VTINHERIT R_ARM = 101
+ R_ARM_RSBREL32 R_ARM = 250
+ R_ARM_THM_RPC22 R_ARM = 251
+ R_ARM_RREL32 R_ARM = 252
+ R_ARM_RABS32 R_ARM = 253
+ R_ARM_RPC24 R_ARM = 254
+ R_ARM_RBASE R_ARM = 255
+)
+
+var rarmStrings = []intName{
+ {0, "R_ARM_NONE"},
+ {1, "R_ARM_PC24"},
+ {2, "R_ARM_ABS32"},
+ {3, "R_ARM_REL32"},
+ {4, "R_ARM_PC13"},
+ {5, "R_ARM_ABS16"},
+ {6, "R_ARM_ABS12"},
+ {7, "R_ARM_THM_ABS5"},
+ {8, "R_ARM_ABS8"},
+ {9, "R_ARM_SBREL32"},
+ {10, "R_ARM_THM_PC22"},
+ {11, "R_ARM_THM_PC8"},
+ {12, "R_ARM_AMP_VCALL9"},
+ {13, "R_ARM_SWI24"},
+ {14, "R_ARM_THM_SWI8"},
+ {15, "R_ARM_XPC25"},
+ {16, "R_ARM_THM_XPC22"},
+ {20, "R_ARM_COPY"},
+ {21, "R_ARM_GLOB_DAT"},
+ {22, "R_ARM_JUMP_SLOT"},
+ {23, "R_ARM_RELATIVE"},
+ {24, "R_ARM_GOTOFF"},
+ {25, "R_ARM_GOTPC"},
+ {26, "R_ARM_GOT32"},
+ {27, "R_ARM_PLT32"},
+ {100, "R_ARM_GNU_VTENTRY"},
+ {101, "R_ARM_GNU_VTINHERIT"},
+ {250, "R_ARM_RSBREL32"},
+ {251, "R_ARM_THM_RPC22"},
+ {252, "R_ARM_RREL32"},
+ {253, "R_ARM_RABS32"},
+ {254, "R_ARM_RPC24"},
+ {255, "R_ARM_RBASE"},
+}
+
+func (i R_ARM) String() string { return stringName(uint32(i), rarmStrings, false) }
+func (i R_ARM) GoString() string { return stringName(uint32(i), rarmStrings, true) }
+
+// Relocation types for 386.
+type R_386 int
+
+const (
+ R_386_NONE R_386 = 0 /* No relocation. */
+ R_386_32 R_386 = 1 /* Add symbol value. */
+ R_386_PC32 R_386 = 2 /* Add PC-relative symbol value. */
+ R_386_GOT32 R_386 = 3 /* Add PC-relative GOT offset. */
+ R_386_PLT32 R_386 = 4 /* Add PC-relative PLT offset. */
+ R_386_COPY R_386 = 5 /* Copy data from shared object. */
+ R_386_GLOB_DAT R_386 = 6 /* Set GOT entry to data address. */
+ R_386_JMP_SLOT R_386 = 7 /* Set GOT entry to code address. */
+ R_386_RELATIVE R_386 = 8 /* Add load address of shared object. */
+ R_386_GOTOFF R_386 = 9 /* Add GOT-relative symbol address. */
+ R_386_GOTPC R_386 = 10 /* Add PC-relative GOT table address. */
+ R_386_TLS_TPOFF R_386 = 14 /* Negative offset in static TLS block */
+ R_386_TLS_IE R_386 = 15 /* Absolute address of GOT for -ve static TLS */
+ R_386_TLS_GOTIE R_386 = 16 /* GOT entry for negative static TLS block */
+ R_386_TLS_LE R_386 = 17 /* Negative offset relative to static TLS */
+ R_386_TLS_GD R_386 = 18 /* 32 bit offset to GOT (index,off) pair */
+ R_386_TLS_LDM R_386 = 19 /* 32 bit offset to GOT (index,zero) pair */
+ R_386_TLS_GD_32 R_386 = 24 /* 32 bit offset to GOT (index,off) pair */
+ R_386_TLS_GD_PUSH R_386 = 25 /* pushl instruction for Sun ABI GD sequence */
+ R_386_TLS_GD_CALL R_386 = 26 /* call instruction for Sun ABI GD sequence */
+ R_386_TLS_GD_POP R_386 = 27 /* popl instruction for Sun ABI GD sequence */
+ R_386_TLS_LDM_32 R_386 = 28 /* 32 bit offset to GOT (index,zero) pair */
+ R_386_TLS_LDM_PUSH R_386 = 29 /* pushl instruction for Sun ABI LD sequence */
+ R_386_TLS_LDM_CALL R_386 = 30 /* call instruction for Sun ABI LD sequence */
+ R_386_TLS_LDM_POP R_386 = 31 /* popl instruction for Sun ABI LD sequence */
+ R_386_TLS_LDO_32 R_386 = 32 /* 32 bit offset from start of TLS block */
+ R_386_TLS_IE_32 R_386 = 33 /* 32 bit offset to GOT static TLS offset entry */
+ R_386_TLS_LE_32 R_386 = 34 /* 32 bit offset within static TLS block */
+ R_386_TLS_DTPMOD32 R_386 = 35 /* GOT entry containing TLS index */
+ R_386_TLS_DTPOFF32 R_386 = 36 /* GOT entry containing TLS offset */
+ R_386_TLS_TPOFF32 R_386 = 37 /* GOT entry of -ve static TLS offset */
+)
+
+var r386Strings = []intName{
+ {0, "R_386_NONE"},
+ {1, "R_386_32"},
+ {2, "R_386_PC32"},
+ {3, "R_386_GOT32"},
+ {4, "R_386_PLT32"},
+ {5, "R_386_COPY"},
+ {6, "R_386_GLOB_DAT"},
+ {7, "R_386_JMP_SLOT"},
+ {8, "R_386_RELATIVE"},
+ {9, "R_386_GOTOFF"},
+ {10, "R_386_GOTPC"},
+ {14, "R_386_TLS_TPOFF"},
+ {15, "R_386_TLS_IE"},
+ {16, "R_386_TLS_GOTIE"},
+ {17, "R_386_TLS_LE"},
+ {18, "R_386_TLS_GD"},
+ {19, "R_386_TLS_LDM"},
+ {24, "R_386_TLS_GD_32"},
+ {25, "R_386_TLS_GD_PUSH"},
+ {26, "R_386_TLS_GD_CALL"},
+ {27, "R_386_TLS_GD_POP"},
+ {28, "R_386_TLS_LDM_32"},
+ {29, "R_386_TLS_LDM_PUSH"},
+ {30, "R_386_TLS_LDM_CALL"},
+ {31, "R_386_TLS_LDM_POP"},
+ {32, "R_386_TLS_LDO_32"},
+ {33, "R_386_TLS_IE_32"},
+ {34, "R_386_TLS_LE_32"},
+ {35, "R_386_TLS_DTPMOD32"},
+ {36, "R_386_TLS_DTPOFF32"},
+ {37, "R_386_TLS_TPOFF32"},
+}
+
+func (i R_386) String() string { return stringName(uint32(i), r386Strings, false) }
+func (i R_386) GoString() string { return stringName(uint32(i), r386Strings, true) }
+
+// Relocation types for MIPS.
+type R_MIPS int
+
+const (
+ R_MIPS_NONE R_MIPS = 0
+ R_MIPS_16 R_MIPS = 1
+ R_MIPS_32 R_MIPS = 2
+ R_MIPS_REL32 R_MIPS = 3
+ R_MIPS_26 R_MIPS = 4
+ R_MIPS_HI16 R_MIPS = 5 /* high 16 bits of symbol value */
+ R_MIPS_LO16 R_MIPS = 6 /* low 16 bits of symbol value */
+ R_MIPS_GPREL16 R_MIPS = 7 /* GP-relative reference */
+ R_MIPS_LITERAL R_MIPS = 8 /* Reference to literal section */
+ R_MIPS_GOT16 R_MIPS = 9 /* Reference to global offset table */
+ R_MIPS_PC16 R_MIPS = 10 /* 16 bit PC relative reference */
+ R_MIPS_CALL16 R_MIPS = 11 /* 16 bit call thru glbl offset tbl */
+ R_MIPS_GPREL32 R_MIPS = 12
+ R_MIPS_SHIFT5 R_MIPS = 16
+ R_MIPS_SHIFT6 R_MIPS = 17
+ R_MIPS_64 R_MIPS = 18
+ R_MIPS_GOT_DISP R_MIPS = 19
+ R_MIPS_GOT_PAGE R_MIPS = 20
+ R_MIPS_GOT_OFST R_MIPS = 21
+ R_MIPS_GOT_HI16 R_MIPS = 22
+ R_MIPS_GOT_LO16 R_MIPS = 23
+ R_MIPS_SUB R_MIPS = 24
+ R_MIPS_INSERT_A R_MIPS = 25
+ R_MIPS_INSERT_B R_MIPS = 26
+ R_MIPS_DELETE R_MIPS = 27
+ R_MIPS_HIGHER R_MIPS = 28
+ R_MIPS_HIGHEST R_MIPS = 29
+ R_MIPS_CALL_HI16 R_MIPS = 30
+ R_MIPS_CALL_LO16 R_MIPS = 31
+ R_MIPS_SCN_DISP R_MIPS = 32
+ R_MIPS_REL16 R_MIPS = 33
+ R_MIPS_ADD_IMMEDIATE R_MIPS = 34
+ R_MIPS_PJUMP R_MIPS = 35
+ R_MIPS_RELGOT R_MIPS = 36
+ R_MIPS_JALR R_MIPS = 37
+
+ R_MIPS_TLS_DTPMOD32 R_MIPS = 38 /* Module number 32 bit */
+ R_MIPS_TLS_DTPREL32 R_MIPS = 39 /* Module-relative offset 32 bit */
+ R_MIPS_TLS_DTPMOD64 R_MIPS = 40 /* Module number 64 bit */
+ R_MIPS_TLS_DTPREL64 R_MIPS = 41 /* Module-relative offset 64 bit */
+ R_MIPS_TLS_GD R_MIPS = 42 /* 16 bit GOT offset for GD */
+ R_MIPS_TLS_LDM R_MIPS = 43 /* 16 bit GOT offset for LDM */
+ R_MIPS_TLS_DTPREL_HI16 R_MIPS = 44 /* Module-relative offset, high 16 bits */
+ R_MIPS_TLS_DTPREL_LO16 R_MIPS = 45 /* Module-relative offset, low 16 bits */
+ R_MIPS_TLS_GOTTPREL R_MIPS = 46 /* 16 bit GOT offset for IE */
+ R_MIPS_TLS_TPREL32 R_MIPS = 47 /* TP-relative offset, 32 bit */
+ R_MIPS_TLS_TPREL64 R_MIPS = 48 /* TP-relative offset, 64 bit */
+ R_MIPS_TLS_TPREL_HI16 R_MIPS = 49 /* TP-relative offset, high 16 bits */
+ R_MIPS_TLS_TPREL_LO16 R_MIPS = 50 /* TP-relative offset, low 16 bits */
+)
+
+var rmipsStrings = []intName{
+ {0, "R_MIPS_NONE"},
+ {1, "R_MIPS_16"},
+ {2, "R_MIPS_32"},
+ {3, "R_MIPS_REL32"},
+ {4, "R_MIPS_26"},
+ {5, "R_MIPS_HI16"},
+ {6, "R_MIPS_LO16"},
+ {7, "R_MIPS_GPREL16"},
+ {8, "R_MIPS_LITERAL"},
+ {9, "R_MIPS_GOT16"},
+ {10, "R_MIPS_PC16"},
+ {11, "R_MIPS_CALL16"},
+ {12, "R_MIPS_GPREL32"},
+ {16, "R_MIPS_SHIFT5"},
+ {17, "R_MIPS_SHIFT6"},
+ {18, "R_MIPS_64"},
+ {19, "R_MIPS_GOT_DISP"},
+ {20, "R_MIPS_GOT_PAGE"},
+ {21, "R_MIPS_GOT_OFST"},
+ {22, "R_MIPS_GOT_HI16"},
+ {23, "R_MIPS_GOT_LO16"},
+ {24, "R_MIPS_SUB"},
+ {25, "R_MIPS_INSERT_A"},
+ {26, "R_MIPS_INSERT_B"},
+ {27, "R_MIPS_DELETE"},
+ {28, "R_MIPS_HIGHER"},
+ {29, "R_MIPS_HIGHEST"},
+ {30, "R_MIPS_CALL_HI16"},
+ {31, "R_MIPS_CALL_LO16"},
+ {32, "R_MIPS_SCN_DISP"},
+ {33, "R_MIPS_REL16"},
+ {34, "R_MIPS_ADD_IMMEDIATE"},
+ {35, "R_MIPS_PJUMP"},
+ {36, "R_MIPS_RELGOT"},
+ {37, "R_MIPS_JALR"},
+ {38, "R_MIPS_TLS_DTPMOD32"},
+ {39, "R_MIPS_TLS_DTPREL32"},
+ {40, "R_MIPS_TLS_DTPMOD64"},
+ {41, "R_MIPS_TLS_DTPREL64"},
+ {42, "R_MIPS_TLS_GD"},
+ {43, "R_MIPS_TLS_LDM"},
+ {44, "R_MIPS_TLS_DTPREL_HI16"},
+ {45, "R_MIPS_TLS_DTPREL_LO16"},
+ {46, "R_MIPS_TLS_GOTTPREL"},
+ {47, "R_MIPS_TLS_TPREL32"},
+ {48, "R_MIPS_TLS_TPREL64"},
+ {49, "R_MIPS_TLS_TPREL_HI16"},
+ {50, "R_MIPS_TLS_TPREL_LO16"},
+}
+
+func (i R_MIPS) String() string { return stringName(uint32(i), rmipsStrings, false) }
+func (i R_MIPS) GoString() string { return stringName(uint32(i), rmipsStrings, true) }
+
+// Relocation types for PowerPC.
+type R_PPC int
+
+const (
+ R_PPC_NONE R_PPC = 0 /* No relocation. */
+ R_PPC_ADDR32 R_PPC = 1
+ R_PPC_ADDR24 R_PPC = 2
+ R_PPC_ADDR16 R_PPC = 3
+ R_PPC_ADDR16_LO R_PPC = 4
+ R_PPC_ADDR16_HI R_PPC = 5
+ R_PPC_ADDR16_HA R_PPC = 6
+ R_PPC_ADDR14 R_PPC = 7
+ R_PPC_ADDR14_BRTAKEN R_PPC = 8
+ R_PPC_ADDR14_BRNTAKEN R_PPC = 9
+ R_PPC_REL24 R_PPC = 10
+ R_PPC_REL14 R_PPC = 11
+ R_PPC_REL14_BRTAKEN R_PPC = 12
+ R_PPC_REL14_BRNTAKEN R_PPC = 13
+ R_PPC_GOT16 R_PPC = 14
+ R_PPC_GOT16_LO R_PPC = 15
+ R_PPC_GOT16_HI R_PPC = 16
+ R_PPC_GOT16_HA R_PPC = 17
+ R_PPC_PLTREL24 R_PPC = 18
+ R_PPC_COPY R_PPC = 19
+ R_PPC_GLOB_DAT R_PPC = 20
+ R_PPC_JMP_SLOT R_PPC = 21
+ R_PPC_RELATIVE R_PPC = 22
+ R_PPC_LOCAL24PC R_PPC = 23
+ R_PPC_UADDR32 R_PPC = 24
+ R_PPC_UADDR16 R_PPC = 25
+ R_PPC_REL32 R_PPC = 26
+ R_PPC_PLT32 R_PPC = 27
+ R_PPC_PLTREL32 R_PPC = 28
+ R_PPC_PLT16_LO R_PPC = 29
+ R_PPC_PLT16_HI R_PPC = 30
+ R_PPC_PLT16_HA R_PPC = 31
+ R_PPC_SDAREL16 R_PPC = 32
+ R_PPC_SECTOFF R_PPC = 33
+ R_PPC_SECTOFF_LO R_PPC = 34
+ R_PPC_SECTOFF_HI R_PPC = 35
+ R_PPC_SECTOFF_HA R_PPC = 36
+ R_PPC_TLS R_PPC = 67
+ R_PPC_DTPMOD32 R_PPC = 68
+ R_PPC_TPREL16 R_PPC = 69
+ R_PPC_TPREL16_LO R_PPC = 70
+ R_PPC_TPREL16_HI R_PPC = 71
+ R_PPC_TPREL16_HA R_PPC = 72
+ R_PPC_TPREL32 R_PPC = 73
+ R_PPC_DTPREL16 R_PPC = 74
+ R_PPC_DTPREL16_LO R_PPC = 75
+ R_PPC_DTPREL16_HI R_PPC = 76
+ R_PPC_DTPREL16_HA R_PPC = 77
+ R_PPC_DTPREL32 R_PPC = 78
+ R_PPC_GOT_TLSGD16 R_PPC = 79
+ R_PPC_GOT_TLSGD16_LO R_PPC = 80
+ R_PPC_GOT_TLSGD16_HI R_PPC = 81
+ R_PPC_GOT_TLSGD16_HA R_PPC = 82
+ R_PPC_GOT_TLSLD16 R_PPC = 83
+ R_PPC_GOT_TLSLD16_LO R_PPC = 84
+ R_PPC_GOT_TLSLD16_HI R_PPC = 85
+ R_PPC_GOT_TLSLD16_HA R_PPC = 86
+ R_PPC_GOT_TPREL16 R_PPC = 87
+ R_PPC_GOT_TPREL16_LO R_PPC = 88
+ R_PPC_GOT_TPREL16_HI R_PPC = 89
+ R_PPC_GOT_TPREL16_HA R_PPC = 90
+ R_PPC_EMB_NADDR32 R_PPC = 101
+ R_PPC_EMB_NADDR16 R_PPC = 102
+ R_PPC_EMB_NADDR16_LO R_PPC = 103
+ R_PPC_EMB_NADDR16_HI R_PPC = 104
+ R_PPC_EMB_NADDR16_HA R_PPC = 105
+ R_PPC_EMB_SDAI16 R_PPC = 106
+ R_PPC_EMB_SDA2I16 R_PPC = 107
+ R_PPC_EMB_SDA2REL R_PPC = 108
+ R_PPC_EMB_SDA21 R_PPC = 109
+ R_PPC_EMB_MRKREF R_PPC = 110
+ R_PPC_EMB_RELSEC16 R_PPC = 111
+ R_PPC_EMB_RELST_LO R_PPC = 112
+ R_PPC_EMB_RELST_HI R_PPC = 113
+ R_PPC_EMB_RELST_HA R_PPC = 114
+ R_PPC_EMB_BIT_FLD R_PPC = 115
+ R_PPC_EMB_RELSDA R_PPC = 116
+)
+
+var rppcStrings = []intName{
+ {0, "R_PPC_NONE"},
+ {1, "R_PPC_ADDR32"},
+ {2, "R_PPC_ADDR24"},
+ {3, "R_PPC_ADDR16"},
+ {4, "R_PPC_ADDR16_LO"},
+ {5, "R_PPC_ADDR16_HI"},
+ {6, "R_PPC_ADDR16_HA"},
+ {7, "R_PPC_ADDR14"},
+ {8, "R_PPC_ADDR14_BRTAKEN"},
+ {9, "R_PPC_ADDR14_BRNTAKEN"},
+ {10, "R_PPC_REL24"},
+ {11, "R_PPC_REL14"},
+ {12, "R_PPC_REL14_BRTAKEN"},
+ {13, "R_PPC_REL14_BRNTAKEN"},
+ {14, "R_PPC_GOT16"},
+ {15, "R_PPC_GOT16_LO"},
+ {16, "R_PPC_GOT16_HI"},
+ {17, "R_PPC_GOT16_HA"},
+ {18, "R_PPC_PLTREL24"},
+ {19, "R_PPC_COPY"},
+ {20, "R_PPC_GLOB_DAT"},
+ {21, "R_PPC_JMP_SLOT"},
+ {22, "R_PPC_RELATIVE"},
+ {23, "R_PPC_LOCAL24PC"},
+ {24, "R_PPC_UADDR32"},
+ {25, "R_PPC_UADDR16"},
+ {26, "R_PPC_REL32"},
+ {27, "R_PPC_PLT32"},
+ {28, "R_PPC_PLTREL32"},
+ {29, "R_PPC_PLT16_LO"},
+ {30, "R_PPC_PLT16_HI"},
+ {31, "R_PPC_PLT16_HA"},
+ {32, "R_PPC_SDAREL16"},
+ {33, "R_PPC_SECTOFF"},
+ {34, "R_PPC_SECTOFF_LO"},
+ {35, "R_PPC_SECTOFF_HI"},
+ {36, "R_PPC_SECTOFF_HA"},
+
+ {67, "R_PPC_TLS"},
+ {68, "R_PPC_DTPMOD32"},
+ {69, "R_PPC_TPREL16"},
+ {70, "R_PPC_TPREL16_LO"},
+ {71, "R_PPC_TPREL16_HI"},
+ {72, "R_PPC_TPREL16_HA"},
+ {73, "R_PPC_TPREL32"},
+ {74, "R_PPC_DTPREL16"},
+ {75, "R_PPC_DTPREL16_LO"},
+ {76, "R_PPC_DTPREL16_HI"},
+ {77, "R_PPC_DTPREL16_HA"},
+ {78, "R_PPC_DTPREL32"},
+ {79, "R_PPC_GOT_TLSGD16"},
+ {80, "R_PPC_GOT_TLSGD16_LO"},
+ {81, "R_PPC_GOT_TLSGD16_HI"},
+ {82, "R_PPC_GOT_TLSGD16_HA"},
+ {83, "R_PPC_GOT_TLSLD16"},
+ {84, "R_PPC_GOT_TLSLD16_LO"},
+ {85, "R_PPC_GOT_TLSLD16_HI"},
+ {86, "R_PPC_GOT_TLSLD16_HA"},
+ {87, "R_PPC_GOT_TPREL16"},
+ {88, "R_PPC_GOT_TPREL16_LO"},
+ {89, "R_PPC_GOT_TPREL16_HI"},
+ {90, "R_PPC_GOT_TPREL16_HA"},
+
+ {101, "R_PPC_EMB_NADDR32"},
+ {102, "R_PPC_EMB_NADDR16"},
+ {103, "R_PPC_EMB_NADDR16_LO"},
+ {104, "R_PPC_EMB_NADDR16_HI"},
+ {105, "R_PPC_EMB_NADDR16_HA"},
+ {106, "R_PPC_EMB_SDAI16"},
+ {107, "R_PPC_EMB_SDA2I16"},
+ {108, "R_PPC_EMB_SDA2REL"},
+ {109, "R_PPC_EMB_SDA21"},
+ {110, "R_PPC_EMB_MRKREF"},
+ {111, "R_PPC_EMB_RELSEC16"},
+ {112, "R_PPC_EMB_RELST_LO"},
+ {113, "R_PPC_EMB_RELST_HI"},
+ {114, "R_PPC_EMB_RELST_HA"},
+ {115, "R_PPC_EMB_BIT_FLD"},
+ {116, "R_PPC_EMB_RELSDA"},
+}
+
+func (i R_PPC) String() string { return stringName(uint32(i), rppcStrings, false) }
+func (i R_PPC) GoString() string { return stringName(uint32(i), rppcStrings, true) }
+
+// Relocation types for 64-bit PowerPC or Power Architecture processors.
+type R_PPC64 int
+
+const (
+ R_PPC64_NONE R_PPC64 = 0
+ R_PPC64_ADDR32 R_PPC64 = 1
+ R_PPC64_ADDR24 R_PPC64 = 2
+ R_PPC64_ADDR16 R_PPC64 = 3
+ R_PPC64_ADDR16_LO R_PPC64 = 4
+ R_PPC64_ADDR16_HI R_PPC64 = 5
+ R_PPC64_ADDR16_HA R_PPC64 = 6
+ R_PPC64_ADDR14 R_PPC64 = 7
+ R_PPC64_ADDR14_BRTAKEN R_PPC64 = 8
+ R_PPC64_ADDR14_BRNTAKEN R_PPC64 = 9
+ R_PPC64_REL24 R_PPC64 = 10
+ R_PPC64_REL14 R_PPC64 = 11
+ R_PPC64_REL14_BRTAKEN R_PPC64 = 12
+ R_PPC64_REL14_BRNTAKEN R_PPC64 = 13
+ R_PPC64_GOT16 R_PPC64 = 14
+ R_PPC64_GOT16_LO R_PPC64 = 15
+ R_PPC64_GOT16_HI R_PPC64 = 16
+ R_PPC64_GOT16_HA R_PPC64 = 17
+ R_PPC64_JMP_SLOT R_PPC64 = 21
+ R_PPC64_REL32 R_PPC64 = 26
+ R_PPC64_ADDR64 R_PPC64 = 38
+ R_PPC64_ADDR16_HIGHER R_PPC64 = 39
+ R_PPC64_ADDR16_HIGHERA R_PPC64 = 40
+ R_PPC64_ADDR16_HIGHEST R_PPC64 = 41
+ R_PPC64_ADDR16_HIGHESTA R_PPC64 = 42
+ R_PPC64_REL64 R_PPC64 = 44
+ R_PPC64_TOC16 R_PPC64 = 47
+ R_PPC64_TOC16_LO R_PPC64 = 48
+ R_PPC64_TOC16_HI R_PPC64 = 49
+ R_PPC64_TOC16_HA R_PPC64 = 50
+ R_PPC64_TOC R_PPC64 = 51
+ R_PPC64_ADDR16_DS R_PPC64 = 56
+ R_PPC64_ADDR16_LO_DS R_PPC64 = 57
+ R_PPC64_GOT16_DS R_PPC64 = 58
+ R_PPC64_GOT16_LO_DS R_PPC64 = 59
+ R_PPC64_TOC16_DS R_PPC64 = 63
+ R_PPC64_TOC16_LO_DS R_PPC64 = 64
+ R_PPC64_TLS R_PPC64 = 67
+ R_PPC64_DTPMOD64 R_PPC64 = 68
+ R_PPC64_TPREL16 R_PPC64 = 69
+ R_PPC64_TPREL16_LO R_PPC64 = 70
+ R_PPC64_TPREL16_HI R_PPC64 = 71
+ R_PPC64_TPREL16_HA R_PPC64 = 72
+ R_PPC64_TPREL64 R_PPC64 = 73
+ R_PPC64_DTPREL16 R_PPC64 = 74
+ R_PPC64_DTPREL16_LO R_PPC64 = 75
+ R_PPC64_DTPREL16_HI R_PPC64 = 76
+ R_PPC64_DTPREL16_HA R_PPC64 = 77
+ R_PPC64_DTPREL64 R_PPC64 = 78
+ R_PPC64_GOT_TLSGD16 R_PPC64 = 79
+ R_PPC64_GOT_TLSGD16_LO R_PPC64 = 80
+ R_PPC64_GOT_TLSGD16_HI R_PPC64 = 81
+ R_PPC64_GOT_TLSGD16_HA R_PPC64 = 82
+ R_PPC64_GOT_TLSLD16 R_PPC64 = 83
+ R_PPC64_GOT_TLSLD16_LO R_PPC64 = 84
+ R_PPC64_GOT_TLSLD16_HI R_PPC64 = 85
+ R_PPC64_GOT_TLSLD16_HA R_PPC64 = 86
+ R_PPC64_GOT_TPREL16_DS R_PPC64 = 87
+ R_PPC64_GOT_TPREL16_LO_DS R_PPC64 = 88
+ R_PPC64_GOT_TPREL16_HI R_PPC64 = 89
+ R_PPC64_GOT_TPREL16_HA R_PPC64 = 90
+ R_PPC64_GOT_DTPREL16_DS R_PPC64 = 91
+ R_PPC64_GOT_DTPREL16_LO_DS R_PPC64 = 92
+ R_PPC64_GOT_DTPREL16_HI R_PPC64 = 93
+ R_PPC64_GOT_DTPREL16_HA R_PPC64 = 94
+ R_PPC64_TPREL16_DS R_PPC64 = 95
+ R_PPC64_TPREL16_LO_DS R_PPC64 = 96
+ R_PPC64_TPREL16_HIGHER R_PPC64 = 97
+ R_PPC64_TPREL16_HIGHERA R_PPC64 = 98
+ R_PPC64_TPREL16_HIGHEST R_PPC64 = 99
+ R_PPC64_TPREL16_HIGHESTA R_PPC64 = 100
+ R_PPC64_DTPREL16_DS R_PPC64 = 101
+ R_PPC64_DTPREL16_LO_DS R_PPC64 = 102
+ R_PPC64_DTPREL16_HIGHER R_PPC64 = 103
+ R_PPC64_DTPREL16_HIGHERA R_PPC64 = 104
+ R_PPC64_DTPREL16_HIGHEST R_PPC64 = 105
+ R_PPC64_DTPREL16_HIGHESTA R_PPC64 = 106
+ R_PPC64_TLSGD R_PPC64 = 107
+ R_PPC64_TLSLD R_PPC64 = 108
+ R_PPC64_REL16 R_PPC64 = 249
+ R_PPC64_REL16_LO R_PPC64 = 250
+ R_PPC64_REL16_HI R_PPC64 = 251
+ R_PPC64_REL16_HA R_PPC64 = 252
+)
+
+var rppc64Strings = []intName{
+ {0, "R_PPC64_NONE"},
+ {1, "R_PPC64_ADDR32"},
+ {2, "R_PPC64_ADDR24"},
+ {3, "R_PPC64_ADDR16"},
+ {4, "R_PPC64_ADDR16_LO"},
+ {5, "R_PPC64_ADDR16_HI"},
+ {6, "R_PPC64_ADDR16_HA"},
+ {7, "R_PPC64_ADDR14"},
+ {8, "R_PPC64_ADDR14_BRTAKEN"},
+ {9, "R_PPC64_ADDR14_BRNTAKEN"},
+ {10, "R_PPC64_REL24"},
+ {11, "R_PPC64_REL14"},
+ {12, "R_PPC64_REL14_BRTAKEN"},
+ {13, "R_PPC64_REL14_BRNTAKEN"},
+ {14, "R_PPC64_GOT16"},
+ {15, "R_PPC64_GOT16_LO"},
+ {16, "R_PPC64_GOT16_HI"},
+ {17, "R_PPC64_GOT16_HA"},
+ {21, "R_PPC64_JMP_SLOT"},
+ {26, "R_PPC64_REL32"},
+ {38, "R_PPC64_ADDR64"},
+ {39, "R_PPC64_ADDR16_HIGHER"},
+ {40, "R_PPC64_ADDR16_HIGHERA"},
+ {41, "R_PPC64_ADDR16_HIGHEST"},
+ {42, "R_PPC64_ADDR16_HIGHESTA"},
+ {44, "R_PPC64_REL64"},
+ {47, "R_PPC64_TOC16"},
+ {48, "R_PPC64_TOC16_LO"},
+ {49, "R_PPC64_TOC16_HI"},
+ {50, "R_PPC64_TOC16_HA"},
+ {51, "R_PPC64_TOC"},
+ {56, "R_PPC64_ADDR16_DS"},
+ {57, "R_PPC64_ADDR16_LO_DS"},
+ {58, "R_PPC64_GOT16_DS"},
+ {59, "R_PPC64_GOT16_LO_DS"},
+ {63, "R_PPC64_TOC16_DS"},
+ {64, "R_PPC64_TOC16_LO_DS"},
+ {67, "R_PPC64_TLS"},
+ {68, "R_PPC64_DTPMOD64"},
+ {69, "R_PPC64_TPREL16"},
+ {70, "R_PPC64_TPREL16_LO"},
+ {71, "R_PPC64_TPREL16_HI"},
+ {72, "R_PPC64_TPREL16_HA"},
+ {73, "R_PPC64_TPREL64"},
+ {74, "R_PPC64_DTPREL16"},
+ {75, "R_PPC64_DTPREL16_LO"},
+ {76, "R_PPC64_DTPREL16_HI"},
+ {77, "R_PPC64_DTPREL16_HA"},
+ {78, "R_PPC64_DTPREL64"},
+ {79, "R_PPC64_GOT_TLSGD16"},
+ {80, "R_PPC64_GOT_TLSGD16_LO"},
+ {81, "R_PPC64_GOT_TLSGD16_HI"},
+ {82, "R_PPC64_GOT_TLSGD16_HA"},
+ {83, "R_PPC64_GOT_TLSLD16"},
+ {84, "R_PPC64_GOT_TLSLD16_LO"},
+ {85, "R_PPC64_GOT_TLSLD16_HI"},
+ {86, "R_PPC64_GOT_TLSLD16_HA"},
+ {87, "R_PPC64_GOT_TPREL16_DS"},
+ {88, "R_PPC64_GOT_TPREL16_LO_DS"},
+ {89, "R_PPC64_GOT_TPREL16_HI"},
+ {90, "R_PPC64_GOT_TPREL16_HA"},
+ {91, "R_PPC64_GOT_DTPREL16_DS"},
+ {92, "R_PPC64_GOT_DTPREL16_LO_DS"},
+ {93, "R_PPC64_GOT_DTPREL16_HI"},
+ {94, "R_PPC64_GOT_DTPREL16_HA"},
+ {95, "R_PPC64_TPREL16_DS"},
+ {96, "R_PPC64_TPREL16_LO_DS"},
+ {97, "R_PPC64_TPREL16_HIGHER"},
+ {98, "R_PPC64_TPREL16_HIGHERA"},
+ {99, "R_PPC64_TPREL16_HIGHEST"},
+ {100, "R_PPC64_TPREL16_HIGHESTA"},
+ {101, "R_PPC64_DTPREL16_DS"},
+ {102, "R_PPC64_DTPREL16_LO_DS"},
+ {103, "R_PPC64_DTPREL16_HIGHER"},
+ {104, "R_PPC64_DTPREL16_HIGHERA"},
+ {105, "R_PPC64_DTPREL16_HIGHEST"},
+ {106, "R_PPC64_DTPREL16_HIGHESTA"},
+ {107, "R_PPC64_TLSGD"},
+ {108, "R_PPC64_TLSLD"},
+ {249, "R_PPC64_REL16"},
+ {250, "R_PPC64_REL16_LO"},
+ {251, "R_PPC64_REL16_HI"},
+ {252, "R_PPC64_REL16_HA"},
+}
+
+func (i R_PPC64) String() string { return stringName(uint32(i), rppc64Strings, false) }
+func (i R_PPC64) GoString() string { return stringName(uint32(i), rppc64Strings, true) }
+
+// Relocation types for SPARC.
+type R_SPARC int
+
+const (
+ R_SPARC_NONE R_SPARC = 0
+ R_SPARC_8 R_SPARC = 1
+ R_SPARC_16 R_SPARC = 2
+ R_SPARC_32 R_SPARC = 3
+ R_SPARC_DISP8 R_SPARC = 4
+ R_SPARC_DISP16 R_SPARC = 5
+ R_SPARC_DISP32 R_SPARC = 6
+ R_SPARC_WDISP30 R_SPARC = 7
+ R_SPARC_WDISP22 R_SPARC = 8
+ R_SPARC_HI22 R_SPARC = 9
+ R_SPARC_22 R_SPARC = 10
+ R_SPARC_13 R_SPARC = 11
+ R_SPARC_LO10 R_SPARC = 12
+ R_SPARC_GOT10 R_SPARC = 13
+ R_SPARC_GOT13 R_SPARC = 14
+ R_SPARC_GOT22 R_SPARC = 15
+ R_SPARC_PC10 R_SPARC = 16
+ R_SPARC_PC22 R_SPARC = 17
+ R_SPARC_WPLT30 R_SPARC = 18
+ R_SPARC_COPY R_SPARC = 19
+ R_SPARC_GLOB_DAT R_SPARC = 20
+ R_SPARC_JMP_SLOT R_SPARC = 21
+ R_SPARC_RELATIVE R_SPARC = 22
+ R_SPARC_UA32 R_SPARC = 23
+ R_SPARC_PLT32 R_SPARC = 24
+ R_SPARC_HIPLT22 R_SPARC = 25
+ R_SPARC_LOPLT10 R_SPARC = 26
+ R_SPARC_PCPLT32 R_SPARC = 27
+ R_SPARC_PCPLT22 R_SPARC = 28
+ R_SPARC_PCPLT10 R_SPARC = 29
+ R_SPARC_10 R_SPARC = 30
+ R_SPARC_11 R_SPARC = 31
+ R_SPARC_64 R_SPARC = 32
+ R_SPARC_OLO10 R_SPARC = 33
+ R_SPARC_HH22 R_SPARC = 34
+ R_SPARC_HM10 R_SPARC = 35
+ R_SPARC_LM22 R_SPARC = 36
+ R_SPARC_PC_HH22 R_SPARC = 37
+ R_SPARC_PC_HM10 R_SPARC = 38
+ R_SPARC_PC_LM22 R_SPARC = 39
+ R_SPARC_WDISP16 R_SPARC = 40
+ R_SPARC_WDISP19 R_SPARC = 41
+ R_SPARC_GLOB_JMP R_SPARC = 42
+ R_SPARC_7 R_SPARC = 43
+ R_SPARC_5 R_SPARC = 44
+ R_SPARC_6 R_SPARC = 45
+ R_SPARC_DISP64 R_SPARC = 46
+ R_SPARC_PLT64 R_SPARC = 47
+ R_SPARC_HIX22 R_SPARC = 48
+ R_SPARC_LOX10 R_SPARC = 49
+ R_SPARC_H44 R_SPARC = 50
+ R_SPARC_M44 R_SPARC = 51
+ R_SPARC_L44 R_SPARC = 52
+ R_SPARC_REGISTER R_SPARC = 53
+ R_SPARC_UA64 R_SPARC = 54
+ R_SPARC_UA16 R_SPARC = 55
+)
+
+var rsparcStrings = []intName{
+ {0, "R_SPARC_NONE"},
+ {1, "R_SPARC_8"},
+ {2, "R_SPARC_16"},
+ {3, "R_SPARC_32"},
+ {4, "R_SPARC_DISP8"},
+ {5, "R_SPARC_DISP16"},
+ {6, "R_SPARC_DISP32"},
+ {7, "R_SPARC_WDISP30"},
+ {8, "R_SPARC_WDISP22"},
+ {9, "R_SPARC_HI22"},
+ {10, "R_SPARC_22"},
+ {11, "R_SPARC_13"},
+ {12, "R_SPARC_LO10"},
+ {13, "R_SPARC_GOT10"},
+ {14, "R_SPARC_GOT13"},
+ {15, "R_SPARC_GOT22"},
+ {16, "R_SPARC_PC10"},
+ {17, "R_SPARC_PC22"},
+ {18, "R_SPARC_WPLT30"},
+ {19, "R_SPARC_COPY"},
+ {20, "R_SPARC_GLOB_DAT"},
+ {21, "R_SPARC_JMP_SLOT"},
+ {22, "R_SPARC_RELATIVE"},
+ {23, "R_SPARC_UA32"},
+ {24, "R_SPARC_PLT32"},
+ {25, "R_SPARC_HIPLT22"},
+ {26, "R_SPARC_LOPLT10"},
+ {27, "R_SPARC_PCPLT32"},
+ {28, "R_SPARC_PCPLT22"},
+ {29, "R_SPARC_PCPLT10"},
+ {30, "R_SPARC_10"},
+ {31, "R_SPARC_11"},
+ {32, "R_SPARC_64"},
+ {33, "R_SPARC_OLO10"},
+ {34, "R_SPARC_HH22"},
+ {35, "R_SPARC_HM10"},
+ {36, "R_SPARC_LM22"},
+ {37, "R_SPARC_PC_HH22"},
+ {38, "R_SPARC_PC_HM10"},
+ {39, "R_SPARC_PC_LM22"},
+ {40, "R_SPARC_WDISP16"},
+ {41, "R_SPARC_WDISP19"},
+ {42, "R_SPARC_GLOB_JMP"},
+ {43, "R_SPARC_7"},
+ {44, "R_SPARC_5"},
+ {45, "R_SPARC_6"},
+ {46, "R_SPARC_DISP64"},
+ {47, "R_SPARC_PLT64"},
+ {48, "R_SPARC_HIX22"},
+ {49, "R_SPARC_LOX10"},
+ {50, "R_SPARC_H44"},
+ {51, "R_SPARC_M44"},
+ {52, "R_SPARC_L44"},
+ {53, "R_SPARC_REGISTER"},
+ {54, "R_SPARC_UA64"},
+ {55, "R_SPARC_UA16"},
+}
+
+func (i R_SPARC) String() string { return stringName(uint32(i), rsparcStrings, false) }
+func (i R_SPARC) GoString() string { return stringName(uint32(i), rsparcStrings, true) }
+
+// Magic number for the elf trampoline, chosen wisely to be an immediate value.
+const ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
+
+// ELF32 File header.
+type Header32 struct {
+ Ident [EI_NIDENT]byte /* File identification. */
+ Type uint16 /* File type. */
+ Machine uint16 /* Machine architecture. */
+ Version uint32 /* ELF format version. */
+ Entry uint32 /* Entry point. */
+ Phoff uint32 /* Program header file offset. */
+ Shoff uint32 /* Section header file offset. */
+ Flags uint32 /* Architecture-specific flags. */
+ Ehsize uint16 /* Size of ELF header in bytes. */
+ Phentsize uint16 /* Size of program header entry. */
+ Phnum uint16 /* Number of program header entries. */
+ Shentsize uint16 /* Size of section header entry. */
+ Shnum uint16 /* Number of section header entries. */
+ Shstrndx uint16 /* Section name strings section. */
+}
+
+// ELF32 Section header.
+type Section32 struct {
+ Name uint32 /* Section name (index into the section header string table). */
+ Type uint32 /* Section type. */
+ Flags uint32 /* Section flags. */
+ Addr uint32 /* Address in memory image. */
+ Off uint32 /* Offset in file. */
+ Size uint32 /* Size in bytes. */
+ Link uint32 /* Index of a related section. */
+ Info uint32 /* Depends on section type. */
+ Addralign uint32 /* Alignment in bytes. */
+ Entsize uint32 /* Size of each entry in section. */
+}
+
+// ELF32 Program header.
+type Prog32 struct {
+ Type uint32 /* Entry type. */
+ Off uint32 /* File offset of contents. */
+ Vaddr uint32 /* Virtual address in memory image. */
+ Paddr uint32 /* Physical address (not used). */
+ Filesz uint32 /* Size of contents in file. */
+ Memsz uint32 /* Size of contents in memory. */
+ Flags uint32 /* Access permission flags. */
+ Align uint32 /* Alignment in memory and file. */
+}
+
+// ELF32 Dynamic structure. The ".dynamic" section contains an array of them.
+type Dyn32 struct {
+ Tag int32 /* Entry type. */
+ Val uint32 /* Integer/Address value. */
+}
+
+// ELF32 Compression header.
+type Chdr32 struct {
+ Type uint32
+ Size uint32
+ Addralign uint32
+}
+
+/*
+ * Relocation entries.
+ */
+
+// ELF32 Relocations that don't need an addend field.
+type Rel32 struct {
+ Off uint32 /* Location to be relocated. */
+ Info uint32 /* Relocation type and symbol index. */
+}
+
+// ELF32 Relocations that need an addend field.
+type Rela32 struct {
+ Off uint32 /* Location to be relocated. */
+ Info uint32 /* Relocation type and symbol index. */
+ Addend int32 /* Addend. */
+}
+
+func R_SYM32(info uint32) uint32 { return uint32(info >> 8) }
+func R_TYPE32(info uint32) uint32 { return uint32(info & 0xff) }
+func R_INFO32(sym, typ uint32) uint32 { return sym<<8 | typ }
+
+// ELF32 Symbol.
+type Sym32 struct {
+ Name uint32
+ Value uint32
+ Size uint32
+ Info uint8
+ Other uint8
+ Shndx uint16
+}
+
+const Sym32Size = 16
+
+func ST_BIND(info uint8) SymBind { return SymBind(info >> 4) }
+func ST_TYPE(info uint8) SymType { return SymType(info & 0xF) }
+func ST_INFO(bind SymBind, typ SymType) uint8 {
+ return uint8(bind)<<4 | uint8(typ)&0xf
+}
+func ST_VISIBILITY(other uint8) SymVis { return SymVis(other & 3) }
+
+/*
+ * ELF64
+ */
+
+// ELF64 file header.
+type Header64 struct {
+ Ident [EI_NIDENT]byte /* File identification. */
+ Type uint16 /* File type. */
+ Machine uint16 /* Machine architecture. */
+ Version uint32 /* ELF format version. */
+ Entry uint64 /* Entry point. */
+ Phoff uint64 /* Program header file offset. */
+ Shoff uint64 /* Section header file offset. */
+ Flags uint32 /* Architecture-specific flags. */
+ Ehsize uint16 /* Size of ELF header in bytes. */
+ Phentsize uint16 /* Size of program header entry. */
+ Phnum uint16 /* Number of program header entries. */
+ Shentsize uint16 /* Size of section header entry. */
+ Shnum uint16 /* Number of section header entries. */
+ Shstrndx uint16 /* Section name strings section. */
+}
+
+// ELF64 Section header.
+type Section64 struct {
+ Name uint32 /* Section name (index into the section header string table). */
+ Type uint32 /* Section type. */
+ Flags uint64 /* Section flags. */
+ Addr uint64 /* Address in memory image. */
+ Off uint64 /* Offset in file. */
+ Size uint64 /* Size in bytes. */
+ Link uint32 /* Index of a related section. */
+ Info uint32 /* Depends on section type. */
+ Addralign uint64 /* Alignment in bytes. */
+ Entsize uint64 /* Size of each entry in section. */
+}
+
+// ELF64 Program header.
+type Prog64 struct {
+ Type uint32 /* Entry type. */
+ Flags uint32 /* Access permission flags. */
+ Off uint64 /* File offset of contents. */
+ Vaddr uint64 /* Virtual address in memory image. */
+ Paddr uint64 /* Physical address (not used). */
+ Filesz uint64 /* Size of contents in file. */
+ Memsz uint64 /* Size of contents in memory. */
+ Align uint64 /* Alignment in memory and file. */
+}
+
+// ELF64 Dynamic structure. The ".dynamic" section contains an array of them.
+type Dyn64 struct {
+ Tag int64 /* Entry type. */
+ Val uint64 /* Integer/address value */
+}
+
+// ELF64 Compression header.
+type Chdr64 struct {
+ Type uint32
+ _ uint32 /* Reserved. */
+ Size uint64
+ Addralign uint64
+}
+
+/*
+ * Relocation entries.
+ */
+
+/* ELF64 relocations that don't need an addend field. */
+type Rel64 struct {
+ Off uint64 /* Location to be relocated. */
+ Info uint64 /* Relocation type and symbol index. */
+}
+
+/* ELF64 relocations that need an addend field. */
+type Rela64 struct {
+ Off uint64 /* Location to be relocated. */
+ Info uint64 /* Relocation type and symbol index. */
+ Addend int64 /* Addend. */
+}
+
+func R_SYM64(info uint64) uint32 { return uint32(info >> 32) }
+func R_TYPE64(info uint64) uint32 { return uint32(info) }
+func R_INFO(sym, typ uint32) uint64 { return uint64(sym)<<32 | uint64(typ) }
+
+// ELF64 symbol table entries.
+type Sym64 struct {
+ Name uint32 /* String table index of name. */
+ Info uint8 /* Type and binding information. */
+ Other uint8 /* Reserved (not used). */
+ Shndx uint16 /* Section index of symbol. */
+ Value uint64 /* Symbol value. */
+ Size uint64 /* Size of associated object. */
+}
+
+const Sym64Size = 24
+
+type intName struct {
+ i uint32
+ s string
+}
+
+func stringName(i uint32, names []intName, goSyntax bool) string {
+ for _, n := range names {
+ if n.i == i {
+ if goSyntax {
+ return "elf." + n.s
+ }
+ return n.s
+ }
+ }
+
+ // second pass - look for smaller to add with.
+ // assume sorted already
+ for j := len(names) - 1; j >= 0; j-- {
+ n := names[j]
+ if n.i < i {
+ s := n.s
+ if goSyntax {
+ s = "elf." + s
+ }
+ return s + "+" + strconv.FormatUint(uint64(i-n.i), 10)
+ }
+ }
+
+ return strconv.FormatUint(uint64(i), 10)
+}
+
+func flagName(i uint32, names []intName, goSyntax bool) string {
+ s := ""
+ for _, n := range names {
+ if n.i&i == n.i {
+ if len(s) > 0 {
+ s += "+"
+ }
+ if goSyntax {
+ s += "elf."
+ }
+ s += n.s
+ i -= n.i
+ }
+ }
+ if len(s) == 0 {
+ return "0x" + strconv.FormatUint(uint64(i), 16)
+ }
+ if i != 0 {
+ s += "+0x" + strconv.FormatUint(uint64(i), 16)
+ }
+ return s
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/file.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/file.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/file.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/file.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,1197 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package elf implements access to ELF object files.
+package elf
+
+import (
+ "bytes"
+ "compress/zlib"
+ "debug/dwarf"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// TODO: error reporting detail
+
+/*
+ * Internal ELF representation
+ */
+
+// A FileHeader represents an ELF file header.
+type FileHeader struct {
+ Class Class
+ Data Data
+ Version Version
+ OSABI OSABI
+ ABIVersion uint8
+ ByteOrder binary.ByteOrder
+ Type Type
+ Machine Machine
+ Entry uint64
+}
+
+// A File represents an open ELF file.
+type File struct {
+ FileHeader
+ Sections []*Section
+ Progs []*Prog
+ closer io.Closer
+ gnuNeed []verneed
+ gnuVersym []byte
+}
+
+// A SectionHeader represents a single ELF section header.
+type SectionHeader struct {
+ Name string
+ Type SectionType
+ Flags SectionFlag
+ Addr uint64
+ Offset uint64
+ Size uint64
+ Link uint32
+ Info uint32
+ Addralign uint64
+ Entsize uint64
+
+ // FileSize is the size of this section in the file in bytes.
+ // If a section is compressed, FileSize is the size of the
+ // compressed data, while Size (above) is the size of the
+ // uncompressed data.
+ FileSize uint64
+}
+
+// A Section represents a single section in an ELF file.
+type Section struct {
+ SectionHeader
+
+ // Embed ReaderAt for ReadAt method.
+ // Do not embed SectionReader directly
+ // to avoid having Read and Seek.
+ // If a client wants Read and Seek it must use
+ // Open() to avoid fighting over the seek offset
+ // with other clients.
+ //
+ // ReaderAt may be nil if the section is not easily available
+ // in a random-access form. For example, a compressed section
+ // may have a nil ReaderAt.
+ io.ReaderAt
+ sr *io.SectionReader
+
+ compressionType CompressionType
+ compressionOffset int64
+}
+
+// Data reads and returns the contents of the ELF section.
+// Even if the section is stored compressed in the ELF file,
+// Data returns uncompressed data.
+func (s *Section) Data() ([]byte, error) {
+ dat := make([]byte, s.Size)
+ n, err := io.ReadFull(s.Open(), dat)
+ return dat[0:n], err
+}
+
+// stringTable reads and returns the string table given by the
+// specified link value.
+func (f *File) stringTable(link uint32) ([]byte, error) {
+ if link <= 0 || link >= uint32(len(f.Sections)) {
+ return nil, errors.New("section has invalid string table link")
+ }
+ return f.Sections[link].Data()
+}
+
+// Open returns a new ReadSeeker reading the ELF section.
+// Even if the section is stored compressed in the ELF file,
+// the ReadSeeker reads uncompressed data.
+func (s *Section) Open() io.ReadSeeker {
+ if s.Flags&SHF_COMPRESSED == 0 {
+ return io.NewSectionReader(s.sr, 0, 1<<63-1)
+ }
+ if s.compressionType == COMPRESS_ZLIB {
+ return &readSeekerFromReader{
+ reset: func() (io.Reader, error) {
+ fr := io.NewSectionReader(s.sr, s.compressionOffset, int64(s.FileSize)-s.compressionOffset)
+ return zlib.NewReader(fr)
+ },
+ size: int64(s.Size),
+ }
+ }
+ err := &FormatError{int64(s.Offset), "unknown compression type", s.compressionType}
+ return errorReader{err}
+}
+
+// A ProgHeader represents a single ELF program header.
+type ProgHeader struct {
+ Type ProgType
+ Flags ProgFlag
+ Off uint64
+ Vaddr uint64
+ Paddr uint64
+ Filesz uint64
+ Memsz uint64
+ Align uint64
+}
+
+// A Prog represents a single ELF program header in an ELF binary.
+type Prog struct {
+ ProgHeader
+
+ // Embed ReaderAt for ReadAt method.
+ // Do not embed SectionReader directly
+ // to avoid having Read and Seek.
+ // If a client wants Read and Seek it must use
+ // Open() to avoid fighting over the seek offset
+ // with other clients.
+ io.ReaderAt
+ sr *io.SectionReader
+}
+
+// Open returns a new ReadSeeker reading the ELF program body.
+func (p *Prog) Open() io.ReadSeeker { return io.NewSectionReader(p.sr, 0, 1<<63-1) }
+
+// A Symbol represents an entry in an ELF symbol table section.
+type Symbol struct {
+ Name string
+ Info, Other byte
+ Section SectionIndex
+ Value, Size uint64
+}
+
+/*
+ * ELF reader
+ */
+
+type FormatError struct {
+ off int64
+ msg string
+ val interface{}
+}
+
+func (e *FormatError) Error() string {
+ msg := e.msg
+ if e.val != nil {
+ msg += fmt.Sprintf(" '%v' ", e.val)
+ }
+ msg += fmt.Sprintf("in record at byte %#x", e.off)
+ return msg
+}
+
+// Open opens the named file using os.Open and prepares it for use as an ELF binary.
+func Open(name string) (*File, error) {
+ f, err := os.Open(name)
+ if err != nil {
+ return nil, err
+ }
+ ff, err := NewFile(f)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ ff.closer = f
+ return ff, nil
+}
+
+// Close closes the File.
+// If the File was created using NewFile directly instead of Open,
+// Close has no effect.
+func (f *File) Close() error {
+ var err error
+ if f.closer != nil {
+ err = f.closer.Close()
+ f.closer = nil
+ }
+ return err
+}
+
+// SectionByType returns the first section in f with the
+// given type, or nil if there is no such section.
+func (f *File) SectionByType(typ SectionType) *Section {
+ for _, s := range f.Sections {
+ if s.Type == typ {
+ return s
+ }
+ }
+ return nil
+}
+
+// NewFile creates a new File for accessing an ELF binary in an underlying reader.
+// The ELF binary is expected to start at position 0 in the ReaderAt.
+func NewFile(r io.ReaderAt) (*File, error) {
+ sr := io.NewSectionReader(r, 0, 1<<63-1)
+ // Read and decode ELF identifier
+ var ident [16]uint8
+ if _, err := r.ReadAt(ident[0:], 0); err != nil {
+ return nil, err
+ }
+ if ident[0] != '\x7f' || ident[1] != 'E' || ident[2] != 'L' || ident[3] != 'F' {
+ return nil, &FormatError{0, "bad magic number", ident[0:4]}
+ }
+
+ f := new(File)
+ f.Class = Class(ident[EI_CLASS])
+ switch f.Class {
+ case ELFCLASS32:
+ case ELFCLASS64:
+ // ok
+ default:
+ return nil, &FormatError{0, "unknown ELF class", f.Class}
+ }
+
+ f.Data = Data(ident[EI_DATA])
+ switch f.Data {
+ case ELFDATA2LSB:
+ f.ByteOrder = binary.LittleEndian
+ case ELFDATA2MSB:
+ f.ByteOrder = binary.BigEndian
+ default:
+ return nil, &FormatError{0, "unknown ELF data encoding", f.Data}
+ }
+
+ f.Version = Version(ident[EI_VERSION])
+ if f.Version != EV_CURRENT {
+ return nil, &FormatError{0, "unknown ELF version", f.Version}
+ }
+
+ f.OSABI = OSABI(ident[EI_OSABI])
+ f.ABIVersion = ident[EI_ABIVERSION]
+
+ // Read ELF file header
+ var phoff int64
+ var phentsize, phnum int
+ var shoff int64
+ var shentsize, shnum, shstrndx int
+ shstrndx = -1
+ switch f.Class {
+ case ELFCLASS32:
+ hdr := new(Header32)
+ sr.Seek(0, os.SEEK_SET)
+ if err := binary.Read(sr, f.ByteOrder, hdr); err != nil {
+ return nil, err
+ }
+ f.Type = Type(hdr.Type)
+ f.Machine = Machine(hdr.Machine)
+ f.Entry = uint64(hdr.Entry)
+ if v := Version(hdr.Version); v != f.Version {
+ return nil, &FormatError{0, "mismatched ELF version", v}
+ }
+ phoff = int64(hdr.Phoff)
+ phentsize = int(hdr.Phentsize)
+ phnum = int(hdr.Phnum)
+ shoff = int64(hdr.Shoff)
+ shentsize = int(hdr.Shentsize)
+ shnum = int(hdr.Shnum)
+ shstrndx = int(hdr.Shstrndx)
+ case ELFCLASS64:
+ hdr := new(Header64)
+ sr.Seek(0, os.SEEK_SET)
+ if err := binary.Read(sr, f.ByteOrder, hdr); err != nil {
+ return nil, err
+ }
+ f.Type = Type(hdr.Type)
+ f.Machine = Machine(hdr.Machine)
+ f.Entry = uint64(hdr.Entry)
+ if v := Version(hdr.Version); v != f.Version {
+ return nil, &FormatError{0, "mismatched ELF version", v}
+ }
+ phoff = int64(hdr.Phoff)
+ phentsize = int(hdr.Phentsize)
+ phnum = int(hdr.Phnum)
+ shoff = int64(hdr.Shoff)
+ shentsize = int(hdr.Shentsize)
+ shnum = int(hdr.Shnum)
+ shstrndx = int(hdr.Shstrndx)
+ }
+
+ if shnum > 0 && shoff > 0 && (shstrndx < 0 || shstrndx >= shnum) {
+ return nil, &FormatError{0, "invalid ELF shstrndx", shstrndx}
+ }
+
+ // Read program headers
+ f.Progs = make([]*Prog, phnum)
+ for i := 0; i < phnum; i++ {
+ off := phoff + int64(i)*int64(phentsize)
+ sr.Seek(off, os.SEEK_SET)
+ p := new(Prog)
+ switch f.Class {
+ case ELFCLASS32:
+ ph := new(Prog32)
+ if err := binary.Read(sr, f.ByteOrder, ph); err != nil {
+ return nil, err
+ }
+ p.ProgHeader = ProgHeader{
+ Type: ProgType(ph.Type),
+ Flags: ProgFlag(ph.Flags),
+ Off: uint64(ph.Off),
+ Vaddr: uint64(ph.Vaddr),
+ Paddr: uint64(ph.Paddr),
+ Filesz: uint64(ph.Filesz),
+ Memsz: uint64(ph.Memsz),
+ Align: uint64(ph.Align),
+ }
+ case ELFCLASS64:
+ ph := new(Prog64)
+ if err := binary.Read(sr, f.ByteOrder, ph); err != nil {
+ return nil, err
+ }
+ p.ProgHeader = ProgHeader{
+ Type: ProgType(ph.Type),
+ Flags: ProgFlag(ph.Flags),
+ Off: uint64(ph.Off),
+ Vaddr: uint64(ph.Vaddr),
+ Paddr: uint64(ph.Paddr),
+ Filesz: uint64(ph.Filesz),
+ Memsz: uint64(ph.Memsz),
+ Align: uint64(ph.Align),
+ }
+ }
+ p.sr = io.NewSectionReader(r, int64(p.Off), int64(p.Filesz))
+ p.ReaderAt = p.sr
+ f.Progs[i] = p
+ }
+
+ // Read section headers
+ f.Sections = make([]*Section, shnum)
+ names := make([]uint32, shnum)
+ for i := 0; i < shnum; i++ {
+ off := shoff + int64(i)*int64(shentsize)
+ sr.Seek(off, os.SEEK_SET)
+ s := new(Section)
+ switch f.Class {
+ case ELFCLASS32:
+ sh := new(Section32)
+ if err := binary.Read(sr, f.ByteOrder, sh); err != nil {
+ return nil, err
+ }
+ names[i] = sh.Name
+ s.SectionHeader = SectionHeader{
+ Type: SectionType(sh.Type),
+ Flags: SectionFlag(sh.Flags),
+ Addr: uint64(sh.Addr),
+ Offset: uint64(sh.Off),
+ FileSize: uint64(sh.Size),
+ Link: uint32(sh.Link),
+ Info: uint32(sh.Info),
+ Addralign: uint64(sh.Addralign),
+ Entsize: uint64(sh.Entsize),
+ }
+ case ELFCLASS64:
+ sh := new(Section64)
+ if err := binary.Read(sr, f.ByteOrder, sh); err != nil {
+ return nil, err
+ }
+ names[i] = sh.Name
+ s.SectionHeader = SectionHeader{
+ Type: SectionType(sh.Type),
+ Flags: SectionFlag(sh.Flags),
+ Offset: uint64(sh.Off),
+ FileSize: uint64(sh.Size),
+ Addr: uint64(sh.Addr),
+ Link: uint32(sh.Link),
+ Info: uint32(sh.Info),
+ Addralign: uint64(sh.Addralign),
+ Entsize: uint64(sh.Entsize),
+ }
+ }
+ s.sr = io.NewSectionReader(r, int64(s.Offset), int64(s.FileSize))
+
+ if s.Flags&SHF_COMPRESSED == 0 {
+ s.ReaderAt = s.sr
+ s.Size = s.FileSize
+ } else {
+ // Read the compression header.
+ switch f.Class {
+ case ELFCLASS32:
+ ch := new(Chdr32)
+ if err := binary.Read(s.sr, f.ByteOrder, ch); err != nil {
+ return nil, err
+ }
+ s.compressionType = CompressionType(ch.Type)
+ s.Size = uint64(ch.Size)
+ s.Addralign = uint64(ch.Addralign)
+ s.compressionOffset = int64(binary.Size(ch))
+ case ELFCLASS64:
+ ch := new(Chdr64)
+ if err := binary.Read(s.sr, f.ByteOrder, ch); err != nil {
+ return nil, err
+ }
+ s.compressionType = CompressionType(ch.Type)
+ s.Size = ch.Size
+ s.Addralign = ch.Addralign
+ s.compressionOffset = int64(binary.Size(ch))
+ }
+ }
+
+ f.Sections[i] = s
+ }
+
+ if len(f.Sections) == 0 {
+ return f, nil
+ }
+
+ // Load section header string table.
+ shstrtab, err := f.Sections[shstrndx].Data()
+ if err != nil {
+ return nil, err
+ }
+ for i, s := range f.Sections {
+ var ok bool
+ s.Name, ok = getString(shstrtab, int(names[i]))
+ if !ok {
+ return nil, &FormatError{shoff + int64(i*shentsize), "bad section name index", names[i]}
+ }
+ }
+
+ return f, nil
+}
+
+// getSymbols returns a slice of Symbols from parsing the symbol table
+// with the given type, along with the associated string table.
+func (f *File) getSymbols(typ SectionType) ([]Symbol, []byte, error) {
+ switch f.Class {
+ case ELFCLASS64:
+ return f.getSymbols64(typ)
+
+ case ELFCLASS32:
+ return f.getSymbols32(typ)
+ }
+
+ return nil, nil, errors.New("not implemented")
+}
+
+// ErrNoSymbols is returned by File.Symbols and File.DynamicSymbols
+// if there is no such section in the File.
+var ErrNoSymbols = errors.New("no symbol section")
+
+func (f *File) getSymbols32(typ SectionType) ([]Symbol, []byte, error) {
+ symtabSection := f.SectionByType(typ)
+ if symtabSection == nil {
+ return nil, nil, ErrNoSymbols
+ }
+
+ data, err := symtabSection.Data()
+ if err != nil {
+ return nil, nil, errors.New("cannot load symbol section")
+ }
+ symtab := bytes.NewReader(data)
+ if symtab.Len()%Sym32Size != 0 {
+ return nil, nil, errors.New("length of symbol section is not a multiple of SymSize")
+ }
+
+ strdata, err := f.stringTable(symtabSection.Link)
+ if err != nil {
+ return nil, nil, errors.New("cannot load string table section")
+ }
+
+ // The first entry is all zeros.
+ var skip [Sym32Size]byte
+ symtab.Read(skip[:])
+
+ symbols := make([]Symbol, symtab.Len()/Sym32Size)
+
+ i := 0
+ var sym Sym32
+ for symtab.Len() > 0 {
+ binary.Read(symtab, f.ByteOrder, &sym)
+ str, _ := getString(strdata, int(sym.Name))
+ symbols[i].Name = str
+ symbols[i].Info = sym.Info
+ symbols[i].Other = sym.Other
+ symbols[i].Section = SectionIndex(sym.Shndx)
+ symbols[i].Value = uint64(sym.Value)
+ symbols[i].Size = uint64(sym.Size)
+ i++
+ }
+
+ return symbols, strdata, nil
+}
+
+func (f *File) getSymbols64(typ SectionType) ([]Symbol, []byte, error) {
+ symtabSection := f.SectionByType(typ)
+ if symtabSection == nil {
+ return nil, nil, ErrNoSymbols
+ }
+
+ data, err := symtabSection.Data()
+ if err != nil {
+ return nil, nil, errors.New("cannot load symbol section")
+ }
+ symtab := bytes.NewReader(data)
+ if symtab.Len()%Sym64Size != 0 {
+ return nil, nil, errors.New("length of symbol section is not a multiple of Sym64Size")
+ }
+
+ strdata, err := f.stringTable(symtabSection.Link)
+ if err != nil {
+ return nil, nil, errors.New("cannot load string table section")
+ }
+
+ // The first entry is all zeros.
+ var skip [Sym64Size]byte
+ symtab.Read(skip[:])
+
+ symbols := make([]Symbol, symtab.Len()/Sym64Size)
+
+ i := 0
+ var sym Sym64
+ for symtab.Len() > 0 {
+ binary.Read(symtab, f.ByteOrder, &sym)
+ str, _ := getString(strdata, int(sym.Name))
+ symbols[i].Name = str
+ symbols[i].Info = sym.Info
+ symbols[i].Other = sym.Other
+ symbols[i].Section = SectionIndex(sym.Shndx)
+ symbols[i].Value = sym.Value
+ symbols[i].Size = sym.Size
+ i++
+ }
+
+ return symbols, strdata, nil
+}
+
+// getString extracts a string from an ELF string table.
+func getString(section []byte, start int) (string, bool) {
+ if start < 0 || start >= len(section) {
+ return "", false
+ }
+
+ for end := start; end < len(section); end++ {
+ if section[end] == 0 {
+ return string(section[start:end]), true
+ }
+ }
+ return "", false
+}
+
+// Section returns a section with the given name, or nil if no such
+// section exists.
+func (f *File) Section(name string) *Section {
+ for _, s := range f.Sections {
+ if s.Name == name {
+ return s
+ }
+ }
+ return nil
+}
+
+// applyRelocations applies relocations to dst. rels is a relocations section
+// in RELA format.
+func (f *File) applyRelocations(dst []byte, rels []byte) error {
+ switch {
+ case f.Class == ELFCLASS64 && f.Machine == EM_X86_64:
+ return f.applyRelocationsAMD64(dst, rels)
+ case f.Class == ELFCLASS32 && f.Machine == EM_386:
+ return f.applyRelocations386(dst, rels)
+ case f.Class == ELFCLASS32 && f.Machine == EM_ARM:
+ return f.applyRelocationsARM(dst, rels)
+ case f.Class == ELFCLASS64 && f.Machine == EM_AARCH64:
+ return f.applyRelocationsARM64(dst, rels)
+ case f.Class == ELFCLASS32 && f.Machine == EM_PPC:
+ return f.applyRelocationsPPC(dst, rels)
+ case f.Class == ELFCLASS64 && f.Machine == EM_PPC64:
+ return f.applyRelocationsPPC64(dst, rels)
+ case f.Class == ELFCLASS64 && f.Machine == EM_MIPS:
+ return f.applyRelocationsMIPS64(dst, rels)
+ default:
+ return errors.New("applyRelocations: not implemented")
+ }
+}
+
+func (f *File) applyRelocationsAMD64(dst []byte, rels []byte) error {
+ // 24 is the size of Rela64.
+ if len(rels)%24 != 0 {
+ return errors.New("length of relocation section is not a multiple of 24")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela64
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ symNo := rela.Info >> 32
+ t := R_X86_64(rela.Info & 0xffff)
+
+ if symNo == 0 || symNo > uint64(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ if SymType(sym.Info&0xf) != STT_SECTION {
+ // We don't handle non-section relocations for now.
+ continue
+ }
+
+ // There are relocations, so this must be a normal
+ // object file, and we only look at section symbols,
+ // so we assume that the symbol value is 0.
+
+ switch t {
+ case R_X86_64_64:
+ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend))
+ case R_X86_64_32:
+ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend))
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocations386(dst []byte, rels []byte) error {
+ // 8 is the size of Rel32.
+ if len(rels)%8 != 0 {
+ return errors.New("length of relocation section is not a multiple of 8")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rel Rel32
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rel)
+ symNo := rel.Info >> 8
+ t := R_386(rel.Info & 0xff)
+
+ if symNo == 0 || symNo > uint32(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+
+ if t == R_386_32 {
+ if rel.Off+4 >= uint32(len(dst)) {
+ continue
+ }
+ val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4])
+ val += uint32(sym.Value)
+ f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val)
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocationsARM(dst []byte, rels []byte) error {
+ // 8 is the size of Rel32.
+ if len(rels)%8 != 0 {
+ return errors.New("length of relocation section is not a multiple of 8")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rel Rel32
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rel)
+ symNo := rel.Info >> 8
+ t := R_ARM(rel.Info & 0xff)
+
+ if symNo == 0 || symNo > uint32(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+
+ switch t {
+ case R_ARM_ABS32:
+ if rel.Off+4 >= uint32(len(dst)) {
+ continue
+ }
+ val := f.ByteOrder.Uint32(dst[rel.Off : rel.Off+4])
+ val += uint32(sym.Value)
+ f.ByteOrder.PutUint32(dst[rel.Off:rel.Off+4], val)
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocationsARM64(dst []byte, rels []byte) error {
+ // 24 is the size of Rela64.
+ if len(rels)%24 != 0 {
+ return errors.New("length of relocation section is not a multiple of 24")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela64
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ symNo := rela.Info >> 32
+ t := R_AARCH64(rela.Info & 0xffff)
+
+ if symNo == 0 || symNo > uint64(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ if SymType(sym.Info&0xf) != STT_SECTION {
+ // We don't handle non-section relocations for now.
+ continue
+ }
+
+ // There are relocations, so this must be a normal
+ // object file, and we only look at section symbols,
+ // so we assume that the symbol value is 0.
+
+ switch t {
+ case R_AARCH64_ABS64:
+ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend))
+ case R_AARCH64_ABS32:
+ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend))
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocationsPPC(dst []byte, rels []byte) error {
+ // 12 is the size of Rela32.
+ if len(rels)%12 != 0 {
+ return errors.New("length of relocation section is not a multiple of 12")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela32
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ symNo := rela.Info >> 8
+ t := R_PPC(rela.Info & 0xff)
+
+ if symNo == 0 || symNo > uint32(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ if SymType(sym.Info&0xf) != STT_SECTION {
+ // We don't handle non-section relocations for now.
+ continue
+ }
+
+ switch t {
+ case R_PPC_ADDR32:
+ if rela.Off+4 >= uint32(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend))
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocationsPPC64(dst []byte, rels []byte) error {
+ // 24 is the size of Rela64.
+ if len(rels)%24 != 0 {
+ return errors.New("length of relocation section is not a multiple of 24")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela64
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ symNo := rela.Info >> 32
+ t := R_PPC64(rela.Info & 0xffff)
+
+ if symNo == 0 || symNo > uint64(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ if SymType(sym.Info&0xf) != STT_SECTION {
+ // We don't handle non-section relocations for now.
+ continue
+ }
+
+ switch t {
+ case R_PPC64_ADDR64:
+ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend))
+ case R_PPC64_ADDR32:
+ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend))
+ }
+ }
+
+ return nil
+}
+
+func (f *File) applyRelocationsMIPS64(dst []byte, rels []byte) error {
+ // 24 is the size of Rela64.
+ if len(rels)%24 != 0 {
+ return errors.New("length of relocation section is not a multiple of 24")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela64
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ var symNo uint64
+ var t R_MIPS
+ if f.ByteOrder == binary.BigEndian {
+ symNo = rela.Info >> 32
+ t = R_MIPS(rela.Info & 0xff)
+ } else {
+ symNo = rela.Info & 0xffffffff
+ t = R_MIPS(rela.Info >> 56)
+ }
+
+ if symNo == 0 || symNo > uint64(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ if SymType(sym.Info&0xf) != STT_SECTION {
+ // We don't handle non-section relocations for now.
+ continue
+ }
+
+ switch t {
+ case R_MIPS_64:
+ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend))
+ case R_MIPS_32:
+ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend))
+ }
+ }
+
+ return nil
+}
+
+// DWARF returns the DWARF debug data for f, inflating ZLIB-packed
+// ".zdebug_*" sections and applying any relocations recorded against
+// the debug sections before handing the bytes to debug/dwarf.
+func (f *File) DWARF() (*dwarf.Data, error) {
+	// sectionData gets the data for s, checks its size, and
+	// applies any applicable relocations.
+	sectionData := func(i int, s *Section) ([]byte, error) {
+		b, err := s.Data()
+		if err != nil && uint64(len(b)) < s.Size {
+			// A read error is only fatal when it left us short of
+			// the section's declared size.
+			return nil, err
+		}
+
+		// ".zdebug_*" sections begin with a 4-byte "ZLIB" magic
+		// followed by a big-endian uint64 uncompressed length.
+		if len(b) >= 12 && string(b[:4]) == "ZLIB" {
+			dlen := binary.BigEndian.Uint64(b[4:12])
+			dbuf := make([]byte, dlen)
+			r, err := zlib.NewReader(bytes.NewBuffer(b[12:]))
+			if err != nil {
+				return nil, err
+			}
+			if _, err := io.ReadFull(r, dbuf); err != nil {
+				return nil, err
+			}
+			if err := r.Close(); err != nil {
+				return nil, err
+			}
+			b = dbuf
+		}
+
+		// Apply every SHT_REL/SHT_RELA section whose Info field
+		// names section index i as its target.
+		for _, r := range f.Sections {
+			if r.Type != SHT_RELA && r.Type != SHT_REL {
+				continue
+			}
+			if int(r.Info) != i {
+				continue
+			}
+			rd, err := r.Data()
+			if err != nil {
+				return nil, err
+			}
+			err = f.applyRelocations(b, rd)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return b, nil
+	}
+
+	// There are many other DWARF sections, but these
+	// are the ones the debug/dwarf package uses.
+	// Don't bother loading others.
+	var dat = map[string][]byte{"abbrev": nil, "info": nil, "str": nil, "line": nil}
+	for i, s := range f.Sections {
+		suffix := ""
+		switch {
+		case strings.HasPrefix(s.Name, ".debug_"):
+			suffix = s.Name[7:]
+		case strings.HasPrefix(s.Name, ".zdebug_"):
+			suffix = s.Name[8:]
+		default:
+			continue
+		}
+		if _, ok := dat[suffix]; !ok {
+			continue
+		}
+		b, err := sectionData(i, s)
+		if err != nil {
+			return nil, err
+		}
+		dat[suffix] = b
+	}
+
+	d, err := dwarf.New(dat["abbrev"], nil, nil, dat["info"], dat["line"], nil, nil, dat["str"])
+	if err != nil {
+		return nil, err
+	}
+
+	// Look for DWARF4 .debug_types sections.
+	for i, s := range f.Sections {
+		if s.Name == ".debug_types" {
+			b, err := sectionData(i, s)
+			if err != nil {
+				return nil, err
+			}
+
+			err = d.AddTypes(fmt.Sprintf("types-%d", i), b)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return d, nil
+}
+
+// Symbols returns the symbol table for f. The symbols will be listed in the order
+// they appear in f.
+//
+// For compatibility with Go 1.0, Symbols omits the null symbol at index 0.
+// After retrieving the symbols as symtab, an externally supplied index x
+// corresponds to symtab[x-1], not symtab[x].
+func (f *File) Symbols() ([]Symbol, error) {
+	syms, _, err := f.getSymbols(SHT_SYMTAB)
+	return syms, err
+}
+
+// DynamicSymbols returns the dynamic symbol table for f. The symbols
+// will be listed in the order they appear in f.
+//
+// For compatibility with Symbols, DynamicSymbols omits the null symbol at index 0.
+// After retrieving the symbols as symtab, an externally supplied index x
+// corresponds to symtab[x-1], not symtab[x].
+func (f *File) DynamicSymbols() ([]Symbol, error) {
+	syms, _, err := f.getSymbols(SHT_DYNSYM)
+	return syms, err
+}
+
+// ImportedSymbol describes one dynamic symbol that the binary expects
+// to be supplied by another library at load time.
+type ImportedSymbol struct {
+	Name    string // symbol name from the dynamic symbol table
+	Version string // GNU symbol version (from verneed), if any
+	Library string // library expected to provide the symbol, if known
+}
+
+// ImportedSymbols returns the names of all symbols
+// referred to by the binary f that are expected to be
+// satisfied by other libraries at dynamic load time.
+// It does not return weak symbols.
+func (f *File) ImportedSymbols() ([]ImportedSymbol, error) {
+	syms, strdata, err := f.getSymbols(SHT_DYNSYM)
+	if err != nil {
+		return nil, err
+	}
+	f.gnuVersionInit(strdata)
+	var imports []ImportedSymbol
+	for idx, s := range syms {
+		// Only undefined global symbols count as imports; weak
+		// symbols are deliberately excluded.
+		if ST_BIND(s.Info) != STB_GLOBAL || s.Section != SHN_UNDEF {
+			continue
+		}
+		imports = append(imports, ImportedSymbol{Name: s.Name})
+		f.gnuVersion(idx, &imports[len(imports)-1])
+	}
+	return imports, nil
+}
+
+// verneed records one entry of the GNU version-needs table:
+// which version name is required from which library file.
+type verneed struct {
+	File string // library (DT_NEEDED-style) file name
+	Name string // version string, e.g. "GLIBC_2.2.5"
+}
+
+// gnuVersionInit parses the GNU version tables
+// for use by calls to gnuVersion.
+// str is the dynamic string table that the file/name offsets in the
+// version section index into.
+func (f *File) gnuVersionInit(str []byte) {
+	// Accumulate verneed information.
+	vn := f.SectionByType(SHT_GNU_VERNEED)
+	if vn == nil {
+		return
+	}
+	d, _ := vn.Data()
+
+	var need []verneed
+	i := 0
+	for {
+		// Each Verneed header is 16 bytes:
+		// version, count, file offset, aux offset, next offset.
+		if i+16 > len(d) {
+			break
+		}
+		vers := f.ByteOrder.Uint16(d[i : i+2])
+		if vers != 1 {
+			// Only version 1 of the verneed format is understood.
+			break
+		}
+		cnt := f.ByteOrder.Uint16(d[i+2 : i+4])
+		fileoff := f.ByteOrder.Uint32(d[i+4 : i+8])
+		aux := f.ByteOrder.Uint32(d[i+8 : i+12])
+		next := f.ByteOrder.Uint32(d[i+12 : i+16])
+		file, _ := getString(str, int(fileoff))
+
+		var name string
+		// Walk the cnt Vernaux records hanging off this header.
+		j := i + int(aux)
+		for c := 0; c < int(cnt); c++ {
+			if j+16 > len(d) {
+				break
+			}
+			// hash := f.ByteOrder.Uint32(d[j:j+4])
+			// flags := f.ByteOrder.Uint16(d[j+4:j+6])
+			other := f.ByteOrder.Uint16(d[j+6 : j+8])
+			nameoff := f.ByteOrder.Uint32(d[j+8 : j+12])
+			next := f.ByteOrder.Uint32(d[j+12 : j+16])
+			name, _ = getString(str, int(nameoff))
+			ndx := int(other)
+			// Grow need so it can be indexed directly by the
+			// version index that versym entries will carry.
+			if ndx >= len(need) {
+				a := make([]verneed, 2*(ndx+1))
+				copy(a, need)
+				need = a
+			}
+
+			need[ndx] = verneed{file, name}
+			if next == 0 {
+				break
+			}
+			j += int(next)
+		}
+
+		if next == 0 {
+			break
+		}
+		i += int(next)
+	}
+
+	// Versym parallels symbol table, indexing into verneed.
+	vs := f.SectionByType(SHT_GNU_VERSYM)
+	if vs == nil {
+		return
+	}
+	d, _ = vs.Data()
+
+	f.gnuNeed = need
+	f.gnuVersym = d
+}
+
+// gnuVersion adds Library and Version information to sym,
+// which came from offset i of the symbol table.
+func (f *File) gnuVersion(i int, sym *ImportedSymbol) {
+	// Each entry is two bytes; the +1 compensates for the null
+	// symbol at index 0 that Symbols/DynamicSymbols omit.
+	i = (i + 1) * 2
+	// Require the full 2-byte entry to be in range. The previous
+	// check (i >= len) allowed i == len-1, and the Uint16 read
+	// below would then index past a truncated or odd-length
+	// versym section.
+	if i+2 > len(f.gnuVersym) {
+		return
+	}
+	j := int(f.ByteOrder.Uint16(f.gnuVersym[i:]))
+	// Indexes 0 (local) and 1 (global) are reserved and carry no
+	// verneed entry.
+	if j < 2 || j >= len(f.gnuNeed) {
+		return
+	}
+	n := &f.gnuNeed[j]
+	sym.Library = n.File
+	sym.Version = n.Name
+}
+
+// ImportedLibraries returns the names of all libraries
+// referred to by the binary f that are expected to be
+// linked with the binary at dynamic link time.
+//
+// These are the DT_NEEDED entries of the dynamic section.
+func (f *File) ImportedLibraries() ([]string, error) {
+	return f.DynString(DT_NEEDED)
+}
+
+// DynString returns the strings listed for the given tag in the file's dynamic
+// section.
+//
+// The tag must be one that takes string values: DT_NEEDED, DT_SONAME, DT_RPATH, or
+// DT_RUNPATH.
+func (f *File) DynString(tag DynTag) ([]string, error) {
+	switch tag {
+	case DT_NEEDED, DT_SONAME, DT_RPATH, DT_RUNPATH:
+	default:
+		return nil, fmt.Errorf("non-string-valued tag %v", tag)
+	}
+	ds := f.SectionByType(SHT_DYNAMIC)
+	if ds == nil {
+		// not dynamic, so no libraries
+		return nil, nil
+	}
+	d, err := ds.Data()
+	if err != nil {
+		return nil, err
+	}
+	str, err := f.stringTable(ds.Link)
+	if err != nil {
+		return nil, err
+	}
+	var all []string
+	for len(d) > 0 {
+		var t DynTag
+		var v uint64
+		switch f.Class {
+		case ELFCLASS32:
+			// Guard against a truncated final entry: the
+			// unchecked d[0:8] slice below used to panic.
+			if len(d) < 8 {
+				return all, nil
+			}
+			t = DynTag(f.ByteOrder.Uint32(d[0:4]))
+			v = uint64(f.ByteOrder.Uint32(d[4:8]))
+			d = d[8:]
+		case ELFCLASS64:
+			if len(d) < 16 {
+				return all, nil
+			}
+			t = DynTag(f.ByteOrder.Uint64(d[0:8]))
+			v = f.ByteOrder.Uint64(d[8:16])
+			d = d[16:]
+		default:
+			// Unknown class: entries cannot be decoded, and d
+			// would never shrink, so bail out rather than loop
+			// forever.
+			return all, nil
+		}
+		if t == tag {
+			s, ok := getString(str, int(v))
+			if ok {
+				all = append(all, s)
+			}
+		}
+	}
+	return all, nil
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/file_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/file_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/elf/file_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/elf/file_test.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,716 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package elf
+
+import (
+ "bytes"
+ "compress/gzip"
+ "debug/dwarf"
+ "encoding/binary"
+ "io"
+ "math/rand"
+ "net"
+ "os"
+ "path"
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+// fileTest describes one golden ELF file in testdata: the expected
+// parsed file header, section headers, program headers, and DT_NEEDED
+// entries.
+type fileTest struct {
+	file     string
+	hdr      FileHeader
+	sections []SectionHeader
+	progs    []ProgHeader
+	needed   []string
+}
+
+var fileTests = []fileTest{
+ {
+ "testdata/gcc-386-freebsd-exec",
+ FileHeader{ELFCLASS32, ELFDATA2LSB, EV_CURRENT, ELFOSABI_FREEBSD, 0, binary.LittleEndian, ET_EXEC, EM_386, 0x80483cc},
+ []SectionHeader{
+ {"", SHT_NULL, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+ {".interp", SHT_PROGBITS, SHF_ALLOC, 0x80480d4, 0xd4, 0x15, 0x0, 0x0, 0x1, 0x0, 0x15},
+ {".hash", SHT_HASH, SHF_ALLOC, 0x80480ec, 0xec, 0x90, 0x3, 0x0, 0x4, 0x4, 0x90},
+ {".dynsym", SHT_DYNSYM, SHF_ALLOC, 0x804817c, 0x17c, 0x110, 0x4, 0x1, 0x4, 0x10, 0x110},
+ {".dynstr", SHT_STRTAB, SHF_ALLOC, 0x804828c, 0x28c, 0xbb, 0x0, 0x0, 0x1, 0x0, 0xbb},
+ {".rel.plt", SHT_REL, SHF_ALLOC, 0x8048348, 0x348, 0x20, 0x3, 0x7, 0x4, 0x8, 0x20},
+ {".init", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x8048368, 0x368, 0x11, 0x0, 0x0, 0x4, 0x0, 0x11},
+ {".plt", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x804837c, 0x37c, 0x50, 0x0, 0x0, 0x4, 0x4, 0x50},
+ {".text", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x80483cc, 0x3cc, 0x180, 0x0, 0x0, 0x4, 0x0, 0x180},
+ {".fini", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x804854c, 0x54c, 0xc, 0x0, 0x0, 0x4, 0x0, 0xc},
+ {".rodata", SHT_PROGBITS, SHF_ALLOC, 0x8048558, 0x558, 0xa3, 0x0, 0x0, 0x1, 0x0, 0xa3},
+ {".data", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x80495fc, 0x5fc, 0xc, 0x0, 0x0, 0x4, 0x0, 0xc},
+ {".eh_frame", SHT_PROGBITS, SHF_ALLOC, 0x8049608, 0x608, 0x4, 0x0, 0x0, 0x4, 0x0, 0x4},
+ {".dynamic", SHT_DYNAMIC, SHF_WRITE + SHF_ALLOC, 0x804960c, 0x60c, 0x98, 0x4, 0x0, 0x4, 0x8, 0x98},
+ {".ctors", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x80496a4, 0x6a4, 0x8, 0x0, 0x0, 0x4, 0x0, 0x8},
+ {".dtors", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x80496ac, 0x6ac, 0x8, 0x0, 0x0, 0x4, 0x0, 0x8},
+ {".jcr", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x80496b4, 0x6b4, 0x4, 0x0, 0x0, 0x4, 0x0, 0x4},
+ {".got", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x80496b8, 0x6b8, 0x1c, 0x0, 0x0, 0x4, 0x4, 0x1c},
+ {".bss", SHT_NOBITS, SHF_WRITE + SHF_ALLOC, 0x80496d4, 0x6d4, 0x20, 0x0, 0x0, 0x4, 0x0, 0x20},
+ {".comment", SHT_PROGBITS, 0x0, 0x0, 0x6d4, 0x12d, 0x0, 0x0, 0x1, 0x0, 0x12d},
+ {".debug_aranges", SHT_PROGBITS, 0x0, 0x0, 0x801, 0x20, 0x0, 0x0, 0x1, 0x0, 0x20},
+ {".debug_pubnames", SHT_PROGBITS, 0x0, 0x0, 0x821, 0x1b, 0x0, 0x0, 0x1, 0x0, 0x1b},
+ {".debug_info", SHT_PROGBITS, 0x0, 0x0, 0x83c, 0x11d, 0x0, 0x0, 0x1, 0x0, 0x11d},
+ {".debug_abbrev", SHT_PROGBITS, 0x0, 0x0, 0x959, 0x41, 0x0, 0x0, 0x1, 0x0, 0x41},
+ {".debug_line", SHT_PROGBITS, 0x0, 0x0, 0x99a, 0x35, 0x0, 0x0, 0x1, 0x0, 0x35},
+ {".debug_frame", SHT_PROGBITS, 0x0, 0x0, 0x9d0, 0x30, 0x0, 0x0, 0x4, 0x0, 0x30},
+ {".debug_str", SHT_PROGBITS, 0x0, 0x0, 0xa00, 0xd, 0x0, 0x0, 0x1, 0x0, 0xd},
+ {".shstrtab", SHT_STRTAB, 0x0, 0x0, 0xa0d, 0xf8, 0x0, 0x0, 0x1, 0x0, 0xf8},
+ {".symtab", SHT_SYMTAB, 0x0, 0x0, 0xfb8, 0x4b0, 0x1d, 0x38, 0x4, 0x10, 0x4b0},
+ {".strtab", SHT_STRTAB, 0x0, 0x0, 0x1468, 0x206, 0x0, 0x0, 0x1, 0x0, 0x206},
+ },
+ []ProgHeader{
+ {PT_PHDR, PF_R + PF_X, 0x34, 0x8048034, 0x8048034, 0xa0, 0xa0, 0x4},
+ {PT_INTERP, PF_R, 0xd4, 0x80480d4, 0x80480d4, 0x15, 0x15, 0x1},
+ {PT_LOAD, PF_R + PF_X, 0x0, 0x8048000, 0x8048000, 0x5fb, 0x5fb, 0x1000},
+ {PT_LOAD, PF_R + PF_W, 0x5fc, 0x80495fc, 0x80495fc, 0xd8, 0xf8, 0x1000},
+ {PT_DYNAMIC, PF_R + PF_W, 0x60c, 0x804960c, 0x804960c, 0x98, 0x98, 0x4},
+ },
+ []string{"libc.so.6"},
+ },
+ {
+ "testdata/gcc-amd64-linux-exec",
+ FileHeader{ELFCLASS64, ELFDATA2LSB, EV_CURRENT, ELFOSABI_NONE, 0, binary.LittleEndian, ET_EXEC, EM_X86_64, 0x4003e0},
+ []SectionHeader{
+ {"", SHT_NULL, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+ {".interp", SHT_PROGBITS, SHF_ALLOC, 0x400200, 0x200, 0x1c, 0x0, 0x0, 0x1, 0x0, 0x1c},
+ {".note.ABI-tag", SHT_NOTE, SHF_ALLOC, 0x40021c, 0x21c, 0x20, 0x0, 0x0, 0x4, 0x0, 0x20},
+ {".hash", SHT_HASH, SHF_ALLOC, 0x400240, 0x240, 0x24, 0x5, 0x0, 0x8, 0x4, 0x24},
+ {".gnu.hash", SHT_LOOS + 268435446, SHF_ALLOC, 0x400268, 0x268, 0x1c, 0x5, 0x0, 0x8, 0x0, 0x1c},
+ {".dynsym", SHT_DYNSYM, SHF_ALLOC, 0x400288, 0x288, 0x60, 0x6, 0x1, 0x8, 0x18, 0x60},
+ {".dynstr", SHT_STRTAB, SHF_ALLOC, 0x4002e8, 0x2e8, 0x3d, 0x0, 0x0, 0x1, 0x0, 0x3d},
+ {".gnu.version", SHT_HIOS, SHF_ALLOC, 0x400326, 0x326, 0x8, 0x5, 0x0, 0x2, 0x2, 0x8},
+ {".gnu.version_r", SHT_LOOS + 268435454, SHF_ALLOC, 0x400330, 0x330, 0x20, 0x6, 0x1, 0x8, 0x0, 0x20},
+ {".rela.dyn", SHT_RELA, SHF_ALLOC, 0x400350, 0x350, 0x18, 0x5, 0x0, 0x8, 0x18, 0x18},
+ {".rela.plt", SHT_RELA, SHF_ALLOC, 0x400368, 0x368, 0x30, 0x5, 0xc, 0x8, 0x18, 0x30},
+ {".init", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x400398, 0x398, 0x18, 0x0, 0x0, 0x4, 0x0, 0x18},
+ {".plt", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x4003b0, 0x3b0, 0x30, 0x0, 0x0, 0x4, 0x10, 0x30},
+ {".text", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x4003e0, 0x3e0, 0x1b4, 0x0, 0x0, 0x10, 0x0, 0x1b4},
+ {".fini", SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR, 0x400594, 0x594, 0xe, 0x0, 0x0, 0x4, 0x0, 0xe},
+ {".rodata", SHT_PROGBITS, SHF_ALLOC, 0x4005a4, 0x5a4, 0x11, 0x0, 0x0, 0x4, 0x0, 0x11},
+ {".eh_frame_hdr", SHT_PROGBITS, SHF_ALLOC, 0x4005b8, 0x5b8, 0x24, 0x0, 0x0, 0x4, 0x0, 0x24},
+ {".eh_frame", SHT_PROGBITS, SHF_ALLOC, 0x4005e0, 0x5e0, 0xa4, 0x0, 0x0, 0x8, 0x0, 0xa4},
+ {".ctors", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x600688, 0x688, 0x10, 0x0, 0x0, 0x8, 0x0, 0x10},
+ {".dtors", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x600698, 0x698, 0x10, 0x0, 0x0, 0x8, 0x0, 0x10},
+ {".jcr", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x6006a8, 0x6a8, 0x8, 0x0, 0x0, 0x8, 0x0, 0x8},
+ {".dynamic", SHT_DYNAMIC, SHF_WRITE + SHF_ALLOC, 0x6006b0, 0x6b0, 0x1a0, 0x6, 0x0, 0x8, 0x10, 0x1a0},
+ {".got", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x600850, 0x850, 0x8, 0x0, 0x0, 0x8, 0x8, 0x8},
+ {".got.plt", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x600858, 0x858, 0x28, 0x0, 0x0, 0x8, 0x8, 0x28},
+ {".data", SHT_PROGBITS, SHF_WRITE + SHF_ALLOC, 0x600880, 0x880, 0x18, 0x0, 0x0, 0x8, 0x0, 0x18},
+ {".bss", SHT_NOBITS, SHF_WRITE + SHF_ALLOC, 0x600898, 0x898, 0x8, 0x0, 0x0, 0x4, 0x0, 0x8},
+ {".comment", SHT_PROGBITS, 0x0, 0x0, 0x898, 0x126, 0x0, 0x0, 0x1, 0x0, 0x126},
+ {".debug_aranges", SHT_PROGBITS, 0x0, 0x0, 0x9c0, 0x90, 0x0, 0x0, 0x10, 0x0, 0x90},
+ {".debug_pubnames", SHT_PROGBITS, 0x0, 0x0, 0xa50, 0x25, 0x0, 0x0, 0x1, 0x0, 0x25},
+ {".debug_info", SHT_PROGBITS, 0x0, 0x0, 0xa75, 0x1a7, 0x0, 0x0, 0x1, 0x0, 0x1a7},
+ {".debug_abbrev", SHT_PROGBITS, 0x0, 0x0, 0xc1c, 0x6f, 0x0, 0x0, 0x1, 0x0, 0x6f},
+ {".debug_line", SHT_PROGBITS, 0x0, 0x0, 0xc8b, 0x13f, 0x0, 0x0, 0x1, 0x0, 0x13f},
+ {".debug_str", SHT_PROGBITS, SHF_MERGE + SHF_STRINGS, 0x0, 0xdca, 0xb1, 0x0, 0x0, 0x1, 0x1, 0xb1},
+ {".debug_ranges", SHT_PROGBITS, 0x0, 0x0, 0xe80, 0x90, 0x0, 0x0, 0x10, 0x0, 0x90},
+ {".shstrtab", SHT_STRTAB, 0x0, 0x0, 0xf10, 0x149, 0x0, 0x0, 0x1, 0x0, 0x149},
+ {".symtab", SHT_SYMTAB, 0x0, 0x0, 0x19a0, 0x6f0, 0x24, 0x39, 0x8, 0x18, 0x6f0},
+ {".strtab", SHT_STRTAB, 0x0, 0x0, 0x2090, 0x1fc, 0x0, 0x0, 0x1, 0x0, 0x1fc},
+ },
+ []ProgHeader{
+ {PT_PHDR, PF_R + PF_X, 0x40, 0x400040, 0x400040, 0x1c0, 0x1c0, 0x8},
+ {PT_INTERP, PF_R, 0x200, 0x400200, 0x400200, 0x1c, 0x1c, 1},
+ {PT_LOAD, PF_R + PF_X, 0x0, 0x400000, 0x400000, 0x684, 0x684, 0x200000},
+ {PT_LOAD, PF_R + PF_W, 0x688, 0x600688, 0x600688, 0x210, 0x218, 0x200000},
+ {PT_DYNAMIC, PF_R + PF_W, 0x6b0, 0x6006b0, 0x6006b0, 0x1a0, 0x1a0, 0x8},
+ {PT_NOTE, PF_R, 0x21c, 0x40021c, 0x40021c, 0x20, 0x20, 0x4},
+ {PT_LOOS + 0x474E550, PF_R, 0x5b8, 0x4005b8, 0x4005b8, 0x24, 0x24, 0x4},
+ {PT_LOOS + 0x474E551, PF_R + PF_W, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8},
+ },
+ []string{"libc.so.6"},
+ },
+ {
+ "testdata/hello-world-core.gz",
+ FileHeader{ELFCLASS64, ELFDATA2LSB, EV_CURRENT, ELFOSABI_NONE, 0x0, binary.LittleEndian, ET_CORE, EM_X86_64, 0x0},
+ []SectionHeader{},
+ []ProgHeader{
+ {Type: PT_NOTE, Flags: 0x0, Off: 0x3f8, Vaddr: 0x0, Paddr: 0x0, Filesz: 0x8ac, Memsz: 0x0, Align: 0x0},
+ {Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0x1000, Vaddr: 0x400000, Paddr: 0x0, Filesz: 0x0, Memsz: 0x1000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_R, Off: 0x1000, Vaddr: 0x401000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x2000, Vaddr: 0x402000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0x3000, Vaddr: 0x7f54078b8000, Paddr: 0x0, Filesz: 0x0, Memsz: 0x1b5000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: 0x0, Off: 0x3000, Vaddr: 0x7f5407a6d000, Paddr: 0x0, Filesz: 0x0, Memsz: 0x1ff000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_R, Off: 0x3000, Vaddr: 0x7f5407c6c000, Paddr: 0x0, Filesz: 0x4000, Memsz: 0x4000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x7000, Vaddr: 0x7f5407c70000, Paddr: 0x0, Filesz: 0x2000, Memsz: 0x2000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x9000, Vaddr: 0x7f5407c72000, Paddr: 0x0, Filesz: 0x5000, Memsz: 0x5000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0xe000, Vaddr: 0x7f5407c77000, Paddr: 0x0, Filesz: 0x0, Memsz: 0x22000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0xe000, Vaddr: 0x7f5407e81000, Paddr: 0x0, Filesz: 0x3000, Memsz: 0x3000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x11000, Vaddr: 0x7f5407e96000, Paddr: 0x0, Filesz: 0x3000, Memsz: 0x3000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_R, Off: 0x14000, Vaddr: 0x7f5407e99000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x15000, Vaddr: 0x7f5407e9a000, Paddr: 0x0, Filesz: 0x2000, Memsz: 0x2000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_W + PF_R, Off: 0x17000, Vaddr: 0x7fff79972000, Paddr: 0x0, Filesz: 0x23000, Memsz: 0x23000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0x3a000, Vaddr: 0x7fff799f8000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
+ {Type: PT_LOAD, Flags: PF_X + PF_R, Off: 0x3b000, Vaddr: 0xffffffffff600000, Paddr: 0x0, Filesz: 0x1000, Memsz: 0x1000, Align: 0x1000},
+ },
+ nil,
+ },
+ {
+ "testdata/compressed-32.obj",
+ FileHeader{ELFCLASS32, ELFDATA2LSB, EV_CURRENT, ELFOSABI_NONE, 0x0, binary.LittleEndian, ET_REL, EM_386, 0x0},
+ []SectionHeader{
+ {"", SHT_NULL, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+ {".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, 0x0, 0x34, 0x17, 0x0, 0x0, 0x1, 0x0, 0x17},
+ {".rel.text", SHT_REL, SHF_INFO_LINK, 0x0, 0x3dc, 0x10, 0x13, 0x1, 0x4, 0x8, 0x10},
+ {".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, 0x0, 0x4b, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".bss", SHT_NOBITS, SHF_WRITE | SHF_ALLOC, 0x0, 0x4b, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".rodata", SHT_PROGBITS, SHF_ALLOC, 0x0, 0x4b, 0xd, 0x0, 0x0, 0x1, 0x0, 0xd},
+ {".debug_info", SHT_PROGBITS, SHF_COMPRESSED, 0x0, 0x58, 0xb4, 0x0, 0x0, 0x1, 0x0, 0x84},
+ {".rel.debug_info", SHT_REL, SHF_INFO_LINK, 0x0, 0x3ec, 0xa0, 0x13, 0x6, 0x4, 0x8, 0xa0},
+ {".debug_abbrev", SHT_PROGBITS, 0x0, 0x0, 0xdc, 0x5a, 0x0, 0x0, 0x1, 0x0, 0x5a},
+ {".debug_aranges", SHT_PROGBITS, 0x0, 0x0, 0x136, 0x20, 0x0, 0x0, 0x1, 0x0, 0x20},
+ {".rel.debug_aranges", SHT_REL, SHF_INFO_LINK, 0x0, 0x48c, 0x10, 0x13, 0x9, 0x4, 0x8, 0x10},
+ {".debug_line", SHT_PROGBITS, 0x0, 0x0, 0x156, 0x5c, 0x0, 0x0, 0x1, 0x0, 0x5c},
+ {".rel.debug_line", SHT_REL, SHF_INFO_LINK, 0x0, 0x49c, 0x8, 0x13, 0xb, 0x4, 0x8, 0x8},
+ {".debug_str", SHT_PROGBITS, SHF_MERGE | SHF_STRINGS | SHF_COMPRESSED, 0x0, 0x1b2, 0x10f, 0x0, 0x0, 0x1, 0x1, 0xb3},
+ {".comment", SHT_PROGBITS, SHF_MERGE | SHF_STRINGS, 0x0, 0x265, 0x2a, 0x0, 0x0, 0x1, 0x1, 0x2a},
+ {".note.GNU-stack", SHT_PROGBITS, 0x0, 0x0, 0x28f, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".eh_frame", SHT_PROGBITS, SHF_ALLOC, 0x0, 0x290, 0x38, 0x0, 0x0, 0x4, 0x0, 0x38},
+ {".rel.eh_frame", SHT_REL, SHF_INFO_LINK, 0x0, 0x4a4, 0x8, 0x13, 0x10, 0x4, 0x8, 0x8},
+ {".shstrtab", SHT_STRTAB, 0x0, 0x0, 0x4ac, 0xab, 0x0, 0x0, 0x1, 0x0, 0xab},
+ {".symtab", SHT_SYMTAB, 0x0, 0x0, 0x2c8, 0x100, 0x14, 0xe, 0x4, 0x10, 0x100},
+ {".strtab", SHT_STRTAB, 0x0, 0x0, 0x3c8, 0x13, 0x0, 0x0, 0x1, 0x0, 0x13},
+ },
+ []ProgHeader{},
+ nil,
+ },
+ {
+ "testdata/compressed-64.obj",
+ FileHeader{ELFCLASS64, ELFDATA2LSB, EV_CURRENT, ELFOSABI_NONE, 0x0, binary.LittleEndian, ET_REL, EM_X86_64, 0x0},
+ []SectionHeader{
+ {"", SHT_NULL, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0},
+ {".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, 0x0, 0x40, 0x1b, 0x0, 0x0, 0x1, 0x0, 0x1b},
+ {".rela.text", SHT_RELA, SHF_INFO_LINK, 0x0, 0x488, 0x30, 0x13, 0x1, 0x8, 0x18, 0x30},
+ {".data", SHT_PROGBITS, SHF_WRITE | SHF_ALLOC, 0x0, 0x5b, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".bss", SHT_NOBITS, SHF_WRITE | SHF_ALLOC, 0x0, 0x5b, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".rodata", SHT_PROGBITS, SHF_ALLOC, 0x0, 0x5b, 0xd, 0x0, 0x0, 0x1, 0x0, 0xd},
+ {".debug_info", SHT_PROGBITS, SHF_COMPRESSED, 0x0, 0x68, 0xba, 0x0, 0x0, 0x1, 0x0, 0x72},
+ {".rela.debug_info", SHT_RELA, SHF_INFO_LINK, 0x0, 0x4b8, 0x1c8, 0x13, 0x6, 0x8, 0x18, 0x1c8},
+ {".debug_abbrev", SHT_PROGBITS, 0x0, 0x0, 0xda, 0x5c, 0x0, 0x0, 0x1, 0x0, 0x5c},
+ {".debug_aranges", SHT_PROGBITS, SHF_COMPRESSED, 0x0, 0x136, 0x30, 0x0, 0x0, 0x1, 0x0, 0x2f},
+ {".rela.debug_aranges", SHT_RELA, SHF_INFO_LINK, 0x0, 0x680, 0x30, 0x13, 0x9, 0x8, 0x18, 0x30},
+ {".debug_line", SHT_PROGBITS, 0x0, 0x0, 0x165, 0x60, 0x0, 0x0, 0x1, 0x0, 0x60},
+ {".rela.debug_line", SHT_RELA, SHF_INFO_LINK, 0x0, 0x6b0, 0x18, 0x13, 0xb, 0x8, 0x18, 0x18},
+ {".debug_str", SHT_PROGBITS, SHF_MERGE | SHF_STRINGS | SHF_COMPRESSED, 0x0, 0x1c5, 0x104, 0x0, 0x0, 0x1, 0x1, 0xc3},
+ {".comment", SHT_PROGBITS, SHF_MERGE | SHF_STRINGS, 0x0, 0x288, 0x2a, 0x0, 0x0, 0x1, 0x1, 0x2a},
+ {".note.GNU-stack", SHT_PROGBITS, 0x0, 0x0, 0x2b2, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0},
+ {".eh_frame", SHT_PROGBITS, SHF_ALLOC, 0x0, 0x2b8, 0x38, 0x0, 0x0, 0x8, 0x0, 0x38},
+ {".rela.eh_frame", SHT_RELA, SHF_INFO_LINK, 0x0, 0x6c8, 0x18, 0x13, 0x10, 0x8, 0x18, 0x18},
+ {".shstrtab", SHT_STRTAB, 0x0, 0x0, 0x6e0, 0xb0, 0x0, 0x0, 0x1, 0x0, 0xb0},
+ {".symtab", SHT_SYMTAB, 0x0, 0x0, 0x2f0, 0x180, 0x14, 0xe, 0x8, 0x18, 0x180},
+ {".strtab", SHT_STRTAB, 0x0, 0x0, 0x470, 0x13, 0x0, 0x0, 0x1, 0x0, 0x13},
+ },
+ []ProgHeader{},
+ nil,
+ },
+}
+
+// TestOpen parses each golden file in fileTests and compares the
+// headers, sections, programs, and DT_NEEDED entries against the
+// expected values.
+func TestOpen(t *testing.T) {
+	for i := range fileTests {
+		tt := &fileTests[i]
+
+		var f *File
+		var err error
+		if path.Ext(tt.file) == ".gz" {
+			var r io.ReaderAt
+			if r, err = decompress(tt.file); err == nil {
+				f, err = NewFile(r)
+			}
+		} else {
+			f, err = Open(tt.file)
+		}
+		if err != nil {
+			t.Errorf("cannot open file %s: %v", tt.file, err)
+			continue
+		}
+		// NOTE: defer in a loop keeps every file open until the
+		// test returns; acceptable for this small fixture set.
+		defer f.Close()
+		if !reflect.DeepEqual(f.FileHeader, tt.hdr) {
+			t.Errorf("open %s:\n\thave %#v\n\twant %#v\n", tt.file, f.FileHeader, tt.hdr)
+			continue
+		}
+		for i, s := range f.Sections {
+			if i >= len(tt.sections) {
+				break
+			}
+			sh := &tt.sections[i]
+			if !reflect.DeepEqual(&s.SectionHeader, sh) {
+				t.Errorf("open %s, section %d:\n\thave %#v\n\twant %#v\n", tt.file, i, &s.SectionHeader, sh)
+			}
+		}
+		for i, p := range f.Progs {
+			if i >= len(tt.progs) {
+				break
+			}
+			ph := &tt.progs[i]
+			if !reflect.DeepEqual(&p.ProgHeader, ph) {
+				t.Errorf("open %s, program %d:\n\thave %#v\n\twant %#v\n", tt.file, i, &p.ProgHeader, ph)
+			}
+		}
+		tn := len(tt.sections)
+		fn := len(f.Sections)
+		if tn != fn {
+			t.Errorf("open %s: len(Sections) = %d, want %d", tt.file, fn, tn)
+		}
+		tn = len(tt.progs)
+		fn = len(f.Progs)
+		if tn != fn {
+			t.Errorf("open %s: len(Progs) = %d, want %d", tt.file, fn, tn)
+		}
+		tl := tt.needed
+		fl, err := f.ImportedLibraries()
+		if err != nil {
+			t.Error(err)
+		}
+		if !reflect.DeepEqual(tl, fl) {
+			// Fixed: got (fl) and want (tl) were previously
+			// swapped in this message.
+			t.Errorf("open %s: DT_NEEDED = %v, want %v", tt.file, fl, tl)
+		}
+	}
+}
+
+// elf.NewFile requires io.ReaderAt, which compress/gzip cannot
+// provide. Decompress the file to a bytes.Reader.
+func decompress(gz string) (io.ReaderAt, error) {
+	in, err := os.Open(gz)
+	if err != nil {
+		return nil, err
+	}
+	defer in.Close()
+	r, err := gzip.NewReader(in)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Close()
+	var out bytes.Buffer
+	if _, err := io.Copy(&out, r); err != nil {
+		// Previously a reader over partial data was returned
+		// alongside a non-nil error; return nil instead so the
+		// caller cannot accidentally use truncated content.
+		return nil, err
+	}
+	return bytes.NewReader(out.Bytes()), nil
+}
+
+// relocationTestEntry pairs an entry ordinal within a DWARF reader
+// stream with the exact entry expected at that position.
+type relocationTestEntry struct {
+	entryNumber int
+	entry       *dwarf.Entry
+}
+
+// relocationTest names a relocatable object in testdata and the DWARF
+// entries that must be observed after relocations are applied.
+type relocationTest struct {
+	file    string
+	entries []relocationTestEntry
+}
+
+var relocationTests = []relocationTest{
+ {
+ "testdata/go-relocation-test-gcc441-x86-64.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc441-x86.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.4.1", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "t.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: uint64(0x5), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc424-x86-64.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.2.4 (Ubuntu 4.2.4-1ubuntu4)", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc424.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: uint64(0x6), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc482-aarch64.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -g -fstack-protector", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(0x24), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc492-arm.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 20141224 (prerelease) -march=armv7-a -mfloat-abi=hard -mfpu=vfpv3-d16 -mtls-dialect=gnu -g", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc492.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/root/go/src/debug/elf/testdata", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(0x28), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-clang-arm.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "Debian clang version 3.5.0-10 (tags/RELEASE_350/final) (based on LLVM 3.5.0)", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrStmtList, Val: int64(0x0), Class: dwarf.ClassLinePtr},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(48), Class: dwarf.ClassConstant},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc5-ppc.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C11 5.0.0 20150116 (experimental) -Asystem=linux -Asystem=unix -Asystem=posix -g", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc5-ppc.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(0x44), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc482-ppc64le.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.8.2 -Asystem=linux -Asystem=unix -Asystem=posix -msecure-plt -mtune=power8 -mcpu=power7 -gdwarf-2 -fstack-protector", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-gcc482-ppc64le.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: uint64(0x24), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc492-mips64.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.2 -meb -mabi=64 -march=mips3 -mtune=mips64 -mllsc -mno-shared -g", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-gcc493-mips64le.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C 4.9.3 -mel -mabi=64 -mllsc -mno-shared -g -fstack-protector-strong", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(1), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(100), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/go-relocation-test-clang-x86.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "clang version google3-trunk (trunk r209387)", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "go-relocation-test-clang.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ },
+ }},
+ },
+ },
+ {
+ "testdata/gcc-amd64-openbsd-debug-with-rela.obj",
+ []relocationTestEntry{
+ {203, &dwarf.Entry{
+ Offset: 0xc62,
+ Tag: dwarf.TagMember,
+ Children: false,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrName, Val: "it_interval", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrDeclLine, Val: int64(236), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference},
+ {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x0}, Class: dwarf.ClassExprLoc},
+ },
+ }},
+ {204, &dwarf.Entry{
+ Offset: 0xc70,
+ Tag: dwarf.TagMember,
+ Children: false,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrName, Val: "it_value", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrDeclFile, Val: int64(7), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrDeclLine, Val: int64(237), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrType, Val: dwarf.Offset(0xb7f), Class: dwarf.ClassReference},
+ {Attr: dwarf.AttrDataMemberLoc, Val: []byte{0x23, 0x10}, Class: dwarf.ClassExprLoc},
+ },
+ }},
+ },
+ },
+}
+
+// TestDWARFRelocations checks that DWARF data read from relocatable
+// objects has relocations applied before parsing.
+func TestDWARFRelocations(t *testing.T) {
+	for i, test := range relocationTests {
+		f, err := Open(test.file)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		// Named "data" rather than "dwarf" to avoid shadowing the
+		// imported debug/dwarf package.
+		data, err := f.DWARF()
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		for _, testEntry := range test.entries {
+			reader := data.Reader()
+			// Skip ahead to the entry under test.
+			for j := 0; j < testEntry.entryNumber; j++ {
+				entry, err := reader.Next()
+				if entry == nil || err != nil {
+					t.Errorf("Failed to skip to entry %d: %v", testEntry.entryNumber, err)
+					continue
+				}
+			}
+			entry, err := reader.Next()
+			if err != nil {
+				t.Error(err)
+				continue
+			}
+			if !reflect.DeepEqual(testEntry.entry, entry) {
+				t.Errorf("#%d/%d: mismatch: got:%#v want:%#v", i, testEntry.entryNumber, entry, testEntry.entry)
+				continue
+			}
+		}
+	}
+}
+
+func TestCompressedDWARF(t *testing.T) {
+ // Test file built with GCC 4.8.4 and as 2.24 using:
+ // gcc -Wa,--compress-debug-sections -g -c -o zdebug-test-gcc484-x86-64.obj hello.c
+ f, err := Open("testdata/zdebug-test-gcc484-x86-64.obj")
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarf, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ reader := dwarf.Reader()
+ n := 0
+ for {
+ entry, err := reader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+ n++
+ }
+ if n != 18 {
+ t.Fatalf("want %d DWARF entries, got %d", 18, n)
+ }
+}
+
+func TestCompressedSection(t *testing.T) {
+ // Test files built with gcc -g -S hello.c and assembled with
+ // --compress-debug-sections=zlib-gabi.
+ f, err := Open("testdata/compressed-64.obj")
+ if err != nil {
+ t.Fatal(err)
+ }
+ sec := f.Section(".debug_info")
+ wantData := []byte{
+ 182, 0, 0, 0, 4, 0, 0, 0, 0, 0, 8, 1, 0, 0, 0, 0,
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 8, 7,
+ 0, 0, 0, 0, 2, 1, 8, 0, 0, 0, 0, 2, 2, 7, 0, 0,
+ 0, 0, 2, 4, 7, 0, 0, 0, 0, 2, 1, 6, 0, 0, 0, 0,
+ 2, 2, 5, 0, 0, 0, 0, 3, 4, 5, 105, 110, 116, 0, 2, 8,
+ 5, 0, 0, 0, 0, 2, 8, 7, 0, 0, 0, 0, 4, 8, 114, 0,
+ 0, 0, 2, 1, 6, 0, 0, 0, 0, 5, 0, 0, 0, 0, 1, 4,
+ 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 0, 0,
+ 1, 156, 179, 0, 0, 0, 6, 0, 0, 0, 0, 1, 4, 87, 0, 0,
+ 0, 2, 145, 108, 6, 0, 0, 0, 0, 1, 4, 179, 0, 0, 0, 2,
+ 145, 96, 0, 4, 8, 108, 0, 0, 0, 0,
+ }
+
+ // Test Data method.
+ b, err := sec.Data()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(wantData, b) {
+ t.Fatalf("want data %x, got %x", wantData, b)
+ }
+
+ // Test Open method and seeking.
+ buf, have, count := make([]byte, len(b)), make([]bool, len(b)), 0
+ sf := sec.Open()
+ if got, err := sf.Seek(0, 2); got != int64(len(b)) || err != nil {
+ t.Fatalf("want seek end %d, got %d error %v", len(b), got, err)
+ }
+ if n, err := sf.Read(buf); n != 0 || err != io.EOF {
+ t.Fatalf("want EOF with 0 bytes, got %v with %d bytes", err, n)
+ }
+ pos := int64(len(buf))
+ for count < len(buf) {
+ // Construct random seek arguments.
+ whence := rand.Intn(3)
+ target := rand.Int63n(int64(len(buf)))
+ var offset int64
+ switch whence {
+ case 0:
+ offset = target
+ case 1:
+ offset = target - pos
+ case 2:
+ offset = target - int64(len(buf))
+ }
+ pos, err = sf.Seek(offset, whence)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if pos != target {
+ t.Fatalf("want position %d, got %d", target, pos)
+ }
+
+ // Read data from the new position.
+ end := pos + 16
+ if end > int64(len(buf)) {
+ end = int64(len(buf))
+ }
+ n, err := sf.Read(buf[pos:end])
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < n; i++ {
+ if !have[pos] {
+ have[pos] = true
+ count++
+ }
+ pos++
+ }
+ }
+ if !bytes.Equal(wantData, buf) {
+ t.Fatalf("want data %x, got %x", wantData, buf)
+ }
+}
+
+func TestNoSectionOverlaps(t *testing.T) {
+ // Ensure 6l outputs sections without overlaps.
+ if runtime.GOOS != "linux" && runtime.GOOS != "freebsd" {
+ return // not ELF
+ }
+ _ = net.ResolveIPAddr // force dynamic linkage
+ f, err := Open(os.Args[0])
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ for i, si := range f.Sections {
+ sih := si.SectionHeader
+ if sih.Type == SHT_NOBITS {
+ continue
+ }
+ for j, sj := range f.Sections {
+ sjh := sj.SectionHeader
+ if i == j || sjh.Type == SHT_NOBITS || sih.Offset == sjh.Offset && sih.Size == 0 {
+ continue
+ }
+ if sih.Offset >= sjh.Offset && sih.Offset < sjh.Offset+sjh.Size {
+ t.Errorf("ld produced ELF with section %s within %s: 0x%x <= 0x%x..0x%x < 0x%x",
+ sih.Name, sjh.Name, sjh.Offset, sih.Offset, sih.Offset+sih.Size, sjh.Offset+sjh.Size)
+ }
+ }
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/gosym/pclntab.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/gosym/pclntab.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/debug/gosym/pclntab.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/debug/gosym/pclntab.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,453 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * Line tables
+ */
+
+package gosym
+
+import (
+ "encoding/binary"
+ "sync"
+)
+
+// A LineTable is a data structure mapping program counters to line numbers.
+//
+// In Go 1.1 and earlier, each function (represented by a Func) had its own LineTable,
+// and the line number corresponded to a numbering of all source lines in the
+// program, across all files. That absolute line number would then have to be
+// converted separately to a file name and line number within the file.
+//
+// In Go 1.2, the format of the data changed so that there is a single LineTable
+// for the entire program, shared by all Funcs, and there are no absolute line
+// numbers, just line numbers within specific files.
+//
+// For the most part, LineTable's methods should be treated as an internal
+// detail of the package; callers should use the methods on Table instead.
+type LineTable struct {
+ Data []byte
+ PC uint64
+ Line int
+
+ // Go 1.2 state
+ mu sync.Mutex
+ go12 int // is this in Go 1.2 format? -1 no, 0 unknown, 1 yes
+ binary binary.ByteOrder
+ quantum uint32
+ ptrsize uint32
+ functab []byte
+ nfunctab uint32
+ filetab []byte
+ nfiletab uint32
+ fileMap map[string]uint32
+}
+
+// NOTE(rsc): This is wrong for GOARCH=arm, which uses a quantum of 4,
+// but we have no idea whether we're using arm or not. This only
+// matters in the old (pre-Go 1.2) symbol table format, so it's not worth
+// fixing.
+const oldQuantum = 1
+
+func (t *LineTable) parse(targetPC uint64, targetLine int) (b []byte, pc uint64, line int) {
+ // The PC/line table can be thought of as a sequence of
+ // *
+ // batches. Each update batch results in a (pc, line) pair,
+ // where line applies to every PC from pc up to but not
+ // including the pc of the next pair.
+ //
+ // Here we process each update individually, which simplifies
+ // the code, but makes the corner cases more confusing.
+ b, pc, line = t.Data, t.PC, t.Line
+ for pc <= targetPC && line != targetLine && len(b) > 0 {
+ code := b[0]
+ b = b[1:]
+ switch {
+ case code == 0:
+ if len(b) < 4 {
+ b = b[0:0]
+ break
+ }
+ val := binary.BigEndian.Uint32(b)
+ b = b[4:]
+ line += int(val)
+ case code <= 64:
+ line += int(code)
+ case code <= 128:
+ line -= int(code - 64)
+ default:
+ pc += oldQuantum * uint64(code-128)
+ continue
+ }
+ pc += oldQuantum
+ }
+ return b, pc, line
+}
+
+func (t *LineTable) slice(pc uint64) *LineTable {
+ data, pc, line := t.parse(pc, -1)
+ return &LineTable{Data: data, PC: pc, Line: line}
+}
+
+// PCToLine returns the line number for the given program counter.
+// Callers should use Table's PCToLine method instead.
+func (t *LineTable) PCToLine(pc uint64) int {
+ if t.isGo12() {
+ return t.go12PCToLine(pc)
+ }
+ _, _, line := t.parse(pc, -1)
+ return line
+}
+
+// LineToPC returns the program counter for the given line number,
+// considering only program counters before maxpc.
+// Callers should use Table's LineToPC method instead.
+func (t *LineTable) LineToPC(line int, maxpc uint64) uint64 {
+ if t.isGo12() {
+ return 0
+ }
+ _, pc, line1 := t.parse(maxpc, line)
+ if line1 != line {
+ return 0
+ }
+ // Subtract quantum from PC to account for post-line increment
+ return pc - oldQuantum
+}
+
+// NewLineTable returns a new PC/line table
+// corresponding to the encoded data.
+// Text must be the start address of the
+// corresponding text segment.
+func NewLineTable(data []byte, text uint64) *LineTable {
+ return &LineTable{Data: data, PC: text, Line: 0}
+}
+
+// Go 1.2 symbol table format.
+// See golang.org/s/go12symtab.
+//
+// A general note about the methods here: rather than try to avoid
+// index out of bounds errors, we trust Go to detect them, and then
+// we recover from the panics and treat them as indicative of a malformed
+// or incomplete table.
+//
+// The methods called by symtab.go, which begin with "go12" prefixes,
+// are expected to have that recovery logic.
+
+// isGo12 reports whether this is a Go 1.2 (or later) symbol table.
+func (t *LineTable) isGo12() bool {
+ t.go12Init()
+ return t.go12 == 1
+}
+
+const go12magic = 0xfffffffb
+
+// uintptr returns the pointer-sized value encoded at b.
+// The pointer size is dictated by the table being read.
+func (t *LineTable) uintptr(b []byte) uint64 {
+ if t.ptrsize == 4 {
+ return uint64(t.binary.Uint32(b))
+ }
+ return t.binary.Uint64(b)
+}
+
+// go12init initializes the Go 1.2 metadata if t is a Go 1.2 symbol table.
+func (t *LineTable) go12Init() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.go12 != 0 {
+ return
+ }
+
+ defer func() {
+ // If we panic parsing, assume it's not a Go 1.2 symbol table.
+ recover()
+ }()
+
+ // Check header: 4-byte magic, two zeros, pc quantum, pointer size.
+ t.go12 = -1 // not Go 1.2 until proven otherwise
+ if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
+ (t.Data[6] != 1 && t.Data[6] != 4) || // pc quantum
+ (t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
+ return
+ }
+
+ switch uint32(go12magic) {
+ case binary.LittleEndian.Uint32(t.Data):
+ t.binary = binary.LittleEndian
+ case binary.BigEndian.Uint32(t.Data):
+ t.binary = binary.BigEndian
+ default:
+ return
+ }
+
+ t.quantum = uint32(t.Data[6])
+ t.ptrsize = uint32(t.Data[7])
+
+ t.nfunctab = uint32(t.uintptr(t.Data[8:]))
+ t.functab = t.Data[8+t.ptrsize:]
+ functabsize := t.nfunctab*2*t.ptrsize + t.ptrsize
+ fileoff := t.binary.Uint32(t.functab[functabsize:])
+ t.functab = t.functab[:functabsize]
+ t.filetab = t.Data[fileoff:]
+ t.nfiletab = t.binary.Uint32(t.filetab)
+ t.filetab = t.filetab[:t.nfiletab*4]
+
+ t.go12 = 1 // so far so good
+}
+
+// go12Funcs returns a slice of Funcs derived from the Go 1.2 pcln table.
+func (t *LineTable) go12Funcs() []Func {
+ // Assume it is malformed and return nil on error.
+ defer func() {
+ recover()
+ }()
+
+ n := len(t.functab) / int(t.ptrsize) / 2
+ funcs := make([]Func, n)
+ for i := range funcs {
+ f := &funcs[i]
+ f.Entry = uint64(t.uintptr(t.functab[2*i*int(t.ptrsize):]))
+ f.End = uint64(t.uintptr(t.functab[(2*i+2)*int(t.ptrsize):]))
+ info := t.Data[t.uintptr(t.functab[(2*i+1)*int(t.ptrsize):]):]
+ f.LineTable = t
+ f.FrameSize = int(t.binary.Uint32(info[t.ptrsize+2*4:]))
+ f.Sym = &Sym{
+ Value: f.Entry,
+ Type: 'T',
+ Name: t.string(t.binary.Uint32(info[t.ptrsize:])),
+ GoType: 0,
+ Func: f,
+ }
+ }
+ return funcs
+}
+
+// findFunc returns the func corresponding to the given program counter.
+func (t *LineTable) findFunc(pc uint64) []byte {
+ if pc < t.uintptr(t.functab) || pc >= t.uintptr(t.functab[len(t.functab)-int(t.ptrsize):]) {
+ return nil
+ }
+
+ // The function table is a list of 2*nfunctab+1 uintptrs,
+ // alternating program counters and offsets to func structures.
+ f := t.functab
+ nf := t.nfunctab
+ for nf > 0 {
+ m := nf / 2
+ fm := f[2*t.ptrsize*m:]
+ if t.uintptr(fm) <= pc && pc < t.uintptr(fm[2*t.ptrsize:]) {
+ return t.Data[t.uintptr(fm[t.ptrsize:]):]
+ } else if pc < t.uintptr(fm) {
+ nf = m
+ } else {
+ f = f[(m+1)*2*t.ptrsize:]
+ nf -= m + 1
+ }
+ }
+ return nil
+}
+
+// readvarint reads, removes, and returns a varint from *pp.
+func (t *LineTable) readvarint(pp *[]byte) uint32 {
+ var v, shift uint32
+ p := *pp
+ for shift = 0; ; shift += 7 {
+ b := p[0]
+ p = p[1:]
+ v |= (uint32(b) & 0x7F) << shift
+ if b&0x80 == 0 {
+ break
+ }
+ }
+ *pp = p
+ return v
+}
+
+// string returns a Go string found at off.
+func (t *LineTable) string(off uint32) string {
+ for i := off; ; i++ {
+ if t.Data[i] == 0 {
+ return string(t.Data[off:i])
+ }
+ }
+}
+
+// step advances to the next pc, value pair in the encoded table.
+func (t *LineTable) step(p *[]byte, pc *uint64, val *int32, first bool) bool {
+ uvdelta := t.readvarint(p)
+ if uvdelta == 0 && !first {
+ return false
+ }
+ if uvdelta&1 != 0 {
+ uvdelta = ^(uvdelta >> 1)
+ } else {
+ uvdelta >>= 1
+ }
+ vdelta := int32(uvdelta)
+ pcdelta := t.readvarint(p) * t.quantum
+ *pc += uint64(pcdelta)
+ *val += vdelta
+ return true
+}
+
+// pcvalue reports the value associated with the target pc.
+// off is the offset to the beginning of the pc-value table,
+// and entry is the start PC for the corresponding function.
+func (t *LineTable) pcvalue(off uint32, entry, targetpc uint64) int32 {
+ if off == 0 {
+ return -1
+ }
+ p := t.Data[off:]
+
+ val := int32(-1)
+ pc := entry
+ for t.step(&p, &pc, &val, pc == entry) {
+ if targetpc < pc {
+ return val
+ }
+ }
+ return -1
+}
+
+// findFileLine scans one function in the binary looking for a
+// program counter in the given file on the given line.
+// It does so by running the pc-value tables mapping program counter
+// to file number. Since most functions come from a single file, these
+// are usually short and quick to scan. If a file match is found, then the
+// code goes to the expense of looking for a simultaneous line number match.
+func (t *LineTable) findFileLine(entry uint64, filetab, linetab uint32, filenum, line int32) uint64 {
+ if filetab == 0 || linetab == 0 {
+ return 0
+ }
+
+ fp := t.Data[filetab:]
+ fl := t.Data[linetab:]
+ fileVal := int32(-1)
+ filePC := entry
+ lineVal := int32(-1)
+ linePC := entry
+ fileStartPC := filePC
+ for t.step(&fp, &filePC, &fileVal, filePC == entry) {
+ if fileVal == filenum && fileStartPC < filePC {
+ // fileVal is in effect starting at fileStartPC up to
+ // but not including filePC, and it's the file we want.
+ // Run the PC table looking for a matching line number
+ // or until we reach filePC.
+ lineStartPC := linePC
+ for linePC < filePC && t.step(&fl, &linePC, &lineVal, linePC == entry) {
+ // lineVal is in effect until linePC, and lineStartPC < filePC.
+ if lineVal == line {
+ if fileStartPC <= lineStartPC {
+ return lineStartPC
+ }
+ if fileStartPC < linePC {
+ return fileStartPC
+ }
+ }
+ lineStartPC = linePC
+ }
+ }
+ fileStartPC = filePC
+ }
+ return 0
+}
+
+// go12PCToLine maps program counter to line number for the Go 1.2 pcln table.
+func (t *LineTable) go12PCToLine(pc uint64) (line int) {
+ defer func() {
+ if recover() != nil {
+ line = -1
+ }
+ }()
+
+ f := t.findFunc(pc)
+ if f == nil {
+ return -1
+ }
+ entry := t.uintptr(f)
+ linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
+ return int(t.pcvalue(linetab, entry, pc))
+}
+
+// go12PCToFile maps program counter to file name for the Go 1.2 pcln table.
+func (t *LineTable) go12PCToFile(pc uint64) (file string) {
+ defer func() {
+ if recover() != nil {
+ file = ""
+ }
+ }()
+
+ f := t.findFunc(pc)
+ if f == nil {
+ return ""
+ }
+ entry := t.uintptr(f)
+ filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
+ fno := t.pcvalue(filetab, entry, pc)
+ if fno <= 0 {
+ return ""
+ }
+ return t.string(t.binary.Uint32(t.filetab[4*fno:]))
+}
+
+// go12LineToPC maps a (file, line) pair to a program counter for the Go 1.2 pcln table.
+func (t *LineTable) go12LineToPC(file string, line int) (pc uint64) {
+ defer func() {
+ if recover() != nil {
+ pc = 0
+ }
+ }()
+
+ t.initFileMap()
+ filenum := t.fileMap[file]
+ if filenum == 0 {
+ return 0
+ }
+
+ // Scan all functions.
+ // If this turns out to be a bottleneck, we could build a map[int32][]int32
+ // mapping file number to a list of functions with code from that file.
+ for i := uint32(0); i < t.nfunctab; i++ {
+ f := t.Data[t.uintptr(t.functab[2*t.ptrsize*i+t.ptrsize:]):]
+ entry := t.uintptr(f)
+ filetab := t.binary.Uint32(f[t.ptrsize+4*4:])
+ linetab := t.binary.Uint32(f[t.ptrsize+5*4:])
+ pc := t.findFileLine(entry, filetab, linetab, int32(filenum), int32(line))
+ if pc != 0 {
+ return pc
+ }
+ }
+ return 0
+}
+
+// initFileMap initializes the map from file name to file number.
+func (t *LineTable) initFileMap() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.fileMap != nil {
+ return
+ }
+ m := make(map[string]uint32)
+
+ for i := uint32(1); i < t.nfiletab; i++ {
+ s := t.string(t.binary.Uint32(t.filetab[4*i:]))
+ m[s] = i
+ }
+ t.fileMap = m
+}
+
+// go12MapFiles adds to m a key for every file in the Go 1.2 LineTable.
+// Every key maps to obj. That's not a very interesting map, but it provides
+// a way for callers to obtain the list of files in the program.
+func (t *LineTable) go12MapFiles(m map[string]*Obj, obj *Obj) {
+ defer func() {
+ recover()
+ }()
+
+ t.initFileMap()
+ for file := range t.fileMap {
+ m[file] = obj
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/go/build/build.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/go/build/build.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/go/build/build.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/go/build/build.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,1523 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package build
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/doc"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ pathpkg "path"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// A Context specifies the supporting context for a build.
+type Context struct {
+ GOARCH string // target architecture
+ GOOS string // target operating system
+ GOROOT string // Go root
+ GOPATH string // Go path
+ CgoEnabled bool // whether cgo can be used
+ UseAllFiles bool // use files regardless of +build lines, file names
+ Compiler string // compiler to assume when computing target paths
+
+ // The build and release tags specify build constraints
+ // that should be considered satisfied when processing +build lines.
+ // Clients creating a new context may customize BuildTags, which
+ // defaults to empty, but it is usually an error to customize ReleaseTags,
+ // which defaults to the list of Go releases the current release is compatible with.
+ // In addition to the BuildTags and ReleaseTags, build constraints
+ // consider the values of GOARCH and GOOS as satisfied tags.
+ BuildTags []string
+ ReleaseTags []string
+
+ // The install suffix specifies a suffix to use in the name of the installation
+ // directory. By default it is empty, but custom builds that need to keep
+ // their outputs separate can set InstallSuffix to do so. For example, when
+ // using the race detector, the go command uses InstallSuffix = "race", so
+ // that on a Linux/386 system, packages are written to a directory named
+ // "linux_386_race" instead of the usual "linux_386".
+ InstallSuffix string
+
+ // By default, Import uses the operating system's file system calls
+ // to read directories and files. To read from other sources,
+ // callers can set the following functions. They all have default
+ // behaviors that use the local file system, so clients need only set
+ // the functions whose behaviors they wish to change.
+
+ // JoinPath joins the sequence of path fragments into a single path.
+ // If JoinPath is nil, Import uses filepath.Join.
+ JoinPath func(elem ...string) string
+
+ // SplitPathList splits the path list into a slice of individual paths.
+ // If SplitPathList is nil, Import uses filepath.SplitList.
+ SplitPathList func(list string) []string
+
+ // IsAbsPath reports whether path is an absolute path.
+ // If IsAbsPath is nil, Import uses filepath.IsAbs.
+ IsAbsPath func(path string) bool
+
+ // IsDir reports whether the path names a directory.
+ // If IsDir is nil, Import calls os.Stat and uses the result's IsDir method.
+ IsDir func(path string) bool
+
+ // HasSubdir reports whether dir is a subdirectory of
+ // (perhaps multiple levels below) root.
+ // If so, HasSubdir sets rel to a slash-separated path that
+ // can be joined to root to produce a path equivalent to dir.
+ // If HasSubdir is nil, Import uses an implementation built on
+ // filepath.EvalSymlinks.
+ HasSubdir func(root, dir string) (rel string, ok bool)
+
+ // ReadDir returns a slice of os.FileInfo, sorted by Name,
+ // describing the content of the named directory.
+ // If ReadDir is nil, Import uses ioutil.ReadDir.
+ ReadDir func(dir string) (fi []os.FileInfo, err error)
+
+ // OpenFile opens a file (not a directory) for reading.
+ // If OpenFile is nil, Import uses os.Open.
+ OpenFile func(path string) (r io.ReadCloser, err error)
+}
+
+// joinPath calls ctxt.JoinPath (if not nil) or else filepath.Join.
+func (ctxt *Context) joinPath(elem ...string) string {
+ if f := ctxt.JoinPath; f != nil {
+ return f(elem...)
+ }
+ return filepath.Join(elem...)
+}
+
+// splitPathList calls ctxt.SplitPathList (if not nil) or else filepath.SplitList.
+func (ctxt *Context) splitPathList(s string) []string {
+ if f := ctxt.SplitPathList; f != nil {
+ return f(s)
+ }
+ return filepath.SplitList(s)
+}
+
+// isAbsPath calls ctxt.IsAbsPath (if not nil) or else filepath.IsAbs.
+func (ctxt *Context) isAbsPath(path string) bool {
+ if f := ctxt.IsAbsPath; f != nil {
+ return f(path)
+ }
+ return filepath.IsAbs(path)
+}
+
+// isDir calls ctxt.IsDir (if not nil) or else uses os.Stat.
+func (ctxt *Context) isDir(path string) bool {
+ if f := ctxt.IsDir; f != nil {
+ return f(path)
+ }
+ fi, err := os.Stat(path)
+ return err == nil && fi.IsDir()
+}
+
+// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
+// the local file system to answer the question.
+func (ctxt *Context) hasSubdir(root, dir string) (rel string, ok bool) {
+ if f := ctxt.HasSubdir; f != nil {
+ return f(root, dir)
+ }
+
+ // Try using paths we received.
+ if rel, ok = hasSubdir(root, dir); ok {
+ return
+ }
+
+ // Try expanding symlinks and comparing
+ // expanded against unexpanded and
+ // expanded against expanded.
+ rootSym, _ := filepath.EvalSymlinks(root)
+ dirSym, _ := filepath.EvalSymlinks(dir)
+
+ if rel, ok = hasSubdir(rootSym, dir); ok {
+ return
+ }
+ if rel, ok = hasSubdir(root, dirSym); ok {
+ return
+ }
+ return hasSubdir(rootSym, dirSym)
+}
+
+func hasSubdir(root, dir string) (rel string, ok bool) {
+ const sep = string(filepath.Separator)
+ root = filepath.Clean(root)
+ if !strings.HasSuffix(root, sep) {
+ root += sep
+ }
+ dir = filepath.Clean(dir)
+ if !strings.HasPrefix(dir, root) {
+ return "", false
+ }
+ return filepath.ToSlash(dir[len(root):]), true
+}
+
+// readDir calls ctxt.ReadDir (if not nil) or else ioutil.ReadDir.
+func (ctxt *Context) readDir(path string) ([]os.FileInfo, error) {
+ if f := ctxt.ReadDir; f != nil {
+ return f(path)
+ }
+ return ioutil.ReadDir(path)
+}
+
+// openFile calls ctxt.OpenFile (if not nil) or else os.Open.
+func (ctxt *Context) openFile(path string) (io.ReadCloser, error) {
+ if fn := ctxt.OpenFile; fn != nil {
+ return fn(path)
+ }
+
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err // nil interface
+ }
+ return f, nil
+}
+
+// isFile determines whether path is a file by trying to open it.
+// It reuses openFile instead of adding another function to the
+// list in Context.
+func (ctxt *Context) isFile(path string) bool {
+ f, err := ctxt.openFile(path)
+ if err != nil {
+ return false
+ }
+ f.Close()
+ return true
+}
+
+// gopath returns the list of Go path directories.
+func (ctxt *Context) gopath() []string {
+ var all []string
+ for _, p := range ctxt.splitPathList(ctxt.GOPATH) {
+ if p == "" || p == ctxt.GOROOT {
+ // Empty paths are uninteresting.
+ // If the path is the GOROOT, ignore it.
+ // People sometimes set GOPATH=$GOROOT.
+ // Do not get confused by this common mistake.
+ continue
+ }
+ if strings.HasPrefix(p, "~") {
+ // Path segments starting with ~ on Unix are almost always
+ // users who have incorrectly quoted ~ while setting GOPATH,
+ // preventing it from expanding to $HOME.
+ // The situation is made more confusing by the fact that
+ // bash allows quoted ~ in $PATH (most shells do not).
+ // Do not get confused by this, and do not try to use the path.
+ // It does not exist, and printing errors about it confuses
+ // those users even more, because they think "sure ~ exists!".
+ // The go command diagnoses this situation and prints a
+ // useful error.
+ // On Windows, ~ is used in short names, such as c:\progra~1
+ // for c:\program files.
+ continue
+ }
+ all = append(all, p)
+ }
+ return all
+}
+
+// SrcDirs returns a list of package source root directories.
+// It draws from the current Go root and Go path but omits directories
+// that do not exist.
+func (ctxt *Context) SrcDirs() []string {
+ var all []string
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
+ }
+ }
+ for _, p := range ctxt.gopath() {
+ dir := ctxt.joinPath(p, "src")
+ if ctxt.isDir(dir) {
+ all = append(all, dir)
+ }
+ }
+ return all
+}
+
+// Default is the default Context for builds.
+// It uses the GOARCH, GOOS, GOROOT, and GOPATH environment variables
+// if set, or else the compiled code's GOARCH, GOOS, and GOROOT.
+var Default Context = defaultContext()
+
+// Also known to cmd/dist/build.go.
+var cgoEnabled = map[string]bool{
+ "darwin/386": true,
+ "darwin/amd64": true,
+ "darwin/arm": true,
+ "darwin/arm64": true,
+ "dragonfly/amd64": true,
+ "freebsd/386": true,
+ "freebsd/amd64": true,
+ "freebsd/arm": true,
+ "linux/386": true,
+ "linux/amd64": true,
+ "linux/arm": true,
+ "linux/arm64": true,
+ "linux/ppc64le": true,
+ "android/386": true,
+ "android/amd64": true,
+ "android/arm": true,
+ "netbsd/386": true,
+ "netbsd/amd64": true,
+ "netbsd/arm": true,
+ "openbsd/386": true,
+ "openbsd/amd64": true,
+ "solaris/amd64": true,
+ "windows/386": true,
+ "windows/amd64": true,
+}
+
+func defaultContext() Context {
+ var c Context
+
+ c.GOARCH = envOr("GOARCH", runtime.GOARCH)
+ c.GOOS = envOr("GOOS", runtime.GOOS)
+ c.GOROOT = pathpkg.Clean(runtime.GOROOT())
+ c.GOPATH = envOr("GOPATH", "")
+ c.Compiler = runtime.Compiler
+
+ // Each major Go release in the Go 1.x series should add a tag here.
+ // Old tags should not be removed. That is, the go1.x tag is present
+ // in all releases >= Go 1.x. Code that requires Go 1.x or later should
+ // say "+build go1.x", and code that should only be built before Go 1.x
+ // (perhaps it is the stub to use in that case) should say "+build !go1.x".
+ c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6"}
+
+ switch os.Getenv("CGO_ENABLED") {
+ case "1":
+ c.CgoEnabled = true
+ case "0":
+ c.CgoEnabled = false
+ default:
+ // cgo must be explicitly enabled for cross compilation builds
+ if runtime.GOARCH == c.GOARCH && runtime.GOOS == c.GOOS {
+ c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH]
+ break
+ }
+ c.CgoEnabled = false
+ }
+
+ return c
+}
+
+func envOr(name, def string) string {
+ s := os.Getenv(name)
+ if s == "" {
+ return def
+ }
+ return s
+}
+
+// An ImportMode controls the behavior of the Import method.
+type ImportMode uint
+
+const (
+ // If FindOnly is set, Import stops after locating the directory
+ // that should contain the sources for a package. It does not
+ // read any files in the directory.
+ FindOnly ImportMode = 1 << iota
+
+ // If AllowBinary is set, Import can be satisfied by a compiled
+ // package object without corresponding sources.
+ AllowBinary
+
+ // If ImportComment is set, parse import comments on package statements.
+ // Import returns an error if it finds a comment it cannot understand
+ // or finds conflicting comments in multiple source files.
+ // See golang.org/s/go14customimport for more information.
+ ImportComment
+
+ // By default, Import searches vendor directories
+ // that apply in the given source directory before searching
+ // the GOROOT and GOPATH roots.
+ // If an Import finds and returns a package using a vendor
+ // directory, the resulting ImportPath is the complete path
+ // to the package, including the path elements leading up
+ // to and including "vendor".
+ // For example, if Import("y", "x/subdir", 0) finds
+ // "x/vendor/y", the returned package's ImportPath is "x/vendor/y",
+ // not plain "y".
+ // See golang.org/s/go15vendor for more information.
+ //
+ // Setting IgnoreVendor ignores vendor directories.
+ IgnoreVendor
+)
+
+// A Package describes the Go package found in a directory.
+type Package struct {
+ Dir string // directory containing package sources
+ Name string // package name
+ ImportComment string // path in import comment on package statement
+ Doc string // documentation synopsis
+ ImportPath string // import path of package ("" if unknown)
+ Root string // root of Go tree where this package lives
+ SrcRoot string // package source root directory ("" if unknown)
+ PkgRoot string // package install root directory ("" if unknown)
+ PkgTargetRoot string // architecture dependent install root directory ("" if unknown)
+ BinDir string // command install directory ("" if unknown)
+ Goroot bool // package found in Go root
+ PkgObj string // installed .a file
+ AllTags []string // tags that can influence file selection in this directory
+ ConflictDir string // this directory shadows Dir in $GOPATH
+
+ // Source files
+ GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles)
+ CgoFiles []string // .go source files that import "C"
+ IgnoredGoFiles []string // .go source files ignored for this build
+ InvalidGoFiles []string // .go source files with detected problems (parse error, wrong package name, and so on)
+ CFiles []string // .c source files
+ CXXFiles []string // .cc, .cpp and .cxx source files
+ MFiles []string // .m (Objective-C) source files
+ HFiles []string // .h, .hh, .hpp and .hxx source files
+ SFiles []string // .s source files
+ SwigFiles []string // .swig files
+ SwigCXXFiles []string // .swigcxx files
+ SysoFiles []string // .syso system object files to add to archive
+
+ // Cgo directives
+ CgoCFLAGS []string // Cgo CFLAGS directives
+ CgoCPPFLAGS []string // Cgo CPPFLAGS directives
+ CgoCXXFLAGS []string // Cgo CXXFLAGS directives
+ CgoLDFLAGS []string // Cgo LDFLAGS directives
+ CgoPkgConfig []string // Cgo pkg-config directives
+
+ // Dependency information
+ Imports []string // imports from GoFiles, CgoFiles
+ ImportPos map[string][]token.Position // line information for Imports
+
+ // Test information
+ TestGoFiles []string // _test.go files in package
+ TestImports []string // imports from TestGoFiles
+ TestImportPos map[string][]token.Position // line information for TestImports
+ XTestGoFiles []string // _test.go files outside package
+ XTestImports []string // imports from XTestGoFiles
+ XTestImportPos map[string][]token.Position // line information for XTestImports
+}
+
+// IsCommand reports whether the package is considered a
+// command to be installed (not just a library).
+// Packages named "main" are treated as commands.
+func (p *Package) IsCommand() bool {
+ return p.Name == "main"
+}
+
+// ImportDir is like Import but processes the Go package found in
+// the named directory.
+func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return ctxt.Import(".", dir, mode)
+}
+
+// NoGoError is the error used by Import to describe a directory
+// containing no buildable Go source files. (It may still contain
+// test files, files hidden by build tags, and so on.)
+type NoGoError struct {
+ Dir string
+}
+
+func (e *NoGoError) Error() string {
+ return "no buildable Go source files in " + e.Dir
+}
+
+// MultiplePackageError describes a directory containing
+// multiple buildable Go source files for multiple packages.
+type MultiplePackageError struct {
+ Dir string // directory containing files
+ Packages []string // package names found
+ Files []string // corresponding files: Files[i] declares package Packages[i]
+}
+
+func (e *MultiplePackageError) Error() string {
+ // Error string limited to two entries for compatibility.
+ return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir)
+}
+
+func nameExt(name string) string {
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ return ""
+ }
+ return name[i:]
+}
+
+// Import returns details about the Go package named by the import path,
+// interpreting local import paths relative to the srcDir directory.
+// If the path is a local import path naming a package that can be imported
+// using a standard import path, the returned package will set p.ImportPath
+// to that path.
+//
+// In the directory containing the package, .go, .c, .h, and .s files are
+// considered part of the package except for:
+//
+// - .go files in package documentation
+// - files starting with _ or . (likely editor temporary files)
+// - files with build constraints not satisfied by the context
+//
+// If an error occurs, Import returns a non-nil error and a non-nil
+// *Package containing partial information.
+//
+func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) {
+ p := &Package{
+ ImportPath: path,
+ }
+ if path == "" {
+ return p, fmt.Errorf("import %q: invalid import path", path)
+ }
+
+ var pkgtargetroot string
+ var pkga string
+ var pkgerr error
+ suffix := ""
+ if ctxt.InstallSuffix != "" {
+ suffix = "_" + ctxt.InstallSuffix
+ }
+ switch ctxt.Compiler {
+ case "gccgo":
+ pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ case "gc":
+ pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix
+ default:
+ // Save error for end of function.
+ pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler)
+ }
+ setPkga := func() {
+ switch ctxt.Compiler {
+ case "gccgo":
+ dir, elem := pathpkg.Split(p.ImportPath)
+ pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a"
+ case "gc":
+ pkga = pkgtargetroot + "/" + p.ImportPath + ".a"
+ }
+ }
+ setPkga()
+
+ binaryOnly := false
+ if IsLocalImport(path) {
+ pkga = "" // local imports have no installed path
+ if srcDir == "" {
+ return p, fmt.Errorf("import %q: import relative to unknown directory", path)
+ }
+ if !ctxt.isAbsPath(path) {
+ p.Dir = ctxt.joinPath(srcDir, path)
+ }
+ // Determine canonical import path, if any.
+ // Exclude results where the import path would include /testdata/.
+ inTestdata := func(sub string) bool {
+ return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata"
+ }
+ if ctxt.GOROOT != "" {
+ root := ctxt.joinPath(ctxt.GOROOT, "src")
+ if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) {
+ p.Goroot = true
+ p.ImportPath = sub
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ }
+ all := ctxt.gopath()
+ for i, root := range all {
+ rootsrc := ctxt.joinPath(root, "src")
+ if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) {
+ // We found a potential import path for dir,
+ // but check that using it wouldn't find something
+ // else first.
+ if ctxt.GOROOT != "" {
+ if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) {
+ p.ConflictDir = dir
+ goto Found
+ }
+ }
+ for _, earlyRoot := range all[:i] {
+ if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) {
+ p.ConflictDir = dir
+ goto Found
+ }
+ }
+
+ // sub would not name some other directory instead of this one.
+ // Record it.
+ p.ImportPath = sub
+ p.Root = root
+ goto Found
+ }
+ }
+ // It's okay that we didn't find a root containing dir.
+ // Keep going with the information we have.
+ } else {
+ if strings.HasPrefix(path, "/") {
+ return p, fmt.Errorf("import %q: cannot import absolute path", path)
+ }
+
+ // tried records the location of unsuccessful package lookups
+ var tried struct {
+ vendor []string
+ goroot string
+ gopath []string
+ }
+ gopath := ctxt.gopath()
+
+ // Vendor directories get first chance to satisfy import.
+ if mode&IgnoreVendor == 0 && srcDir != "" {
+ searchVendor := func(root string, isGoroot bool) bool {
+ sub, ok := ctxt.hasSubdir(root, srcDir)
+ if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") {
+ return false
+ }
+ for {
+ vendor := ctxt.joinPath(root, sub, "vendor")
+ if ctxt.isDir(vendor) {
+ dir := ctxt.joinPath(vendor, path)
+ if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) {
+ p.Dir = dir
+ p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/")
+ p.Goroot = isGoroot
+ p.Root = root
+ setPkga() // p.ImportPath changed
+ return true
+ }
+ tried.vendor = append(tried.vendor, dir)
+ }
+ i := strings.LastIndex(sub, "/")
+ if i < 0 {
+ break
+ }
+ sub = sub[:i]
+ }
+ return false
+ }
+ if searchVendor(ctxt.GOROOT, true) {
+ goto Found
+ }
+ for _, root := range gopath {
+ if searchVendor(root, false) {
+ goto Found
+ }
+ }
+ }
+
+ // Determine directory from import path.
+ if ctxt.GOROOT != "" {
+ dir := ctxt.joinPath(ctxt.GOROOT, "src", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Goroot = true
+ p.Root = ctxt.GOROOT
+ goto Found
+ }
+ tried.goroot = dir
+ }
+ for _, root := range gopath {
+ dir := ctxt.joinPath(root, "src", path)
+ isDir := ctxt.isDir(dir)
+ binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga))
+ if isDir || binaryOnly {
+ p.Dir = dir
+ p.Root = root
+ goto Found
+ }
+ tried.gopath = append(tried.gopath, dir)
+ }
+
+ // package was not found
+ var paths []string
+ format := "\t%s (vendor tree)"
+ for _, dir := range tried.vendor {
+ paths = append(paths, fmt.Sprintf(format, dir))
+ format = "\t%s"
+ }
+ if tried.goroot != "" {
+ paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot))
+ } else {
+ paths = append(paths, "\t($GOROOT not set)")
+ }
+ format = "\t%s (from $GOPATH)"
+ for _, dir := range tried.gopath {
+ paths = append(paths, fmt.Sprintf(format, dir))
+ format = "\t%s"
+ }
+ if len(tried.gopath) == 0 {
+ paths = append(paths, "\t($GOPATH not set)")
+ }
+ return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n"))
+ }
+
+Found:
+ if p.Root != "" {
+ p.SrcRoot = ctxt.joinPath(p.Root, "src")
+ p.PkgRoot = ctxt.joinPath(p.Root, "pkg")
+ p.BinDir = ctxt.joinPath(p.Root, "bin")
+ if pkga != "" {
+ p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot)
+ p.PkgObj = ctxt.joinPath(p.Root, pkga)
+ }
+ }
+
+ if mode&FindOnly != 0 {
+ return p, pkgerr
+ }
+ if binaryOnly && (mode&AllowBinary) != 0 {
+ return p, pkgerr
+ }
+
+ dirs, err := ctxt.readDir(p.Dir)
+ if err != nil {
+ return p, err
+ }
+
+ var badGoError error
+ var Sfiles []string // files with ".S" (capital S)
+ var firstFile, firstCommentFile string
+ imported := make(map[string][]token.Position)
+ testImported := make(map[string][]token.Position)
+ xTestImported := make(map[string][]token.Position)
+ allTags := make(map[string]bool)
+ fset := token.NewFileSet()
+ for _, d := range dirs {
+ if d.IsDir() {
+ continue
+ }
+
+ name := d.Name()
+ ext := nameExt(name)
+
+ badFile := func(err error) {
+ if badGoError == nil {
+ badGoError = err
+ }
+ p.InvalidGoFiles = append(p.InvalidGoFiles, name)
+ }
+
+ match, data, filename, err := ctxt.matchFile(p.Dir, name, true, allTags)
+ if err != nil {
+ badFile(err)
+ continue
+ }
+ if !match {
+ if ext == ".go" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
+ continue
+ }
+
+ // Going to save the file. For non-Go files, can stop here.
+ switch ext {
+ case ".c":
+ p.CFiles = append(p.CFiles, name)
+ continue
+ case ".cc", ".cpp", ".cxx":
+ p.CXXFiles = append(p.CXXFiles, name)
+ continue
+ case ".m":
+ p.MFiles = append(p.MFiles, name)
+ continue
+ case ".h", ".hh", ".hpp", ".hxx":
+ p.HFiles = append(p.HFiles, name)
+ continue
+ case ".s":
+ p.SFiles = append(p.SFiles, name)
+ continue
+ case ".S":
+ Sfiles = append(Sfiles, name)
+ continue
+ case ".swig":
+ p.SwigFiles = append(p.SwigFiles, name)
+ continue
+ case ".swigcxx":
+ p.SwigCXXFiles = append(p.SwigCXXFiles, name)
+ continue
+ case ".syso":
+ // binary objects to add to package archive
+ // Likely of the form foo_windows.syso, but
+ // the name was vetted above with goodOSArchFile.
+ p.SysoFiles = append(p.SysoFiles, name)
+ continue
+ }
+
+ pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments)
+ if err != nil {
+ badFile(err)
+ continue
+ }
+
+ pkg := pf.Name.Name
+ if pkg == "documentation" {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ continue
+ }
+
+ isTest := strings.HasSuffix(name, "_test.go")
+ isXTest := false
+ if isTest && strings.HasSuffix(pkg, "_test") {
+ isXTest = true
+ pkg = pkg[:len(pkg)-len("_test")]
+ }
+
+ if p.Name == "" {
+ p.Name = pkg
+ firstFile = name
+ } else if pkg != p.Name {
+ badFile(&MultiplePackageError{
+ Dir: p.Dir,
+ Packages: []string{p.Name, pkg},
+ Files: []string{firstFile, name},
+ })
+ p.InvalidGoFiles = append(p.InvalidGoFiles, name)
+ }
+ if pf.Doc != nil && p.Doc == "" {
+ p.Doc = doc.Synopsis(pf.Doc.Text())
+ }
+
+ if mode&ImportComment != 0 {
+ qcom, line := findImportComment(data)
+ if line != 0 {
+ com, err := strconv.Unquote(qcom)
+ if err != nil {
+ badFile(fmt.Errorf("%s:%d: cannot parse import comment", filename, line))
+ } else if p.ImportComment == "" {
+ p.ImportComment = com
+ firstCommentFile = name
+ } else if p.ImportComment != com {
+ badFile(fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir))
+ }
+ }
+ }
+
+ // Record imports and information about cgo.
+ isCgo := false
+ for _, decl := range pf.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, dspec := range d.Specs {
+ spec, ok := dspec.(*ast.ImportSpec)
+ if !ok {
+ continue
+ }
+ quoted := spec.Path.Value
+ path, err := strconv.Unquote(quoted)
+ if err != nil {
+ log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted)
+ }
+ if isXTest {
+ xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos()))
+ } else if isTest {
+ testImported[path] = append(testImported[path], fset.Position(spec.Pos()))
+ } else {
+ imported[path] = append(imported[path], fset.Position(spec.Pos()))
+ }
+ if path == "C" {
+ if isTest {
+ badFile(fmt.Errorf("use of cgo in test %s not supported", filename))
+ } else {
+ cg := spec.Doc
+ if cg == nil && len(d.Specs) == 1 {
+ cg = d.Doc
+ }
+ if cg != nil {
+ if err := ctxt.saveCgo(filename, p, cg); err != nil {
+ badFile(err)
+ }
+ }
+ isCgo = true
+ }
+ }
+ }
+ }
+ if isCgo {
+ allTags["cgo"] = true
+ if ctxt.CgoEnabled {
+ p.CgoFiles = append(p.CgoFiles, name)
+ } else {
+ p.IgnoredGoFiles = append(p.IgnoredGoFiles, name)
+ }
+ } else if isXTest {
+ p.XTestGoFiles = append(p.XTestGoFiles, name)
+ } else if isTest {
+ p.TestGoFiles = append(p.TestGoFiles, name)
+ } else {
+ p.GoFiles = append(p.GoFiles, name)
+ }
+ }
+ if badGoError != nil {
+ return p, badGoError
+ }
+ if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
+ return p, &NoGoError{p.Dir}
+ }
+
+ for tag := range allTags {
+ p.AllTags = append(p.AllTags, tag)
+ }
+ sort.Strings(p.AllTags)
+
+ p.Imports, p.ImportPos = cleanImports(imported)
+ p.TestImports, p.TestImportPos = cleanImports(testImported)
+ p.XTestImports, p.XTestImportPos = cleanImports(xTestImported)
+
+ // add the .S files only if we are using cgo
+ // (which means gcc will compile them).
+ // The standard assemblers expect .s files.
+ if len(p.CgoFiles) > 0 {
+ p.SFiles = append(p.SFiles, Sfiles...)
+ sort.Strings(p.SFiles)
+ }
+
+ return p, pkgerr
+}
+
+// hasGoFiles reports whether dir contains any files with names ending in .go.
+// For a vendor check we must exclude directories that contain no .go files.
+// Otherwise it is not possible to vendor just a/b/c and still import the
+// non-vendored a/b. See golang.org/issue/13832.
+func hasGoFiles(ctxt *Context, dir string) bool {
+ ents, _ := ctxt.readDir(dir)
+ for _, ent := range ents {
+ if !ent.IsDir() && strings.HasSuffix(ent.Name(), ".go") {
+ return true
+ }
+ }
+ return false
+}
+
+func findImportComment(data []byte) (s string, line int) {
+ // expect keyword package
+ word, data := parseWord(data)
+ if string(word) != "package" {
+ return "", 0
+ }
+
+ // expect package name
+ _, data = parseWord(data)
+
+ // now ready for import comment, a // or /* */ comment
+ // beginning and ending on the current line.
+ for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') {
+ data = data[1:]
+ }
+
+ var comment []byte
+ switch {
+ case bytes.HasPrefix(data, slashSlash):
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ i = len(data)
+ }
+ comment = data[2:i]
+ case bytes.HasPrefix(data, slashStar):
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ // malformed comment
+ return "", 0
+ }
+ comment = data[:i]
+ if bytes.Contains(comment, newline) {
+ return "", 0
+ }
+ }
+ comment = bytes.TrimSpace(comment)
+
+ // split comment into `import`, `"pkg"`
+ word, arg := parseWord(comment)
+ if string(word) != "import" {
+ return "", 0
+ }
+
+ line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline)
+ return strings.TrimSpace(string(arg)), line
+}
+
+var (
+ slashSlash = []byte("//")
+ slashStar = []byte("/*")
+ starSlash = []byte("*/")
+ newline = []byte("\n")
+)
+
+// skipSpaceOrComment returns data with any leading spaces or comments removed.
+func skipSpaceOrComment(data []byte) []byte {
+ for len(data) > 0 {
+ switch data[0] {
+ case ' ', '\t', '\r', '\n':
+ data = data[1:]
+ continue
+ case '/':
+ if bytes.HasPrefix(data, slashSlash) {
+ i := bytes.Index(data, newline)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+1:]
+ continue
+ }
+ if bytes.HasPrefix(data, slashStar) {
+ data = data[2:]
+ i := bytes.Index(data, starSlash)
+ if i < 0 {
+ return nil
+ }
+ data = data[i+2:]
+ continue
+ }
+ }
+ break
+ }
+ return data
+}
+
+// parseWord skips any leading spaces or comments in data
+// and then parses the beginning of data as an identifier or keyword,
+// returning that word and what remains after the word.
+func parseWord(data []byte) (word, rest []byte) {
+ data = skipSpaceOrComment(data)
+
+ // Parse past leading word characters.
+ rest = data
+ for {
+ r, size := utf8.DecodeRune(rest)
+ if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' {
+ rest = rest[size:]
+ continue
+ }
+ break
+ }
+
+ word = data[:len(data)-len(rest)]
+ if len(word) == 0 {
+ return nil, nil
+ }
+
+ return word, rest
+}
+
+// MatchFile reports whether the file with the given name in the given directory
+// matches the context and would be included in a Package created by ImportDir
+// of that directory.
+//
+// MatchFile considers the name of the file and may use ctxt.OpenFile to
+// read some or all of the file's content.
+func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) {
+ match, _, _, err = ctxt.matchFile(dir, name, false, nil)
+ return
+}
+
+// matchFile determines whether the file with the given name in the given directory
+// should be included in the package being constructed.
+// It returns the data read from the file.
+// If returnImports is true and name denotes a Go program, matchFile reads
+// until the end of the imports (and returns that data) even though it only
+// considers text until the first non-comment.
+// If allTags is non-nil, matchFile records any encountered build tag
+// by setting allTags[tag] = true.
+func (ctxt *Context) matchFile(dir, name string, returnImports bool, allTags map[string]bool) (match bool, data []byte, filename string, err error) {
+ if strings.HasPrefix(name, "_") ||
+ strings.HasPrefix(name, ".") {
+ return
+ }
+
+ i := strings.LastIndex(name, ".")
+ if i < 0 {
+ i = len(name)
+ }
+ ext := name[i:]
+
+ if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles {
+ return
+ }
+
+ switch ext {
+ case ".go", ".c", ".cc", ".cxx", ".cpp", ".m", ".s", ".h", ".hh", ".hpp", ".hxx", ".S", ".swig", ".swigcxx":
+ // tentatively okay - read to make sure
+ case ".syso":
+ // binary, no reading
+ match = true
+ return
+ default:
+ // skip
+ return
+ }
+
+ filename = ctxt.joinPath(dir, name)
+ f, err := ctxt.openFile(filename)
+ if err != nil {
+ return
+ }
+
+ if strings.HasSuffix(filename, ".go") {
+ data, err = readImports(f, false, nil)
+ } else {
+ data, err = readComments(f)
+ }
+ f.Close()
+ if err != nil {
+ err = fmt.Errorf("read %s: %v", filename, err)
+ return
+ }
+
+ // Look for +build comments to accept or reject the file.
+ if !ctxt.shouldBuild(data, allTags) && !ctxt.UseAllFiles {
+ return
+ }
+
+ match = true
+ return
+}
+
+func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) {
+ all := make([]string, 0, len(m))
+ for path := range m {
+ all = append(all, path)
+ }
+ sort.Strings(all)
+ return all, m
+}
+
+// Import is shorthand for Default.Import.
+func Import(path, srcDir string, mode ImportMode) (*Package, error) {
+ return Default.Import(path, srcDir, mode)
+}
+
+// ImportDir is shorthand for Default.ImportDir.
+func ImportDir(dir string, mode ImportMode) (*Package, error) {
+ return Default.ImportDir(dir, mode)
+}
+
+var slashslash = []byte("//")
+
+// shouldBuild reports whether it is okay to use this file,
+// The rule is that in the file's leading run of // comments
+// and blank lines, which must be followed by a blank line
+// (to avoid including a Go package clause doc comment),
+// lines beginning with '// +build' are taken as build directives.
+//
+// The file is accepted only if each such line lists something
+// matching the file. For example:
+//
+// // +build windows linux
+//
+// marks the file as applicable only on Windows and Linux.
+//
+func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool) bool {
+ // Pass 1. Identify leading run of // comments and blank lines,
+ // which must be followed by a blank line.
+ end := 0
+ p := content
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if len(line) == 0 { // Blank line
+ end = len(content) - len(p)
+ continue
+ }
+ if !bytes.HasPrefix(line, slashslash) { // Not comment line
+ break
+ }
+ }
+ content = content[:end]
+
+ // Pass 2. Process each line in the run.
+ p = content
+ allok := true
+ for len(p) > 0 {
+ line := p
+ if i := bytes.IndexByte(line, '\n'); i >= 0 {
+ line, p = line[:i], p[i+1:]
+ } else {
+ p = p[len(p):]
+ }
+ line = bytes.TrimSpace(line)
+ if bytes.HasPrefix(line, slashslash) {
+ line = bytes.TrimSpace(line[len(slashslash):])
+ if len(line) > 0 && line[0] == '+' {
+ // Looks like a comment +line.
+ f := strings.Fields(string(line))
+ if f[0] == "+build" {
+ ok := false
+ for _, tok := range f[1:] {
+ if ctxt.match(tok, allTags) {
+ ok = true
+ }
+ }
+ if !ok {
+ allok = false
+ }
+ }
+ }
+ }
+ }
+
+ return allok
+}
+
+// saveCgo saves the information from the #cgo lines in the import "C" comment.
+// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives
+// that affect the way cgo's C code is built.
+func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error {
+ text := cg.Text()
+ for _, line := range strings.Split(text, "\n") {
+ orig := line
+
+ // Line is
+ // #cgo [GOOS/GOARCH...] LDFLAGS: stuff
+ //
+ line = strings.TrimSpace(line)
+ if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') {
+ continue
+ }
+
+ // Split at colon.
+ line = strings.TrimSpace(line[4:])
+ i := strings.Index(line, ":")
+ if i < 0 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ line, argstr := line[:i], line[i+1:]
+
+ // Parse GOOS/GOARCH stuff.
+ f := strings.Fields(line)
+ if len(f) < 1 {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+
+ cond, verb := f[:len(f)-1], f[len(f)-1]
+ if len(cond) > 0 {
+ ok := false
+ for _, c := range cond {
+ if ctxt.match(c, nil) {
+ ok = true
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+
+ args, err := splitQuoted(argstr)
+ if err != nil {
+ return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig)
+ }
+ var ok bool
+ for i, arg := range args {
+ if arg, ok = expandSrcDir(arg, di.Dir); !ok {
+ return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg)
+ }
+ args[i] = arg
+ }
+
+ switch verb {
+ case "CFLAGS":
+ di.CgoCFLAGS = append(di.CgoCFLAGS, args...)
+ case "CPPFLAGS":
+ di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...)
+ case "CXXFLAGS":
+ di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...)
+ case "LDFLAGS":
+ di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...)
+ case "pkg-config":
+ di.CgoPkgConfig = append(di.CgoPkgConfig, args...)
+ default:
+ return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig)
+ }
+ }
+ return nil
+}
+
+// expandSrcDir expands any occurrence of ${SRCDIR}, making sure
+// the result is safe for the shell.
+func expandSrcDir(str string, srcdir string) (string, bool) {
+ // "\" delimited paths cause safeCgoName to fail
+ // so convert native paths with a different delimeter
+ // to "/" before starting (eg: on windows).
+ srcdir = filepath.ToSlash(srcdir)
+
+ // Spaces are tolerated in ${SRCDIR}, but not anywhere else.
+ chunks := strings.Split(str, "${SRCDIR}")
+ if len(chunks) < 2 {
+ return str, safeCgoName(str, false)
+ }
+ ok := true
+ for _, chunk := range chunks {
+ ok = ok && (chunk == "" || safeCgoName(chunk, false))
+ }
+ ok = ok && (srcdir == "" || safeCgoName(srcdir, true))
+ res := strings.Join(chunks, srcdir)
+ return res, ok && res != ""
+}
+
+// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN.
+// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay.
+// See golang.org/issue/6038.
+// The @ is for OS X. See golang.org/issue/13720.
+const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@"
+const safeSpaces = " "
+
+var safeBytes = []byte(safeSpaces + safeString)
+
+func safeCgoName(s string, spaces bool) bool {
+ if s == "" {
+ return false
+ }
+ safe := safeBytes
+ if !spaces {
+ safe = safe[len(safeSpaces):]
+ }
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; c < 0x80 && bytes.IndexByte(safe, c) < 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// splitQuoted splits the string s around each instance of one or more consecutive
+// white space characters while taking into account quotes and escaping, and
+// returns an array of substrings of s or an empty list if s contains only white space.
+// Single quotes and double quotes are recognized to prevent splitting within the
+// quoted region, and are removed from the resulting substrings. If a quote in s
+// isn't closed err will be set and r will have the unclosed argument as the
+// last element. The backslash is used for escaping.
+//
+// For example, the following string:
+//
+// a b:"c d" 'e''f' "g\""
+//
+// Would be parsed as:
+//
+// []string{"a", "b:c d", "ef", `g"`}
+//
+func splitQuoted(s string) (r []string, err error) {
+ var args []string
+ arg := make([]rune, len(s))
+ escaped := false
+ quoted := false
+ quote := '\x00'
+ i := 0
+ for _, rune := range s {
+ switch {
+ case escaped:
+ escaped = false
+ case rune == '\\':
+ escaped = true
+ continue
+ case quote != '\x00':
+ if rune == quote {
+ quote = '\x00'
+ continue
+ }
+ case rune == '"' || rune == '\'':
+ quoted = true
+ quote = rune
+ continue
+ case unicode.IsSpace(rune):
+ if quoted || i > 0 {
+ quoted = false
+ args = append(args, string(arg[:i]))
+ i = 0
+ }
+ continue
+ }
+ arg[i] = rune
+ i++
+ }
+ if quoted || i > 0 {
+ args = append(args, string(arg[:i]))
+ }
+ if quote != 0 {
+ err = errors.New("unclosed quote")
+ } else if escaped {
+ err = errors.New("unfinished escaping")
+ }
+ return args, err
+}
+
+// match reports whether the name is one of:
+//
+// $GOOS
+// $GOARCH
+// cgo (if cgo is enabled)
+// !cgo (if cgo is disabled)
+// ctxt.Compiler
+// !ctxt.Compiler
+// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags)
+// !tag (if tag is not listed in ctxt.BuildTags or ctxt.ReleaseTags)
+// a comma-separated list of any of these
+//
+func (ctxt *Context) match(name string, allTags map[string]bool) bool {
+ if name == "" {
+ if allTags != nil {
+ allTags[name] = true
+ }
+ return false
+ }
+ if i := strings.Index(name, ","); i >= 0 {
+ // comma-separated list
+ ok1 := ctxt.match(name[:i], allTags)
+ ok2 := ctxt.match(name[i+1:], allTags)
+ return ok1 && ok2
+ }
+ if strings.HasPrefix(name, "!!") { // bad syntax, reject always
+ return false
+ }
+ if strings.HasPrefix(name, "!") { // negation
+ return len(name) > 1 && !ctxt.match(name[1:], allTags)
+ }
+
+ if allTags != nil {
+ allTags[name] = true
+ }
+
+ // Tags must be letters, digits, underscores or dots.
+ // Unlike in Go identifiers, all digits are fine (e.g., "386").
+ for _, c := range name {
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' {
+ return false
+ }
+ }
+
+ // special tags
+ if ctxt.CgoEnabled && name == "cgo" {
+ return true
+ }
+ if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler {
+ return true
+ }
+ if ctxt.GOOS == "android" && name == "linux" {
+ return true
+ }
+
+ // other tags
+ for _, tag := range ctxt.BuildTags {
+ if tag == name {
+ return true
+ }
+ }
+ for _, tag := range ctxt.ReleaseTags {
+ if tag == name {
+ return true
+ }
+ }
+
+ return false
+}
+
+// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH
+// suffix which does not match the current system.
+// The recognized name formats are:
+//
+// name_$(GOOS).*
+// name_$(GOARCH).*
+// name_$(GOOS)_$(GOARCH).*
+// name_$(GOOS)_test.*
+// name_$(GOARCH)_test.*
+// name_$(GOOS)_$(GOARCH)_test.*
+//
+// An exception: if GOOS=android, then files with GOOS=linux are also matched.
+func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool {
+ if dot := strings.Index(name, "."); dot != -1 {
+ name = name[:dot]
+ }
+
+ // Before Go 1.4, a file called "linux.go" would be equivalent to having a
+ // build tag "linux" in that file. For Go 1.4 and beyond, we require this
+ // auto-tagging to apply only to files with a non-empty prefix, so
+ // "foo_linux.go" is tagged but "linux.go" is not. This allows new operating
+ // systems, such as android, to arrive without breaking existing code with
+ // innocuous source code in "android.go". The easiest fix: cut everything
+ // in the name before the initial _.
+ i := strings.Index(name, "_")
+ if i < 0 {
+ return true
+ }
+ name = name[i:] // ignore everything before first _
+
+ l := strings.Split(name, "_")
+ if n := len(l); n > 0 && l[n-1] == "test" {
+ l = l[:n-1]
+ }
+ n := len(l)
+ if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-2]] = true
+ allTags[l[n-1]] = true
+ }
+ if l[n-1] != ctxt.GOARCH {
+ return false
+ }
+ if ctxt.GOOS == "android" && l[n-2] == "linux" {
+ return true
+ }
+ return l[n-2] == ctxt.GOOS
+ }
+ if n >= 1 && knownOS[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-1]] = true
+ }
+ if ctxt.GOOS == "android" && l[n-1] == "linux" {
+ return true
+ }
+ return l[n-1] == ctxt.GOOS
+ }
+ if n >= 1 && knownArch[l[n-1]] {
+ if allTags != nil {
+ allTags[l[n-1]] = true
+ }
+ return l[n-1] == ctxt.GOARCH
+ }
+ return true
+}
+
+var knownOS = make(map[string]bool)
+var knownArch = make(map[string]bool)
+
+func init() {
+ for _, v := range strings.Fields(goosList) {
+ knownOS[v] = true
+ }
+ for _, v := range strings.Fields(goarchList) {
+ knownArch[v] = true
+ }
+}
+
+// ToolDir is the directory containing build tools.
+var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH)
+
+// IsLocalImport reports whether the import path is
+// a local import path, like ".", "..", "./foo", or "../foo".
+func IsLocalImport(path string) bool {
+ return path == "." || path == ".." ||
+ strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../")
+}
+
+// ArchChar returns "?" and an error.
+// In earlier versions of Go, the returned string was used to derive
+// the compiler and linker tool names, the default object file suffix,
+// and the default linker output name. As of Go 1.5, those strings
+// no longer vary by architecture; they are compile, link, .o, and a.out, respectively.
+func ArchChar(goarch string) (string, error) {
+ return "?", errors.New("architecture letter no longer used")
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/hash/crc32/crc32_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/hash/crc32/crc32_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/hash/crc32/crc32_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/hash/crc32/crc32_generic.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 arm arm64 mips64 mips64le ppc64 ppc64le
+
+package crc32
+
+// The file contains the generic version of updateCastagnoli which just calls
+// the software implementation.
+
+func updateCastagnoli(crc uint32, p []byte) uint32 {
+ return update(crc, castagnoliTable, p)
+}
+
+func updateIEEE(crc uint32, p []byte) uint32 {
+ // only use slicing-by-8 when input is >= 4KB
+ if len(p) >= 4096 {
+ ieeeTable8Once.Do(func() {
+ ieeeTable8 = makeTable8(IEEE)
+ })
+ return updateSlicingBy8(crc, ieeeTable8, p)
+ }
+ return update(crc, IEEETable, p)
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/net/http/fs_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/net/http/fs_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/net/http/fs_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/net/http/fs_test.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,1077 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http_test
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "mime/multipart"
+ "net"
+ . "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+const (
+ testFile = "testdata/file"
+ testFileLen = 11
+)
+
+type wantRange struct {
+ start, end int64 // range [start,end)
+}
+
+var itoa = strconv.Itoa
+
+var ServeFileRangeTests = []struct {
+ r string
+ code int
+ ranges []wantRange
+}{
+ {r: "", code: StatusOK},
+ {r: "bytes=0-4", code: StatusPartialContent, ranges: []wantRange{{0, 5}}},
+ {r: "bytes=2-", code: StatusPartialContent, ranges: []wantRange{{2, testFileLen}}},
+ {r: "bytes=-5", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 5, testFileLen}}},
+ {r: "bytes=3-7", code: StatusPartialContent, ranges: []wantRange{{3, 8}}},
+ {r: "bytes=0-0,-2", code: StatusPartialContent, ranges: []wantRange{{0, 1}, {testFileLen - 2, testFileLen}}},
+ {r: "bytes=0-1,5-8", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, 9}}},
+ {r: "bytes=0-1,5-", code: StatusPartialContent, ranges: []wantRange{{0, 2}, {5, testFileLen}}},
+ {r: "bytes=5-1000", code: StatusPartialContent, ranges: []wantRange{{5, testFileLen}}},
+ {r: "bytes=0-,1-,2-,3-,4-", code: StatusOK}, // ignore wasteful range request
+ {r: "bytes=0-9", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen - 1}}},
+ {r: "bytes=0-10", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
+ {r: "bytes=0-11", code: StatusPartialContent, ranges: []wantRange{{0, testFileLen}}},
+ {r: "bytes=10-11", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
+ {r: "bytes=10-", code: StatusPartialContent, ranges: []wantRange{{testFileLen - 1, testFileLen}}},
+ {r: "bytes=11-", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=11-12", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=12-12", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=11-100", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=12-100", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=100-", code: StatusRequestedRangeNotSatisfiable},
+ {r: "bytes=100-1000", code: StatusRequestedRangeNotSatisfiable},
+}
+
+func TestServeFile(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "testdata/file")
+ }))
+ defer ts.Close()
+
+ var err error
+
+ file, err := ioutil.ReadFile(testFile)
+ if err != nil {
+ t.Fatal("reading file:", err)
+ }
+
+ // set up the Request (re-used for all tests)
+ var req Request
+ req.Header = make(Header)
+ if req.URL, err = url.Parse(ts.URL); err != nil {
+ t.Fatal("ParseURL:", err)
+ }
+ req.Method = "GET"
+
+ // straight GET
+ _, body := getBody(t, "straight get", req)
+ if !bytes.Equal(body, file) {
+ t.Fatalf("body mismatch: got %q, want %q", body, file)
+ }
+
+ // Range tests
+Cases:
+ for _, rt := range ServeFileRangeTests {
+ if rt.r != "" {
+ req.Header.Set("Range", rt.r)
+ }
+ resp, body := getBody(t, fmt.Sprintf("range test %q", rt.r), req)
+ if resp.StatusCode != rt.code {
+ t.Errorf("range=%q: StatusCode=%d, want %d", rt.r, resp.StatusCode, rt.code)
+ }
+ if rt.code == StatusRequestedRangeNotSatisfiable {
+ continue
+ }
+ wantContentRange := ""
+ if len(rt.ranges) == 1 {
+ rng := rt.ranges[0]
+ wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
+ }
+ cr := resp.Header.Get("Content-Range")
+ if cr != wantContentRange {
+ t.Errorf("range=%q: Content-Range = %q, want %q", rt.r, cr, wantContentRange)
+ }
+ ct := resp.Header.Get("Content-Type")
+ if len(rt.ranges) == 1 {
+ rng := rt.ranges[0]
+ wantBody := file[rng.start:rng.end]
+ if !bytes.Equal(body, wantBody) {
+ t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
+ }
+ if strings.HasPrefix(ct, "multipart/byteranges") {
+ t.Errorf("range=%q content-type = %q; unexpected multipart/byteranges", rt.r, ct)
+ }
+ }
+ if len(rt.ranges) > 1 {
+ typ, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ t.Errorf("range=%q content-type = %q; %v", rt.r, ct, err)
+ continue
+ }
+ if typ != "multipart/byteranges" {
+ t.Errorf("range=%q content-type = %q; want multipart/byteranges", rt.r, typ)
+ continue
+ }
+ if params["boundary"] == "" {
+ t.Errorf("range=%q content-type = %q; lacks boundary", rt.r, ct)
+ continue
+ }
+ if g, w := resp.ContentLength, int64(len(body)); g != w {
+ t.Errorf("range=%q Content-Length = %d; want %d", rt.r, g, w)
+ continue
+ }
+ mr := multipart.NewReader(bytes.NewReader(body), params["boundary"])
+ for ri, rng := range rt.ranges {
+ part, err := mr.NextPart()
+ if err != nil {
+ t.Errorf("range=%q, reading part index %d: %v", rt.r, ri, err)
+ continue Cases
+ }
+ wantContentRange = fmt.Sprintf("bytes %d-%d/%d", rng.start, rng.end-1, testFileLen)
+ if g, w := part.Header.Get("Content-Range"), wantContentRange; g != w {
+ t.Errorf("range=%q: part Content-Range = %q; want %q", rt.r, g, w)
+ }
+ body, err := ioutil.ReadAll(part)
+ if err != nil {
+ t.Errorf("range=%q, reading part index %d body: %v", rt.r, ri, err)
+ continue Cases
+ }
+ wantBody := file[rng.start:rng.end]
+ if !bytes.Equal(body, wantBody) {
+ t.Errorf("range=%q: body = %q, want %q", rt.r, body, wantBody)
+ }
+ }
+ _, err = mr.NextPart()
+ if err != io.EOF {
+ t.Errorf("range=%q; expected final error io.EOF; got %v", rt.r, err)
+ }
+ }
+ }
+}
+
+func TestServeFile_DotDot(t *testing.T) {
+ tests := []struct {
+ req string
+ wantStatus int
+ }{
+ {"/testdata/file", 200},
+ {"/../file", 400},
+ {"/..", 400},
+ {"/../", 400},
+ {"/../foo", 400},
+ {"/..\\foo", 400},
+ {"/file/a", 200},
+ {"/file/a..", 200},
+ {"/file/a/..", 400},
+ {"/file/a\\..", 400},
+ }
+ for _, tt := range tests {
+ req, err := ReadRequest(bufio.NewReader(strings.NewReader("GET " + tt.req + " HTTP/1.1\r\nHost: foo\r\n\r\n")))
+ if err != nil {
+ t.Errorf("bad request %q: %v", tt.req, err)
+ continue
+ }
+ rec := httptest.NewRecorder()
+ ServeFile(rec, req, "testdata/file")
+ if rec.Code != tt.wantStatus {
+ t.Errorf("for request %q, status = %d; want %d", tt.req, rec.Code, tt.wantStatus)
+ }
+ }
+}
+
+var fsRedirectTestData = []struct {
+ original, redirect string
+}{
+ {"/test/index.html", "/test/"},
+ {"/test/testdata", "/test/testdata/"},
+ {"/test/testdata/file/", "/test/testdata/file"},
+}
+
+func TestFSRedirect(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(StripPrefix("/test", FileServer(Dir("."))))
+ defer ts.Close()
+
+ for _, data := range fsRedirectTestData {
+ res, err := Get(ts.URL + data.original)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res.Body.Close()
+ if g, e := res.Request.URL.Path, data.redirect; g != e {
+ t.Errorf("redirect from %s: got %s, want %s", data.original, g, e)
+ }
+ }
+}
+
+type testFileSystem struct {
+ open func(name string) (File, error)
+}
+
+func (fs *testFileSystem) Open(name string) (File, error) {
+ return fs.open(name)
+}
+
+func TestFileServerCleans(t *testing.T) {
+ defer afterTest(t)
+ ch := make(chan string, 1)
+ fs := FileServer(&testFileSystem{func(name string) (File, error) {
+ ch <- name
+ return nil, errors.New("file does not exist")
+ }})
+ tests := []struct {
+ reqPath, openArg string
+ }{
+ {"/foo.txt", "/foo.txt"},
+ {"//foo.txt", "/foo.txt"},
+ {"/../foo.txt", "/foo.txt"},
+ }
+ req, _ := NewRequest("GET", "http://example.com", nil)
+ for n, test := range tests {
+ rec := httptest.NewRecorder()
+ req.URL.Path = test.reqPath
+ fs.ServeHTTP(rec, req)
+ if got := <-ch; got != test.openArg {
+ t.Errorf("test %d: got %q, want %q", n, got, test.openArg)
+ }
+ }
+}
+
+func TestFileServerEscapesNames(t *testing.T) {
+ defer afterTest(t)
+	const dirListPrefix = "<pre>\n"
+	const dirListSuffix = "\n</pre>\n"
+ tests := []struct {
+ name, escaped string
+ }{
+ {`simple_name`, `simple_name`},
+		{`"'<>&`, `&#34;&#39;&lt;&gt;&amp;`},
+ {`?foo=bar#baz`, `?foo=bar#baz`},
+		{`<combo>?foo`, `&lt;combo&gt;?foo`},
+ }
+
+ // We put each test file in its own directory in the fakeFS so we can look at it in isolation.
+ fs := make(fakeFS)
+ for i, test := range tests {
+ testFile := &fakeFileInfo{basename: test.name}
+ fs[fmt.Sprintf("/%d", i)] = &fakeFileInfo{
+ dir: true,
+ modtime: time.Unix(1000000000, 0).UTC(),
+ ents: []*fakeFileInfo{testFile},
+ }
+ fs[fmt.Sprintf("/%d/%s", i, test.name)] = testFile
+ }
+
+ ts := httptest.NewServer(FileServer(&fs))
+ defer ts.Close()
+ for i, test := range tests {
+ url := fmt.Sprintf("%s/%d", ts.URL, i)
+ res, err := Get(url)
+ if err != nil {
+ t.Fatalf("test %q: Get: %v", test.name, err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("test %q: read Body: %v", test.name, err)
+ }
+ s := string(b)
+ if !strings.HasPrefix(s, dirListPrefix) || !strings.HasSuffix(s, dirListSuffix) {
+ t.Errorf("test %q: listing dir, full output is %q, want prefix %q and suffix %q", test.name, s, dirListPrefix, dirListSuffix)
+ }
+ if trimmed := strings.TrimSuffix(strings.TrimPrefix(s, dirListPrefix), dirListSuffix); trimmed != test.escaped {
+ t.Errorf("test %q: listing dir, filename escaped to %q, want %q", test.name, trimmed, test.escaped)
+ }
+ res.Body.Close()
+ }
+}
+
+func TestFileServerSortsNames(t *testing.T) {
+ defer afterTest(t)
+ const contents = "I am a fake file"
+ dirMod := time.Unix(123, 0).UTC()
+ fileMod := time.Unix(1000000000, 0).UTC()
+ fs := fakeFS{
+ "/": &fakeFileInfo{
+ dir: true,
+ modtime: dirMod,
+ ents: []*fakeFileInfo{
+ {
+ basename: "b",
+ modtime: fileMod,
+ contents: contents,
+ },
+ {
+ basename: "a",
+ modtime: fileMod,
+ contents: contents,
+ },
+ },
+ },
+ }
+
+ ts := httptest.NewServer(FileServer(&fs))
+ defer ts.Close()
+
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatalf("Get: %v", err)
+ }
+ defer res.Body.Close()
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("read Body: %v", err)
+ }
+ s := string(b)
+	if !strings.Contains(s, "<a href=\"a\">a</a>\n<a href=\"b\">b</a>") {
+ t.Errorf("output appears to be unsorted:\n%s", s)
+ }
+}
+
+func mustRemoveAll(dir string) {
+ err := os.RemoveAll(dir)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func TestFileServerImplicitLeadingSlash(t *testing.T) {
+ defer afterTest(t)
+ tempDir, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatalf("TempDir: %v", err)
+ }
+ defer mustRemoveAll(tempDir)
+ if err := ioutil.WriteFile(filepath.Join(tempDir, "foo.txt"), []byte("Hello world"), 0644); err != nil {
+ t.Fatalf("WriteFile: %v", err)
+ }
+ ts := httptest.NewServer(StripPrefix("/bar/", FileServer(Dir(tempDir))))
+ defer ts.Close()
+ get := func(suffix string) string {
+ res, err := Get(ts.URL + suffix)
+ if err != nil {
+ t.Fatalf("Get %s: %v", suffix, err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatalf("ReadAll %s: %v", suffix, err)
+ }
+ res.Body.Close()
+ return string(b)
+ }
+ if s := get("/bar/"); !strings.Contains(s, ">foo.txt<") {
+ t.Logf("expected a directory listing with foo.txt, got %q", s)
+ }
+ if s := get("/bar/foo.txt"); s != "Hello world" {
+ t.Logf("expected %q, got %q", "Hello world", s)
+ }
+}
+
+func TestDirJoin(t *testing.T) {
+ if runtime.GOOS == "windows" {
+ t.Skip("skipping test on windows")
+ }
+ wfi, err := os.Stat("/etc/hosts")
+ if err != nil {
+ t.Skip("skipping test; no /etc/hosts file")
+ }
+ test := func(d Dir, name string) {
+ f, err := d.Open(name)
+ if err != nil {
+ t.Fatalf("open of %s: %v", name, err)
+ }
+ defer f.Close()
+ gfi, err := f.Stat()
+ if err != nil {
+ t.Fatalf("stat of %s: %v", name, err)
+ }
+ if !os.SameFile(gfi, wfi) {
+ t.Errorf("%s got different file", name)
+ }
+ }
+ test(Dir("/etc/"), "/hosts")
+ test(Dir("/etc/"), "hosts")
+ test(Dir("/etc/"), "../../../../hosts")
+ test(Dir("/etc"), "/hosts")
+ test(Dir("/etc"), "hosts")
+ test(Dir("/etc"), "../../../../hosts")
+
+ // Not really directories, but since we use this trick in
+ // ServeFile, test it:
+ test(Dir("/etc/hosts"), "")
+ test(Dir("/etc/hosts"), "/")
+ test(Dir("/etc/hosts"), "../")
+}
+
+func TestEmptyDirOpenCWD(t *testing.T) {
+ test := func(d Dir) {
+ name := "fs_test.go"
+ f, err := d.Open(name)
+ if err != nil {
+ t.Fatalf("open of %s: %v", name, err)
+ }
+ defer f.Close()
+ }
+ test(Dir(""))
+ test(Dir("."))
+ test(Dir("./"))
+}
+
+func TestServeFileContentType(t *testing.T) {
+ defer afterTest(t)
+ const ctype = "icecream/chocolate"
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ switch r.FormValue("override") {
+ case "1":
+ w.Header().Set("Content-Type", ctype)
+ case "2":
+ // Explicitly inhibit sniffing.
+ w.Header()["Content-Type"] = []string{}
+ }
+ ServeFile(w, r, "testdata/file")
+ }))
+ defer ts.Close()
+ get := func(override string, want []string) {
+ resp, err := Get(ts.URL + "?override=" + override)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if h := resp.Header["Content-Type"]; !reflect.DeepEqual(h, want) {
+ t.Errorf("Content-Type mismatch: got %v, want %v", h, want)
+ }
+ resp.Body.Close()
+ }
+ get("0", []string{"text/plain; charset=utf-8"})
+ get("1", []string{ctype})
+ get("2", nil)
+}
+
+func TestServeFileMimeType(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "testdata/style.css")
+ }))
+ defer ts.Close()
+ resp, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp.Body.Close()
+ want := "text/css; charset=utf-8"
+ if h := resp.Header.Get("Content-Type"); h != want {
+ t.Errorf("Content-Type mismatch: got %q, want %q", h, want)
+ }
+}
+
+func TestServeFileFromCWD(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ ServeFile(w, r, "fs_test.go")
+ }))
+ defer ts.Close()
+ r, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ r.Body.Close()
+ if r.StatusCode != 200 {
+ t.Fatalf("expected 200 OK, got %s", r.Status)
+ }
+}
+
+// Tests that ServeFile doesn't add a Content-Length if a Content-Encoding is
+// specified.
+func TestServeFileWithContentEncoding_h1(t *testing.T) { testServeFileWithContentEncoding(t, h1Mode) }
+func TestServeFileWithContentEncoding_h2(t *testing.T) { testServeFileWithContentEncoding(t, h2Mode) }
+func testServeFileWithContentEncoding(t *testing.T, h2 bool) {
+ defer afterTest(t)
+ cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
+ w.Header().Set("Content-Encoding", "foo")
+ ServeFile(w, r, "testdata/file")
+
+ // Because the testdata is so small, it would fit in
+ // both the h1 and h2 Server's write buffers. For h1,
+ // sendfile is used, though, forcing a header flush at
+ // the io.Copy. http2 doesn't do a header flush so
+ // buffers all 11 bytes and then adds its own
+ // Content-Length. To prevent the Server's
+ // Content-Length and test ServeFile only, flush here.
+ w.(Flusher).Flush()
+ }))
+ defer cst.close()
+ resp, err := cst.c.Get(cst.ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ resp.Body.Close()
+ if g, e := resp.ContentLength, int64(-1); g != e {
+ t.Errorf("Content-Length mismatch: got %d, want %d", g, e)
+ }
+}
+
+func TestServeIndexHtml(t *testing.T) {
+ defer afterTest(t)
+ const want = "index.html says hello\n"
+ ts := httptest.NewServer(FileServer(Dir(".")))
+ defer ts.Close()
+
+ for _, path := range []string{"/testdata/", "/testdata/index.html"} {
+ res, err := Get(ts.URL + path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal("reading Body:", err)
+ }
+ if s := string(b); s != want {
+ t.Errorf("for path %q got %q, want %q", path, s, want)
+ }
+ res.Body.Close()
+ }
+}
+
+func TestFileServerZeroByte(t *testing.T) {
+ defer afterTest(t)
+ ts := httptest.NewServer(FileServer(Dir(".")))
+ defer ts.Close()
+
+ res, err := Get(ts.URL + "/..\x00")
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal("reading Body:", err)
+ }
+ if res.StatusCode == 200 {
+ t.Errorf("got status 200; want an error. Body is:\n%s", string(b))
+ }
+}
+
+type fakeFileInfo struct {
+ dir bool
+ basename string
+ modtime time.Time
+ ents []*fakeFileInfo
+ contents string
+ err error
+}
+
+func (f *fakeFileInfo) Name() string { return f.basename }
+func (f *fakeFileInfo) Sys() interface{} { return nil }
+func (f *fakeFileInfo) ModTime() time.Time { return f.modtime }
+func (f *fakeFileInfo) IsDir() bool { return f.dir }
+func (f *fakeFileInfo) Size() int64 { return int64(len(f.contents)) }
+func (f *fakeFileInfo) Mode() os.FileMode {
+ if f.dir {
+ return 0755 | os.ModeDir
+ }
+ return 0644
+}
+
+type fakeFile struct {
+ io.ReadSeeker
+ fi *fakeFileInfo
+ path string // as opened
+ entpos int
+}
+
+func (f *fakeFile) Close() error { return nil }
+func (f *fakeFile) Stat() (os.FileInfo, error) { return f.fi, nil }
+func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) {
+ if !f.fi.dir {
+ return nil, os.ErrInvalid
+ }
+ var fis []os.FileInfo
+
+ limit := f.entpos + count
+ if count <= 0 || limit > len(f.fi.ents) {
+ limit = len(f.fi.ents)
+ }
+ for ; f.entpos < limit; f.entpos++ {
+ fis = append(fis, f.fi.ents[f.entpos])
+ }
+
+ if len(fis) == 0 && count > 0 {
+ return fis, io.EOF
+ } else {
+ return fis, nil
+ }
+}
+
+type fakeFS map[string]*fakeFileInfo
+
+func (fs fakeFS) Open(name string) (File, error) {
+ name = path.Clean(name)
+ f, ok := fs[name]
+ if !ok {
+ return nil, os.ErrNotExist
+ }
+ if f.err != nil {
+ return nil, f.err
+ }
+ return &fakeFile{ReadSeeker: strings.NewReader(f.contents), fi: f, path: name}, nil
+}
+
+func TestDirectoryIfNotModified(t *testing.T) {
+ defer afterTest(t)
+ const indexContents = "I am a fake index.html file"
+ fileMod := time.Unix(1000000000, 0).UTC()
+ fileModStr := fileMod.Format(TimeFormat)
+ dirMod := time.Unix(123, 0).UTC()
+ indexFile := &fakeFileInfo{
+ basename: "index.html",
+ modtime: fileMod,
+ contents: indexContents,
+ }
+ fs := fakeFS{
+ "/": &fakeFileInfo{
+ dir: true,
+ modtime: dirMod,
+ ents: []*fakeFileInfo{indexFile},
+ },
+ "/index.html": indexFile,
+ }
+
+ ts := httptest.NewServer(FileServer(fs))
+ defer ts.Close()
+
+ res, err := Get(ts.URL)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if string(b) != indexContents {
+ t.Fatalf("Got body %q; want %q", b, indexContents)
+ }
+ res.Body.Close()
+
+ lastMod := res.Header.Get("Last-Modified")
+ if lastMod != fileModStr {
+ t.Fatalf("initial Last-Modified = %q; want %q", lastMod, fileModStr)
+ }
+
+ req, _ := NewRequest("GET", ts.URL, nil)
+ req.Header.Set("If-Modified-Since", lastMod)
+
+ res, err = DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != 304 {
+ t.Fatalf("Code after If-Modified-Since request = %v; want 304", res.StatusCode)
+ }
+ res.Body.Close()
+
+ // Advance the index.html file's modtime, but not the directory's.
+ indexFile.modtime = indexFile.modtime.Add(1 * time.Hour)
+
+ res, err = DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res.StatusCode != 200 {
+ t.Fatalf("Code after second If-Modified-Since request = %v; want 200; res is %#v", res.StatusCode, res)
+ }
+ res.Body.Close()
+}
+
+func mustStat(t *testing.T, fileName string) os.FileInfo {
+ fi, err := os.Stat(fileName)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return fi
+}
+
+func TestServeContent(t *testing.T) {
+ defer afterTest(t)
+ type serveParam struct {
+ name string
+ modtime time.Time
+ content io.ReadSeeker
+ contentType string
+ etag string
+ }
+ servec := make(chan serveParam, 1)
+ ts := httptest.NewServer(HandlerFunc(func(w ResponseWriter, r *Request) {
+ p := <-servec
+ if p.etag != "" {
+ w.Header().Set("ETag", p.etag)
+ }
+ if p.contentType != "" {
+ w.Header().Set("Content-Type", p.contentType)
+ }
+ ServeContent(w, r, p.name, p.modtime, p.content)
+ }))
+ defer ts.Close()
+
+ type testCase struct {
+ // One of file or content must be set:
+ file string
+ content io.ReadSeeker
+
+ modtime time.Time
+ serveETag string // optional
+ serveContentType string // optional
+ reqHeader map[string]string
+ wantLastMod string
+ wantContentType string
+ wantStatus int
+ }
+ htmlModTime := mustStat(t, "testdata/index.html").ModTime()
+ tests := map[string]testCase{
+ "no_last_modified": {
+ file: "testdata/style.css",
+ wantContentType: "text/css; charset=utf-8",
+ wantStatus: 200,
+ },
+ "with_last_modified": {
+ file: "testdata/index.html",
+ wantContentType: "text/html; charset=utf-8",
+ modtime: htmlModTime,
+ wantLastMod: htmlModTime.UTC().Format(TimeFormat),
+ wantStatus: 200,
+ },
+ "not_modified_modtime": {
+ file: "testdata/style.css",
+ modtime: htmlModTime,
+ reqHeader: map[string]string{
+ "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
+ },
+ wantStatus: 304,
+ },
+ "not_modified_modtime_with_contenttype": {
+ file: "testdata/style.css",
+ serveContentType: "text/css", // explicit content type
+ modtime: htmlModTime,
+ reqHeader: map[string]string{
+ "If-Modified-Since": htmlModTime.UTC().Format(TimeFormat),
+ },
+ wantStatus: 304,
+ },
+ "not_modified_etag": {
+ file: "testdata/style.css",
+ serveETag: `"foo"`,
+ reqHeader: map[string]string{
+ "If-None-Match": `"foo"`,
+ },
+ wantStatus: 304,
+ },
+ "not_modified_etag_no_seek": {
+ content: panicOnSeek{nil}, // should never be called
+ serveETag: `"foo"`,
+ reqHeader: map[string]string{
+ "If-None-Match": `"foo"`,
+ },
+ wantStatus: 304,
+ },
+ "range_good": {
+ file: "testdata/style.css",
+ serveETag: `"A"`,
+ reqHeader: map[string]string{
+ "Range": "bytes=0-4",
+ },
+ wantStatus: StatusPartialContent,
+ wantContentType: "text/css; charset=utf-8",
+ },
+ // An If-Range resource for entity "A", but entity "B" is now current.
+ // The Range request should be ignored.
+ "range_no_match": {
+ file: "testdata/style.css",
+ serveETag: `"A"`,
+ reqHeader: map[string]string{
+ "Range": "bytes=0-4",
+ "If-Range": `"B"`,
+ },
+ wantStatus: 200,
+ wantContentType: "text/css; charset=utf-8",
+ },
+ "range_with_modtime": {
+ file: "testdata/style.css",
+ modtime: time.Date(2014, 6, 25, 17, 12, 18, 0 /* nanos */, time.UTC),
+ reqHeader: map[string]string{
+ "Range": "bytes=0-4",
+ "If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
+ },
+ wantStatus: StatusPartialContent,
+ wantContentType: "text/css; charset=utf-8",
+ wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
+ },
+ "range_with_modtime_nanos": {
+ file: "testdata/style.css",
+ modtime: time.Date(2014, 6, 25, 17, 12, 18, 123 /* nanos */, time.UTC),
+ reqHeader: map[string]string{
+ "Range": "bytes=0-4",
+ "If-Range": "Wed, 25 Jun 2014 17:12:18 GMT",
+ },
+ wantStatus: StatusPartialContent,
+ wantContentType: "text/css; charset=utf-8",
+ wantLastMod: "Wed, 25 Jun 2014 17:12:18 GMT",
+ },
+ "unix_zero_modtime": {
+ content: strings.NewReader("foo"),
+ modtime: time.Unix(0, 0),
+ wantStatus: StatusOK,
+ wantContentType: "text/html; charset=utf-8",
+ },
+ }
+ for testName, tt := range tests {
+ var content io.ReadSeeker
+ if tt.file != "" {
+ f, err := os.Open(tt.file)
+ if err != nil {
+ t.Fatalf("test %q: %v", testName, err)
+ }
+ defer f.Close()
+ content = f
+ } else {
+ content = tt.content
+ }
+
+ servec <- serveParam{
+ name: filepath.Base(tt.file),
+ content: content,
+ modtime: tt.modtime,
+ etag: tt.serveETag,
+ contentType: tt.serveContentType,
+ }
+ req, err := NewRequest("GET", ts.URL, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for k, v := range tt.reqHeader {
+ req.Header.Set(k, v)
+ }
+ res, err := DefaultClient.Do(req)
+ if err != nil {
+ t.Fatal(err)
+ }
+ io.Copy(ioutil.Discard, res.Body)
+ res.Body.Close()
+ if res.StatusCode != tt.wantStatus {
+ t.Errorf("test %q: status = %d; want %d", testName, res.StatusCode, tt.wantStatus)
+ }
+ if g, e := res.Header.Get("Content-Type"), tt.wantContentType; g != e {
+ t.Errorf("test %q: content-type = %q, want %q", testName, g, e)
+ }
+ if g, e := res.Header.Get("Last-Modified"), tt.wantLastMod; g != e {
+ t.Errorf("test %q: last-modified = %q, want %q", testName, g, e)
+ }
+ }
+}
+
+// Issue 12991
+func TestServerFileStatError(t *testing.T) {
+ rec := httptest.NewRecorder()
+ r, _ := NewRequest("GET", "http://foo/", nil)
+ redirect := false
+ name := "file.txt"
+ fs := issue12991FS{}
+ ExportServeFile(rec, r, fs, name, redirect)
+ if body := rec.Body.String(); !strings.Contains(body, "403") || !strings.Contains(body, "Forbidden") {
+ t.Errorf("wanted 403 forbidden message; got: %s", body)
+ }
+}
+
+type issue12991FS struct{}
+
+func (issue12991FS) Open(string) (File, error) { return issue12991File{}, nil }
+
+type issue12991File struct{ File }
+
+func (issue12991File) Stat() (os.FileInfo, error) { return nil, os.ErrPermission }
+func (issue12991File) Close() error { return nil }
+
+func TestServeContentErrorMessages(t *testing.T) {
+ defer afterTest(t)
+ fs := fakeFS{
+ "/500": &fakeFileInfo{
+ err: errors.New("random error"),
+ },
+ "/403": &fakeFileInfo{
+ err: &os.PathError{Err: os.ErrPermission},
+ },
+ }
+ ts := httptest.NewServer(FileServer(fs))
+ defer ts.Close()
+ for _, code := range []int{403, 404, 500} {
+ res, err := DefaultClient.Get(fmt.Sprintf("%s/%d", ts.URL, code))
+ if err != nil {
+ t.Errorf("Error fetching /%d: %v", code, err)
+ continue
+ }
+ if res.StatusCode != code {
+ t.Errorf("For /%d, status code = %d; want %d", code, res.StatusCode, code)
+ }
+ res.Body.Close()
+ }
+}
+
+// verifies that sendfile is being used on Linux
+func TestLinuxSendfile(t *testing.T) {
+ defer afterTest(t)
+ if runtime.GOOS != "linux" {
+ t.Skip("skipping; linux-only test")
+ }
+ if _, err := exec.LookPath("strace"); err != nil {
+ t.Skip("skipping; strace not found in path")
+ }
+
+ ln, err := net.Listen("tcp", "127.0.0.1:0")
+ if err != nil {
+ t.Fatal(err)
+ }
+ lnf, err := ln.(*net.TCPListener).File()
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ln.Close()
+
+ syscalls := "sendfile,sendfile64"
+ switch runtime.GOARCH {
+ case "mips64", "mips64le":
+ // mips64 strace doesn't support sendfile64 and will error out
+ // if we specify that with `-e trace='.
+ syscalls = "sendfile"
+ }
+
+ var buf bytes.Buffer
+ child := exec.Command("strace", "-f", "-q", "-e", "trace="+syscalls, os.Args[0], "-test.run=TestLinuxSendfileChild")
+ child.ExtraFiles = append(child.ExtraFiles, lnf)
+ child.Env = append([]string{"GO_WANT_HELPER_PROCESS=1"}, os.Environ()...)
+ child.Stdout = &buf
+ child.Stderr = &buf
+ if err := child.Start(); err != nil {
+ t.Skipf("skipping; failed to start straced child: %v", err)
+ }
+
+ res, err := Get(fmt.Sprintf("http://%s/", ln.Addr()))
+ if err != nil {
+ t.Fatalf("http client error: %v", err)
+ }
+ _, err = io.Copy(ioutil.Discard, res.Body)
+ if err != nil {
+ t.Fatalf("client body read error: %v", err)
+ }
+ res.Body.Close()
+
+ // Force child to exit cleanly.
+ Post(fmt.Sprintf("http://%s/quit", ln.Addr()), "", nil)
+ child.Wait()
+
+ rx := regexp.MustCompile(`sendfile(64)?\(\d+,\s*\d+,\s*NULL,\s*\d+\)\s*=\s*\d+\s*\n`)
+ rxResume := regexp.MustCompile(`<\.\.\. sendfile(64)? resumed> \)\s*=\s*\d+\s*\n`)
+ out := buf.String()
+ if !rx.MatchString(out) && !rxResume.MatchString(out) {
+ t.Errorf("no sendfile system call found in:\n%s", out)
+ }
+}
+
+func getBody(t *testing.T, testName string, req Request) (*Response, []byte) {
+ r, err := DefaultClient.Do(&req)
+ if err != nil {
+ t.Fatalf("%s: for URL %q, send error: %v", testName, req.URL.String(), err)
+ }
+ b, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Fatalf("%s: for URL %q, reading body: %v", testName, req.URL.String(), err)
+ }
+ return r, b
+}
+
+// TestLinuxSendfileChild isn't a real test. It's used as a helper process
+// for TestLinuxSendfile.
+func TestLinuxSendfileChild(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+ fd3 := os.NewFile(3, "ephemeral-port-listener")
+ ln, err := net.FileListener(fd3)
+ if err != nil {
+ panic(err)
+ }
+ mux := NewServeMux()
+ mux.Handle("/", FileServer(Dir("testdata")))
+ mux.HandleFunc("/quit", func(ResponseWriter, *Request) {
+ os.Exit(0)
+ })
+ s := &Server{Handler: mux}
+ err = s.Serve(ln)
+ if err != nil {
+ panic(err)
+ }
+}
+
+func TestFileServerCleanPath(t *testing.T) {
+ tests := []struct {
+ path string
+ wantCode int
+ wantOpen []string
+ }{
+ {"/", 200, []string{"/", "/index.html"}},
+ {"/dir", 301, []string{"/dir"}},
+ {"/dir/", 200, []string{"/dir", "/dir/index.html"}},
+ }
+ for _, tt := range tests {
+ var log []string
+ rr := httptest.NewRecorder()
+ req, _ := NewRequest("GET", "http://foo.localhost"+tt.path, nil)
+ FileServer(fileServerCleanPathDir{&log}).ServeHTTP(rr, req)
+ if !reflect.DeepEqual(log, tt.wantOpen) {
+ t.Logf("For %s: Opens = %q; want %q", tt.path, log, tt.wantOpen)
+ }
+ if rr.Code != tt.wantCode {
+ t.Logf("For %s: Response code = %d; want %d", tt.path, rr.Code, tt.wantCode)
+ }
+ }
+}
+
+type fileServerCleanPathDir struct {
+ log *[]string
+}
+
+func (d fileServerCleanPathDir) Open(path string) (File, error) {
+ *(d.log) = append(*(d.log), path)
+ if path == "/" || path == "/dir" || path == "/dir/" {
+ // Just return back something that's a directory.
+ return Dir(".").Open(".")
+ }
+ return nil, os.ErrNotExist
+}
+
+type panicOnSeek struct{ io.ReadSeeker }
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/net/lookup_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/net/lookup_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/net/lookup_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/net/lookup_test.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,634 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package net
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+func lookupLocalhost(fn func(string) ([]IPAddr, error), host string) ([]IPAddr, error) {
+ switch host {
+ case "localhost":
+ return []IPAddr{
+ {IP: IPv4(127, 0, 0, 1)},
+ {IP: IPv6loopback},
+ }, nil
+ default:
+ return fn(host)
+ }
+}
+
+// The Lookup APIs use various sources such as local database, DNS or
+// mDNS, and may use platform-dependent DNS stub resolver if possible.
+// The APIs accept any of forms for a query; host name in various
+// encodings, UTF-8 encoded net name, domain name, FQDN or absolute
+// FQDN, but the result would be one of the forms and it depends on
+// the circumstances.
+
+var lookupGoogleSRVTests = []struct {
+ service, proto, name string
+ cname, target string
+}{
+ {
+ "xmpp-server", "tcp", "google.com",
+ "google.com.", "google.com.",
+ },
+ {
+ "xmpp-server", "tcp", "google.com.",
+ "google.com.", "google.com.",
+ },
+
+ // non-standard back door
+ {
+ "", "", "_xmpp-server._tcp.google.com",
+ "google.com.", "google.com.",
+ },
+ {
+ "", "", "_xmpp-server._tcp.google.com.",
+ "google.com.", "google.com.",
+ },
+}
+
+func TestLookupGoogleSRV(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGoogleSRVTests {
+ cname, srvs, err := LookupSRV(tt.service, tt.proto, tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(srvs) == 0 {
+ t.Error("got no record")
+ }
+ if !strings.HasSuffix(cname, tt.cname) {
+ t.Errorf("got %s; want %s", cname, tt.cname)
+ }
+ for _, srv := range srvs {
+ if !strings.HasSuffix(srv.Target, tt.target) {
+ t.Errorf("got %v; want a record containing %s", srv, tt.target)
+ }
+ }
+ }
+}
+
+var lookupGmailMXTests = []struct {
+ name, host string
+}{
+ {"gmail.com", "google.com."},
+ {"gmail.com.", "google.com."},
+}
+
+func TestLookupGmailMX(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGmailMXTests {
+ mxs, err := LookupMX(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(mxs) == 0 {
+ t.Error("got no record")
+ }
+ for _, mx := range mxs {
+ if !strings.HasSuffix(mx.Host, tt.host) {
+ t.Errorf("got %v; want a record containing %s", mx, tt.host)
+ }
+ }
+ }
+}
+
+var lookupGmailNSTests = []struct {
+ name, host string
+}{
+ {"gmail.com", "google.com."},
+ {"gmail.com.", "google.com."},
+}
+
+func TestLookupGmailNS(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGmailNSTests {
+ nss, err := LookupNS(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(nss) == 0 {
+ t.Error("got no record")
+ }
+ for _, ns := range nss {
+ if !strings.HasSuffix(ns.Host, tt.host) {
+ t.Errorf("got %v; want a record containing %s", ns, tt.host)
+ }
+ }
+ }
+}
+
+var lookupGmailTXTTests = []struct {
+ name, txt, host string
+}{
+ {"gmail.com", "spf", "google.com"},
+ {"gmail.com.", "spf", "google.com"},
+}
+
+func TestLookupGmailTXT(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGmailTXTTests {
+ txts, err := LookupTXT(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(txts) == 0 {
+ t.Error("got no record")
+ }
+ for _, txt := range txts {
+ if !strings.Contains(txt, tt.txt) || (!strings.HasSuffix(txt, tt.host) && !strings.HasSuffix(txt, tt.host+".")) {
+ t.Errorf("got %s; want a record containing %s, %s", txt, tt.txt, tt.host)
+ }
+ }
+ }
+}
+
+var lookupGooglePublicDNSAddrTests = []struct {
+ addr, name string
+}{
+ {"8.8.8.8", ".google.com."},
+ {"8.8.4.4", ".google.com."},
+
+ {"2001:4860:4860::8888", ".google.com."},
+ {"2001:4860:4860::8844", ".google.com."},
+}
+
+func TestLookupGooglePublicDNSAddr(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !supportsIPv6 || !*testIPv4 || !*testIPv6 {
+ t.Skip("both IPv4 and IPv6 are required")
+ }
+
+ for _, tt := range lookupGooglePublicDNSAddrTests {
+ names, err := LookupAddr(tt.addr)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(names) == 0 {
+ t.Error("got no record")
+ }
+ for _, name := range names {
+ if !strings.HasSuffix(name, tt.name) {
+ t.Errorf("got %s; want a record containing %s", name, tt.name)
+ }
+ }
+ }
+}
+
+func TestLookupIPv6LinkLocalAddr(t *testing.T) {
+ if !supportsIPv6 || !*testIPv6 {
+ t.Skip("IPv6 is required")
+ }
+
+ addrs, err := LookupHost("localhost")
+ if err != nil {
+ t.Fatal(err)
+ }
+ found := false
+ for _, addr := range addrs {
+ if addr == "fe80::1%lo0" {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+ if _, err := LookupAddr("fe80::1%lo0"); err != nil {
+ t.Error(err)
+ }
+}
+
+var lookupIANACNAMETests = []struct {
+ name, cname string
+}{
+ {"www.iana.org", "icann.org."},
+ {"www.iana.org.", "icann.org."},
+}
+
+func TestLookupIANACNAME(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupIANACNAMETests {
+ cname, err := LookupCNAME(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !strings.HasSuffix(cname, tt.cname) {
+ t.Errorf("got %s; want a record containing %s", cname, tt.cname)
+ }
+ }
+}
+
+var lookupGoogleHostTests = []struct {
+ name string
+}{
+ {"google.com"},
+ {"google.com."},
+}
+
+func TestLookupGoogleHost(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGoogleHostTests {
+ addrs, err := LookupHost(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(addrs) == 0 {
+ t.Error("got no record")
+ }
+ for _, addr := range addrs {
+ if ParseIP(addr) == nil {
+ t.Errorf("got %q; want a literal IP address", addr)
+ }
+ }
+ }
+}
+
+var lookupGoogleIPTests = []struct {
+ name string
+}{
+ {"google.com"},
+ {"google.com."},
+}
+
+func TestLookupGoogleIP(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for _, tt := range lookupGoogleIPTests {
+ ips, err := LookupIP(tt.name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if len(ips) == 0 {
+ t.Error("got no record")
+ }
+ for _, ip := range ips {
+ if ip.To4() == nil && ip.To16() == nil {
+ t.Errorf("got %v; want an IP address", ip)
+ }
+ }
+ }
+}
+
+var revAddrTests = []struct {
+ Addr string
+ Reverse string
+ ErrPrefix string
+}{
+ {"1.2.3.4", "4.3.2.1.in-addr.arpa.", ""},
+ {"245.110.36.114", "114.36.110.245.in-addr.arpa.", ""},
+ {"::ffff:12.34.56.78", "78.56.34.12.in-addr.arpa.", ""},
+ {"::1", "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa.", ""},
+ {"1::", "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1.0.0.0.ip6.arpa.", ""},
+ {"1234:567::89a:bcde", "e.d.c.b.a.9.8.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
+ {"1234:567:fefe:bcbc:adad:9e4a:89a:bcde", "e.d.c.b.a.9.8.0.a.4.e.9.d.a.d.a.c.b.c.b.e.f.e.f.7.6.5.0.4.3.2.1.ip6.arpa.", ""},
+ {"1.2.3", "", "unrecognized address"},
+ {"1.2.3.4.5", "", "unrecognized address"},
+ {"1234:567:bcbca::89a:bcde", "", "unrecognized address"},
+ {"1234:567::bcbc:adad::89a:bcde", "", "unrecognized address"},
+}
+
+func TestReverseAddress(t *testing.T) {
+ for i, tt := range revAddrTests {
+ a, err := reverseaddr(tt.Addr)
+ if len(tt.ErrPrefix) > 0 && err == nil {
+ t.Errorf("#%d: expected %q, got (error)", i, tt.ErrPrefix)
+ continue
+ }
+ if len(tt.ErrPrefix) == 0 && err != nil {
+ t.Errorf("#%d: expected , got %q (error)", i, err)
+ }
+ if err != nil && err.(*DNSError).Err != tt.ErrPrefix {
+ t.Errorf("#%d: expected %q, got %q (mismatched error)", i, tt.ErrPrefix, err.(*DNSError).Err)
+ }
+ if a != tt.Reverse {
+ t.Errorf("#%d: expected %q, got %q (reverse address)", i, tt.Reverse, a)
+ }
+ }
+}
+
+func TestLookupIPDeadline(t *testing.T) {
+ if !*testDNSFlood {
+ t.Skip("test disabled; use -dnsflood to enable")
+ }
+
+ const N = 5000
+ const timeout = 3 * time.Second
+ c := make(chan error, 2*N)
+ for i := 0; i < N; i++ {
+ name := fmt.Sprintf("%d.net-test.golang.org", i)
+ go func() {
+ _, err := lookupIPDeadline(name, time.Now().Add(timeout/2))
+ c <- err
+ }()
+ go func() {
+ _, err := lookupIPDeadline(name, time.Now().Add(timeout))
+ c <- err
+ }()
+ }
+ qstats := struct {
+ succeeded, failed int
+ timeout, temporary, other int
+ unknown int
+ }{}
+ deadline := time.After(timeout + time.Second)
+ for i := 0; i < 2*N; i++ {
+ select {
+ case <-deadline:
+ t.Fatal("deadline exceeded")
+ case err := <-c:
+ switch err := err.(type) {
+ case nil:
+ qstats.succeeded++
+ case Error:
+ qstats.failed++
+ if err.Timeout() {
+ qstats.timeout++
+ }
+ if err.Temporary() {
+ qstats.temporary++
+ }
+ if !err.Timeout() && !err.Temporary() {
+ qstats.other++
+ }
+ default:
+ qstats.failed++
+ qstats.unknown++
+ }
+ }
+ }
+
+ // A high volume of DNS queries for sub-domain of golang.org
+ // would be coordinated by authoritative or recursive server,
+ // or stub resolver which implements query-response rate
+ // limitation, so we can expect some query successes and more
+ // failures including timeout, temporary and other here.
+ // As a rule, unknown must not be shown but it might possibly
+ // happen due to issue 4856 for now.
+ t.Logf("%v succeeded, %v failed (%v timeout, %v temporary, %v other, %v unknown)", qstats.succeeded, qstats.failed, qstats.timeout, qstats.temporary, qstats.other, qstats.unknown)
+}
+
+func TestLookupDotsWithLocalSource(t *testing.T) {
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ for i, fn := range []func() func(){forceGoDNS, forceCgoDNS} {
+ fixup := fn()
+ if fixup == nil {
+ continue
+ }
+ names, err := LookupAddr("127.0.0.1")
+ fixup()
+ if err != nil {
+ t.Logf("#%d: %v", i, err)
+ continue
+ }
+ mode := "netgo"
+ if i == 1 {
+ mode = "netcgo"
+ }
+ loop:
+ for i, name := range names {
+ if strings.Index(name, ".") == len(name)-1 { // "localhost" not "localhost."
+ for j := range names {
+ if j == i {
+ continue
+ }
+ if names[j] == name[:len(name)-1] {
+ // It's OK if we find the name without the dot,
+ // as some systems say 127.0.0.1 localhost localhost.
+ continue loop
+ }
+ }
+ t.Errorf("%s: got %s; want %s", mode, name, name[:len(name)-1])
+ } else if strings.Contains(name, ".") && !strings.HasSuffix(name, ".") { // "localhost.localdomain." not "localhost.localdomain"
+ t.Errorf("%s: got %s; want name ending with trailing dot", mode, name)
+ }
+ }
+ }
+}
+
+func TestLookupDotsWithRemoteSource(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" || !*testExternal {
+ t.Skip("avoid external network")
+ }
+ if !supportsIPv4 || !*testIPv4 {
+ t.Skip("IPv4 is required")
+ }
+
+ if fixup := forceGoDNS(); fixup != nil {
+ testDots(t, "go")
+ fixup()
+ }
+ if fixup := forceCgoDNS(); fixup != nil {
+ testDots(t, "cgo")
+ fixup()
+ }
+}
+
+func testDots(t *testing.T, mode string) {
+ names, err := LookupAddr("8.8.8.8") // Google dns server
+ if err != nil {
+ t.Errorf("LookupAddr(8.8.8.8): %v (mode=%v)", err, mode)
+ } else {
+ for _, name := range names {
+ if !strings.HasSuffix(name, ".google.com.") {
+ t.Errorf("LookupAddr(8.8.8.8) = %v, want names ending in .google.com. with trailing dot (mode=%v)", names, mode)
+ break
+ }
+ }
+ }
+
+ cname, err := LookupCNAME("www.mit.edu")
+ if err != nil || !strings.HasSuffix(cname, ".") {
+ t.Errorf("LookupCNAME(www.mit.edu) = %v, %v, want cname ending in . with trailing dot (mode=%v)", cname, err, mode)
+ }
+
+ mxs, err := LookupMX("google.com")
+ if err != nil {
+ t.Errorf("LookupMX(google.com): %v (mode=%v)", err, mode)
+ } else {
+ for _, mx := range mxs {
+ if !strings.HasSuffix(mx.Host, ".google.com.") {
+ t.Errorf("LookupMX(google.com) = %v, want names ending in .google.com. with trailing dot (mode=%v)", mxString(mxs), mode)
+ break
+ }
+ }
+ }
+
+ nss, err := LookupNS("google.com")
+ if err != nil {
+ t.Errorf("LookupNS(google.com): %v (mode=%v)", err, mode)
+ } else {
+ for _, ns := range nss {
+ if !strings.HasSuffix(ns.Host, ".google.com.") {
+ t.Errorf("LookupNS(google.com) = %v, want names ending in .google.com. with trailing dot (mode=%v)", nsString(nss), mode)
+ break
+ }
+ }
+ }
+
+ cname, srvs, err := LookupSRV("xmpp-server", "tcp", "google.com")
+ if err != nil {
+ t.Errorf("LookupSRV(xmpp-server, tcp, google.com): %v (mode=%v)", err, mode)
+ } else {
+ if !strings.HasSuffix(cname, ".google.com.") {
+ t.Errorf("LookupSRV(xmpp-server, tcp, google.com) returned cname=%v, want name ending in .google.com. with trailing dot (mode=%v)", cname, mode)
+ }
+ for _, srv := range srvs {
+ if !strings.HasSuffix(srv.Target, ".google.com.") {
+ t.Errorf("LookupSRV(xmpp-server, tcp, google.com) returned addrs=%v, want names ending in .google.com. with trailing dot (mode=%v)", srvString(srvs), mode)
+ break
+ }
+ }
+ }
+}
+
+func mxString(mxs []*MX) string {
+ var buf bytes.Buffer
+ sep := ""
+ fmt.Fprintf(&buf, "[")
+ for _, mx := range mxs {
+ fmt.Fprintf(&buf, "%s%s:%d", sep, mx.Host, mx.Pref)
+ sep = " "
+ }
+ fmt.Fprintf(&buf, "]")
+ return buf.String()
+}
+
+func nsString(nss []*NS) string {
+ var buf bytes.Buffer
+ sep := ""
+ fmt.Fprintf(&buf, "[")
+ for _, ns := range nss {
+ fmt.Fprintf(&buf, "%s%s", sep, ns.Host)
+ sep = " "
+ }
+ fmt.Fprintf(&buf, "]")
+ return buf.String()
+}
+
+func srvString(srvs []*SRV) string {
+ var buf bytes.Buffer
+ sep := ""
+ fmt.Fprintf(&buf, "[")
+ for _, srv := range srvs {
+ fmt.Fprintf(&buf, "%s%s:%d:%d:%d", sep, srv.Target, srv.Port, srv.Priority, srv.Weight)
+ sep = " "
+ }
+ fmt.Fprintf(&buf, "]")
+ return buf.String()
+}
+
+var lookupPortTests = []struct {
+ network string
+ name string
+ port int
+ ok bool
+}{
+ {"tcp", "0", 0, true},
+ {"tcp", "echo", 7, true},
+ {"tcp", "discard", 9, true},
+ {"tcp", "systat", 11, true},
+ {"tcp", "daytime", 13, true},
+ {"tcp", "chargen", 19, true},
+ {"tcp", "ftp-data", 20, true},
+ {"tcp", "ftp", 21, true},
+ {"tcp", "telnet", 23, true},
+ {"tcp", "smtp", 25, true},
+ {"tcp", "time", 37, true},
+ {"tcp", "domain", 53, true},
+ {"tcp", "finger", 79, true},
+ {"tcp", "42", 42, true},
+
+ {"udp", "0", 0, true},
+ {"udp", "echo", 7, true},
+ {"udp", "tftp", 69, true},
+ {"udp", "bootpc", 68, true},
+ {"udp", "bootps", 67, true},
+ {"udp", "domain", 53, true},
+ {"udp", "ntp", 123, true},
+ {"udp", "snmp", 161, true},
+ {"udp", "syslog", 514, true},
+ {"udp", "42", 42, true},
+
+ {"--badnet--", "zzz", 0, false},
+ {"tcp", "--badport--", 0, false},
+ {"tcp", "-1", 0, false},
+ {"tcp", "65536", 0, false},
+ {"udp", "-1", 0, false},
+ {"udp", "65536", 0, false},
+
+ // Issue 13610: LookupPort("tcp", "")
+ {"tcp", "", 0, true},
+ {"tcp6", "", 0, true},
+ {"tcp4", "", 0, true},
+ {"udp", "", 0, true},
+}
+
+func TestLookupPort(t *testing.T) {
+ switch runtime.GOOS {
+ case "nacl":
+ t.Skipf("not supported on %s", runtime.GOOS)
+ }
+
+ for _, tt := range lookupPortTests {
+ if port, err := LookupPort(tt.network, tt.name); port != tt.port || (err == nil) != tt.ok {
+ t.Errorf("LookupPort(%q, %q) = %d, %v; want %d", tt.network, tt.name, port, err, tt.port)
+ }
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/cgocall.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/cgocall.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/cgocall.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/cgocall.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,602 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Cgo call and callback support.
+//
+// To call into the C function f from Go, the cgo-generated code calls
+// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
+// gcc-compiled function written by cgo.
+//
+// runtime.cgocall (below) locks g to m, calls entersyscall
+// so as not to block other goroutines or the garbage collector,
+// and then calls runtime.asmcgocall(_cgo_Cfunc_f, frame).
+//
+// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
+// (assumed to be an operating system-allocated stack, so safe to run
+// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
+//
+// _cgo_Cfunc_f invokes the actual C function f with arguments
+// taken from the frame structure, records the results in the frame,
+// and returns to runtime.asmcgocall.
+//
+// After it regains control, runtime.asmcgocall switches back to the
+// original g (m->curg)'s stack and returns to runtime.cgocall.
+//
+// After it regains control, runtime.cgocall calls exitsyscall, which blocks
+// until this m can run Go code without violating the $GOMAXPROCS limit,
+// and then unlocks g from m.
+//
+// The above description skipped over the possibility of the gcc-compiled
+// function f calling back into Go. If that happens, we continue down
+// the rabbit hole during the execution of f.
+//
+// To make it possible for gcc-compiled C code to call a Go function p.GoF,
+// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
+// know about packages). The gcc-compiled C function f calls GoF.
+//
+// GoF calls crosscall2(_cgoexp_GoF, frame, framesize). Crosscall2
+// (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument
+// adapter from the gcc function call ABI to the 6c function call ABI.
+// It is called from gcc to call 6c functions. In this case it calls
+// _cgoexp_GoF(frame, framesize), still running on m->g0's stack
+// and outside the $GOMAXPROCS limit. Thus, this code cannot yet
+// call arbitrary Go code directly and must be careful not to allocate
+// memory or use up m->g0's stack.
+//
+// _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize).
+// (The reason for having _cgoexp_GoF instead of writing a crosscall3
+// to make this call directly is that _cgoexp_GoF, because it is compiled
+// with 6c instead of gcc, can refer to dotted names like
+// runtime.cgocallback and p.GoF.)
+//
+// runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's
+// stack to the original g (m->curg)'s stack, on which it calls
+// runtime.cgocallbackg(p.GoF, frame, framesize).
+// As part of the stack switch, runtime.cgocallback saves the current
+// SP as m->g0->sched.sp, so that any use of m->g0's stack during the
+// execution of the callback will be done below the existing stack frames.
+// Before overwriting m->g0->sched.sp, it pushes the old value on the
+// m->g0 stack, so that it can be restored later.
+//
+// runtime.cgocallbackg (below) is now running on a real goroutine
+// stack (not an m->g0 stack). First it calls runtime.exitsyscall, which will
+// block until the $GOMAXPROCS limit allows running this goroutine.
+// Once exitsyscall has returned, it is safe to do things like call the memory
+// allocator or invoke the Go callback function p.GoF. runtime.cgocallbackg
+// first defers a function to unwind m->g0.sched.sp, so that if p.GoF
+// panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
+// and the m->curg stack will be unwound in lock step.
+// Then it calls p.GoF. Finally it pops but does not execute the deferred
+// function, calls runtime.entersyscall, and returns to runtime.cgocallback.
+//
+// After it regains control, runtime.cgocallback switches back to
+// m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old
+// m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
+//
+// _cgoexp_GoF immediately returns to crosscall2, which restores the
+// callee-save registers for gcc and returns to GoF, which returns to f.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+// Call from Go to C.
+//go:nosplit
+func cgocall(fn, arg unsafe.Pointer) int32 {
+ if !iscgo && GOOS != "solaris" && GOOS != "windows" {
+ throw("cgocall unavailable")
+ }
+
+ if fn == nil {
+ throw("cgocall nil")
+ }
+
+ if raceenabled {
+ racereleasemerge(unsafe.Pointer(&racecgosync))
+ }
+
+ /*
+ * Lock g to m to ensure we stay on the same stack if we do a
+ * cgo callback. Add entry to defer stack in case of panic.
+ */
+ lockOSThread()
+ mp := getg().m
+ mp.ncgocall++
+ mp.ncgo++
+ defer endcgo(mp)
+
+ /*
+ * Announce we are entering a system call
+ * so that the scheduler knows to create another
+ * M to run goroutines while we are in the
+ * foreign code.
+ *
+ * The call to asmcgocall is guaranteed not to
+ * split the stack and does not allocate memory,
+ * so it is safe to call while "in a system call", outside
+ * the $GOMAXPROCS accounting.
+ */
+ entersyscall(0)
+ errno := asmcgocall(fn, arg)
+ exitsyscall(0)
+
+ return errno
+}
+
+//go:nosplit
+func endcgo(mp *m) {
+ mp.ncgo--
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&racecgosync))
+ }
+
+ unlockOSThread() // invalidates mp
+}
+
+// Helper functions for cgo code.
+
+func cmalloc(n uintptr) unsafe.Pointer {
+ var args struct {
+ n uint64
+ ret unsafe.Pointer
+ }
+ args.n = uint64(n)
+ cgocall(_cgo_malloc, unsafe.Pointer(&args))
+ if args.ret == nil {
+ throw("C malloc failed")
+ }
+ return args.ret
+}
+
+func cfree(p unsafe.Pointer) {
+ cgocall(_cgo_free, p)
+}
+
+// Call from C back to Go.
+//go:nosplit
+func cgocallbackg() {
+ gp := getg()
+ if gp != gp.m.curg {
+ println("runtime: bad g in cgocallback")
+ exit(2)
+ }
+
+ // Save current syscall parameters, so m.syscall can be
+ // used again if callback decide to make syscall.
+ syscall := gp.m.syscall
+
+ // entersyscall saves the caller's SP to allow the GC to trace the Go
+ // stack. However, since we're returning to an earlier stack frame and
+ // need to pair with the entersyscall() call made by cgocall, we must
+ // save syscall* and let reentersyscall restore them.
+ savedsp := unsafe.Pointer(gp.syscallsp)
+ savedpc := gp.syscallpc
+ exitsyscall(0) // coming out of cgo call
+ cgocallbackg1()
+ // going back to cgo call
+ reentersyscall(savedpc, uintptr(savedsp))
+
+ gp.m.syscall = syscall
+}
+
+func cgocallbackg1() {
+ gp := getg()
+ if gp.m.needextram {
+ gp.m.needextram = false
+ systemstack(newextram)
+ }
+
+ if gp.m.ncgo == 0 {
+ // The C call to Go came from a thread not currently running
+ // any Go. In the case of -buildmode=c-archive or c-shared,
+ // this call may be coming in before package initialization
+ // is complete. Wait until it is.
+ <-main_init_done
+ }
+
+ // Add entry to defer stack in case of panic.
+ restore := true
+ defer unwindm(&restore)
+
+ if raceenabled {
+ raceacquire(unsafe.Pointer(&racecgosync))
+ }
+
+ type args struct {
+ fn *funcval
+ arg unsafe.Pointer
+ argsize uintptr
+ }
+ var cb *args
+
+ // Location of callback arguments depends on stack frame layout
+ // and size of stack frame of cgocallback_gofunc.
+ sp := gp.m.g0.sched.sp
+ switch GOARCH {
+ default:
+ throw("cgocallbackg is unimplemented on arch")
+ case "arm":
+ // On arm, stack frame is two words and there's a saved LR between
+ // SP and the stack frame and between the stack frame and the arguments.
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
+ case "arm64":
+ // On arm64, stack frame is four words and there's a saved LR between
+ // SP and the stack frame and between the stack frame and the arguments.
+ cb = (*args)(unsafe.Pointer(sp + 5*sys.PtrSize))
+ case "amd64":
+ // On amd64, stack frame is one word, plus caller PC.
+ if framepointer_enabled {
+ // In this case, there's also saved BP.
+ cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
+ break
+ }
+ cb = (*args)(unsafe.Pointer(sp + 2*sys.PtrSize))
+ case "386":
+ // On 386, stack frame is three words, plus caller PC.
+ cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
+ case "ppc64", "ppc64le":
+ // On ppc64, the callback arguments are in the arguments area of
+ // cgocallback's stack frame. The stack looks like this:
+ // +--------------------+------------------------------+
+ // | | ... |
+ // | cgoexp_$fn +------------------------------+
+ // | | fixed frame area |
+ // +--------------------+------------------------------+
+ // | | arguments area |
+ // | cgocallback +------------------------------+ <- sp + 2*minFrameSize + 2*ptrSize
+ // | | fixed frame area |
+ // +--------------------+------------------------------+ <- sp + minFrameSize + 2*ptrSize
+ // | | local variables (2 pointers) |
+ // | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
+ // | | fixed frame area |
+ // +--------------------+------------------------------+ <- sp
+ cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
+ }
+
+ // Invoke callback.
+ // NOTE(rsc): passing nil for argtype means that the copying of the
+ // results back into cb.arg happens without any corresponding write barriers.
+ // For cgo, cb.arg points into a C stack frame and therefore doesn't
+ // hold any pointers that the GC can find anyway - the write barrier
+ // would be a no-op.
+ reflectcall(nil, unsafe.Pointer(cb.fn), unsafe.Pointer(cb.arg), uint32(cb.argsize), 0)
+
+ if raceenabled {
+ racereleasemerge(unsafe.Pointer(&racecgosync))
+ }
+ if msanenabled {
+ // Tell msan that we wrote to the entire argument block.
+ // This tells msan that we set the results.
+ // Since we have already called the function it doesn't
+ // matter that we are writing to the non-result parameters.
+ msanwrite(cb.arg, cb.argsize)
+ }
+
+ // Do not unwind m->g0->sched.sp.
+ // Our caller, cgocallback, will do that.
+ restore = false
+}
+
+func unwindm(restore *bool) {
+ if !*restore {
+ return
+ }
+ // Restore sp saved by cgocallback during
+ // unwind of g's stack (see comment at top of file).
+ mp := acquirem()
+ sched := &mp.g0.sched
+ switch GOARCH {
+ default:
+ throw("unwindm not implemented")
+ case "386", "amd64", "arm", "ppc64", "ppc64le":
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
+ case "arm64":
+ sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
+ }
+ releasem(mp)
+}
+
+// called from assembly
+func badcgocallback() {
+ throw("misaligned stack in cgocallback")
+}
+
+// called from (incomplete) assembly
+func cgounimpl() {
+ throw("cgo not implemented")
+}
+
+var racecgosync uint64 // represents possible synchronization in C code
+
+// Pointer checking for cgo code.
+
+// We want to detect all cases where a program that does not use
+// unsafe makes a cgo call passing a Go pointer to memory that
+// contains a Go pointer. Here a Go pointer is defined as a pointer
+// to memory allocated by the Go runtime. Programs that use unsafe
+// can evade this restriction easily, so we don't try to catch them.
+// The cgo program will rewrite all possibly bad pointer arguments to
+// call cgoCheckPointer, where we can catch cases of a Go pointer
+// pointing to a Go pointer.
+
+// Complicating matters, taking the address of a slice or array
+// element permits the C program to access all elements of the slice
+// or array. In that case we will see a pointer to a single element,
+// but we need to check the entire data structure.
+
+// The cgoCheckPointer call takes additional arguments indicating that
+// it was called on an address expression. An additional argument of
+// true means that it only needs to check a single element. An
+// additional argument of a slice or array means that it needs to
+// check the entire slice/array, but nothing else. Otherwise, the
+// pointer could be anything, and we check the entire heap object,
+// which is conservative but safe.
+
+// When and if we implement a moving garbage collector,
+// cgoCheckPointer will pin the pointer for the duration of the cgo
+// call. (This is necessary but not sufficient; the cgo program will
+// also have to change to pin Go pointers that can not point to Go
+// pointers.)
+
+// cgoCheckPointer checks if the argument contains a Go pointer that
+// points to a Go pointer, and panics if it does. It returns the pointer.
+func cgoCheckPointer(ptr interface{}, args ...interface{}) interface{} {
+ if debug.cgocheck == 0 {
+ return ptr
+ }
+
+ ep := (*eface)(unsafe.Pointer(&ptr))
+ t := ep._type
+
+ top := true
+ if len(args) > 0 && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
+ p := ep.data
+ if t.kind&kindDirectIface == 0 {
+ p = *(*unsafe.Pointer)(p)
+ }
+ if !cgoIsGoPointer(p) {
+ return ptr
+ }
+ aep := (*eface)(unsafe.Pointer(&args[0]))
+ switch aep._type.kind & kindMask {
+ case kindBool:
+ if t.kind&kindMask == kindUnsafePointer {
+ // We don't know the type of the element.
+ break
+ }
+ pt := (*ptrtype)(unsafe.Pointer(t))
+ cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
+ return ptr
+ case kindSlice:
+ // Check the slice rather than the pointer.
+ ep = aep
+ t = ep._type
+ case kindArray:
+ // Check the array rather than the pointer.
+ // Pass top as false since we have a pointer
+ // to the array.
+ ep = aep
+ t = ep._type
+ top = false
+ default:
+ throw("can't happen")
+ }
+ }
+
+ cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
+ return ptr
+}
+
+const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
+const cgoResultFail = "cgo result has Go pointer"
+
+// cgoCheckArg is the real work of cgoCheckPointer. The argument p
+// is either a pointer to the value (of type t), or the value itself,
+// depending on indir. The top parameter is whether we are at the top
+// level, where Go pointers are allowed.
+func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
+ if t.kind&kindNoPointers != 0 {
+ // If the type has no pointers there is nothing to do.
+ return
+ }
+
+ switch t.kind & kindMask {
+ default:
+ throw("can't happen")
+ case kindArray:
+ at := (*arraytype)(unsafe.Pointer(t))
+ if !indir {
+ if at.len != 1 {
+ throw("can't happen")
+ }
+ cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
+ return
+ }
+ for i := uintptr(0); i < at.len; i++ {
+ cgoCheckArg(at.elem, p, true, top, msg)
+ p = add(p, at.elem.size)
+ }
+ case kindChan, kindMap:
+ // These types contain internal pointers that will
+ // always be allocated in the Go heap. It's never OK
+ // to pass them to C.
+ panic(errorString(msg))
+ case kindFunc:
+ if indir {
+ p = *(*unsafe.Pointer)(p)
+ }
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ panic(errorString(msg))
+ case kindInterface:
+ it := *(**_type)(p)
+ if it == nil {
+ return
+ }
+ // A type known at compile time is OK since it's
+ // constant. A type not known at compile time will be
+ // in the heap and will not be OK.
+ if inheap(uintptr(unsafe.Pointer(it))) {
+ panic(errorString(msg))
+ }
+ p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
+ case kindSlice:
+ st := (*slicetype)(unsafe.Pointer(t))
+ s := (*slice)(p)
+ p = s.array
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ if st.elem.kind&kindNoPointers != 0 {
+ return
+ }
+ for i := 0; i < s.cap; i++ {
+ cgoCheckArg(st.elem, p, true, false, msg)
+ p = add(p, st.elem.size)
+ }
+ case kindString:
+ ss := (*stringStruct)(p)
+ if !cgoIsGoPointer(ss.str) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+ case kindStruct:
+ st := (*structtype)(unsafe.Pointer(t))
+ if !indir {
+ if len(st.fields) != 1 {
+ throw("can't happen")
+ }
+ cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
+ return
+ }
+ for _, f := range st.fields {
+ cgoCheckArg(f.typ, add(p, f.offset), true, top, msg)
+ }
+ case kindPtr, kindUnsafePointer:
+ if indir {
+ p = *(*unsafe.Pointer)(p)
+ }
+
+ if !cgoIsGoPointer(p) {
+ return
+ }
+ if !top {
+ panic(errorString(msg))
+ }
+
+ cgoCheckUnknownPointer(p, msg)
+ }
+}
+
+// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
+// memory. It checks whether that Go memory contains any other
+// pointer into Go memory. If it does, we panic.
+// The return values are unused but useful to see in panic tracebacks.
+func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
+ if cgoInRange(p, mheap_.arena_start, mheap_.arena_used) {
+ if !inheap(uintptr(p)) {
+ // On 32-bit systems it is possible for C's allocated memory
+ // to have addresses between arena_start and arena_used.
+ // Either this pointer is a stack or an unused span or it's
+ // a C allocation. Escape analysis should prevent the first,
+ // garbage collection should prevent the second,
+ // and the third is completely OK.
+ return
+ }
+
+ b, hbits, span := heapBitsForObject(uintptr(p), 0, 0)
+ base = b
+ if base == 0 {
+ return
+ }
+ n := span.elemsize
+ for i = uintptr(0); i < n; i += sys.PtrSize {
+ bits := hbits.bits()
+ if i >= 2*sys.PtrSize && bits&bitMarked == 0 {
+ // No more possible pointers.
+ break
+ }
+ if bits&bitPointer != 0 {
+ if cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
+ panic(errorString(msg))
+ }
+ }
+ hbits = hbits.next()
+ }
+
+ return
+ }
+
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
+ // We have no way to know the size of the object.
+ // We have to assume that it might contain a pointer.
+ panic(errorString(msg))
+ }
+ // In the text or noptr sections, we know that the
+ // pointer does not point to a Go pointer.
+ }
+
+ return
+}
+
+// cgoIsGoPointer returns whether the pointer is a Go pointer--a
+// pointer to Go memory. We only care about Go memory that might
+// contain pointers.
+//go:nosplit
+//go:nowritebarrierrec
+func cgoIsGoPointer(p unsafe.Pointer) bool {
+ if p == nil {
+ return false
+ }
+
+ if cgoInRange(p, mheap_.arena_start, mheap_.arena_used) {
+ return true
+ }
+
+ for datap := &firstmoduledata; datap != nil; datap = datap.next {
+ if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// cgoInRange returns whether p is between start and end.
+//go:nosplit
+//go:nowritebarrierrec
+func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
+ return start <= uintptr(p) && uintptr(p) < end
+}
+
+// cgoCheckResult is called to check the result parameter of an
+// exported Go function. It panics if the result is or contains a Go
+// pointer.
+func cgoCheckResult(val interface{}) {
+ if debug.cgocheck == 0 {
+ return
+ }
+
+ ep := (*eface)(unsafe.Pointer(&val))
+ t := ep._type
+ cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/extern.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/extern.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/extern.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/extern.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,234 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package runtime contains operations that interact with Go's runtime system,
+such as functions to control goroutines. It also includes the low-level type information
+used by the reflect package; see reflect's documentation for the programmable
+interface to the run-time type system.
+
+Environment Variables
+
+The following environment variables ($name or %name%, depending on the host
+operating system) control the run-time behavior of Go programs. The meanings
+and use may change from release to release.
+
+The GOGC variable sets the initial garbage collection target percentage.
+A collection is triggered when the ratio of freshly allocated data to live data
+remaining after the previous collection reaches this percentage. The default
+is GOGC=100. Setting GOGC=off disables the garbage collector entirely.
+The runtime/debug package's SetGCPercent function allows changing this
+percentage at run time. See https://golang.org/pkg/runtime/debug/#SetGCPercent.
+
+The GODEBUG variable controls debugging variables within the runtime.
+It is a comma-separated list of name=val pairs setting these named variables:
+
+ allocfreetrace: setting allocfreetrace=1 causes every allocation to be
+ profiled and a stack trace printed on each object's allocation and free.
+
+ cgocheck: setting cgocheck=0 disables all checks for packages
+ using cgo to incorrectly pass Go pointers to non-Go code.
+ Setting cgocheck=1 (the default) enables relatively cheap
+ checks that may miss some errors. Setting cgocheck=2 enables
+ expensive checks that should not miss any errors, but will
+ cause your program to run slower.
+
+ efence: setting efence=1 causes the allocator to run in a mode
+ where each object is allocated on a unique page and addresses are
+ never recycled.
+
+ gccheckmark: setting gccheckmark=1 enables verification of the
+ garbage collector's concurrent mark phase by performing a
+ second mark pass while the world is stopped. If the second
+ pass finds a reachable object that was not found by concurrent
+ mark, the garbage collector will panic.
+
+ gcpacertrace: setting gcpacertrace=1 causes the garbage collector to
+ print information about the internal state of the concurrent pacer.
+
+ gcshrinkstackoff: setting gcshrinkstackoff=1 disables moving goroutines
+ onto smaller stacks. In this mode, a goroutine's stack can only grow.
+
+ gcstackbarrieroff: setting gcstackbarrieroff=1 disables the use of stack barriers
+ that allow the garbage collector to avoid repeating a stack scan during the
+ mark termination phase.
+
+ gcstackbarrierall: setting gcstackbarrierall=1 installs stack barriers
+ in every stack frame, rather than in exponentially-spaced frames.
+
+ gcstoptheworld: setting gcstoptheworld=1 disables concurrent garbage collection,
+ making every garbage collection a stop-the-world event. Setting gcstoptheworld=2
+ also disables concurrent sweeping after the garbage collection finishes.
+
+ gctrace: setting gctrace=1 causes the garbage collector to emit a single line to standard
+ error at each collection, summarizing the amount of memory collected and the
+ length of the pause. Setting gctrace=2 emits the same summary but also
+ repeats each collection. The format of this line is subject to change.
+ Currently, it is:
+ gc # @#s #%: #+#+# ms clock, #+#/#/#+# ms cpu, #->#-># MB, # MB goal, # P
+ where the fields are as follows:
+ gc # the GC number, incremented at each GC
+ @#s time in seconds since program start
+ #% percentage of time spent in GC since program start
+ #+...+# wall-clock/CPU times for the phases of the GC
+ #->#-># MB heap size at GC start, at GC end, and live heap
+ # MB goal goal heap size
+ # P number of processors used
+ The phases are stop-the-world (STW) sweep termination, concurrent
+ mark and scan, and STW mark termination. The CPU times
+ for mark/scan are broken down in to assist time (GC performed in
+ line with allocation), background GC time, and idle GC time.
+ If the line ends with "(forced)", this GC was forced by a
+ runtime.GC() call and all phases are STW.
+
+ memprofilerate: setting memprofilerate=X will update the value of runtime.MemProfileRate.
+ When set to 0 memory profiling is disabled. Refer to the description of
+ MemProfileRate for the default value.
+
+ invalidptr: defaults to invalidptr=1, causing the garbage collector and stack
+ copier to crash the program if an invalid pointer value (for example, 1)
+ is found in a pointer-typed location. Setting invalidptr=0 disables this check.
+ This should only be used as a temporary workaround to diagnose buggy code.
+ The real fix is to not store integers in pointer-typed locations.
+
+ sbrk: setting sbrk=1 replaces the memory allocator and garbage collector
+ with a trivial allocator that obtains memory from the operating system and
+ never reclaims any memory.
+
+ scavenge: scavenge=1 enables debugging mode of heap scavenger.
+
+ scheddetail: setting schedtrace=X and scheddetail=1 causes the scheduler to emit
+ detailed multiline info every X milliseconds, describing state of the scheduler,
+ processors, threads and goroutines.
+
+ schedtrace: setting schedtrace=X causes the scheduler to emit a single line to standard
+ error every X milliseconds, summarizing the scheduler state.
+
+The net and net/http packages also refer to debugging variables in GODEBUG.
+See the documentation for those packages for details.
+
+The GOMAXPROCS variable limits the number of operating system threads that
+can execute user-level Go code simultaneously. There is no limit to the number of threads
+that can be blocked in system calls on behalf of Go code; those do not count against
+the GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes
+the limit.
+
+The GOTRACEBACK variable controls the amount of output generated when a Go
+program fails due to an unrecovered panic or an unexpected runtime condition.
+By default, a failure prints a stack trace for the current goroutine,
+eliding functions internal to the run-time system, and then exits with exit code 2.
+The failure prints stack traces for all goroutines if there is no current goroutine
+or the failure is internal to the run-time.
+GOTRACEBACK=none omits the goroutine stack traces entirely.
+GOTRACEBACK=single (the default) behaves as described above.
+GOTRACEBACK=all adds stack traces for all user-created goroutines.
+GOTRACEBACK=system is like ``all'' but adds stack frames for run-time functions
+and shows goroutines created internally by the run-time.
+GOTRACEBACK=crash is like ``system'' but crashes in an operating system-specific
+manner instead of exiting. For example, on Unix systems, the crash raises
+SIGABRT to trigger a core dump.
+For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for
+none, all, and system, respectively.
+The runtime/debug package's SetTraceback function allows increasing the
+amount of output at run time, but it cannot reduce the amount below that
+specified by the environment variable.
+See https://golang.org/pkg/runtime/debug/#SetTraceback.
+
+The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete
+the set of Go environment variables. They influence the building of Go programs
+(see https://golang.org/cmd/go and https://golang.org/pkg/go/build).
+GOARCH, GOOS, and GOROOT are recorded at compile time and made available by
+constants or functions in this package, but they do not influence the execution
+of the run-time system.
+*/
+package runtime
+
+import "runtime/internal/sys"
+
+// Caller reports file and line number information about function invocations on
+// the calling goroutine's stack. The argument skip is the number of stack frames
+// to ascend, with 0 identifying the caller of Caller. (For historical reasons the
+// meaning of skip differs between Caller and Callers.) The return values report the
+// program counter, file name, and line number within the file of the corresponding
+// call. The boolean ok is false if it was not possible to recover the information.
+func Caller(skip int) (pc uintptr, file string, line int, ok bool) {
+ // Ask for two PCs: the one we were asked for
+ // and what it called, so that we can see if it
+ // "called" sigpanic.
+ var rpc [2]uintptr
+ if callers(1+skip-1, rpc[:]) < 2 {
+ return
+ }
+ f := findfunc(rpc[1])
+ if f == nil {
+ // TODO(rsc): Probably a bug?
+ // The C version said "have retpc at least"
+ // but actually returned pc=0.
+ ok = true
+ return
+ }
+ pc = rpc[1]
+ xpc := pc
+ g := findfunc(rpc[0])
+ // All architectures turn faults into apparent calls to sigpanic.
+ // If we see a call to sigpanic, we do not back up the PC to find
+ // the line number of the call instruction, because there is no call.
+ if xpc > f.entry && (g == nil || g.entry != funcPC(sigpanic)) {
+ xpc--
+ }
+ file, line32 := funcline(f, xpc)
+ line = int(line32)
+ ok = true
+ return
+}
+
+// Callers fills the slice pc with the return program counters of function invocations
+// on the calling goroutine's stack. The argument skip is the number of stack frames
+// to skip before recording in pc, with 0 identifying the frame for Callers itself and
+// 1 identifying the caller of Callers.
+// It returns the number of entries written to pc.
+//
+// Note that since each slice entry pc[i] is a return program counter,
+// looking up the file and line for pc[i] (for example, using (*Func).FileLine)
+// will return the file and line number of the instruction immediately
+// following the call.
+// To look up the file and line number of the call itself, use pc[i]-1.
+// As an exception to this rule, if pc[i-1] corresponds to the function
+// runtime.sigpanic, then pc[i] is the program counter of a faulting
+// instruction and should be used without any subtraction.
+func Callers(skip int, pc []uintptr) int {
+ // runtime.callers uses pc.array==nil as a signal
+ // to print a stack trace. Pick off 0-length pc here
+ // so that we don't let a nil pc slice get to it.
+ if len(pc) == 0 {
+ return 0
+ }
+ return callers(skip, pc)
+}
+
+// GOROOT returns the root of the Go tree.
+// It uses the GOROOT environment variable, if set,
+// or else the root used during the Go build.
+func GOROOT() string {
+ s := gogetenv("GOROOT")
+ if s != "" {
+ return s
+ }
+ return sys.DefaultGoroot
+}
+
+// Version returns the Go tree's version string.
+// It is either the commit hash and date at the time of the build or,
+// when possible, a release tag like "go1.3".
+func Version() string {
+ return sys.TheVersion
+}
+
+// GOOS is the running program's operating system target:
+// one of darwin, freebsd, linux, and so on.
+const GOOS string = sys.TheGoos
+
+// GOARCH is the running program's architecture target:
+// 386, amd64, or arm.
+const GOARCH string = sys.TheGoarch
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/gcinfo_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/gcinfo_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/gcinfo_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/gcinfo_test.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,202 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime_test
+
+import (
+ "bytes"
+ "runtime"
+ "testing"
+)
+
+const (
+ typeScalar = 0
+ typePointer = 1
+)
+
+// TestGCInfo tests that various objects in heap, data and bss receive correct GC pointer type info.
+func TestGCInfo(t *testing.T) {
+ verifyGCInfo(t, "bss Ptr", &bssPtr, infoPtr)
+ verifyGCInfo(t, "bss ScalarPtr", &bssScalarPtr, infoScalarPtr)
+ verifyGCInfo(t, "bss PtrScalar", &bssPtrScalar, infoPtrScalar)
+ verifyGCInfo(t, "bss BigStruct", &bssBigStruct, infoBigStruct())
+ verifyGCInfo(t, "bss string", &bssString, infoString)
+ verifyGCInfo(t, "bss slice", &bssSlice, infoSlice)
+ verifyGCInfo(t, "bss eface", &bssEface, infoEface)
+ verifyGCInfo(t, "bss iface", &bssIface, infoIface)
+
+ verifyGCInfo(t, "data Ptr", &dataPtr, infoPtr)
+ verifyGCInfo(t, "data ScalarPtr", &dataScalarPtr, infoScalarPtr)
+ verifyGCInfo(t, "data PtrScalar", &dataPtrScalar, infoPtrScalar)
+ verifyGCInfo(t, "data BigStruct", &dataBigStruct, infoBigStruct())
+ verifyGCInfo(t, "data string", &dataString, infoString)
+ verifyGCInfo(t, "data slice", &dataSlice, infoSlice)
+ verifyGCInfo(t, "data eface", &dataEface, infoEface)
+ verifyGCInfo(t, "data iface", &dataIface, infoIface)
+
+ verifyGCInfo(t, "stack Ptr", new(Ptr), infoPtr)
+ verifyGCInfo(t, "stack ScalarPtr", new(ScalarPtr), infoScalarPtr)
+ verifyGCInfo(t, "stack PtrScalar", new(PtrScalar), infoPtrScalar)
+ verifyGCInfo(t, "stack BigStruct", new(BigStruct), infoBigStruct())
+ verifyGCInfo(t, "stack string", new(string), infoString)
+ verifyGCInfo(t, "stack slice", new([]string), infoSlice)
+ verifyGCInfo(t, "stack eface", new(interface{}), infoEface)
+ verifyGCInfo(t, "stack iface", new(Iface), infoIface)
+
+ for i := 0; i < 10; i++ {
+ verifyGCInfo(t, "heap Ptr", escape(new(Ptr)), trimDead(padDead(infoPtr)))
+ verifyGCInfo(t, "heap PtrSlice", escape(&make([]*byte, 10)[0]), trimDead(infoPtr10))
+ verifyGCInfo(t, "heap ScalarPtr", escape(new(ScalarPtr)), trimDead(infoScalarPtr))
+ verifyGCInfo(t, "heap ScalarPtrSlice", escape(&make([]ScalarPtr, 4)[0]), trimDead(infoScalarPtr4))
+ verifyGCInfo(t, "heap PtrScalar", escape(new(PtrScalar)), trimDead(infoPtrScalar))
+ verifyGCInfo(t, "heap BigStruct", escape(new(BigStruct)), trimDead(infoBigStruct()))
+ verifyGCInfo(t, "heap string", escape(new(string)), trimDead(infoString))
+ verifyGCInfo(t, "heap eface", escape(new(interface{})), trimDead(infoEface))
+ verifyGCInfo(t, "heap iface", escape(new(Iface)), trimDead(infoIface))
+ }
+}
+
+func verifyGCInfo(t *testing.T, name string, p interface{}, mask0 []byte) {
+ mask := runtime.GCMask(p)
+ if bytes.Compare(mask, mask0) != 0 {
+ t.Errorf("bad GC program for %v:\nwant %+v\ngot %+v", name, mask0, mask)
+ return
+ }
+}
+
+func padDead(mask []byte) []byte {
+ // Because the dead bit isn't encoded until the third word,
+ // and because on 32-bit systems a one-word allocation
+ // uses a two-word block, the pointer info for a one-word
+ // object needs to be expanded to include an extra scalar
+ // on 32-bit systems to match the heap bitmap.
+ if runtime.PtrSize == 4 && len(mask) == 1 {
+ return []byte{mask[0], 0}
+ }
+ return mask
+}
+
+func trimDead(mask []byte) []byte {
+ for len(mask) > 2 && mask[len(mask)-1] == typeScalar {
+ mask = mask[:len(mask)-1]
+ }
+ return mask
+}
+
+var gcinfoSink interface{}
+
+func escape(p interface{}) interface{} {
+ gcinfoSink = p
+ return p
+}
+
+var infoPtr = []byte{typePointer}
+
+type Ptr struct {
+ *byte
+}
+
+var infoPtr10 = []byte{typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer, typePointer}
+
+type ScalarPtr struct {
+ q int
+ w *int
+ e int
+ r *int
+ t int
+ y *int
+}
+
+var infoScalarPtr = []byte{typeScalar, typePointer, typeScalar, typePointer, typeScalar, typePointer}
+
+var infoScalarPtr4 = append(append(append(append([]byte(nil), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...), infoScalarPtr...)
+
+type PtrScalar struct {
+ q *int
+ w int
+ e *int
+ r int
+ t *int
+ y int
+}
+
+var infoPtrScalar = []byte{typePointer, typeScalar, typePointer, typeScalar, typePointer, typeScalar}
+
+type BigStruct struct {
+ q *int
+ w byte
+ e [17]byte
+ r []byte
+ t int
+ y uint16
+ u uint64
+ i string
+}
+
+func infoBigStruct() []byte {
+ switch runtime.GOARCH {
+ case "386", "arm":
+ return []byte{
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
+ }
+ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le":
+ return []byte{
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
+ }
+ case "amd64p32":
+ return []byte{
+ typePointer, // q *int
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
+ typePointer, typeScalar, typeScalar, // r []byte
+ typeScalar, typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
+ typePointer, typeScalar, // i string
+ }
+ default:
+ panic("unknown arch")
+ }
+}
+
+type Iface interface {
+ f()
+}
+
+type IfaceImpl int
+
+func (IfaceImpl) f() {
+}
+
+var (
+ // BSS
+ bssPtr Ptr
+ bssScalarPtr ScalarPtr
+ bssPtrScalar PtrScalar
+ bssBigStruct BigStruct
+ bssString string
+ bssSlice []string
+ bssEface interface{}
+ bssIface Iface
+
+ // DATA
+ dataPtr = Ptr{new(byte)}
+ dataScalarPtr = ScalarPtr{q: 1}
+ dataPtrScalar = PtrScalar{w: 1}
+ dataBigStruct = BigStruct{w: 1}
+ dataString = "foo"
+ dataSlice = []string{"foo"}
+ dataEface interface{} = 42
+ dataIface Iface = IfaceImpl(42)
+
+ infoString = []byte{typePointer, typeScalar}
+ infoSlice = []byte{typePointer, typeScalar, typeScalar}
+ infoEface = []byte{typePointer, typePointer}
+ infoIface = []byte{typePointer, typePointer}
+)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/hash64.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/hash64.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/hash64.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/hash64.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hashing algorithm inspired by
+// xxhash: https://code.google.com/p/xxhash/
+// cityhash: https://code.google.com/p/cityhash/
+
+// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le
+
+package runtime
+
+import "unsafe"
+
+const (
+ // Constants for multiplication: four random odd 64-bit numbers.
+ m1 = 16877499708836156737
+ m2 = 2820277070424839065
+ m3 = 9497967016996688599
+ m4 = 15839092249703872147
+)
+
+func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
+ if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
+ return aeshash(p, seed, s)
+ }
+ h := uint64(seed + s*hashkey[0])
+tail:
+ switch {
+ case s == 0:
+ case s < 4:
+ h ^= uint64(*(*byte)(p))
+ h ^= uint64(*(*byte)(add(p, s>>1))) << 8
+ h ^= uint64(*(*byte)(add(p, s-1))) << 16
+ h = rotl_31(h*m1) * m2
+ case s <= 8:
+ h ^= uint64(readUnaligned32(p))
+ h ^= uint64(readUnaligned32(add(p, s-4))) << 32
+ h = rotl_31(h*m1) * m2
+ case s <= 16:
+ h ^= readUnaligned64(p)
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-8))
+ h = rotl_31(h*m1) * m2
+ case s <= 32:
+ h ^= readUnaligned64(p)
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, 8))
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-16))
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-8))
+ h = rotl_31(h*m1) * m2
+ default:
+ v1 := h
+ v2 := uint64(seed * hashkey[1])
+ v3 := uint64(seed * hashkey[2])
+ v4 := uint64(seed * hashkey[3])
+ for s >= 32 {
+ v1 ^= readUnaligned64(p)
+ v1 = rotl_31(v1*m1) * m2
+ p = add(p, 8)
+ v2 ^= readUnaligned64(p)
+ v2 = rotl_31(v2*m2) * m3
+ p = add(p, 8)
+ v3 ^= readUnaligned64(p)
+ v3 = rotl_31(v3*m3) * m4
+ p = add(p, 8)
+ v4 ^= readUnaligned64(p)
+ v4 = rotl_31(v4*m4) * m1
+ p = add(p, 8)
+ s -= 32
+ }
+ h = v1 ^ v2 ^ v3 ^ v4
+ goto tail
+ }
+
+ h ^= h >> 29
+ h *= m3
+ h ^= h >> 32
+ return uintptr(h)
+}
+
+// Note: in order to get the compiler to issue rotl instructions, we
+// need to constant fold the shift amount by hand.
+// TODO: convince the compiler to issue rotl instructions after inlining.
+func rotl_31(x uint64) uint64 {
+ return (x << 31) | (x >> (64 - 31))
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/noasm.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/noasm.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/noasm.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/noasm.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,62 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Routines that are implemented in assembly in asm_{amd64,386,arm,arm64,ppc64x}.s
+
+// +build mips64 mips64le
+
+package runtime
+
+import _ "unsafe" // for go:linkname
+
+func cmpstring(s1, s2 string) int {
+ l := len(s1)
+ if len(s2) < l {
+ l = len(s2)
+ }
+ for i := 0; i < l; i++ {
+ c1, c2 := s1[i], s2[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ }
+ if len(s1) < len(s2) {
+ return -1
+ }
+ if len(s1) > len(s2) {
+ return +1
+ }
+ return 0
+}
+
+//go:linkname bytes_Compare bytes.Compare
+func bytes_Compare(s1, s2 []byte) int {
+ l := len(s1)
+ if len(s2) < l {
+ l = len(s2)
+ }
+ if l == 0 || &s1[0] == &s2[0] {
+ goto samebytes
+ }
+ for i := 0; i < l; i++ {
+ c1, c2 := s1[i], s2[i]
+ if c1 < c2 {
+ return -1
+ }
+ if c1 > c2 {
+ return +1
+ }
+ }
+samebytes:
+ if len(s1) < len(s2) {
+ return -1
+ }
+ if len(s1) > len(s2) {
+ return +1
+ }
+ return 0
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/os1_linux_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/os1_linux_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/os1_linux_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/os1_linux_generic.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !mips64
+// +build !mips64le
+// +build linux
+
+package runtime
+
+var sigset_all = sigset{^uint32(0), ^uint32(0)}
+
+func sigaddset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] |= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigdelset(mask *sigset, i int) {
+ (*mask)[(i-1)/32] &^= 1 << ((uint32(i) - 1) & 31)
+}
+
+func sigfillset(mask *uint64) {
+ *mask = ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ copy((*mask)[:], m[:])
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/os2_linux_generic.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/os2_linux_generic.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/os2_linux_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/os2_linux_generic.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !mips64
+// +build !mips64le
+// +build linux
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 0
+ _SIG_UNBLOCK = 1
+ _SIG_SETMASK = 2
+ _RLIMIT_AS = 9
+)
+
+// It's hard to tease out exactly how big a Sigset is, but
+// rt_sigprocmask crashes if we get it wrong, so if binaries
+// are running, this is right.
+type sigset [2]uint32
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/runtime-gdb_test.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/runtime-gdb_test.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/runtime-gdb_test.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/runtime-gdb_test.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,171 @@
+package runtime_test
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "testing"
+)
+
+func checkGdbPython(t *testing.T) {
+ cmd := exec.Command("gdb", "-nx", "-q", "--batch", "-iex", "python import sys; print('go gdb python support')")
+ out, err := cmd.CombinedOutput()
+
+ if err != nil {
+ t.Skipf("skipping due to issue running gdb: %v", err)
+ }
+ if string(out) != "go gdb python support\n" {
+ t.Skipf("skipping due to lack of python gdb support: %s", out)
+ }
+
+ // Issue 11214 reports various failures with older versions of gdb.
+ out, err = exec.Command("gdb", "--version").CombinedOutput()
+ re := regexp.MustCompile(`([0-9]+)\.([0-9]+)`)
+ matches := re.FindSubmatch(out)
+ if len(matches) < 3 {
+ t.Skipf("skipping: can't determine gdb version from\n%s\n", out)
+ }
+ major, err1 := strconv.Atoi(string(matches[1]))
+ minor, err2 := strconv.Atoi(string(matches[2]))
+ if err1 != nil || err2 != nil {
+ t.Skipf("skipping: can't determine gdb version: %v, %v", err1, err2)
+ }
+ if major < 7 || (major == 7 && minor < 7) {
+ t.Skipf("skipping: gdb version %d.%d too old", major, minor)
+ }
+ t.Logf("gdb version %d.%d", major, minor)
+}
+
+const helloSource = `
+package main
+import "fmt"
+func main() {
+ mapvar := make(map[string]string,5)
+ mapvar["abc"] = "def"
+ mapvar["ghi"] = "jkl"
+ strvar := "abc"
+ ptrvar := &strvar
+ fmt.Println("hi") // line 10
+ _ = ptrvar
+}
+`
+
+func TestGdbPython(t *testing.T) {
+ if runtime.GOOS == "darwin" {
+ t.Skip("gdb does not work on darwin")
+ }
+ if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final {
+ t.Skip("gdb test can fail with GOROOT_FINAL pending")
+ }
+
+ checkGdbPython(t)
+
+ dir, err := ioutil.TempDir("", "go-build")
+ if err != nil {
+ t.Fatalf("failed to create temp directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "main.go")
+ err = ioutil.WriteFile(src, []byte(helloSource), 0644)
+ if err != nil {
+ t.Fatalf("failed to create file: %v", err)
+ }
+
+ cmd := exec.Command("go", "build", "-o", "a.exe")
+ cmd.Dir = dir
+ out, err := testEnv(cmd).CombinedOutput()
+ if err != nil {
+ t.Fatalf("building source %v\n%s", err, out)
+ }
+
+ args := []string{"-nx", "-q", "--batch", "-iex",
+ fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
+ "-ex", "info auto-load python-scripts",
+ "-ex", "br main.go:10",
+ "-ex", "run",
+ "-ex", "echo BEGIN info goroutines\n",
+ "-ex", "info goroutines",
+ "-ex", "echo END\n",
+ "-ex", "echo BEGIN print mapvar\n",
+ "-ex", "print mapvar",
+ "-ex", "echo END\n",
+ "-ex", "echo BEGIN print strvar\n",
+ "-ex", "print strvar",
+ "-ex", "echo END\n",
+ "-ex", "echo BEGIN print ptrvar\n",
+ "-ex", "print ptrvar",
+ "-ex", "echo END\n"}
+
+ // without framepointer, gdb cannot backtrace our non-standard
+ // stack frames on RISC architectures.
+ canBackTrace := false
+ switch runtime.GOARCH {
+ case "amd64", "386", "ppc64", "ppc64le", "arm", "arm64", "mips64", "mips64le":
+ canBackTrace = true
+ args = append(args,
+ "-ex", "echo BEGIN goroutine 2 bt\n",
+ "-ex", "goroutine 2 bt",
+ "-ex", "echo END\n")
+ }
+
+ args = append(args, filepath.Join(dir, "a.exe"))
+ got, _ := exec.Command("gdb", args...).CombinedOutput()
+
+ firstLine := bytes.SplitN(got, []byte("\n"), 2)[0]
+ if string(firstLine) != "Loading Go Runtime support." {
+ // This can happen when using all.bash with
+ // GOROOT_FINAL set, because the tests are run before
+ // the final installation of the files.
+ cmd := exec.Command("go", "env", "GOROOT")
+ cmd.Env = []string{}
+ out, err := cmd.CombinedOutput()
+ if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) {
+ t.Skipf("skipping because GOROOT=%s does not exist", runtime.GOROOT())
+ }
+
+ _, file, _, _ := runtime.Caller(1)
+
+ t.Logf("package testing source file: %s", file)
+ t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got)
+ }
+
+ // Extract named BEGIN...END blocks from output
+ partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`)
+ blocks := map[string]string{}
+ for _, subs := range partRe.FindAllSubmatch(got, -1) {
+ blocks[string(subs[1])] = string(subs[2])
+ }
+
+ infoGoroutinesRe := regexp.MustCompile(`\*\s+\d+\s+running\s+`)
+ if bl := blocks["info goroutines"]; !infoGoroutinesRe.MatchString(bl) {
+ t.Fatalf("info goroutines failed: %s", bl)
+ }
+
+ printMapvarRe := regexp.MustCompile(`\Q = map[string]string = {["abc"] = "def", ["ghi"] = "jkl"}\E$`)
+ if bl := blocks["print mapvar"]; !printMapvarRe.MatchString(bl) {
+ t.Fatalf("print mapvar failed: %s", bl)
+ }
+
+ strVarRe := regexp.MustCompile(`\Q = "abc"\E$`)
+ if bl := blocks["print strvar"]; !strVarRe.MatchString(bl) {
+ t.Fatalf("print strvar failed: %s", bl)
+ }
+
+ if bl := blocks["print ptrvar"]; !strVarRe.MatchString(bl) {
+ t.Fatalf("print ptrvar failed: %s", bl)
+ }
+
+ btGoroutineRe := regexp.MustCompile(`^#0\s+runtime.+at`)
+ if bl := blocks["goroutine 2 bt"]; canBackTrace && !btGoroutineRe.MatchString(bl) {
+ t.Fatalf("goroutine 2 bt failed: %s", bl)
+ } else if !canBackTrace {
+ t.Logf("gdb cannot backtrace for GOARCH=%s, skipped goroutine backtrace test", runtime.GOARCH)
+ }
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/unaligned1.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/unaligned1.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/runtime/unaligned1.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/runtime/unaligned1.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32 arm64
+
+package runtime
+
+import "unsafe"
+
+func readUnaligned32(p unsafe.Pointer) uint32 {
+ return *(*uint32)(p)
+}
+
+func readUnaligned64(p unsafe.Pointer) uint64 {
+ return *(*uint64)(p)
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/exec_linux.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/exec_linux.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/exec_linux.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/exec_linux.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,422 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package syscall
+
+import (
+ "unsafe"
+)
+
+// SysProcIDMap holds Container ID to Host ID mappings used for User Namespaces in Linux.
+// See user_namespaces(7).
+type SysProcIDMap struct {
+ ContainerID int // Container ID.
+ HostID int // Host ID.
+ Size int // Size.
+}
+
+type SysProcAttr struct {
+ Chroot string // Chroot.
+ Credential *Credential // Credential.
+ Ptrace bool // Enable tracing.
+ Setsid bool // Create session.
+ Setpgid bool // Set process group ID to Pgid, or, if Pgid == 0, to new pid.
+ Setctty bool // Set controlling terminal to fd Ctty (only meaningful if Setsid is set)
+ Noctty bool // Detach fd 0 from controlling terminal
+ Ctty int // Controlling TTY fd
+ Foreground bool // Place child's process group in foreground. (Implies Setpgid. Uses Ctty as fd of controlling TTY)
+ Pgid int // Child's process group ID if Setpgid.
+ Pdeathsig Signal // Signal that the process will get when its parent dies (Linux only)
+ Cloneflags uintptr // Flags for clone calls (Linux only)
+ UidMappings []SysProcIDMap // User ID mappings for user namespaces.
+ GidMappings []SysProcIDMap // Group ID mappings for user namespaces.
+ // GidMappingsEnableSetgroups enabling setgroups syscall.
+ // If false, then setgroups syscall will be disabled for the child process.
+ // This parameter is no-op if GidMappings == nil. Otherwise for unprivileged
+ // users this should be set to false for mappings work.
+ GidMappingsEnableSetgroups bool
+}
+
+// Implemented in runtime package.
+func runtime_BeforeFork()
+func runtime_AfterFork()
+
+// Fork, dup fd onto 0..len(fd), and exec(argv0, argvv, envv) in child.
+// If a dup or exec fails, write the errno error to pipe.
+// (Pipe is close-on-exec so if exec succeeds, it will be closed.)
+// In the child, this function must not acquire any locks, because
+// they might have been locked at the time of the fork. This means
+// no rescheduling, no malloc calls, and no new stack segments.
+// For the same reason compiler does not race instrument it.
+// The calls to RawSyscall are okay because they are assembly
+// functions that do not grow the stack.
+//go:norace
+func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) {
+ // Declare all variables at top in case any
+ // declarations require heap allocation (e.g., err1).
+ var (
+ r1 uintptr
+ err1 Errno
+ err2 Errno
+ nextfd int
+ i int
+ p [2]int
+ )
+
+ // Record parent PID so child can test if it has died.
+ ppid, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+
+ // Guard against side effects of shuffling fds below.
+ // Make sure that nextfd is beyond any currently open files so
+ // that we can't run the risk of overwriting any of them.
+ fd := make([]int, len(attr.Files))
+ nextfd = len(attr.Files)
+ for i, ufd := range attr.Files {
+ if nextfd < int(ufd) {
+ nextfd = int(ufd)
+ }
+ fd[i] = int(ufd)
+ }
+ nextfd++
+
+ // Allocate another pipe for parent to child communication for
+ // synchronizing writing of User ID/Group ID mappings.
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ if err := forkExecPipe(p[:]); err != nil {
+ return 0, err.(Errno)
+ }
+ }
+
+ // About to call fork.
+ // No more allocation or calls of non-assembly functions.
+ runtime_BeforeFork()
+ r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
+ if err1 != 0 {
+ runtime_AfterFork()
+ return 0, err1
+ }
+
+ if r1 != 0 {
+ // parent; return PID
+ runtime_AfterFork()
+ pid = int(r1)
+
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ Close(p[0])
+ err := writeUidGidMappings(pid, sys)
+ if err != nil {
+ err2 = err.(Errno)
+ }
+ RawSyscall(SYS_WRITE, uintptr(p[1]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
+ Close(p[1])
+ }
+
+ return pid, 0
+ }
+
+ // Fork succeeded, now in child.
+
+ // Wait for User ID/Group ID mappings to be written.
+ if sys.UidMappings != nil || sys.GidMappings != nil {
+ if _, _, err1 = RawSyscall(SYS_CLOSE, uintptr(p[1]), 0, 0); err1 != 0 {
+ goto childerror
+ }
+ r1, _, err1 = RawSyscall(SYS_READ, uintptr(p[0]), uintptr(unsafe.Pointer(&err2)), unsafe.Sizeof(err2))
+ if err1 != 0 {
+ goto childerror
+ }
+ if r1 != unsafe.Sizeof(err2) {
+ err1 = EINVAL
+ goto childerror
+ }
+ if err2 != 0 {
+ err1 = err2
+ goto childerror
+ }
+ }
+
+ // Enable tracing if requested.
+ if sys.Ptrace {
+ _, _, err1 = RawSyscall(SYS_PTRACE, uintptr(PTRACE_TRACEME), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Session ID
+ if sys.Setsid {
+ _, _, err1 = RawSyscall(SYS_SETSID, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set process group
+ if sys.Setpgid || sys.Foreground {
+ // Place child in process group.
+ _, _, err1 = RawSyscall(SYS_SETPGID, 0, uintptr(sys.Pgid), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ if sys.Foreground {
+ pgrp := int32(sys.Pgid)
+ if pgrp == 0 {
+ r1, _, err1 = RawSyscall(SYS_GETPID, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+
+ pgrp = int32(r1)
+ }
+
+ // Place process group in foreground.
+ _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSPGRP), uintptr(unsafe.Pointer(&pgrp)))
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Chroot
+ if chroot != nil {
+ _, _, err1 = RawSyscall(SYS_CHROOT, uintptr(unsafe.Pointer(chroot)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // User and groups
+ if cred := sys.Credential; cred != nil {
+ ngroups := uintptr(len(cred.Groups))
+ if ngroups > 0 {
+ groups := unsafe.Pointer(&cred.Groups[0])
+ _, _, err1 = RawSyscall(SYS_SETGROUPS, ngroups, uintptr(groups), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ _, _, err1 = RawSyscall(SYS_SETGID, uintptr(cred.Gid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ _, _, err1 = RawSyscall(SYS_SETUID, uintptr(cred.Uid), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Chdir
+ if dir != nil {
+ _, _, err1 = RawSyscall(SYS_CHDIR, uintptr(unsafe.Pointer(dir)), 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Parent death signal
+ if sys.Pdeathsig != 0 {
+ _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_PDEATHSIG, uintptr(sys.Pdeathsig), 0, 0, 0, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+
+ // Signal self if parent is already dead. This might cause a
+ // duplicate signal in rare cases, but it won't matter when
+ // using SIGKILL.
+ r1, _, _ = RawSyscall(SYS_GETPPID, 0, 0, 0)
+ if r1 != ppid {
+ pid, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ _, _, err1 := RawSyscall(SYS_KILL, pid, uintptr(sys.Pdeathsig), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+ }
+
+ // Pass 1: look for fd[i] < i and move those up above len(fd)
+ // so that pass 2 won't stomp on an fd it needs later.
+ if pipe < nextfd {
+ _, _, err1 = RawSyscall(_SYS_dup, uintptr(pipe), uintptr(nextfd), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ pipe = nextfd
+ nextfd++
+ }
+ for i = 0; i < len(fd); i++ {
+ if fd[i] >= 0 && fd[i] < int(i) {
+ _, _, err1 = RawSyscall(_SYS_dup, uintptr(fd[i]), uintptr(nextfd), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ RawSyscall(SYS_FCNTL, uintptr(nextfd), F_SETFD, FD_CLOEXEC)
+ fd[i] = nextfd
+ nextfd++
+ if nextfd == pipe { // don't stomp on pipe
+ nextfd++
+ }
+ }
+ }
+
+ // Pass 2: dup fd[i] down onto i.
+ for i = 0; i < len(fd); i++ {
+ if fd[i] == -1 {
+ RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
+ continue
+ }
+ if fd[i] == int(i) {
+ // dup2(i, i) won't clear close-on-exec flag on Linux,
+ // probably not elsewhere either.
+ _, _, err1 = RawSyscall(SYS_FCNTL, uintptr(fd[i]), F_SETFD, 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ continue
+ }
+ // The new fd is created NOT close-on-exec,
+ // which is exactly what we want.
+ _, _, err1 = RawSyscall(_SYS_dup, uintptr(fd[i]), uintptr(i), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // By convention, we don't close-on-exec the fds we are
+ // started with, so if len(fd) < 3, close 0, 1, 2 as needed.
+ // Programs that know they inherit fds >= 3 will need
+ // to set them close-on-exec.
+ for i = len(fd); i < 3; i++ {
+ RawSyscall(SYS_CLOSE, uintptr(i), 0, 0)
+ }
+
+ // Detach fd 0 from tty
+ if sys.Noctty {
+ _, _, err1 = RawSyscall(SYS_IOCTL, 0, uintptr(TIOCNOTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Set the controlling TTY to Ctty
+ if sys.Setctty {
+ _, _, err1 = RawSyscall(SYS_IOCTL, uintptr(sys.Ctty), uintptr(TIOCSCTTY), 0)
+ if err1 != 0 {
+ goto childerror
+ }
+ }
+
+ // Time to exec.
+ _, _, err1 = RawSyscall(SYS_EXECVE,
+ uintptr(unsafe.Pointer(argv0)),
+ uintptr(unsafe.Pointer(&argv[0])),
+ uintptr(unsafe.Pointer(&envv[0])))
+
+childerror:
+ // send error code on pipe
+ RawSyscall(SYS_WRITE, uintptr(pipe), uintptr(unsafe.Pointer(&err1)), unsafe.Sizeof(err1))
+ for {
+ RawSyscall(SYS_EXIT, 253, 0, 0)
+ }
+}
+
+// Try to open a pipe with O_CLOEXEC set on both file descriptors.
+func forkExecPipe(p []int) (err error) {
+ err = Pipe2(p, O_CLOEXEC)
+ // pipe2 was added in 2.6.27 and our minimum requirement is 2.6.23, so it
+ // might not be implemented.
+ if err == ENOSYS {
+ if err = Pipe(p); err != nil {
+ return
+ }
+ if _, err = fcntl(p[0], F_SETFD, FD_CLOEXEC); err != nil {
+ return
+ }
+ _, err = fcntl(p[1], F_SETFD, FD_CLOEXEC)
+ }
+ return
+}
+
+// writeIDMappings writes the user namespace User ID or Group ID mappings to the specified path.
+func writeIDMappings(path string, idMap []SysProcIDMap) error {
+ fd, err := Open(path, O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ data := ""
+ for _, im := range idMap {
+ data = data + itoa(im.ContainerID) + " " + itoa(im.HostID) + " " + itoa(im.Size) + "\n"
+ }
+
+ bytes, err := ByteSliceFromString(data)
+ if err != nil {
+ Close(fd)
+ return err
+ }
+
+ if _, err := Write(fd, bytes); err != nil {
+ Close(fd)
+ return err
+ }
+
+ if err := Close(fd); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// writeSetgroups writes to /proc/PID/setgroups "deny" if enable is false
+// and "allow" if enable is true.
+// This is needed since kernel 3.19, because you can't write gid_map without
+// disabling setgroups() system call.
+func writeSetgroups(pid int, enable bool) error {
+ sgf := "/proc/" + itoa(pid) + "/setgroups"
+ fd, err := Open(sgf, O_RDWR, 0)
+ if err != nil {
+ return err
+ }
+
+ var data []byte
+ if enable {
+ data = []byte("allow")
+ } else {
+ data = []byte("deny")
+ }
+
+ if _, err := Write(fd, data); err != nil {
+ Close(fd)
+ return err
+ }
+
+ return Close(fd)
+}
+
+// writeUidGidMappings writes User ID and Group ID mappings for user namespaces
+// for a process and it is called from the parent process.
+func writeUidGidMappings(pid int, sys *SysProcAttr) error {
+ if sys.UidMappings != nil {
+ uidf := "/proc/" + itoa(pid) + "/uid_map"
+ if err := writeIDMappings(uidf, sys.UidMappings); err != nil {
+ return err
+ }
+ }
+
+ if sys.GidMappings != nil {
+ // If the kernel is too old to support /proc/PID/setgroups, writeSetGroups will return ENOENT; this is OK.
+ if err := writeSetgroups(pid, sys.GidMappingsEnableSetgroups); err != nil && err != ENOENT {
+ return err
+ }
+ gidf := "/proc/" + itoa(pid) + "/gid_map"
+ if err := writeIDMappings(gidf, sys.GidMappings); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/mkall.sh 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/mkall.sh
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/mkall.sh 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/mkall.sh 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,292 @@
+#!/usr/bin/env bash
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# The syscall package provides access to the raw system call
+# interface of the underlying operating system. Porting Go to
+# a new architecture/operating system combination requires
+# some manual effort, though there are tools that automate
+# much of the process. The auto-generated files have names
+# beginning with z.
+#
+# This script runs or (given -n) prints suggested commands to generate z files
+# for the current system. Running those commands is not automatic.
+# This script is documentation more than anything else.
+#
+# * asm_${GOOS}_${GOARCH}.s
+#
+# This hand-written assembly file implements system call dispatch.
+# There are three entry points:
+#
+# func Syscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+# func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr);
+# func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr);
+#
+# The first and second are the standard ones; they differ only in
+# how many arguments can be passed to the kernel.
+# The third is for low-level use by the ForkExec wrapper;
+# unlike the first two, it does not call into the scheduler to
+# let it know that a system call is running.
+#
+# * syscall_${GOOS}.go
+#
+# This hand-written Go file implements system calls that need
+# special handling and lists "//sys" comments giving prototypes
+# for ones that can be auto-generated. Mksyscall reads those
+# comments to generate the stubs.
+#
+# * syscall_${GOOS}_${GOARCH}.go
+#
+# Same as syscall_${GOOS}.go except that it contains code specific
+# to ${GOOS} on one particular architecture.
+#
+# * types_${GOOS}.c
+#
+# This hand-written C file includes standard C headers and then
+# creates typedef or enum names beginning with a dollar sign
+# (use of $ in variable names is a gcc extension). The hardest
+# part about preparing this file is figuring out which headers to
+# include and which symbols need to be #defined to get the
+# actual data structures that pass through to the kernel system calls.
+# Some C libraries present alternate versions for binary compatibility
+# and translate them on the way in and out of system calls, but
+# there is almost always a #define that can get the real ones.
+# See types_darwin.c and types_linux.c for examples.
+#
+# * zerror_${GOOS}_${GOARCH}.go
+#
+# This machine-generated file defines the system's error numbers,
+# error strings, and signal numbers. The generator is "mkerrors.sh".
+# Usually no arguments are needed, but mkerrors.sh will pass its
+# arguments on to godefs.
+#
+# * zsyscall_${GOOS}_${GOARCH}.go
+#
+# Generated by mksyscall.pl; see syscall_${GOOS}.go above.
+#
+# * zsysnum_${GOOS}_${GOARCH}.go
+#
+# Generated by mksysnum_${GOOS}.
+#
+# * ztypes_${GOOS}_${GOARCH}.go
+#
+# Generated by godefs; see types_${GOOS}.c above.
+
+GOOSARCH="${GOOS}_${GOARCH}"
+
+# defaults
+mksyscall="./mksyscall.pl"
+mkerrors="./mkerrors.sh"
+zerrors="zerrors_$GOOSARCH.go"
+mksysctl=""
+zsysctl="zsysctl_$GOOSARCH.go"
+mksysnum=
+mktypes=
+run="sh"
+
+case "$1" in
+-syscalls)
+ for i in zsyscall*go
+ do
+ sed 1q $i | sed 's;^// ;;' | sh > _$i && gofmt < _$i > $i
+ rm _$i
+ done
+ exit 0
+ ;;
+-n)
+ run="cat"
+ shift
+esac
+
+case "$#" in
+0)
+ ;;
+*)
+ echo 'usage: mkall.sh [-n]' 1>&2
+ exit 2
+esac
+
+GOOSARCH_in=syscall_$GOOSARCH.go
+case "$GOOSARCH" in
+_* | *_ | _)
+ echo 'undefined $GOOS_$GOARCH:' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+darwin_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+darwin_arm64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_darwin.pl /usr/include/sys/syscall.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+dragonfly_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -dragonfly"
+ mksysnum="curl -s 'http://gitweb.dragonflybsd.org/dragonfly.git/blob_plain/HEAD:/sys/kern/syscalls.master' | ./mksysnum_dragonfly.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+freebsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://svn.freebsd.org/base/stable/10/sys/kern/syscalls.master' | ./mksysnum_freebsd.pl"
+ # Let the type of C char be singed for making the bare syscall
+ # API consistent across over platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32"
+ mksysnum="./mksysnum_linux.pl /usr/include/asm/unistd_32.h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_amd64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd_64.h /usr/include/x86_64-linux-gnu/asm/unistd_64.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+ echo >&2 cannot find unistd_64.h
+ exit 1
+ fi
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -arm"
+ mksysnum="curl -s 'http://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/plain/arch/arm/include/uapi/asm/unistd.h' | ./mksysnum_linux.pl -"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_arm64)
+ unistd_h=$(ls -1 /usr/include/asm/unistd.h /usr/include/asm-generic/unistd.h 2>/dev/null | head -1)
+ if [ "$unistd_h" = "" ]; then
+ echo >&2 cannot find unistd_64.h
+ exit 1
+ fi
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ # Let the type of C char be singed for making the bare syscall
+ # API consistent across over platforms.
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs -- -fsigned-char"
+ ;;
+linux_ppc64)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+linux_ppc64le)
+ GOOSARCH_in=syscall_linux_ppc64x.go
+ unistd_h=/usr/include/powerpc64le-linux-gnu/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+nacl_386)
+ mkerrors=""
+ mksyscall="./mksyscall.pl -l32 -nacl"
+ mksysnum=""
+ mktypes=""
+ ;;
+nacl_amd64p32)
+ mkerrors=""
+ mksyscall="./mksyscall.pl -nacl"
+ mksysnum=""
+ mktypes=""
+ ;;
+netbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+netbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -netbsd"
+ mksysnum="curl -s 'http://cvsweb.netbsd.org/bsdweb.cgi/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_netbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_386)
+ mkerrors="$mkerrors -m32"
+ mksyscall="./mksyscall.pl -l32 -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_amd64)
+ mkerrors="$mkerrors -m64"
+ mksyscall="./mksyscall.pl -openbsd"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+openbsd_arm)
+ mkerrors="$mkerrors"
+ mksyscall="./mksyscall.pl -l32 -openbsd -arm"
+ mksysctl="./mksysctl_openbsd.pl"
+ zsysctl="zsysctl_openbsd.go"
+ mksysnum="curl -s 'http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/sys/kern/syscalls.master' | ./mksysnum_openbsd.pl"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+plan9_386)
+ mkerrors=
+ mksyscall="./mksyscall.pl -l32 -plan9"
+ mksysnum="./mksysnum_plan9.sh /n/sources/plan9/sys/src/libc/9syscall/sys.h"
+ mktypes="XXX"
+ ;;
+solaris_amd64)
+ mksyscall="./mksyscall_solaris.pl"
+ mkerrors="$mkerrors -m64"
+ mksysnum=
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
+windows_*)
+ echo 'run "go generate syscall_windows.go" instead' 1>&2
+ exit 1
+ ;;
+*)
+ echo 'unrecognized $GOOS_$GOARCH: ' "$GOOSARCH" 1>&2
+ exit 1
+ ;;
+esac
+
+(
+ if [ -n "$mkerrors" ]; then echo "$mkerrors |gofmt >$zerrors"; fi
+ syscall_goos="syscall_$GOOS.go"
+ case "$GOOS" in
+ darwin | dragonfly | freebsd | netbsd | openbsd)
+ syscall_goos="syscall_bsd.go $syscall_goos"
+ ;;
+ esac
+ if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
+ if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
+ if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
+) | $run
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/mksyscall.pl 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/mksyscall.pl
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/mksyscall.pl 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/mksyscall.pl 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,319 @@
+#!/usr/bin/env perl
+# Copyright 2009 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# This program reads a file containing function prototypes
+# (like syscall_darwin.go) and generates system call bodies.
+# The prototypes are marked by lines beginning with "//sys"
+# and read like func declarations if //sys is replaced by func, but:
+# * The parameter lists must give a name for each argument.
+# This includes return parameters.
+# * The parameter lists must give a type for each argument:
+# the (x, y, z int) shorthand is not allowed.
+# * If the return parameter is an error number, it must be named errno.
+
+# A line beginning with //sysnb is like //sys, except that the
+# goroutine will not be suspended during the execution of the system
+# call. This must only be used for system calls which can never
+# block, as otherwise the system call could cause all goroutines to
+# hang.
+
+use strict;
+
+my $cmdline = "mksyscall.pl " . join(' ', @ARGV);
+my $errors = 0;
+my $_32bit = "";
+my $plan9 = 0;
+my $openbsd = 0;
+my $netbsd = 0;
+my $dragonfly = 0;
+my $nacl = 0;
+my $arm = 0; # 64-bit value should use (even, odd)-pair
+
+if($ARGV[0] eq "-b32") {
+ $_32bit = "big-endian";
+ shift;
+} elsif($ARGV[0] eq "-l32") {
+ $_32bit = "little-endian";
+ shift;
+}
+if($ARGV[0] eq "-plan9") {
+ $plan9 = 1;
+ shift;
+}
+if($ARGV[0] eq "-openbsd") {
+ $openbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-netbsd") {
+ $netbsd = 1;
+ shift;
+}
+if($ARGV[0] eq "-dragonfly") {
+ $dragonfly = 1;
+ shift;
+}
+if($ARGV[0] eq "-nacl") {
+ $nacl = 1;
+ shift;
+}
+if($ARGV[0] eq "-arm") {
+ $arm = 1;
+ shift;
+}
+
+if($ARGV[0] =~ /^-/) {
+ print STDERR "usage: mksyscall.pl [-b32 | -l32] [file ...]\n";
+ exit 1;
+}
+
+sub parseparamlist($) {
+ my ($list) = @_;
+ $list =~ s/^\s*//;
+ $list =~ s/\s*$//;
+ if($list eq "") {
+ return ();
+ }
+ return split(/\s*,\s*/, $list);
+}
+
+sub parseparam($) {
+ my ($p) = @_;
+ if($p !~ /^(\S*) (\S*)$/) {
+ print STDERR "$ARGV:$.: malformed parameter: $p\n";
+ $errors = 1;
+ return ("xx", "int");
+ }
+ return ($1, $2);
+}
+
+my $text = "";
+while(<>) {
+ chomp;
+ s/\s+/ /g;
+ s/^\s+//;
+ s/\s+$//;
+ my $nonblock = /^\/\/sysnb /;
+ next if !/^\/\/sys / && !$nonblock;
+
+ # Line must be of the form
+ # func Open(path string, mode int, perm int) (fd int, errno error)
+ # Split into name, in params, out params.
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
+ print STDERR "$ARGV:$.: malformed //sys declaration\n";
+ $errors = 1;
+ next;
+ }
+ my ($func, $in, $out, $sysname) = ($2, $3, $4, $5);
+
+ # Split argument lists on comma.
+ my @in = parseparamlist($in);
+ my @out = parseparamlist($out);
+
+ # Try in vain to keep people from editing this file.
+ # The theory is that they jump into the middle of the file
+ # without reading the header.
+ $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n";
+
+ # Go function header.
+ my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : "";
+ $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl;
+
+ # Check if err return available
+ my $errvar = "";
+ foreach my $p (@out) {
+ my ($name, $type) = parseparam($p);
+ if($type eq "error") {
+ $errvar = $name;
+ last;
+ }
+ }
+
+ # Prepare arguments to Syscall.
+ my @args = ();
+ my @uses = ();
+ my $n = 0;
+ foreach my $p (@in) {
+ my ($name, $type) = parseparam($p);
+ if($type =~ /^\*/) {
+ push @args, "uintptr(unsafe.Pointer($name))";
+ } elsif($type eq "string" && $errvar ne "") {
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, $errvar = BytePtrFromString($name)\n";
+ $text .= "\tif $errvar != nil {\n\t\treturn\n\t}\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type eq "string") {
+ print STDERR "$ARGV:$.: $func uses string arguments, but has no error return\n";
+ $text .= "\tvar _p$n *byte\n";
+ $text .= "\t_p$n, _ = BytePtrFromString($name)\n";
+ push @args, "uintptr(unsafe.Pointer(_p$n))";
+ push @uses, "use(unsafe.Pointer(_p$n))";
+ $n++;
+ } elsif($type =~ /^\[\](.*)/) {
+ # Convert slice into pointer, length.
+ # Have to be careful not to take address of &a[0] if len == 0:
+ # pass dummy pointer in that case.
+ # Used to pass nil, but some OSes or simulators reject write(fd, nil, 0).
+ $text .= "\tvar _p$n unsafe.Pointer\n";
+ $text .= "\tif len($name) > 0 {\n\t\t_p$n = unsafe.Pointer(\&${name}[0])\n\t}";
+ $text .= " else {\n\t\t_p$n = unsafe.Pointer(&_zero)\n\t}";
+ $text .= "\n";
+ push @args, "uintptr(_p$n)", "uintptr(len($name))";
+ $n++;
+ } elsif($type eq "int64" && ($openbsd || $netbsd)) {
+ push @args, "0";
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $dragonfly) {
+ if ($func !~ /^extp(read|write)/i) {
+ push @args, "0";
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } elsif($_32bit eq "little-endian") {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ } else {
+ push @args, "uintptr($name)";
+ }
+ } elsif($type eq "int64" && $_32bit ne "") {
+ if(@args % 2 && $arm) {
+ # arm abi specifies 64-bit argument uses
+ # (even, odd) pair
+ push @args, "0"
+ }
+ if($_32bit eq "big-endian") {
+ push @args, "uintptr($name>>32)", "uintptr($name)";
+ } else {
+ push @args, "uintptr($name)", "uintptr($name>>32)";
+ }
+ } else {
+ push @args, "uintptr($name)";
+ }
+ }
+
+ # Determine which form to use; pad args with zeros.
+ my $asm = "Syscall";
+ if ($nonblock) {
+ $asm = "RawSyscall";
+ }
+ if(@args <= 3) {
+ while(@args < 3) {
+ push @args, "0";
+ }
+ } elsif(@args <= 6) {
+ $asm .= "6";
+ while(@args < 6) {
+ push @args, "0";
+ }
+ } elsif(@args <= 9) {
+ $asm .= "9";
+ while(@args < 9) {
+ push @args, "0";
+ }
+ } else {
+ print STDERR "$ARGV:$.: too many arguments to system call\n";
+ }
+
+ # System call number.
+ if($sysname eq "") {
+ $sysname = "SYS_$func";
+ $sysname =~ s/([a-z])([A-Z])/${1}_$2/g; # turn FooBar into Foo_Bar
+ $sysname =~ y/a-z/A-Z/;
+ if($nacl) {
+ $sysname =~ y/A-Z/a-z/;
+ }
+ }
+
+ # Actual call.
+ my $args = join(', ', @args);
+ my $call = "$asm($sysname, $args)";
+
+ # Assign return values.
+ my $body = "";
+ my @ret = ("_", "_", "_");
+ my $do_errno = 0;
+ for(my $i=0; $i<@out; $i++) {
+ my $p = $out[$i];
+ my ($name, $type) = parseparam($p);
+ my $reg = "";
+ if($name eq "err" && !$plan9) {
+ $reg = "e1";
+ $ret[2] = $reg;
+ $do_errno = 1;
+ } elsif($name eq "err" && $plan9) {
+ $ret[0] = "r0";
+ $ret[2] = "e1";
+ next;
+ } else {
+ $reg = sprintf("r%d", $i);
+ $ret[$i] = $reg;
+ }
+ if($type eq "bool") {
+ $reg = "$reg != 0";
+ }
+ if($type eq "int64" && $_32bit ne "") {
+ # 64-bit number in r1:r0 or r0:r1.
+ if($i+2 > @out) {
+ print STDERR "$ARGV:$.: not enough registers for int64 return\n";
+ }
+ if($_32bit eq "big-endian") {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i, $i+1);
+ } else {
+ $reg = sprintf("int64(r%d)<<32 | int64(r%d)", $i+1, $i);
+ }
+ $ret[$i] = sprintf("r%d", $i);
+ $ret[$i+1] = sprintf("r%d", $i+1);
+ }
+ if($reg ne "e1" || $plan9) {
+ $body .= "\t$name = $type($reg)\n";
+ }
+ }
+ if ($ret[0] eq "_" && $ret[1] eq "_" && $ret[2] eq "_") {
+ $text .= "\t$call\n";
+ } else {
+ $text .= "\t$ret[0], $ret[1], $ret[2] := $call\n";
+ }
+ foreach my $use (@uses) {
+ $text .= "\t$use\n";
+ }
+ $text .= $body;
+
+ if ($plan9 && $ret[2] eq "e1") {
+ $text .= "\tif int32(r0) == -1 {\n";
+ $text .= "\t\terr = e1\n";
+ $text .= "\t}\n";
+ } elsif ($do_errno) {
+ $text .= "\tif e1 != 0 {\n";
+ $text .= "\t\terr = errnoErr(e1)\n";
+ $text .= "\t}\n";
+ }
+ $text .= "\treturn\n";
+ $text .= "}\n\n";
+}
+
+chomp $text;
+chomp $text;
+
+if($errors) {
+ exit 1;
+}
+
+print < len(buf) || buf[n-1] != 0 {
+ return "", EINVAL
+ }
+ return string(buf[0 : n-1]), nil
+}
+
+func Getgroups() (gids []int, err error) {
+ n, err := getgroups(0, nil)
+ if err != nil {
+ return nil, err
+ }
+ if n == 0 {
+ return nil, nil
+ }
+
+ // Sanity check group count. Max is 1<<16 on Linux.
+ if n < 0 || n > 1<<20 {
+ return nil, EINVAL
+ }
+
+ a := make([]_Gid_t, n)
+ n, err = getgroups(n, &a[0])
+ if err != nil {
+ return nil, err
+ }
+ gids = make([]int, n)
+ for i, v := range a[0:n] {
+ gids[i] = int(v)
+ }
+ return
+}
+
+func Setgroups(gids []int) (err error) {
+ if len(gids) == 0 {
+ return setgroups(0, nil)
+ }
+
+ a := make([]_Gid_t, len(gids))
+ for i, v := range gids {
+ a[i] = _Gid_t(v)
+ }
+ return setgroups(len(a), &a[0])
+}
+
+type WaitStatus uint32
+
+// Wait status is 7 bits at bottom, either 0 (exited),
+// 0x7F (stopped), or a signal number that caused an exit.
+// The 0x80 bit is whether there was a core dump.
+// An extra number (exit code, signal causing a stop)
+// is in the high bits. At least that's the idea.
+// There are various irregularities. For example, the
+// "continued" status is 0xFFFF, distinguishing itself
+// from stopped via the core dump bit.
+
+const (
+ mask = 0x7F
+ core = 0x80
+ exited = 0x00
+ stopped = 0x7F
+ shift = 8
+)
+
+func (w WaitStatus) Exited() bool { return w&mask == exited }
+
+func (w WaitStatus) Signaled() bool { return w&mask != stopped && w&mask != exited }
+
+func (w WaitStatus) Stopped() bool { return w&0xFF == stopped }
+
+func (w WaitStatus) Continued() bool { return w == 0xFFFF }
+
+func (w WaitStatus) CoreDump() bool { return w.Signaled() && w&core != 0 }
+
+func (w WaitStatus) ExitStatus() int {
+ if !w.Exited() {
+ return -1
+ }
+ return int(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) Signal() Signal {
+ if !w.Signaled() {
+ return -1
+ }
+ return Signal(w & mask)
+}
+
+func (w WaitStatus) StopSignal() Signal {
+ if !w.Stopped() {
+ return -1
+ }
+ return Signal(w>>shift) & 0xFF
+}
+
+func (w WaitStatus) TrapCause() int {
+ if w.StopSignal() != SIGTRAP {
+ return -1
+ }
+ return int(w>>shift) >> 8
+}
+
+//sys wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error)
+
+func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, err error) {
+ var status _C_int
+ wpid, err = wait4(pid, &status, options, rusage)
+ if wstatus != nil {
+ *wstatus = WaitStatus(status)
+ }
+ return
+}
+
+func Mkfifo(path string, mode uint32) (err error) {
+ return Mknod(path, mode|S_IFIFO, 0)
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Port < 0 || sa.Port > 0xFFFF {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_INET6
+ p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+ p[0] = byte(sa.Port >> 8)
+ p[1] = byte(sa.Port)
+ sa.raw.Scope_id = sa.ZoneId
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ name := sa.Name
+ n := len(name)
+ if n >= len(sa.raw.Path) {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_UNIX
+ for i := 0; i < n; i++ {
+ sa.raw.Path[i] = int8(name[i])
+ }
+ // length is family (uint16), name, NUL.
+ sl := _Socklen(2)
+ if n > 0 {
+ sl += _Socklen(n) + 1
+ }
+ if sa.raw.Path[0] == '@' {
+ sa.raw.Path[0] = 0
+ // Don't count trailing NUL for abstract address.
+ sl--
+ }
+
+ return unsafe.Pointer(&sa.raw), sl, nil
+}
+
+type SockaddrLinklayer struct {
+ Protocol uint16
+ Ifindex int
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]byte
+ raw RawSockaddrLinklayer
+}
+
+func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff {
+ return nil, 0, EINVAL
+ }
+ sa.raw.Family = AF_PACKET
+ sa.raw.Protocol = sa.Protocol
+ sa.raw.Ifindex = int32(sa.Ifindex)
+ sa.raw.Hatype = sa.Hatype
+ sa.raw.Pkttype = sa.Pkttype
+ sa.raw.Halen = sa.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.raw.Addr[i] = sa.Addr[i]
+ }
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
+}
+
+type SockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+ raw RawSockaddrNetlink
+}
+
+func (sa *SockaddrNetlink) sockaddr() (unsafe.Pointer, _Socklen, error) {
+ sa.raw.Family = AF_NETLINK
+ sa.raw.Pad = sa.Pad
+ sa.raw.Pid = sa.Pid
+ sa.raw.Groups = sa.Groups
+ return unsafe.Pointer(&sa.raw), SizeofSockaddrNetlink, nil
+}
+
+func anyToSockaddr(rsa *RawSockaddrAny) (Sockaddr, error) {
+ switch rsa.Addr.Family {
+ case AF_NETLINK:
+ pp := (*RawSockaddrNetlink)(unsafe.Pointer(rsa))
+ sa := new(SockaddrNetlink)
+ sa.Family = pp.Family
+ sa.Pad = pp.Pad
+ sa.Pid = pp.Pid
+ sa.Groups = pp.Groups
+ return sa, nil
+
+ case AF_PACKET:
+ pp := (*RawSockaddrLinklayer)(unsafe.Pointer(rsa))
+ sa := new(SockaddrLinklayer)
+ sa.Protocol = pp.Protocol
+ sa.Ifindex = int(pp.Ifindex)
+ sa.Hatype = pp.Hatype
+ sa.Pkttype = pp.Pkttype
+ sa.Halen = pp.Halen
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_UNIX:
+ pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+ sa := new(SockaddrUnix)
+ if pp.Path[0] == 0 {
+ // "Abstract" Unix domain socket.
+ // Rewrite leading NUL as @ for textual display.
+ // (This is the standard convention.)
+ // Not friendly to overwrite in place,
+ // but the callers below don't care.
+ pp.Path[0] = '@'
+ }
+
+ // Assume path ends at NUL.
+ // This is not technically the Linux semantics for
+ // abstract Unix domain sockets--they are supposed
+ // to be uninterpreted fixed-size binary blobs--but
+ // everyone uses this convention.
+ n := 0
+ for n < len(pp.Path) && pp.Path[n] != 0 {
+ n++
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+ sa.Name = string(bytes)
+ return sa, nil
+
+ case AF_INET:
+ pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet4)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+
+ case AF_INET6:
+ pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+ sa := new(SockaddrInet6)
+ p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+ sa.Port = int(p[0])<<8 + int(p[1])
+ sa.ZoneId = pp.Scope_id
+ for i := 0; i < len(sa.Addr); i++ {
+ sa.Addr[i] = pp.Addr[i]
+ }
+ return sa, nil
+ }
+ return nil, EAFNOSUPPORT
+}
+
+func Accept(fd int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept(fd, &rsa, &len)
+ if err != nil {
+ return
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Accept4(fd int, flags int) (nfd int, sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ nfd, err = accept4(fd, &rsa, &len, flags)
+ if err != nil {
+ return
+ }
+ if len > SizeofSockaddrAny {
+ panic("RawSockaddrAny too small")
+ }
+ sa, err = anyToSockaddr(&rsa)
+ if err != nil {
+ Close(nfd)
+ nfd = 0
+ }
+ return
+}
+
+func Getsockname(fd int) (sa Sockaddr, err error) {
+ var rsa RawSockaddrAny
+ var len _Socklen = SizeofSockaddrAny
+ if err = getsockname(fd, &rsa, &len); err != nil {
+ return
+ }
+ return anyToSockaddr(&rsa)
+}
+
+func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) {
+ vallen := _Socklen(4)
+ err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen)
+ return value, err
+}
+
+func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) {
+ var value IPMreq
+ vallen := _Socklen(SizeofIPMreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) {
+ var value IPMreqn
+ vallen := _Socklen(SizeofIPMreqn)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) {
+ var value IPv6Mreq
+ vallen := _Socklen(SizeofIPv6Mreq)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) {
+ var value IPv6MTUInfo
+ vallen := _Socklen(SizeofIPv6MTUInfo)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) {
+ var value ICMPv6Filter
+ vallen := _Socklen(SizeofICMPv6Filter)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func GetsockoptUcred(fd, level, opt int) (*Ucred, error) {
+ var value Ucred
+ vallen := _Socklen(SizeofUcred)
+ err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen)
+ return &value, err
+}
+
+func SetsockoptIPMreqn(fd, level, opt int, mreq *IPMreqn) (err error) {
+ return setsockopt(fd, level, opt, unsafe.Pointer(mreq), unsafe.Sizeof(*mreq))
+}
+
+func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) {
+ var msg Msghdr
+ var rsa RawSockaddrAny
+ msg.Name = (*byte)(unsafe.Pointer(&rsa))
+ msg.Namelen = uint32(SizeofSockaddrAny)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // receive at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = recvmsg(fd, &msg, flags); err != nil {
+ return
+ }
+ oobn = int(msg.Controllen)
+ recvflags = int(msg.Flags)
+ // source address is only specified if the socket is unconnected
+ if rsa.Addr.Family != AF_UNSPEC {
+ from, err = anyToSockaddr(&rsa)
+ }
+ return
+}
+
+func Sendmsg(fd int, p, oob []byte, to Sockaddr, flags int) (err error) {
+ _, err = SendmsgN(fd, p, oob, to, flags)
+ return
+}
+
+func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) {
+ var ptr unsafe.Pointer
+ var salen _Socklen
+ if to != nil {
+ var err error
+ ptr, salen, err = to.sockaddr()
+ if err != nil {
+ return 0, err
+ }
+ }
+ var msg Msghdr
+ msg.Name = (*byte)(unsafe.Pointer(ptr))
+ msg.Namelen = uint32(salen)
+ var iov Iovec
+ if len(p) > 0 {
+ iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+ iov.SetLen(len(p))
+ }
+ var dummy byte
+ if len(oob) > 0 {
+ // send at least one normal byte
+ if len(p) == 0 {
+ iov.Base = &dummy
+ iov.SetLen(1)
+ }
+ msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+ msg.SetControllen(len(oob))
+ }
+ msg.Iov = &iov
+ msg.Iovlen = 1
+ if n, err = sendmsg(fd, &msg, flags); err != nil {
+ return 0, err
+ }
+ if len(oob) > 0 && len(p) == 0 {
+ n = 0
+ }
+ return n, nil
+}
+
+// BindToDevice binds the socket associated with fd to device.
+func BindToDevice(fd int, device string) (err error) {
+ return SetsockoptString(fd, SOL_SOCKET, SO_BINDTODEVICE, device)
+}
+
+//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error)
+
+func ptracePeek(req int, pid int, addr uintptr, out []byte) (count int, err error) {
+ // The peek requests are machine-size oriented, so we wrap it
+ // to retrieve arbitrary-length data.
+
+ // The ptrace syscall differs from glibc's ptrace.
+ // Peeks returns the word in *data, not as the return value.
+
+ var buf [sizeofPtr]byte
+
+ // Leading edge. PEEKTEXT/PEEKDATA don't require aligned
+ // access (PEEKUSER warns that it might), but if we don't
+ // align our reads, we might straddle an unmapped page
+ // boundary and not get the bytes leading up to the page
+ // boundary.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ err = ptrace(req, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(out, buf[addr%sizeofPtr:])
+ out = out[n:]
+ }
+
+ // Remainder.
+ for len(out) > 0 {
+ // We use an internal buffer to guarantee alignment.
+ // It's not documented if this is necessary, but we're paranoid.
+ err = ptrace(req, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copied := copy(out, buf[0:])
+ n += copied
+ out = out[copied:]
+ }
+
+ return n, nil
+}
+
+func PtracePeekText(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKTEXT, pid, addr, out)
+}
+
+func PtracePeekData(pid int, addr uintptr, out []byte) (count int, err error) {
+ return ptracePeek(PTRACE_PEEKDATA, pid, addr, out)
+}
+
+func ptracePoke(pokeReq int, peekReq int, pid int, addr uintptr, data []byte) (count int, err error) {
+ // As for ptracePeek, we need to align our accesses to deal
+ // with the possibility of straddling an invalid page.
+
+ // Leading edge.
+ n := 0
+ if addr%sizeofPtr != 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr-addr%sizeofPtr, uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return 0, err
+ }
+ n += copy(buf[addr%sizeofPtr:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr-addr%sizeofPtr, word)
+ if err != nil {
+ return 0, err
+ }
+ data = data[n:]
+ }
+
+ // Interior.
+ for len(data) > sizeofPtr {
+ word := *((*uintptr)(unsafe.Pointer(&data[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += sizeofPtr
+ data = data[sizeofPtr:]
+ }
+
+ // Trailing edge.
+ if len(data) > 0 {
+ var buf [sizeofPtr]byte
+ err = ptrace(peekReq, pid, addr+uintptr(n), uintptr(unsafe.Pointer(&buf[0])))
+ if err != nil {
+ return n, err
+ }
+ copy(buf[0:], data)
+ word := *((*uintptr)(unsafe.Pointer(&buf[0])))
+ err = ptrace(pokeReq, pid, addr+uintptr(n), word)
+ if err != nil {
+ return n, err
+ }
+ n += len(data)
+ }
+
+ return n, nil
+}
+
+func PtracePokeText(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKETEXT, PTRACE_PEEKTEXT, pid, addr, data)
+}
+
+func PtracePokeData(pid int, addr uintptr, data []byte) (count int, err error) {
+ return ptracePoke(PTRACE_POKEDATA, PTRACE_PEEKDATA, pid, addr, data)
+}
+
+func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) {
+ return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) {
+ return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+func PtraceSetOptions(pid int, options int) (err error) {
+ return ptrace(PTRACE_SETOPTIONS, pid, 0, uintptr(options))
+}
+
+func PtraceGetEventMsg(pid int) (msg uint, err error) {
+ var data _C_long
+ err = ptrace(PTRACE_GETEVENTMSG, pid, 0, uintptr(unsafe.Pointer(&data)))
+ msg = uint(data)
+ return
+}
+
+func PtraceCont(pid int, signal int) (err error) {
+ return ptrace(PTRACE_CONT, pid, 0, uintptr(signal))
+}
+
+func PtraceSyscall(pid int, signal int) (err error) {
+ return ptrace(PTRACE_SYSCALL, pid, 0, uintptr(signal))
+}
+
+func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) }
+
+func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) }
+
+func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) }
+
+//sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error)
+
+func Reboot(cmd int) (err error) {
+ return reboot(LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2, cmd, "")
+}
+
+func clen(n []byte) int {
+ for i := 0; i < len(n); i++ {
+ if n[i] == 0 {
+ return i
+ }
+ }
+ return len(n)
+}
+
+func ReadDirent(fd int, buf []byte) (n int, err error) {
+ return Getdents(fd, buf)
+}
+
+func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
+ origlen := len(buf)
+ count = 0
+ for max != 0 && len(buf) > 0 {
+ dirent := (*Dirent)(unsafe.Pointer(&buf[0]))
+ buf = buf[dirent.Reclen:]
+ if dirent.Ino == 0 { // File absent in directory.
+ continue
+ }
+ bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
+ var name = string(bytes[0:clen(bytes[:])])
+ if name == "." || name == ".." { // Useless names
+ continue
+ }
+ max--
+ count++
+ names = append(names, name)
+ }
+ return origlen - len(buf), count, names
+}
+
+//sys mount(source string, target string, fstype string, flags uintptr, data *byte) (err error)
+
+func Mount(source string, target string, fstype string, flags uintptr, data string) (err error) {
+ // Certain file systems get rather angry and EINVAL if you give
+ // them an empty string of data, rather than NULL.
+ if data == "" {
+ return mount(source, target, fstype, flags, nil)
+ }
+ datap, err := BytePtrFromString(data)
+ if err != nil {
+ return err
+ }
+ err = mount(source, target, fstype, flags, datap)
+ use(unsafe.Pointer(datap))
+ return err
+}
+
+// Sendto
+// Recvfrom
+// Socketpair
+
+/*
+ * Direct access
+ */
+//sys Acct(path string) (err error)
+//sys Adjtimex(buf *Timex) (state int, err error)
+//sys Chdir(path string) (err error)
+//sys Chroot(path string) (err error)
+//sys Close(fd int) (err error)
+//sys Dup(oldfd int) (fd int, err error)
+//sys Dup3(oldfd int, newfd int, flags int) (err error)
+//sysnb EpollCreate(size int) (fd int, err error)
+//sysnb EpollCreate1(flag int) (fd int, err error)
+//sysnb EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error)
+//sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
+//sys Exit(code int) = SYS_EXIT_GROUP
+//sys Faccessat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fallocate(fd int, mode uint32, off int64, len int64) (err error)
+//sys Fchdir(fd int) (err error)
+//sys Fchmod(fd int, mode uint32) (err error)
+//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
+//sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
+//sys fcntl(fd int, cmd int, arg int) (val int, err error)
+//sys Fdatasync(fd int) (err error)
+//sys Flock(fd int, how int) (err error)
+//sys Fsync(fd int) (err error)
+//sys Getdents(fd int, buf []byte) (n int, err error) = _SYS_getdents
+//sysnb Getpgid(pid int) (pgid int, err error)
+
+func Getpgrp() (pid int) {
+ pid, _ = Getpgid(0)
+ return
+}
+
+//sysnb Getpid() (pid int)
+//sysnb Getppid() (ppid int)
+//sys Getpriority(which int, who int) (prio int, err error)
+//sysnb Getrusage(who int, rusage *Rusage) (err error)
+//sysnb Gettid() (tid int)
+//sys Getxattr(path string, attr string, dest []byte) (sz int, err error)
+//sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error)
+//sysnb InotifyInit1(flags int) (fd int, err error)
+//sysnb InotifyRmWatch(fd int, watchdesc uint32) (success int, err error)
+//sysnb Kill(pid int, sig Signal) (err error)
+//sys Klogctl(typ int, buf []byte) (n int, err error) = SYS_SYSLOG
+//sys Listxattr(path string, dest []byte) (sz int, err error)
+//sys Mkdirat(dirfd int, path string, mode uint32) (err error)
+//sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error)
+//sys Nanosleep(time *Timespec, leftover *Timespec) (err error)
+//sys Pause() (err error)
+//sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
+//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64
+//sys read(fd int, p []byte) (n int, err error)
+//sys Removexattr(path string, attr string) (err error)
+//sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
+//sys Setdomainname(p []byte) (err error)
+//sys Sethostname(p []byte) (err error)
+//sysnb Setpgid(pid int, pgid int) (err error)
+//sysnb Setsid() (pid int, err error)
+//sysnb Settimeofday(tv *Timeval) (err error)
+
+// issue 1435.
+// On linux Setuid and Setgid only affects the current thread, not the process.
+// This does not match what most callers expect so we must return an error
+// here rather than letting the caller think that the call succeeded.
+
+func Setuid(uid int) (err error) {
+ return EOPNOTSUPP
+}
+
+func Setgid(gid int) (err error) {
+ return EOPNOTSUPP
+}
+
+//sys Setpriority(which int, who int, prio int) (err error)
+//sys Setxattr(path string, attr string, data []byte, flags int) (err error)
+//sys Sync()
+//sysnb Sysinfo(info *Sysinfo_t) (err error)
+//sys Tee(rfd int, wfd int, len int, flags int) (n int64, err error)
+//sysnb Tgkill(tgid int, tid int, sig Signal) (err error)
+//sysnb Times(tms *Tms) (ticks uintptr, err error)
+//sysnb Umask(mask int) (oldmask int)
+//sysnb Uname(buf *Utsname) (err error)
+//sys Unmount(target string, flags int) (err error) = SYS_UMOUNT2
+//sys Unshare(flags int) (err error)
+//sys Ustat(dev int, ubuf *Ustat_t) (err error)
+//sys Utime(path string, buf *Utimbuf) (err error)
+//sys write(fd int, p []byte) (n int, err error)
+//sys exitThread(code int) (err error) = SYS_EXIT
+//sys readlen(fd int, p *byte, np int) (n int, err error) = SYS_READ
+//sys writelen(fd int, p *byte, np int) (n int, err error) = SYS_WRITE
+
+// mmap varies by architecture; see syscall_linux_*.go.
+//sys munmap(addr uintptr, length uintptr) (err error)
+
+var mapper = &mmapper{
+ active: make(map[*byte][]byte),
+ mmap: mmap,
+ munmap: munmap,
+}
+
+func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) {
+ return mapper.Mmap(fd, offset, length, prot, flags)
+}
+
+func Munmap(b []byte) (err error) {
+ return mapper.Munmap(b)
+}
+
+//sys Madvise(b []byte, advice int) (err error)
+//sys Mprotect(b []byte, prot int) (err error)
+//sys Mlock(b []byte) (err error)
+//sys Munlock(b []byte) (err error)
+//sys Mlockall(flags int) (err error)
+//sys Munlockall() (err error)
+
+/*
+ * Unimplemented
+ */
+// AddKey
+// AfsSyscall
+// Alarm
+// ArchPrctl
+// Brk
+// Capget
+// Capset
+// ClockGetres
+// ClockGettime
+// ClockNanosleep
+// ClockSettime
+// Clone
+// CreateModule
+// DeleteModule
+// EpollCtlOld
+// EpollPwait
+// EpollWaitOld
+// Eventfd
+// Execve
+// Fadvise64
+// Fgetxattr
+// Flistxattr
+// Fork
+// Fremovexattr
+// Fsetxattr
+// Futex
+// GetKernelSyms
+// GetMempolicy
+// GetRobustList
+// GetThreadArea
+// Getitimer
+// Getpmsg
+// IoCancel
+// IoDestroy
+// IoGetevents
+// IoSetup
+// IoSubmit
+// Ioctl
+// IoprioGet
+// IoprioSet
+// KexecLoad
+// Keyctl
+// Lgetxattr
+// Llistxattr
+// LookupDcookie
+// Lremovexattr
+// Lsetxattr
+// Mbind
+// MigratePages
+// Mincore
+// ModifyLdt
+// Mount
+// MovePages
+// Mprotect
+// MqGetsetattr
+// MqNotify
+// MqOpen
+// MqTimedreceive
+// MqTimedsend
+// MqUnlink
+// Mremap
+// Msgctl
+// Msgget
+// Msgrcv
+// Msgsnd
+// Msync
+// Newfstatat
+// Nfsservctl
+// Personality
+// Poll
+// Ppoll
+// Prctl
+// Pselect6
+// Ptrace
+// Putpmsg
+// QueryModule
+// Quotactl
+// Readahead
+// Readv
+// RemapFilePages
+// RequestKey
+// RestartSyscall
+// RtSigaction
+// RtSigpending
+// RtSigprocmask
+// RtSigqueueinfo
+// RtSigreturn
+// RtSigsuspend
+// RtSigtimedwait
+// SchedGetPriorityMax
+// SchedGetPriorityMin
+// SchedGetaffinity
+// SchedGetparam
+// SchedGetscheduler
+// SchedRrGetInterval
+// SchedSetaffinity
+// SchedSetparam
+// SchedYield
+// Security
+// Semctl
+// Semget
+// Semop
+// Semtimedop
+// SetMempolicy
+// SetRobustList
+// SetThreadArea
+// SetTidAddress
+// Shmat
+// Shmctl
+// Shmdt
+// Shmget
+// Sigaltstack
+// Signalfd
+// Swapoff
+// Swapon
+// Sysfs
+// TimerCreate
+// TimerDelete
+// TimerGetoverrun
+// TimerGettime
+// TimerSettime
+// Timerfd
+// Tkill (obsolete)
+// Tuxcall
+// Umount2
+// Uselib
+// Utimensat
+// Vfork
+// Vhangup
+// Vmsplice
+// Vserver
+// Waitid
+// _Sysctl
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/types_linux.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/types_linux.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/src/syscall/types_linux.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/src/syscall/types_linux.go 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,401 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+Input to cgo -godefs. See also mkerrors.sh and mkall.sh
+*/
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package syscall
+
+/*
+#define _LARGEFILE_SOURCE
+#define _LARGEFILE64_SOURCE
+#define _FILE_OFFSET_BITS 64
+#define _GNU_SOURCE
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+enum {
+ sizeofPtr = sizeof(void*),
+};
+
+union sockaddr_all {
+ struct sockaddr s1; // this one gets used for fields
+ struct sockaddr_in s2; // these pad it out
+ struct sockaddr_in6 s3;
+ struct sockaddr_un s4;
+ struct sockaddr_ll s5;
+ struct sockaddr_nl s6;
+};
+
+struct sockaddr_any {
+ struct sockaddr addr;
+ char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)];
+};
+
+// copied from /usr/include/linux/un.h
+struct my_sockaddr_un {
+ sa_family_t sun_family;
+#if defined(__ARM_EABI__) || defined(__powerpc64__)
+ // on ARM and PPC char is by default unsigned
+ signed char sun_path[108];
+#else
+ char sun_path[108];
+#endif
+};
+
+#ifdef __ARM_EABI__
+typedef struct user_regs PtraceRegs;
+#elif defined(__aarch64__)
+typedef struct user_pt_regs PtraceRegs;
+#elif defined(__powerpc64__)
+typedef struct pt_regs PtraceRegs;
+#elif defined(__mips__)
+typedef struct user PtraceRegs;
+#else
+typedef struct user_regs_struct PtraceRegs;
+#endif
+
+// The real epoll_event is a union, and godefs doesn't handle it well.
+struct my_epoll_event {
+ uint32_t events;
+#ifdef __ARM_EABI__
+ // padding is not specified in linux/eventpoll.h but added to conform to the
+ // alignment requirements of EABI
+ int32_t padFd;
+#endif
+#ifdef __powerpc64__
+ int32_t _padFd;
+#endif
+ int32_t fd;
+ int32_t pad;
+};
+
+*/
+import "C"
+
+// Machine characteristics; for internal use.
+
+const (
+ sizeofPtr = C.sizeofPtr
+ sizeofShort = C.sizeof_short
+ sizeofInt = C.sizeof_int
+ sizeofLong = C.sizeof_long
+ sizeofLongLong = C.sizeof_longlong
+ PathMax = C.PATH_MAX
+)
+
+// Basic types
+
+type (
+ _C_short C.short
+ _C_int C.int
+ _C_long C.long
+ _C_long_long C.longlong
+)
+
+// Time
+
+type Timespec C.struct_timespec
+
+type Timeval C.struct_timeval
+
+type Timex C.struct_timex
+
+type Time_t C.time_t
+
+type Tms C.struct_tms
+
+type Utimbuf C.struct_utimbuf
+
+// Processes
+
+type Rusage C.struct_rusage
+
+type Rlimit C.struct_rlimit
+
+type _Gid_t C.gid_t
+
+// Files
+
+type Stat_t C.struct_stat
+
+type Statfs_t C.struct_statfs
+
+type Dirent C.struct_dirent
+
+type Fsid C.fsid_t
+
+type Flock_t C.struct_flock
+
+// Sockets
+
+type RawSockaddrInet4 C.struct_sockaddr_in
+
+type RawSockaddrInet6 C.struct_sockaddr_in6
+
+type RawSockaddrUnix C.struct_my_sockaddr_un
+
+type RawSockaddrLinklayer C.struct_sockaddr_ll
+
+type RawSockaddrNetlink C.struct_sockaddr_nl
+
+type RawSockaddr C.struct_sockaddr
+
+type RawSockaddrAny C.struct_sockaddr_any
+
+type _Socklen C.socklen_t
+
+type Linger C.struct_linger
+
+type Iovec C.struct_iovec
+
+type IPMreq C.struct_ip_mreq
+
+type IPMreqn C.struct_ip_mreqn
+
+type IPv6Mreq C.struct_ipv6_mreq
+
+type Msghdr C.struct_msghdr
+
+type Cmsghdr C.struct_cmsghdr
+
+type Inet4Pktinfo C.struct_in_pktinfo
+
+type Inet6Pktinfo C.struct_in6_pktinfo
+
+type IPv6MTUInfo C.struct_ip6_mtuinfo
+
+type ICMPv6Filter C.struct_icmp6_filter
+
+type Ucred C.struct_ucred
+
+type TCPInfo C.struct_tcp_info
+
+const (
+ SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in
+ SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+ SizeofSockaddrAny = C.sizeof_struct_sockaddr_any
+ SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un
+ SizeofSockaddrLinklayer = C.sizeof_struct_sockaddr_ll
+ SizeofSockaddrNetlink = C.sizeof_struct_sockaddr_nl
+ SizeofLinger = C.sizeof_struct_linger
+ SizeofIPMreq = C.sizeof_struct_ip_mreq
+ SizeofIPMreqn = C.sizeof_struct_ip_mreqn
+ SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+ SizeofMsghdr = C.sizeof_struct_msghdr
+ SizeofCmsghdr = C.sizeof_struct_cmsghdr
+ SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo
+ SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo
+ SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo
+ SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+ SizeofUcred = C.sizeof_struct_ucred
+ SizeofTCPInfo = C.sizeof_struct_tcp_info
+)
+
+// Netlink routing and interface messages
+
+const (
+ IFA_UNSPEC = C.IFA_UNSPEC
+ IFA_ADDRESS = C.IFA_ADDRESS
+ IFA_LOCAL = C.IFA_LOCAL
+ IFA_LABEL = C.IFA_LABEL
+ IFA_BROADCAST = C.IFA_BROADCAST
+ IFA_ANYCAST = C.IFA_ANYCAST
+ IFA_CACHEINFO = C.IFA_CACHEINFO
+ IFA_MULTICAST = C.IFA_MULTICAST
+ IFLA_UNSPEC = C.IFLA_UNSPEC
+ IFLA_ADDRESS = C.IFLA_ADDRESS
+ IFLA_BROADCAST = C.IFLA_BROADCAST
+ IFLA_IFNAME = C.IFLA_IFNAME
+ IFLA_MTU = C.IFLA_MTU
+ IFLA_LINK = C.IFLA_LINK
+ IFLA_QDISC = C.IFLA_QDISC
+ IFLA_STATS = C.IFLA_STATS
+ IFLA_COST = C.IFLA_COST
+ IFLA_PRIORITY = C.IFLA_PRIORITY
+ IFLA_MASTER = C.IFLA_MASTER
+ IFLA_WIRELESS = C.IFLA_WIRELESS
+ IFLA_PROTINFO = C.IFLA_PROTINFO
+ IFLA_TXQLEN = C.IFLA_TXQLEN
+ IFLA_MAP = C.IFLA_MAP
+ IFLA_WEIGHT = C.IFLA_WEIGHT
+ IFLA_OPERSTATE = C.IFLA_OPERSTATE
+ IFLA_LINKMODE = C.IFLA_LINKMODE
+ IFLA_LINKINFO = C.IFLA_LINKINFO
+ IFLA_NET_NS_PID = C.IFLA_NET_NS_PID
+ IFLA_IFALIAS = C.IFLA_IFALIAS
+ IFLA_MAX = C.IFLA_MAX
+ RT_SCOPE_UNIVERSE = C.RT_SCOPE_UNIVERSE
+ RT_SCOPE_SITE = C.RT_SCOPE_SITE
+ RT_SCOPE_LINK = C.RT_SCOPE_LINK
+ RT_SCOPE_HOST = C.RT_SCOPE_HOST
+ RT_SCOPE_NOWHERE = C.RT_SCOPE_NOWHERE
+ RT_TABLE_UNSPEC = C.RT_TABLE_UNSPEC
+ RT_TABLE_COMPAT = C.RT_TABLE_COMPAT
+ RT_TABLE_DEFAULT = C.RT_TABLE_DEFAULT
+ RT_TABLE_MAIN = C.RT_TABLE_MAIN
+ RT_TABLE_LOCAL = C.RT_TABLE_LOCAL
+ RT_TABLE_MAX = C.RT_TABLE_MAX
+ RTA_UNSPEC = C.RTA_UNSPEC
+ RTA_DST = C.RTA_DST
+ RTA_SRC = C.RTA_SRC
+ RTA_IIF = C.RTA_IIF
+ RTA_OIF = C.RTA_OIF
+ RTA_GATEWAY = C.RTA_GATEWAY
+ RTA_PRIORITY = C.RTA_PRIORITY
+ RTA_PREFSRC = C.RTA_PREFSRC
+ RTA_METRICS = C.RTA_METRICS
+ RTA_MULTIPATH = C.RTA_MULTIPATH
+ RTA_FLOW = C.RTA_FLOW
+ RTA_CACHEINFO = C.RTA_CACHEINFO
+ RTA_TABLE = C.RTA_TABLE
+ RTN_UNSPEC = C.RTN_UNSPEC
+ RTN_UNICAST = C.RTN_UNICAST
+ RTN_LOCAL = C.RTN_LOCAL
+ RTN_BROADCAST = C.RTN_BROADCAST
+ RTN_ANYCAST = C.RTN_ANYCAST
+ RTN_MULTICAST = C.RTN_MULTICAST
+ RTN_BLACKHOLE = C.RTN_BLACKHOLE
+ RTN_UNREACHABLE = C.RTN_UNREACHABLE
+ RTN_PROHIBIT = C.RTN_PROHIBIT
+ RTN_THROW = C.RTN_THROW
+ RTN_NAT = C.RTN_NAT
+ RTN_XRESOLVE = C.RTN_XRESOLVE
+ RTNLGRP_NONE = C.RTNLGRP_NONE
+ RTNLGRP_LINK = C.RTNLGRP_LINK
+ RTNLGRP_NOTIFY = C.RTNLGRP_NOTIFY
+ RTNLGRP_NEIGH = C.RTNLGRP_NEIGH
+ RTNLGRP_TC = C.RTNLGRP_TC
+ RTNLGRP_IPV4_IFADDR = C.RTNLGRP_IPV4_IFADDR
+ RTNLGRP_IPV4_MROUTE = C.RTNLGRP_IPV4_MROUTE
+ RTNLGRP_IPV4_ROUTE = C.RTNLGRP_IPV4_ROUTE
+ RTNLGRP_IPV4_RULE = C.RTNLGRP_IPV4_RULE
+ RTNLGRP_IPV6_IFADDR = C.RTNLGRP_IPV6_IFADDR
+ RTNLGRP_IPV6_MROUTE = C.RTNLGRP_IPV6_MROUTE
+ RTNLGRP_IPV6_ROUTE = C.RTNLGRP_IPV6_ROUTE
+ RTNLGRP_IPV6_IFINFO = C.RTNLGRP_IPV6_IFINFO
+ RTNLGRP_IPV6_PREFIX = C.RTNLGRP_IPV6_PREFIX
+ RTNLGRP_IPV6_RULE = C.RTNLGRP_IPV6_RULE
+ RTNLGRP_ND_USEROPT = C.RTNLGRP_ND_USEROPT
+ SizeofNlMsghdr = C.sizeof_struct_nlmsghdr
+ SizeofNlMsgerr = C.sizeof_struct_nlmsgerr
+ SizeofRtGenmsg = C.sizeof_struct_rtgenmsg
+ SizeofNlAttr = C.sizeof_struct_nlattr
+ SizeofRtAttr = C.sizeof_struct_rtattr
+ SizeofIfInfomsg = C.sizeof_struct_ifinfomsg
+ SizeofIfAddrmsg = C.sizeof_struct_ifaddrmsg
+ SizeofRtMsg = C.sizeof_struct_rtmsg
+ SizeofRtNexthop = C.sizeof_struct_rtnexthop
+)
+
+type NlMsghdr C.struct_nlmsghdr
+
+type NlMsgerr C.struct_nlmsgerr
+
+type RtGenmsg C.struct_rtgenmsg
+
+type NlAttr C.struct_nlattr
+
+type RtAttr C.struct_rtattr
+
+type IfInfomsg C.struct_ifinfomsg
+
+type IfAddrmsg C.struct_ifaddrmsg
+
+type RtMsg C.struct_rtmsg
+
+type RtNexthop C.struct_rtnexthop
+
+// Linux socket filter
+
+const (
+ SizeofSockFilter = C.sizeof_struct_sock_filter
+ SizeofSockFprog = C.sizeof_struct_sock_fprog
+)
+
+type SockFilter C.struct_sock_filter
+
+type SockFprog C.struct_sock_fprog
+
+// Inotify
+
+type InotifyEvent C.struct_inotify_event
+
+const SizeofInotifyEvent = C.sizeof_struct_inotify_event
+
+// Ptrace
+
+// Register structures
+type PtraceRegs C.PtraceRegs
+
+// Misc
+
+type FdSet C.fd_set
+
+type Sysinfo_t C.struct_sysinfo
+
+type Utsname C.struct_utsname
+
+type Ustat_t C.struct_ustat
+
+type EpollEvent C.struct_my_epoll_event
+
+const (
+ _AT_FDCWD = C.AT_FDCWD
+ _AT_REMOVEDIR = C.AT_REMOVEDIR
+ _AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW
+)
+
+// Terminal handling
+
+type Termios C.struct_termios
+
+const (
+ IUCLC = C.IUCLC
+ OLCUC = C.OLCUC
+ TCGETS = C.TCGETS
+ TCSETS = C.TCSETS
+ XCASE = C.XCASE
+)
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/test/fixedbugs/issue11656.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/fixedbugs/issue11656.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/test/fixedbugs/issue11656.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/fixedbugs/issue11656.go 2016-07-18 16:24:09.000000000 +0000
@@ -0,0 +1,75 @@
+// run
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// windows doesn't work, because Windows exception handling
+// delivers signals based on the current PC, and that current PC
+// doesn't go into the Go runtime.
+// +build !windows
+
+package main
+
+import (
+ "encoding/binary"
+ "runtime"
+ "runtime/debug"
+ "unsafe"
+)
+
+func main() {
+ debug.SetPanicOnFault(true)
+ defer func() {
+ if err := recover(); err == nil {
+ panic("not panicking")
+ }
+ pc, _, _, _ := runtime.Caller(10)
+ f := runtime.FuncForPC(pc)
+ if f == nil || f.Name() != "main.f" {
+ if f == nil {
+ println("no func for ", unsafe.Pointer(pc))
+ } else {
+ println("found func:", f.Name())
+ }
+ panic("cannot find main.f on stack")
+ }
+ }()
+ f(20)
+}
+
+func f(n int) {
+ if n > 0 {
+ f(n - 1)
+ }
+ var f struct {
+ x uintptr
+ }
+
+ // We want to force an illegal instruction, to get a crash
+ // at a PC value != 0.
+ // Not all systems make the data section non-executable.
+ ill := make([]byte, 64)
+ switch runtime.GOARCH {
+ case "386", "amd64":
+ binary.LittleEndian.PutUint16(ill, 0x0b0f) // ud2
+ case "arm":
+ binary.LittleEndian.PutUint32(ill, 0xe7f000f0) // no name, but permanently undefined
+ case "arm64":
+ binary.LittleEndian.PutUint32(ill, 0xd4207d00) // brk #1000
+ case "ppc64":
+ binary.BigEndian.PutUint32(ill, 0x7fe00008) // trap
+ case "ppc64le":
+ binary.LittleEndian.PutUint32(ill, 0x7fe00008) // trap
+ case "mips64":
+ binary.BigEndian.PutUint32(ill, 0x00000034) // trap
+ case "mips64le":
+ binary.LittleEndian.PutUint32(ill, 0x00000034) // trap
+ default:
+ // Just leave it as 0 and hope for the best.
+ }
+
+ f.x = uintptr(unsafe.Pointer(&ill[0]))
+ fn := *(*func())(unsafe.Pointer(&f))
+ fn()
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/test/init1.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/init1.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/test/init1.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/init1.go 2016-07-18 16:24:09.000000000 +0000
@@ -0,0 +1,53 @@
+// run
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that goroutines and garbage collection run during init.
+
+package main
+
+import "runtime"
+
+var x []byte
+
+func init() {
+ c := make(chan int)
+ go send(c)
+ <-c
+
+ const N = 1000
+ const MB = 1 << 20
+ b := make([]byte, MB)
+ for i := range b {
+ b[i] = byte(i%10 + '0')
+ }
+ s := string(b)
+
+ memstats := new(runtime.MemStats)
+ runtime.ReadMemStats(memstats)
+ sys, numGC := memstats.Sys, memstats.NumGC
+
+ // Generate 1,000 MB of garbage, only retaining 1 MB total.
+ for i := 0; i < N; i++ {
+ x = []byte(s)
+ }
+
+ // Verify that the garbage collector ran by seeing if we
+ // allocated fewer than N*MB bytes from the system.
+ runtime.ReadMemStats(memstats)
+ sys1, numGC1 := memstats.Sys, memstats.NumGC
+ if sys1-sys >= N*MB || numGC1 == numGC {
+ println("allocated 1000 chunks of", MB, "and used ", sys1-sys, "memory")
+ println("numGC went", numGC, "to", numGC)
+ panic("init1")
+ }
+}
+
+func send(c chan int) {
+ c <- 1
+}
+
+func main() {
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/test/nilptr3.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/nilptr3.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/test/nilptr3.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/nilptr3.go 2016-07-18 16:24:09.000000000 +0000
@@ -0,0 +1,195 @@
+// errorcheck -0 -d=nil
+// Fails on ppc64x because of incomplete optimization.
+// See issues 9058.
+// Same reason for mips64x.
+// +build !ppc64,!ppc64le,!mips64,!mips64le
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that nil checks are removed.
+// Optimization is enabled.
+
+package p
+
+type Struct struct {
+ X int
+ Y float64
+}
+
+type BigStruct struct {
+ X int
+ Y float64
+ A [1 << 20]int
+ Z string
+}
+
+type Empty struct {
+}
+
+type Empty1 struct {
+ Empty
+}
+
+var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1 << 26]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+)
+
+func f1() {
+ _ = *intp // ERROR "generated nil check"
+
+ // This one should be removed but the block copy needs
+ // to be turned into its own pseudo-op in order to see
+ // the indirect.
+ _ = *arrayp // ERROR "generated nil check"
+
+ // 0-byte indirect doesn't suffice.
+ // we don't registerize globals, so there are no removed repeated nil checks.
+ _ = *array0p // ERROR "generated nil check"
+ _ = *array0p // ERROR "generated nil check"
+
+ _ = *intp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "generated nil check"
+ _ = *structp // ERROR "generated nil check"
+ _ = *emptyp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "generated nil check"
+}
+
+func f2() {
+ var (
+ intp *int
+ arrayp *[10]int
+ array0p *[0]int
+ bigarrayp *[1 << 20]int
+ structp *Struct
+ bigstructp *BigStruct
+ emptyp *Empty
+ empty1p *Empty1
+ )
+
+ _ = *intp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "generated nil check"
+ _ = *array0p // ERROR "generated nil check"
+ _ = *array0p // ERROR "removed repeated nil check"
+ _ = *intp // ERROR "removed repeated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+ _ = *structp // ERROR "generated nil check"
+ _ = *emptyp // ERROR "generated nil check"
+ _ = *arrayp // ERROR "removed repeated nil check"
+ _ = *bigarrayp // ERROR "generated nil check" ARM removed nil check before indirect!!
+ _ = *bigstructp // ERROR "generated nil check"
+ _ = *empty1p // ERROR "generated nil check"
+}
+
+func fx10k() *[10000]int
+
+var b bool
+
+func f3(x *[10000]int) {
+ // Using a huge type and huge offsets so the compiler
+ // does not expect the memory hardware to fault.
+ _ = x[9999] // ERROR "generated nil check"
+
+ for {
+ if x[9999] != 0 { // ERROR "generated nil check"
+ break
+ }
+ }
+
+ x = fx10k()
+ _ = x[9999] // ERROR "generated nil check"
+ if b {
+ _ = x[9999] // ERROR "removed repeated nil check"
+ } else {
+ _ = x[9999] // ERROR "removed repeated nil check"
+ }
+ _ = x[9999] // ERROR "generated nil check"
+
+ x = fx10k()
+ if b {
+ _ = x[9999] // ERROR "generated nil check"
+ } else {
+ _ = x[9999] // ERROR "generated nil check"
+ }
+ _ = x[9999] // ERROR "generated nil check"
+
+ fx10k()
+ // This one is a bit redundant, if we figured out that
+ // x wasn't going to change across the function call.
+ // But it's a little complex to do and in practice doesn't
+ // matter enough.
+ _ = x[9999] // ERROR "generated nil check"
+}
+
+func f3a() {
+ x := fx10k()
+ y := fx10k()
+ z := fx10k()
+ _ = &x[9] // ERROR "generated nil check"
+ y = z
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "generated nil check"
+}
+
+func f3b() {
+ x := fx10k()
+ y := fx10k()
+ _ = &x[9] // ERROR "generated nil check"
+ y = x
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "removed repeated nil check"
+}
+
+func fx10() *[10]int
+
+func f4(x *[10]int) {
+ // Most of these have no checks because a real memory reference follows,
+ // and the offset is small enough that if x is nil, the address will still be
+ // in the first unmapped page of memory.
+
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ for {
+ if x[9] != 0 { // ERROR "removed nil check before indirect"
+ break
+ }
+ }
+
+ x = fx10()
+ _ = x[9] // ERROR "removed nil check before indirect"
+ if b {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ } else {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ }
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ x = fx10()
+ if b {
+ _ = x[9] // ERROR "removed nil check before indirect"
+ } else {
+ _ = &x[9] // ERROR "generated nil check"
+ }
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ fx10()
+ _ = x[9] // ERROR "removed nil check before indirect"
+
+ x = fx10()
+ y := fx10()
+ _ = &x[9] // ERROR "generated nil check"
+ y = x
+ _ = &x[9] // ERROR "removed repeated nil check"
+ x = y
+ _ = &x[9] // ERROR "removed repeated nil check"
+}
diff -pruN 1.6.3-1/.pc/0001-s390x-port.patch/test/nosplit.go 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/nosplit.go
--- 1.6.3-1/.pc/0001-s390x-port.patch/test/nosplit.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0001-s390x-port.patch/test/nosplit.go 2016-07-18 16:24:09.000000000 +0000
@@ -0,0 +1,377 @@
+// +build !nacl
+// run
+
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+var tests = `
+# These are test cases for the linker analysis that detects chains of
+# nosplit functions that would cause a stack overflow.
+#
+# Lines beginning with # are comments.
+#
+# Each test case describes a sequence of functions, one per line.
+# Each function definition is the function name, then the frame size,
+# then optionally the keyword 'nosplit', then the body of the function.
+# The body is assembly code, with some shorthands.
+# The shorthand 'call x' stands for CALL x(SB).
+# The shorthand 'callind' stands for 'CALL R0', where R0 is a register.
+# Each test case must define a function named main, and it must be first.
+# That is, a line beginning "main " indicates the start of a new test case.
+# Within a stanza, ; can be used instead of \n to separate lines.
+#
+# After the function definition, the test case ends with an optional
+# REJECT line, specifying the architectures on which the case should
+# be rejected. "REJECT" without any architectures means reject on all architectures.
+# The linker should accept the test case on systems not explicitly rejected.
+#
+# 64-bit systems do not attempt to execute test cases with frame sizes
+# that are only 32-bit aligned.
+
+# Ordinary function should work
+main 0
+
+# Large frame marked nosplit is always wrong.
+main 10000 nosplit
+REJECT
+
+# Calling a large frame is okay.
+main 0 call big
+big 10000
+
+# But not if the frame is nosplit.
+main 0 call big
+big 10000 nosplit
+REJECT
+
+# Recursion is okay.
+main 0 call main
+
+# Recursive nosplit runs out of space.
+main 0 nosplit call main
+REJECT
+
+# Chains of ordinary functions okay.
+main 0 call f1
+f1 80 call f2
+f2 80
+
+# Chains of nosplit must fit in the stack limit, 128 bytes.
+main 0 call f1
+f1 80 nosplit call f2
+f2 80 nosplit
+REJECT
+
+# Larger chains.
+main 0 call f1
+f1 16 call f2
+f2 16 call f3
+f3 16 call f4
+f4 16 call f5
+f5 16 call f6
+f6 16 call f7
+f7 16 call f8
+f8 16 call end
+end 1000
+
+main 0 call f1
+f1 16 nosplit call f2
+f2 16 nosplit call f3
+f3 16 nosplit call f4
+f4 16 nosplit call f5
+f5 16 nosplit call f6
+f6 16 nosplit call f7
+f7 16 nosplit call f8
+f8 16 nosplit call end
+end 1000
+REJECT
+
+# Test cases near the 128-byte limit.
+
+# Ordinary stack split frame is always okay.
+main 112
+main 116
+main 120
+main 124
+main 128
+main 132
+main 136
+
+# A nosplit leaf can use the whole 128-CallSize bytes available on entry.
+# (CallSize is 32 on ppc64)
+main 96 nosplit
+main 100 nosplit; REJECT ppc64 ppc64le
+main 104 nosplit; REJECT ppc64 ppc64le
+main 108 nosplit; REJECT ppc64 ppc64le
+main 112 nosplit; REJECT ppc64 ppc64le
+main 116 nosplit; REJECT ppc64 ppc64le
+main 120 nosplit; REJECT ppc64 ppc64le
+main 124 nosplit; REJECT ppc64 ppc64le
+main 128 nosplit; REJECT
+main 132 nosplit; REJECT
+main 136 nosplit; REJECT
+
+# Calling a nosplit function from a nosplit function requires
+# having room for the saved caller PC and the called frame.
+# Because ARM doesn't save LR in the leaf, it gets an extra 4 bytes.
+# Because arm64 doesn't save LR in the leaf, it gets an extra 8 bytes.
+# ppc64 doesn't save LR in the leaf, but CallSize is 32, so it gets 24 fewer bytes than amd64.
+main 96 nosplit call f; f 0 nosplit
+main 100 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
+main 104 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
+main 108 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
+main 112 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
+main 116 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le
+main 120 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64
+main 124 nosplit call f; f 0 nosplit; REJECT ppc64 ppc64le amd64 386
+main 128 nosplit call f; f 0 nosplit; REJECT
+main 132 nosplit call f; f 0 nosplit; REJECT
+main 136 nosplit call f; f 0 nosplit; REJECT
+
+# Calling a splitting function from a nosplit function requires
+# having room for the saved caller PC of the call but also the
+# saved caller PC for the call to morestack.
+# RISC architectures differ in the same way as before.
+main 96 nosplit call f; f 0 call f
+main 100 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
+main 104 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
+main 108 nosplit call f; f 0 call f; REJECT ppc64 ppc64le
+main 112 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
+main 116 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64
+main 120 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
+main 124 nosplit call f; f 0 call f; REJECT ppc64 ppc64le amd64 386
+main 128 nosplit call f; f 0 call f; REJECT
+main 132 nosplit call f; f 0 call f; REJECT
+main 136 nosplit call f; f 0 call f; REJECT
+
+# Indirect calls are assumed to be splitting functions.
+main 96 nosplit callind
+main 100 nosplit callind; REJECT ppc64 ppc64le
+main 104 nosplit callind; REJECT ppc64 ppc64le
+main 108 nosplit callind; REJECT ppc64 ppc64le
+main 112 nosplit callind; REJECT ppc64 ppc64le amd64
+main 116 nosplit callind; REJECT ppc64 ppc64le amd64
+main 120 nosplit callind; REJECT ppc64 ppc64le amd64 386
+main 124 nosplit callind; REJECT ppc64 ppc64le amd64 386
+main 128 nosplit callind; REJECT
+main 132 nosplit callind; REJECT
+main 136 nosplit callind; REJECT
+
+# Issue 7623
+main 0 call f; f 112
+main 0 call f; f 116
+main 0 call f; f 120
+main 0 call f; f 124
+main 0 call f; f 128
+main 0 call f; f 132
+main 0 call f; f 136
+`
+
+var (
+ commentRE = regexp.MustCompile(`(?m)^#.*`)
+ rejectRE = regexp.MustCompile(`(?s)\A(.+?)((\n|; *)REJECT(.*))?\z`)
+ lineRE = regexp.MustCompile(`(\w+) (\d+)( nosplit)?(.*)`)
+ callRE = regexp.MustCompile(`\bcall (\w+)\b`)
+ callindRE = regexp.MustCompile(`\bcallind\b`)
+)
+
+func main() {
+ goarch := os.Getenv("GOARCH")
+ if goarch == "" {
+ goarch = runtime.GOARCH
+ }
+
+ version, err := exec.Command("go", "tool", "compile", "-V").Output()
+ if err != nil {
+ bug()
+ fmt.Printf("running go tool compile -V: %v\n", err)
+ return
+ }
+ if strings.Contains(string(version), "framepointer") {
+ // Skip this test if GOEXPERIMENT=framepointer
+ return
+ }
+
+ dir, err := ioutil.TempDir("", "go-test-nosplit")
+ if err != nil {
+ bug()
+ fmt.Printf("creating temp dir: %v\n", err)
+ return
+ }
+ defer os.RemoveAll(dir)
+
+ tests = strings.Replace(tests, "\t", " ", -1)
+ tests = commentRE.ReplaceAllString(tests, "")
+
+ nok := 0
+ nfail := 0
+TestCases:
+ for len(tests) > 0 {
+ var stanza string
+ i := strings.Index(tests, "\nmain ")
+ if i < 0 {
+ stanza, tests = tests, ""
+ } else {
+ stanza, tests = tests[:i], tests[i+1:]
+ }
+
+ m := rejectRE.FindStringSubmatch(stanza)
+ if m == nil {
+ bug()
+ fmt.Printf("invalid stanza:\n\t%s\n", indent(stanza))
+ continue
+ }
+ lines := strings.TrimSpace(m[1])
+ reject := false
+ if m[2] != "" {
+ if strings.TrimSpace(m[4]) == "" {
+ reject = true
+ } else {
+ for _, rej := range strings.Fields(m[4]) {
+ if rej == goarch {
+ reject = true
+ }
+ }
+ }
+ }
+ if lines == "" && !reject {
+ continue
+ }
+
+ var gobuf bytes.Buffer
+ fmt.Fprintf(&gobuf, "package main\n")
+
+ var buf bytes.Buffer
+ ptrSize := 4
+ switch goarch {
+ case "mips64", "mips64le":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define CALL JAL\n#define REGISTER (R0)\n")
+ case "ppc64", "ppc64le":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (CTR)\n")
+ case "arm":
+ fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
+ case "arm64":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define CALL BL\n#define REGISTER (R0)\n")
+ case "amd64":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define REGISTER AX\n")
+ default:
+ fmt.Fprintf(&buf, "#define REGISTER AX\n")
+ }
+
+ for _, line := range strings.Split(lines, "\n") {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+ for i, subline := range strings.Split(line, ";") {
+ subline = strings.TrimSpace(subline)
+ if subline == "" {
+ continue
+ }
+ m := lineRE.FindStringSubmatch(subline)
+ if m == nil {
+ bug()
+ fmt.Printf("invalid function line: %s\n", subline)
+ continue TestCases
+ }
+ name := m[1]
+ size, _ := strconv.Atoi(m[2])
+
+ // The limit was originally 128 but is now 592.
+ // Instead of rewriting the test cases above, adjust
+ // the first stack frame to use up the extra bytes.
+ if i == 0 {
+ size += 592 - 128
+ // Noopt builds have a larger stackguard.
+ // See ../cmd/dist/buildruntime.go:stackGuardMultiplier
+ for _, s := range strings.Split(os.Getenv("GO_GCFLAGS"), " ") {
+ if s == "-N" {
+ size += 720
+ }
+ }
+ }
+
+ if size%ptrSize == 4 || goarch == "arm64" && size != 0 && (size+8)%16 != 0 {
+ continue TestCases
+ }
+ nosplit := m[3]
+ body := m[4]
+
+ if nosplit != "" {
+ nosplit = ",7"
+ } else {
+ nosplit = ",0"
+ }
+ body = callRE.ReplaceAllString(body, "CALL ·$1(SB);")
+ body = callindRE.ReplaceAllString(body, "CALL REGISTER;")
+
+ fmt.Fprintf(&gobuf, "func %s()\n", name)
+ fmt.Fprintf(&buf, "TEXT ·%s(SB)%s,$%d-0\n\t%s\n\tRET\n\n", name, nosplit, size, body)
+ }
+ }
+
+ if err := ioutil.WriteFile(filepath.Join(dir, "asm.s"), buf.Bytes(), 0666); err != nil {
+ log.Fatal(err)
+ }
+ if err := ioutil.WriteFile(filepath.Join(dir, "main.go"), gobuf.Bytes(), 0666); err != nil {
+ log.Fatal(err)
+ }
+
+ cmd := exec.Command("go", "build")
+ cmd.Dir = dir
+ output, err := cmd.CombinedOutput()
+ if err == nil {
+ nok++
+ if reject {
+ bug()
+ fmt.Printf("accepted incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
+ }
+ } else {
+ nfail++
+ if !reject {
+ bug()
+ fmt.Printf("rejected incorrectly:\n\t%s\n", indent(strings.TrimSpace(stanza)))
+ fmt.Printf("\n\tlinker output:\n\t%s\n", indent(string(output)))
+ }
+ }
+ }
+
+ if !bugged && (nok == 0 || nfail == 0) {
+ bug()
+ fmt.Printf("not enough test cases run\n")
+ }
+}
+
+func indent(s string) string {
+ return strings.Replace(s, "\n", "\n\t", -1)
+}
+
+var bugged = false
+
+func bug() {
+ if !bugged {
+ bugged = true
+ fmt.Printf("BUG\n")
+ }
+}
diff -pruN 1.6.3-1/.pc/0002-no-pie-when-race.patch/src/cmd/link/internal/ld/lib.go 1.6.3-1ubuntu1/.pc/0002-no-pie-when-race.patch/src/cmd/link/internal/ld/lib.go
--- 1.6.3-1/.pc/0002-no-pie-when-race.patch/src/cmd/link/internal/ld/lib.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0002-no-pie-when-race.patch/src/cmd/link/internal/ld/lib.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,2181 @@
+// Inferno utils/8l/asm.c
+// http://code.google.com/p/inferno-os/source/browse/utils/8l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ld
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/internal/obj"
+ "crypto/sha1"
+ "debug/elf"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+// Data layout and relocation.
+
+// Derived from Inferno utils/6l/l.h
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/l.h
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+type Arch struct {
+ Thechar int
+ Ptrsize int
+ Intsize int
+ Regsize int
+ Funcalign int
+ Maxalign int
+ Minlc int
+ Dwarfregsp int
+ Dwarfreglr int
+ Linuxdynld string
+ Freebsddynld string
+ Netbsddynld string
+ Openbsddynld string
+ Dragonflydynld string
+ Solarisdynld string
+ Adddynrel func(*LSym, *Reloc)
+ Archinit func()
+ Archreloc func(*Reloc, *LSym, *int64) int
+ Archrelocvariant func(*Reloc, *LSym, int64) int64
+ Asmb func()
+ Elfreloc1 func(*Reloc, int64) int
+ Elfsetupplt func()
+ Gentext func()
+ Machoreloc1 func(*Reloc, int64) int
+ PEreloc1 func(*Reloc, int64) bool
+ Lput func(uint32)
+ Wput func(uint16)
+ Vput func(uint64)
+}
+
+type Rpath struct {
+ set bool
+ val string
+}
+
+func (r *Rpath) Set(val string) error {
+ r.set = true
+ r.val = val
+ return nil
+}
+
+func (r *Rpath) String() string {
+ return r.val
+}
+
+var (
+ Thearch Arch
+ datap *LSym
+ Debug [128]int
+ Lcsize int32
+ rpath Rpath
+ Spsize int32
+ Symsize int32
+)
+
+// Terrible but standard terminology.
+// A segment describes a block of file to load into memory.
+// A section further describes the pieces of that block for
+// use in debuggers and such.
+
+const (
+ MINFUNC = 16 // minimum size for a function
+)
+
+type Segment struct {
+ Rwx uint8 // permission as usual unix bits (5 = r-x etc)
+ Vaddr uint64 // virtual address
+ Length uint64 // length in memory
+ Fileoff uint64 // file offset
+ Filelen uint64 // length on disk
+ Sect *Section
+}
+
+type Section struct {
+ Rwx uint8
+ Extnum int16
+ Align int32
+ Name string
+ Vaddr uint64
+ Length uint64
+ Next *Section
+ Seg *Segment
+ Elfsect *ElfShdr
+ Reloff uint64
+ Rellen uint64
+}
+
+// DynlinkingGo returns whether we are producing Go code that can live
+// in separate shared libraries linked together at runtime.
+func DynlinkingGo() bool {
+ return Buildmode == BuildmodeShared || Linkshared
+}
+
+// UseRelro returns whether to make use of "read only relocations" aka
+// relro.
+func UseRelro() bool {
+ switch Buildmode {
+ case BuildmodeCShared, BuildmodeShared, BuildmodePIE:
+ return Iself
+ default:
+ return false
+ }
+}
+
+var (
+ Thestring string
+ Thelinkarch *LinkArch
+ outfile string
+ dynexp []*LSym
+ dynlib []string
+ ldflag []string
+ havedynamic int
+ Funcalign int
+ iscgo bool
+ elfglobalsymndx int
+ flag_installsuffix string
+ flag_race int
+ flag_msan int
+ Buildmode BuildMode
+ Linkshared bool
+ tracksym string
+ interpreter string
+ tmpdir string
+ extld string
+ extldflags string
+ extar string
+ libgccfile string
+ debug_s int // backup old value of debug['s']
+ Ctxt *Link
+ HEADR int32
+ HEADTYPE int32
+ INITRND int32
+ INITTEXT int64
+ INITDAT int64
+ INITENTRY string /* entry point */
+ nerrors int
+ Linkmode int
+ liveness int64
+)
+
+// for dynexport field of LSym
+const (
+ CgoExportDynamic = 1 << 0
+ CgoExportStatic = 1 << 1
+)
+
+var (
+ Segtext Segment
+ Segrodata Segment
+ Segdata Segment
+ Segdwarf Segment
+)
+
+/* set by call to mywhatsys() */
+
+/* whence for ldpkg */
+const (
+ FileObj = 0 + iota
+ ArchiveObj
+ Pkgdef
+)
+
+var (
+ headstring string
+ // buffered output
+ Bso obj.Biobuf
+)
+
+var coutbuf struct {
+ *bufio.Writer
+ f *os.File
+}
+
+const (
+ symname = "__.GOSYMDEF"
+ pkgname = "__.PKGDEF"
+)
+
+var (
+ // Set if we see an object compiled by the host compiler that is not
+ // from a package that is known to support internal linking mode.
+ externalobj = false
+ goroot string
+ goarch string
+ goos string
+ theline string
+)
+
+func Lflag(arg string) {
+ Ctxt.Libdir = append(Ctxt.Libdir, arg)
+}
+
+// A BuildMode indicates the sort of object we are building:
+// "exe": build a main package and everything it imports into an executable.
+// "c-shared": build a main package, plus all packages that it imports, into a
+// single C shared library. The only callable symbols will be those functions
+// marked as exported.
+// "shared": combine all packages passed on the command line, and their
+// dependencies, into a single shared library that will be used when
+// building with the -linkshared option.
+type BuildMode uint8
+
+const (
+ BuildmodeUnset BuildMode = iota
+ BuildmodeExe
+ BuildmodePIE
+ BuildmodeCArchive
+ BuildmodeCShared
+ BuildmodeShared
+)
+
+func (mode *BuildMode) Set(s string) error {
+ goos := obj.Getgoos()
+ goarch := obj.Getgoarch()
+ badmode := func() error {
+ return fmt.Errorf("buildmode %s not supported on %s/%s", s, goos, goarch)
+ }
+ switch s {
+ default:
+ return fmt.Errorf("invalid buildmode: %q", s)
+ case "exe":
+ *mode = BuildmodeExe
+ case "pie":
+ switch goos {
+ case "android", "linux":
+ default:
+ return badmode()
+ }
+ *mode = BuildmodePIE
+ case "c-archive":
+ switch goos {
+ case "darwin", "linux":
+ default:
+ return badmode()
+ }
+ *mode = BuildmodeCArchive
+ case "c-shared":
+ switch goarch {
+ case "386", "amd64", "arm", "arm64":
+ default:
+ return badmode()
+ }
+ *mode = BuildmodeCShared
+ case "shared":
+ switch goos {
+ case "linux":
+ switch goarch {
+ case "386", "amd64", "arm", "arm64", "ppc64le", "s390x":
+ default:
+ return badmode()
+ }
+ default:
+ return badmode()
+ }
+ *mode = BuildmodeShared
+ }
+ return nil
+}
+
+func (mode *BuildMode) String() string {
+ switch *mode {
+ case BuildmodeUnset:
+ return "" // avoid showing a default in usage message
+ case BuildmodeExe:
+ return "exe"
+ case BuildmodePIE:
+ return "pie"
+ case BuildmodeCArchive:
+ return "c-archive"
+ case BuildmodeCShared:
+ return "c-shared"
+ case BuildmodeShared:
+ return "shared"
+ }
+ return fmt.Sprintf("BuildMode(%d)", uint8(*mode))
+}
+
+/*
+ * Unix doesn't like it when we write to a running (or, sometimes,
+ * recently run) binary, so remove the output file before writing it.
+ * On Windows 7, remove() can force a subsequent create() to fail.
+ * S_ISREG() does not exist on Plan 9.
+ */
+func mayberemoveoutfile() {
+ if fi, err := os.Lstat(outfile); err == nil && !fi.Mode().IsRegular() {
+ return
+ }
+ os.Remove(outfile)
+}
+
+func libinit() {
+ Funcalign = Thearch.Funcalign
+ mywhatsys() // get goroot, goarch, goos
+
+ // add goroot to the end of the libdir list.
+ suffix := ""
+
+ suffixsep := ""
+ if flag_installsuffix != "" {
+ suffixsep = "_"
+ suffix = flag_installsuffix
+ } else if flag_race != 0 {
+ suffixsep = "_"
+ suffix = "race"
+ } else if flag_msan != 0 {
+ suffixsep = "_"
+ suffix = "msan"
+ }
+
+ Lflag(fmt.Sprintf("%s/pkg/%s_%s%s%s", goroot, goos, goarch, suffixsep, suffix))
+
+ mayberemoveoutfile()
+ f, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
+ if err != nil {
+ Exitf("cannot create %s: %v", outfile, err)
+ }
+
+ coutbuf.Writer = bufio.NewWriter(f)
+ coutbuf.f = f
+
+ if INITENTRY == "" {
+ switch Buildmode {
+ case BuildmodeCShared, BuildmodeCArchive:
+ INITENTRY = fmt.Sprintf("_rt0_%s_%s_lib", goarch, goos)
+ case BuildmodeExe, BuildmodePIE:
+ INITENTRY = fmt.Sprintf("_rt0_%s_%s", goarch, goos)
+ case BuildmodeShared:
+ // No INITENTRY for -buildmode=shared
+ default:
+ Diag("unknown INITENTRY for buildmode %v", Buildmode)
+ }
+ }
+
+ if !DynlinkingGo() {
+ Linklookup(Ctxt, INITENTRY, 0).Type = obj.SXREF
+ }
+}
+
+func Exitf(format string, a ...interface{}) {
+ fmt.Fprintf(os.Stderr, os.Args[0]+": "+format+"\n", a...)
+ if coutbuf.f != nil {
+ coutbuf.f.Close()
+ mayberemoveoutfile()
+ }
+ Exit(2)
+}
+
+func errorexit() {
+ if coutbuf.f != nil {
+ if nerrors != 0 {
+ Cflush()
+ }
+ // For rmtemp run at atexit time on Windows.
+ if err := coutbuf.f.Close(); err != nil {
+ Exitf("close: %v", err)
+ }
+ }
+
+ if nerrors != 0 {
+ if coutbuf.f != nil {
+ mayberemoveoutfile()
+ }
+ Exit(2)
+ }
+
+ Exit(0)
+}
+
+func loadinternal(name string) {
+ found := 0
+ for i := 0; i < len(Ctxt.Libdir); i++ {
+ if Linkshared {
+ shlibname := fmt.Sprintf("%s/%s.shlibname", Ctxt.Libdir[i], name)
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, shlibname)
+ }
+ if _, err := os.Stat(shlibname); err == nil {
+ addlibpath(Ctxt, "internal", "internal", "", name, shlibname)
+ found = 1
+ break
+ }
+ }
+ pname := fmt.Sprintf("%s/%s.a", Ctxt.Libdir[i], name)
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "searching for %s.a in %s\n", name, pname)
+ }
+ if _, err := os.Stat(pname); err == nil {
+ addlibpath(Ctxt, "internal", "internal", pname, name, "")
+ found = 1
+ break
+ }
+ }
+
+ if found == 0 {
+ fmt.Fprintf(&Bso, "warning: unable to find %s.a\n", name)
+ }
+}
+
+func loadlib() {
+ switch Buildmode {
+ case BuildmodeCShared:
+ s := Linklookup(Ctxt, "runtime.islibrary", 0)
+ s.Dupok = 1
+ Adduint8(Ctxt, s, 1)
+ case BuildmodeCArchive:
+ s := Linklookup(Ctxt, "runtime.isarchive", 0)
+ s.Dupok = 1
+ Adduint8(Ctxt, s, 1)
+ }
+
+ loadinternal("runtime")
+ if Thearch.Thechar == '5' {
+ loadinternal("math")
+ }
+ if flag_race != 0 {
+ loadinternal("runtime/race")
+ }
+ if flag_msan != 0 {
+ loadinternal("runtime/msan")
+ }
+
+ var i int
+ for i = 0; i < len(Ctxt.Library); i++ {
+ iscgo = iscgo || Ctxt.Library[i].Pkg == "runtime/cgo"
+ if Ctxt.Library[i].Shlib == "" {
+ if Debug['v'] > 1 {
+ fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].File, Ctxt.Library[i].Objref)
+ }
+ objfile(Ctxt.Library[i])
+ }
+ }
+
+ for i = 0; i < len(Ctxt.Library); i++ {
+ if Ctxt.Library[i].Shlib != "" {
+ if Debug['v'] > 1 {
+ fmt.Fprintf(&Bso, "%5.2f autolib: %s (from %s)\n", obj.Cputime(), Ctxt.Library[i].Shlib, Ctxt.Library[i].Objref)
+ }
+ ldshlibsyms(Ctxt.Library[i].Shlib)
+ }
+ }
+
+ if Linkmode == LinkAuto {
+ if iscgo && externalobj {
+ Linkmode = LinkExternal
+ } else {
+ Linkmode = LinkInternal
+ }
+
+ // Force external linking for android.
+ if goos == "android" {
+ Linkmode = LinkExternal
+ }
+
+ // Force external linking for PIE executables, as
+ // internal linking does not support TLS_IE.
+ if Buildmode == BuildmodePIE {
+ Linkmode = LinkExternal
+ }
+
+ // cgo on Darwin must use external linking
+ // we can always use external linking, but then there will be circular
+ // dependency problems when compiling natively (external linking requires
+ // runtime/cgo, runtime/cgo requires cmd/cgo, but cmd/cgo needs to be
+ // compiled using external linking.)
+ if (Thearch.Thechar == '5' || Thearch.Thechar == '7') && HEADTYPE == obj.Hdarwin && iscgo {
+ Linkmode = LinkExternal
+ }
+
+ // Force external linking for msan.
+ if flag_msan != 0 {
+ Linkmode = LinkExternal
+ }
+ }
+
+ // cmd/7l doesn't support cgo internal linking
+ // This is https://golang.org/issue/10373.
+ if iscgo && goarch == "arm64" {
+ Linkmode = LinkExternal
+ }
+
+ if Linkmode == LinkExternal && !iscgo {
+ // This indicates a user requested -linkmode=external.
+ // The startup code uses an import of runtime/cgo to decide
+ // whether to initialize the TLS. So give it one. This could
+ // be handled differently but it's an unusual case.
+ loadinternal("runtime/cgo")
+
+ if i < len(Ctxt.Library) {
+ if Ctxt.Library[i].Shlib != "" {
+ ldshlibsyms(Ctxt.Library[i].Shlib)
+ } else {
+ if DynlinkingGo() {
+ Exitf("cannot implicitly include runtime/cgo in a shared library")
+ }
+ objfile(Ctxt.Library[i])
+ }
+ }
+ }
+
+ if Linkmode == LinkInternal {
+ // Drop all the cgo_import_static declarations.
+ // Turns out we won't be needing them.
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if s.Type == obj.SHOSTOBJ {
+ // If a symbol was marked both
+ // cgo_import_static and cgo_import_dynamic,
+ // then we want to make it cgo_import_dynamic
+ // now.
+ if s.Extname != "" && s.Dynimplib != "" && s.Cgoexport == 0 {
+ s.Type = obj.SDYNIMPORT
+ } else {
+ s.Type = 0
+ }
+ }
+ }
+ }
+
+ tlsg := Linklookup(Ctxt, "runtime.tlsg", 0)
+
+ // runtime.tlsg is used for external linking on platforms that do not define
+ // a variable to hold g in assembly (currently only intel).
+ if tlsg.Type == 0 {
+ tlsg.Type = obj.STLSBSS
+ tlsg.Size = int64(Thearch.Ptrsize)
+ } else if tlsg.Type != obj.SDYNIMPORT {
+ Diag("internal error: runtime declared tlsg variable %d", tlsg.Type)
+ }
+ tlsg.Reachable = true
+ Ctxt.Tlsg = tlsg
+
+ moduledata := Linklookup(Ctxt, "runtime.firstmoduledata", 0)
+ if moduledata.Type != 0 && moduledata.Type != obj.SDYNIMPORT {
+ // If the module (toolchain-speak for "executable or shared
+ // library") we are linking contains the runtime package, it
+ // will define the runtime.firstmoduledata symbol and we
+ // truncate it back to 0 bytes so we can define its entire
+ // contents in symtab.go:symtab().
+ moduledata.Size = 0
+
+ // In addition, on ARM, the runtime depends on the linker
+ // recording the value of GOARM.
+ if Thearch.Thechar == '5' {
+ s := Linklookup(Ctxt, "runtime.goarm", 0)
+
+ s.Type = obj.SRODATA
+ s.Size = 0
+ Adduint8(Ctxt, s, uint8(Ctxt.Goarm))
+ }
+ } else {
+ // If OTOH the module does not contain the runtime package,
+ // create a local symbol for the moduledata.
+ moduledata = Linklookup(Ctxt, "local.moduledata", 0)
+ moduledata.Local = true
+ }
+	// In all cases, we mark the moduledata as noptrdata to hide it from
+ // the GC.
+ moduledata.Type = obj.SNOPTRDATA
+ moduledata.Reachable = true
+ Ctxt.Moduledata = moduledata
+
+ // Now that we know the link mode, trim the dynexp list.
+ x := CgoExportDynamic
+
+ if Linkmode == LinkExternal {
+ x = CgoExportStatic
+ }
+ w := 0
+ for i := 0; i < len(dynexp); i++ {
+ if int(dynexp[i].Cgoexport)&x != 0 {
+ dynexp[w] = dynexp[i]
+ w++
+ }
+ }
+ dynexp = dynexp[:w]
+
+ // In internal link mode, read the host object files.
+ if Linkmode == LinkInternal {
+ hostobjs()
+
+ // If we have any undefined symbols in external
+ // objects, try to read them from the libgcc file.
+ any := false
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ for _, r := range s.R {
+ if r.Sym != nil && r.Sym.Type&obj.SMASK == obj.SXREF && r.Sym.Name != ".got" {
+ any = true
+ break
+ }
+ }
+ }
+ if any {
+ if libgccfile == "" {
+ if extld == "" {
+ extld = "gcc"
+ }
+ args := hostlinkArchArgs()
+ args = append(args, "--print-libgcc-file-name")
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "%s %v\n", extld, args)
+ }
+ out, err := exec.Command(extld, args...).Output()
+ if err != nil {
+ if Debug['v'] != 0 {
+ fmt.Fprintln(&Bso, "not using a libgcc file because compiler failed")
+ fmt.Fprintf(&Bso, "%v\n%s\n", err, out)
+ }
+ libgccfile = "none"
+ } else {
+ libgccfile = strings.TrimSpace(string(out))
+ }
+ }
+
+ if libgccfile != "none" {
+ hostArchive(libgccfile)
+ }
+ }
+ } else {
+ hostlinksetup()
+ }
+
+ // We've loaded all the code now.
+ // If there are no dynamic libraries needed, gcc disables dynamic linking.
+ // Because of this, glibc's dynamic ELF loader occasionally (like in version 2.13)
+ // assumes that a dynamic binary always refers to at least one dynamic library.
+ // Rather than be a source of test cases for glibc, disable dynamic linking
+ // the same way that gcc would.
+ //
+ // Exception: on OS X, programs such as Shark only work with dynamic
+ // binaries, so leave it enabled on OS X (Mach-O) binaries.
+ // Also leave it enabled on Solaris which doesn't support
+ // statically linked binaries.
+ switch Buildmode {
+ case BuildmodeExe, BuildmodePIE:
+ if havedynamic == 0 && HEADTYPE != obj.Hdarwin && HEADTYPE != obj.Hsolaris {
+ Debug['d'] = 1
+ }
+ }
+
+ importcycles()
+}
+
+/*
+ * look for the next file in an archive.
+ * adapted from libmach.
+ */
+func nextar(bp *obj.Biobuf, off int64, a *ArHdr) int64 {
+ if off&1 != 0 {
+ off++
+ }
+ obj.Bseek(bp, off, 0)
+ buf := make([]byte, SAR_HDR)
+ if n := obj.Bread(bp, buf); n < len(buf) {
+ if n >= 0 {
+ return 0
+ }
+ return -1
+ }
+
+ a.name = artrim(buf[0:16])
+ a.date = artrim(buf[16:28])
+ a.uid = artrim(buf[28:34])
+ a.gid = artrim(buf[34:40])
+ a.mode = artrim(buf[40:48])
+ a.size = artrim(buf[48:58])
+ a.fmag = artrim(buf[58:60])
+
+ arsize := atolwhex(a.size)
+ if arsize&1 != 0 {
+ arsize++
+ }
+ return int64(arsize) + SAR_HDR
+}
+
+func objfile(lib *Library) {
+ pkg := pathtoprefix(lib.Pkg)
+
+ if Debug['v'] > 1 {
+ fmt.Fprintf(&Bso, "%5.2f ldobj: %s (%s)\n", obj.Cputime(), lib.File, pkg)
+ }
+ Bso.Flush()
+ var err error
+ var f *obj.Biobuf
+ f, err = obj.Bopenr(lib.File)
+ if err != nil {
+ Exitf("cannot open file %s: %v", lib.File, err)
+ }
+
+ magbuf := make([]byte, len(ARMAG))
+ if obj.Bread(f, magbuf) != len(magbuf) || !strings.HasPrefix(string(magbuf), ARMAG) {
+ /* load it as a regular file */
+ l := obj.Bseek(f, 0, 2)
+
+ obj.Bseek(f, 0, 0)
+ ldobj(f, pkg, l, lib.File, lib.File, FileObj)
+ obj.Bterm(f)
+
+ return
+ }
+
+ /* skip over optional __.GOSYMDEF and process __.PKGDEF */
+ off := obj.Boffset(f)
+
+ var arhdr ArHdr
+ l := nextar(f, off, &arhdr)
+ var pname string
+ if l <= 0 {
+ Diag("%s: short read on archive file symbol header", lib.File)
+ goto out
+ }
+
+ if strings.HasPrefix(arhdr.name, symname) {
+ off += l
+ l = nextar(f, off, &arhdr)
+ if l <= 0 {
+ Diag("%s: short read on archive file symbol header", lib.File)
+ goto out
+ }
+ }
+
+ if !strings.HasPrefix(arhdr.name, pkgname) {
+ Diag("%s: cannot find package header", lib.File)
+ goto out
+ }
+
+ if Buildmode == BuildmodeShared {
+ before := obj.Boffset(f)
+ pkgdefBytes := make([]byte, atolwhex(arhdr.size))
+ obj.Bread(f, pkgdefBytes)
+ hash := sha1.Sum(pkgdefBytes)
+ lib.hash = hash[:]
+ obj.Bseek(f, before, 0)
+ }
+
+ off += l
+
+ ldpkg(f, pkg, atolwhex(arhdr.size), lib.File, Pkgdef)
+
+ /*
+ * load all the object files from the archive now.
+ * this gives us sequential file access and keeps us
+ * from needing to come back later to pick up more
+ * objects. it breaks the usual C archive model, but
+ * this is Go, not C. the common case in Go is that
+ * we need to load all the objects, and then we throw away
+ * the individual symbols that are unused.
+ *
+ * loading every object will also make it possible to
+ * load foreign objects not referenced by __.GOSYMDEF.
+ */
+ for {
+ l = nextar(f, off, &arhdr)
+ if l == 0 {
+ break
+ }
+ if l < 0 {
+ Exitf("%s: malformed archive", lib.File)
+ }
+
+ off += l
+
+ pname = fmt.Sprintf("%s(%s)", lib.File, arhdr.name)
+ l = atolwhex(arhdr.size)
+ ldobj(f, pkg, l, pname, lib.File, ArchiveObj)
+ }
+
+out:
+ obj.Bterm(f)
+}
+
+type Hostobj struct {
+ ld func(*obj.Biobuf, string, int64, string)
+ pkg string
+ pn string
+ file string
+ off int64
+ length int64
+}
+
+var hostobj []Hostobj
+
+// These packages can use internal linking mode.
+// Others trigger external mode.
+var internalpkg = []string{
+ "crypto/x509",
+ "net",
+ "os/user",
+ "runtime/cgo",
+ "runtime/race",
+ "runtime/msan",
+}
+
+func ldhostobj(ld func(*obj.Biobuf, string, int64, string), f *obj.Biobuf, pkg string, length int64, pn string, file string) *Hostobj {
+ isinternal := false
+ for i := 0; i < len(internalpkg); i++ {
+ if pkg == internalpkg[i] {
+ isinternal = true
+ break
+ }
+ }
+
+ // DragonFly declares errno with __thread, which results in a symbol
+ // type of R_386_TLS_GD or R_X86_64_TLSGD. The Go linker does not
+ // currently know how to handle TLS relocations, hence we have to
+ // force external linking for any libraries that link in code that
+ // uses errno. This can be removed if the Go linker ever supports
+ // these relocation types.
+ if HEADTYPE == obj.Hdragonfly {
+ if pkg == "net" || pkg == "os/user" {
+ isinternal = false
+ }
+ }
+
+ if !isinternal {
+ externalobj = true
+ }
+
+ hostobj = append(hostobj, Hostobj{})
+ h := &hostobj[len(hostobj)-1]
+ h.ld = ld
+ h.pkg = pkg
+ h.pn = pn
+ h.file = file
+ h.off = obj.Boffset(f)
+ h.length = length
+ return h
+}
+
+func hostobjs() {
+ var f *obj.Biobuf
+ var h *Hostobj
+
+ for i := 0; i < len(hostobj); i++ {
+ h = &hostobj[i]
+ var err error
+ f, err = obj.Bopenr(h.file)
+ if f == nil {
+ Exitf("cannot reopen %s: %v", h.pn, err)
+ }
+
+ obj.Bseek(f, h.off, 0)
+ h.ld(f, h.pkg, h.length, h.pn)
+ obj.Bterm(f)
+ }
+}
+
+// provided by lib9
+
+func rmtemp() {
+ os.RemoveAll(tmpdir)
+}
+
+func hostlinksetup() {
+ if Linkmode != LinkExternal {
+ return
+ }
+
+ // For external link, record that we need to tell the external linker -s,
+ // and turn off -s internally: the external linker needs the symbol
+ // information for its final link.
+ debug_s = Debug['s']
+ Debug['s'] = 0
+
+ // create temporary directory and arrange cleanup
+ if tmpdir == "" {
+ dir, err := ioutil.TempDir("", "go-link-")
+ if err != nil {
+ log.Fatal(err)
+ }
+ tmpdir = dir
+ AtExit(rmtemp)
+ }
+
+ // change our output to temporary object file
+ coutbuf.f.Close()
+ mayberemoveoutfile()
+
+ p := fmt.Sprintf("%s/go.o", tmpdir)
+ var err error
+ f, err := os.OpenFile(p, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0775)
+ if err != nil {
+ Exitf("cannot create %s: %v", p, err)
+ }
+
+ coutbuf.Writer = bufio.NewWriter(f)
+ coutbuf.f = f
+}
+
+// hostobjCopy creates a copy of the object files in hostobj in a
+// temporary directory.
+func hostobjCopy() (paths []string) {
+ var wg sync.WaitGroup
+ sema := make(chan struct{}, runtime.NumCPU()) // limit open file descriptors
+ for i, h := range hostobj {
+ h := h
+ dst := fmt.Sprintf("%s/%06d.o", tmpdir, i)
+ paths = append(paths, dst)
+
+ wg.Add(1)
+ go func() {
+ sema <- struct{}{}
+ defer func() {
+ <-sema
+ wg.Done()
+ }()
+ f, err := os.Open(h.file)
+ if err != nil {
+ Exitf("cannot reopen %s: %v", h.pn, err)
+ }
+ if _, err := f.Seek(h.off, 0); err != nil {
+ Exitf("cannot seek %s: %v", h.pn, err)
+ }
+
+ w, err := os.Create(dst)
+ if err != nil {
+ Exitf("cannot create %s: %v", dst, err)
+ }
+ if _, err := io.CopyN(w, f, h.length); err != nil {
+ Exitf("cannot write %s: %v", dst, err)
+ }
+ if err := w.Close(); err != nil {
+ Exitf("cannot close %s: %v", dst, err)
+ }
+ }()
+ }
+ wg.Wait()
+ return paths
+}
+
+// archive builds a .a archive from the hostobj object files.
+func archive() {
+ if Buildmode != BuildmodeCArchive {
+ return
+ }
+
+ if extar == "" {
+ extar = "ar"
+ }
+
+ mayberemoveoutfile()
+ argv := []string{extar, "-q", "-c", "-s", outfile}
+ argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ argv = append(argv, hostobjCopy()...)
+
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "archive: %s\n", strings.Join(argv, " "))
+ Bso.Flush()
+ }
+
+ if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
+ Exitf("running %s failed: %v\n%s", argv[0], err, out)
+ }
+}
+
+func hostlink() {
+ if Linkmode != LinkExternal || nerrors > 0 {
+ return
+ }
+ if Buildmode == BuildmodeCArchive {
+ return
+ }
+
+ if extld == "" {
+ extld = "gcc"
+ }
+
+ var argv []string
+ argv = append(argv, extld)
+ argv = append(argv, hostlinkArchArgs()...)
+
+ if Debug['s'] == 0 && debug_s == 0 {
+ argv = append(argv, "-gdwarf-2")
+ } else {
+ argv = append(argv, "-s")
+ }
+
+ if HEADTYPE == obj.Hdarwin {
+ argv = append(argv, "-Wl,-no_pie,-headerpad,1144")
+ }
+ if HEADTYPE == obj.Hopenbsd {
+ argv = append(argv, "-Wl,-nopie")
+ }
+ if HEADTYPE == obj.Hwindows {
+ if headstring == "windowsgui" {
+ argv = append(argv, "-mwindows")
+ } else {
+ argv = append(argv, "-mconsole")
+ }
+ }
+
+ switch Buildmode {
+ case BuildmodeExe:
+ if HEADTYPE == obj.Hdarwin {
+ argv = append(argv, "-Wl,-pagezero_size,4000000")
+ }
+ case BuildmodePIE:
+ argv = append(argv, "-pie")
+ case BuildmodeCShared:
+ if HEADTYPE == obj.Hdarwin {
+ argv = append(argv, "-dynamiclib", "-Wl,-read_only_relocs,suppress")
+ } else {
+ // ELF.
+ argv = append(argv, "-Wl,-Bsymbolic")
+ if UseRelro() {
+ argv = append(argv, "-Wl,-z,relro")
+ }
+ // Pass -z nodelete to mark the shared library as
+ // non-closeable: a dlclose will do nothing.
+ argv = append(argv, "-shared", "-Wl,-z,nodelete")
+ }
+ case BuildmodeShared:
+ if UseRelro() {
+ argv = append(argv, "-Wl,-z,relro")
+ }
+ argv = append(argv, "-shared")
+ }
+
+ if Iself && DynlinkingGo() {
+ // We force all symbol resolution to be done at program startup
+ // because lazy PLT resolution can use large amounts of stack at
+	// times when we cannot allow it to do so.
+ argv = append(argv, "-Wl,-znow")
+ }
+
+ if Iself && len(buildinfo) > 0 {
+ argv = append(argv, fmt.Sprintf("-Wl,--build-id=0x%x", buildinfo))
+ }
+
+ // On Windows, given -o foo, GCC will append ".exe" to produce
+ // "foo.exe". We have decided that we want to honor the -o
+ // option. To make this work, we append a '.' so that GCC
+ // will decide that the file already has an extension. We
+ // only want to do this when producing a Windows output file
+ // on a Windows host.
+ outopt := outfile
+ if goos == "windows" && runtime.GOOS == "windows" && filepath.Ext(outopt) == "" {
+ outopt += "."
+ }
+ argv = append(argv, "-o")
+ argv = append(argv, outopt)
+
+ if rpath.val != "" {
+ argv = append(argv, fmt.Sprintf("-Wl,-rpath,%s", rpath.val))
+ }
+
+ // Force global symbols to be exported for dlopen, etc.
+ if Iself {
+ argv = append(argv, "-rdynamic")
+ }
+
+ if strings.Contains(argv[0], "clang") {
+ argv = append(argv, "-Qunused-arguments")
+ }
+
+ argv = append(argv, fmt.Sprintf("%s/go.o", tmpdir))
+ argv = append(argv, hostobjCopy()...)
+
+ if Linkshared {
+ seenDirs := make(map[string]bool)
+ seenLibs := make(map[string]bool)
+ addshlib := func(path string) {
+ dir, base := filepath.Split(path)
+ if !seenDirs[dir] {
+ argv = append(argv, "-L"+dir)
+ if !rpath.set {
+ argv = append(argv, "-Wl,-rpath="+dir)
+ }
+ seenDirs[dir] = true
+ }
+ base = strings.TrimSuffix(base, ".so")
+ base = strings.TrimPrefix(base, "lib")
+ if !seenLibs[base] {
+ argv = append(argv, "-l"+base)
+ seenLibs[base] = true
+ }
+ }
+ for _, shlib := range Ctxt.Shlibs {
+ addshlib(shlib.Path)
+ for _, dep := range shlib.Deps {
+ if dep == "" {
+ continue
+ }
+ libpath := findshlib(dep)
+ if libpath != "" {
+ addshlib(libpath)
+ }
+ }
+ }
+ }
+
+ argv = append(argv, ldflag...)
+
+ for _, p := range strings.Fields(extldflags) {
+ argv = append(argv, p)
+
+ // clang, unlike GCC, passes -rdynamic to the linker
+ // even when linking with -static, causing a linker
+ // error when using GNU ld. So take out -rdynamic if
+ // we added it. We do it in this order, rather than
+ // only adding -rdynamic later, so that -extldflags
+ // can override -rdynamic without using -static.
+ if Iself && p == "-static" {
+ for i := range argv {
+ if argv[i] == "-rdynamic" {
+ argv[i] = "-static"
+ }
+ }
+ }
+ }
+ if HEADTYPE == obj.Hwindows {
+ argv = append(argv, peimporteddlls()...)
+ }
+
+ if Debug['v'] != 0 {
+ fmt.Fprintf(&Bso, "host link:")
+ for _, v := range argv {
+ fmt.Fprintf(&Bso, " %q", v)
+ }
+ fmt.Fprintf(&Bso, "\n")
+ Bso.Flush()
+ }
+
+ if out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput(); err != nil {
+ Exitf("running %s failed: %v\n%s", argv[0], err, out)
+ } else if Debug['v'] != 0 && len(out) > 0 {
+ fmt.Fprintf(&Bso, "%s", out)
+ Bso.Flush()
+ }
+
+ if Debug['s'] == 0 && debug_s == 0 && HEADTYPE == obj.Hdarwin {
+ // Skip combining dwarf on arm.
+ if Thearch.Thechar != '5' && Thearch.Thechar != '7' {
+ dsym := fmt.Sprintf("%s/go.dwarf", tmpdir)
+ if out, err := exec.Command("dsymutil", "-f", outfile, "-o", dsym).CombinedOutput(); err != nil {
+ Ctxt.Cursym = nil
+ Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out)
+ }
+ // Skip combining if `dsymutil` didn't generate a file. See #11994.
+ if _, err := os.Stat(dsym); os.IsNotExist(err) {
+ return
+ }
+ // For os.Rename to work reliably, must be in same directory as outfile.
+ combinedOutput := outfile + "~"
+ if err := machoCombineDwarf(outfile, dsym, combinedOutput); err != nil {
+ Ctxt.Cursym = nil
+ Exitf("%s: combining dwarf failed: %v", os.Args[0], err)
+ }
+ os.Remove(outfile)
+ if err := os.Rename(combinedOutput, outfile); err != nil {
+ Ctxt.Cursym = nil
+ Exitf("%s: %v", os.Args[0], err)
+ }
+ }
+ }
+}
+
+// hostlinkArchArgs returns arguments to pass to the external linker
+// based on the architecture.
+func hostlinkArchArgs() []string {
+ switch Thearch.Thechar {
+ case '8':
+ return []string{"-m32"}
+ case '6', '9':
+ return []string{"-m64"}
+ case '5':
+ return []string{"-marm"}
+ case '7':
+ // nothing needed
+ }
+ return nil
+}
+
+// ldobj loads an input object. If it is a host object (an object
+// compiled by a non-Go compiler) it returns the Hostobj pointer. If
+// it is a Go object, it returns nil.
+func ldobj(f *obj.Biobuf, pkg string, length int64, pn string, file string, whence int) *Hostobj {
+ eof := obj.Boffset(f) + length
+
+ start := obj.Boffset(f)
+ c1 := obj.Bgetc(f)
+ c2 := obj.Bgetc(f)
+ c3 := obj.Bgetc(f)
+ c4 := obj.Bgetc(f)
+ obj.Bseek(f, start, 0)
+
+ magic := uint32(c1)<<24 | uint32(c2)<<16 | uint32(c3)<<8 | uint32(c4)
+ if magic == 0x7f454c46 { // \x7F E L F
+ return ldhostobj(ldelf, f, pkg, length, pn, file)
+ }
+
+ if magic&^1 == 0xfeedface || magic&^0x01000000 == 0xcefaedfe {
+ return ldhostobj(ldmacho, f, pkg, length, pn, file)
+ }
+
+ if c1 == 0x4c && c2 == 0x01 || c1 == 0x64 && c2 == 0x86 {
+ return ldhostobj(ldpe, f, pkg, length, pn, file)
+ }
+
+ /* check the header */
+ line := obj.Brdline(f, '\n')
+ if line == "" {
+ if obj.Blinelen(f) > 0 {
+ Diag("%s: not an object file", pn)
+ return nil
+ }
+ Diag("truncated object file: %s", pn)
+ return nil
+ }
+
+ if !strings.HasPrefix(line, "go object ") {
+ if strings.HasSuffix(pn, ".go") {
+ Exitf("%cl: input %s is not .%c file (use %cg to compile .go files)", Thearch.Thechar, pn, Thearch.Thechar, Thearch.Thechar)
+ }
+
+ if line == Thestring {
+ // old header format: just $GOOS
+ Diag("%s: stale object file", pn)
+ return nil
+ }
+
+ Diag("%s: not an object file", pn)
+ return nil
+ }
+
+ // First, check that the basic goos, goarch, and version match.
+ t := fmt.Sprintf("%s %s %s ", goos, obj.Getgoarch(), obj.Getgoversion())
+
+ line = strings.TrimRight(line, "\n")
+ if !strings.HasPrefix(line[10:]+" ", t) && Debug['f'] == 0 {
+ Diag("%s: object is [%s] expected [%s]", pn, line[10:], t)
+ return nil
+ }
+
+ // Second, check that longer lines match each other exactly,
+	// so that the Go compiler can write additional information
+ // that must be the same from run to run.
+ if len(line) >= len(t)+10 {
+ if theline == "" {
+ theline = line[10:]
+ } else if theline != line[10:] {
+ Diag("%s: object is [%s] expected [%s]", pn, line[10:], theline)
+ return nil
+ }
+ }
+
+ /* skip over exports and other info -- ends with \n!\n */
+ import0 := obj.Boffset(f)
+
+ c1 = '\n' // the last line ended in \n
+ c2 = obj.Bgetc(f)
+ c3 = obj.Bgetc(f)
+ for c1 != '\n' || c2 != '!' || c3 != '\n' {
+ c1 = c2
+ c2 = c3
+ c3 = obj.Bgetc(f)
+ if c3 == obj.Beof {
+ Diag("truncated object file: %s", pn)
+ return nil
+ }
+ }
+
+ import1 := obj.Boffset(f)
+
+ obj.Bseek(f, import0, 0)
+ ldpkg(f, pkg, import1-import0-2, pn, whence) // -2 for !\n
+ obj.Bseek(f, import1, 0)
+
+ ldobjfile(Ctxt, f, pkg, eof-obj.Boffset(f), pn)
+ return nil
+}
+
+func readelfsymboldata(f *elf.File, sym *elf.Symbol) []byte {
+ data := make([]byte, sym.Size)
+ sect := f.Sections[sym.Section]
+ if sect.Type != elf.SHT_PROGBITS && sect.Type != elf.SHT_NOTE {
+ Diag("reading %s from non-data section", sym.Name)
+ }
+ n, err := sect.ReadAt(data, int64(sym.Value-sect.Addr))
+ if uint64(n) != sym.Size {
+ Diag("reading contents of %s: %v", sym.Name, err)
+ }
+ return data
+}
+
+func readwithpad(r io.Reader, sz int32) ([]byte, error) {
+ data := make([]byte, Rnd(int64(sz), 4))
+ _, err := io.ReadFull(r, data)
+ if err != nil {
+ return nil, err
+ }
+ data = data[:sz]
+ return data, nil
+}
+
+func readnote(f *elf.File, name []byte, typ int32) ([]byte, error) {
+ for _, sect := range f.Sections {
+ if sect.Type != elf.SHT_NOTE {
+ continue
+ }
+ r := sect.Open()
+ for {
+ var namesize, descsize, noteType int32
+ err := binary.Read(r, f.ByteOrder, &namesize)
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, fmt.Errorf("read namesize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, &descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read descsize failed: %v", err)
+ }
+ err = binary.Read(r, f.ByteOrder, ¬eType)
+ if err != nil {
+ return nil, fmt.Errorf("read type failed: %v", err)
+ }
+ noteName, err := readwithpad(r, namesize)
+ if err != nil {
+ return nil, fmt.Errorf("read name failed: %v", err)
+ }
+ desc, err := readwithpad(r, descsize)
+ if err != nil {
+ return nil, fmt.Errorf("read desc failed: %v", err)
+ }
+ if string(name) == string(noteName) && typ == noteType {
+ return desc, nil
+ }
+ }
+ }
+ return nil, nil
+}
+
+func findshlib(shlib string) string {
+ for _, libdir := range Ctxt.Libdir {
+ libpath := filepath.Join(libdir, shlib)
+ if _, err := os.Stat(libpath); err == nil {
+ return libpath
+ }
+ }
+ Diag("cannot find shared library: %s", shlib)
+ return ""
+}
+
+func ldshlibsyms(shlib string) {
+ libpath := findshlib(shlib)
+ if libpath == "" {
+ return
+ }
+ for _, processedlib := range Ctxt.Shlibs {
+ if processedlib.Path == libpath {
+ return
+ }
+ }
+ if Ctxt.Debugvlog > 1 && Ctxt.Bso != nil {
+ fmt.Fprintf(Ctxt.Bso, "%5.2f ldshlibsyms: found library with name %s at %s\n", obj.Cputime(), shlib, libpath)
+ Ctxt.Bso.Flush()
+ }
+
+ f, err := elf.Open(libpath)
+ if err != nil {
+ Diag("cannot open shared library: %s", libpath)
+ return
+ }
+
+ hash, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GOABIHASH_TAG)
+ if err != nil {
+ Diag("cannot read ABI hash from shared library %s: %v", libpath, err)
+ return
+ }
+
+ depsbytes, err := readnote(f, ELF_NOTE_GO_NAME, ELF_NOTE_GODEPS_TAG)
+ if err != nil {
+ Diag("cannot read dep list from shared library %s: %v", libpath, err)
+ return
+ }
+ deps := strings.Split(string(depsbytes), "\n")
+
+ syms, err := f.DynamicSymbols()
+ if err != nil {
+ Diag("cannot read symbols from shared library: %s", libpath)
+ return
+ }
+ gcdata_locations := make(map[uint64]*LSym)
+ for _, elfsym := range syms {
+ if elf.ST_TYPE(elfsym.Info) == elf.STT_NOTYPE || elf.ST_TYPE(elfsym.Info) == elf.STT_SECTION {
+ continue
+ }
+ lsym := Linklookup(Ctxt, elfsym.Name, 0)
+ // Because loadlib above loads all .a files before loading any shared
+ // libraries, any symbols we find that duplicate symbols already
+ // loaded should be ignored (the symbols from the .a files "win").
+ if lsym.Type != 0 {
+ continue
+ }
+ lsym.Type = obj.SDYNIMPORT
+ lsym.ElfType = elf.ST_TYPE(elfsym.Info)
+ lsym.Size = int64(elfsym.Size)
+ if elfsym.Section != elf.SHN_UNDEF {
+ // Set .File for the library that actually defines the symbol.
+ lsym.File = libpath
+ // The decodetype_* functions in decodetype.go need access to
+ // the type data.
+ if strings.HasPrefix(lsym.Name, "type.") && !strings.HasPrefix(lsym.Name, "type..") {
+ lsym.P = readelfsymboldata(f, &elfsym)
+ gcdata_locations[elfsym.Value+2*uint64(Thearch.Ptrsize)+8+1*uint64(Thearch.Ptrsize)] = lsym
+ }
+ }
+ }
+ gcdata_addresses := make(map[*LSym]uint64)
+ if Thearch.Thechar == '7' {
+ for _, sect := range f.Sections {
+ if sect.Type == elf.SHT_RELA {
+ var rela elf.Rela64
+ rdr := sect.Open()
+ for {
+ err := binary.Read(rdr, f.ByteOrder, &rela)
+ if err == io.EOF {
+ break
+ } else if err != nil {
+ Diag("reading relocation failed %v", err)
+ return
+ }
+ t := elf.R_AARCH64(rela.Info & 0xffff)
+ if t != elf.R_AARCH64_RELATIVE {
+ continue
+ }
+ if lsym, ok := gcdata_locations[rela.Off]; ok {
+ gcdata_addresses[lsym] = uint64(rela.Addend)
+ }
+ }
+ }
+ }
+ }
+
+ // We might have overwritten some functions above (this tends to happen for the
+	// autogenerated type equality/hashing functions) and we don't want to generate
+ // pcln table entries for these any more so unstitch them from the Textp linked
+ // list.
+ var last *LSym
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Type == obj.SDYNIMPORT {
+ continue
+ }
+
+ if last == nil {
+ Ctxt.Textp = s
+ } else {
+ last.Next = s
+ }
+ last = s
+ }
+
+ if last == nil {
+ Ctxt.Textp = nil
+ Ctxt.Etextp = nil
+ } else {
+ last.Next = nil
+ Ctxt.Etextp = last
+ }
+
+ Ctxt.Shlibs = append(Ctxt.Shlibs, Shlib{Path: libpath, Hash: hash, Deps: deps, File: f, gcdata_addresses: gcdata_addresses})
+}
+
+func mywhatsys() {
+ goroot = obj.Getgoroot()
+ goos = obj.Getgoos()
+ goarch = obj.Getgoarch()
+
+ if !strings.HasPrefix(goarch, Thestring) {
+ log.Fatalf("cannot use %cc with GOARCH=%s", Thearch.Thechar, goarch)
+ }
+}
+
+// Copied from ../gc/subr.c:/^pathtoprefix; must stay in sync.
+/*
+ * Convert raw string to the prefix that will be used in the symbol table.
+ * Invalid bytes turn into %xx. Right now the only bytes that need
+ * escaping are %, ., and ", but we escape all control characters too.
+ *
+ * If you edit this, edit ../gc/subr.c:/^pathtoprefix too.
+ * If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
+ */
+func pathtoprefix(s string) string {
+ slash := strings.LastIndex(s, "/")
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
+ }
+ }
+ return s
+}
+
+func addsection(seg *Segment, name string, rwx int) *Section {
+ var l **Section
+
+ for l = &seg.Sect; *l != nil; l = &(*l).Next {
+ }
+ sect := new(Section)
+ sect.Rwx = uint8(rwx)
+ sect.Name = name
+ sect.Seg = seg
+ sect.Align = int32(Thearch.Ptrsize) // everything is at least pointer-aligned
+ *l = sect
+ return sect
+}
+
+func Le16(b []byte) uint16 {
+ return uint16(b[0]) | uint16(b[1])<<8
+}
+
+func Le32(b []byte) uint32 {
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func Le64(b []byte) uint64 {
+ return uint64(Le32(b)) | uint64(Le32(b[4:]))<<32
+}
+
+func Be16(b []byte) uint16 {
+ return uint16(b[0])<<8 | uint16(b[1])
+}
+
+func Be32(b []byte) uint32 {
+ return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3])
+}
+
+type Chain struct {
+ sym *LSym
+ up *Chain
+ limit int // limit on entry to sym
+}
+
+var morestack *LSym
+
+// TODO: Record enough information in new object files to
+// allow stack checks here.
+
+func haslinkregister() bool {
+ return Ctxt.FixedFrameSize() != 0
+}
+
+func callsize() int {
+ if haslinkregister() {
+ return 0
+ }
+ return Thearch.Regsize
+}
+
+func dostkcheck() {
+ var ch Chain
+
+ morestack = Linklookup(Ctxt, "runtime.morestack", 0)
+
+ // Every splitting function ensures that there are at least StackLimit
+ // bytes available below SP when the splitting prologue finishes.
+ // If the splitting function calls F, then F begins execution with
+ // at least StackLimit - callsize() bytes available.
+ // Check that every function behaves correctly with this amount
+ // of stack, following direct calls in order to piece together chains
+ // of non-splitting functions.
+ ch.up = nil
+
+ ch.limit = obj.StackLimit - callsize()
+
+ // Check every function, but do the nosplit functions in a first pass,
+ // to make the printed failure chains as short as possible.
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ // runtime.racesymbolizethunk is called from gcc-compiled C
+ // code running on the operating system thread stack.
+ // It uses more than the usual amount of stack but that's okay.
+ if s.Name == "runtime.racesymbolizethunk" {
+ continue
+ }
+
+ if s.Nosplit != 0 {
+ Ctxt.Cursym = s
+ ch.sym = s
+ stkcheck(&ch, 0)
+ }
+ }
+
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Nosplit == 0 {
+ Ctxt.Cursym = s
+ ch.sym = s
+ stkcheck(&ch, 0)
+ }
+ }
+}
+
+func stkcheck(up *Chain, depth int) int {
+ limit := up.limit
+ s := up.sym
+
+ // Don't duplicate work: only need to consider each
+ // function at top of safe zone once.
+ top := limit == obj.StackLimit-callsize()
+ if top {
+ if s.Stkcheck != 0 {
+ return 0
+ }
+ s.Stkcheck = 1
+ }
+
+ if depth > 100 {
+ Diag("nosplit stack check too deep")
+ stkbroke(up, 0)
+ return -1
+ }
+
+ if s.External != 0 || s.Pcln == nil {
+ // external function.
+ // should never be called directly.
+ // only diagnose the direct caller.
+ // TODO(mwhudson): actually think about this.
+ if depth == 1 && s.Type != obj.SXREF && !DynlinkingGo() &&
+ Buildmode != BuildmodePIE && Buildmode != BuildmodeCShared {
+ Diag("call to external function %s", s.Name)
+ }
+ return -1
+ }
+
+ if limit < 0 {
+ stkbroke(up, limit)
+ return -1
+ }
+
+ // morestack looks like it calls functions,
+ // but it switches the stack pointer first.
+ if s == morestack {
+ return 0
+ }
+
+ var ch Chain
+ ch.up = up
+
+ if s.Nosplit == 0 {
+ // Ensure we have enough stack to call morestack.
+ ch.limit = limit - callsize()
+ ch.sym = morestack
+ if stkcheck(&ch, depth+1) < 0 {
+ return -1
+ }
+ if !top {
+ return 0
+ }
+ // Raise limit to allow frame.
+ limit = int(obj.StackLimit+s.Locals) + int(Ctxt.FixedFrameSize())
+ }
+
+ // Walk through sp adjustments in function, consuming relocs.
+ ri := 0
+
+ endr := len(s.R)
+ var ch1 Chain
+ var pcsp Pciter
+ var r *Reloc
+ for pciterinit(Ctxt, &pcsp, &s.Pcln.Pcsp); pcsp.done == 0; pciternext(&pcsp) {
+ // pcsp.value is in effect for [pcsp.pc, pcsp.nextpc).
+
+ // Check stack size in effect for this span.
+ if int32(limit)-pcsp.value < 0 {
+ stkbroke(up, int(int32(limit)-pcsp.value))
+ return -1
+ }
+
+ // Process calls in this span.
+ for ; ri < endr && uint32(s.R[ri].Off) < pcsp.nextpc; ri++ {
+ r = &s.R[ri]
+ switch r.Type {
+ // Direct call.
+ case obj.R_CALL, obj.R_CALLARM, obj.R_CALLARM64, obj.R_CALLPOWER, obj.R_CALLMIPS:
+ ch.limit = int(int32(limit) - pcsp.value - int32(callsize()))
+ ch.sym = r.Sym
+ if stkcheck(&ch, depth+1) < 0 {
+ return -1
+ }
+
+ // Indirect call. Assume it is a call to a splitting function,
+ // so we have to make sure it can call morestack.
+ // Arrange the data structures to report both calls, so that
+ // if there is an error, stkprint shows all the steps involved.
+ case obj.R_CALLIND:
+ ch.limit = int(int32(limit) - pcsp.value - int32(callsize()))
+
+ ch.sym = nil
+ ch1.limit = ch.limit - callsize() // for morestack in called prologue
+ ch1.up = &ch
+ ch1.sym = morestack
+ if stkcheck(&ch1, depth+2) < 0 {
+ return -1
+ }
+ }
+ }
+ }
+
+ return 0
+}
+
+func stkbroke(ch *Chain, limit int) {
+ Diag("nosplit stack overflow")
+ stkprint(ch, limit)
+}
+
+func stkprint(ch *Chain, limit int) {
+ var name string
+
+ if ch.sym != nil {
+ name = ch.sym.Name
+ if ch.sym.Nosplit != 0 {
+ name += " (nosplit)"
+ }
+ } else {
+ name = "function pointer"
+ }
+
+ if ch.up == nil {
+ // top of chain. ch->sym != nil.
+ if ch.sym.Nosplit != 0 {
+ fmt.Printf("\t%d\tassumed on entry to %s\n", ch.limit, name)
+ } else {
+ fmt.Printf("\t%d\tguaranteed after split check in %s\n", ch.limit, name)
+ }
+ } else {
+ stkprint(ch.up, ch.limit+callsize())
+ if !haslinkregister() {
+ fmt.Printf("\t%d\ton entry to %s\n", ch.limit, name)
+ }
+ }
+
+ if ch.limit != limit {
+ fmt.Printf("\t%d\tafter %s uses %d\n", limit, name, ch.limit-limit)
+ }
+}
+
+func Cflush() {
+ if err := coutbuf.Writer.Flush(); err != nil {
+ Exitf("flushing %s: %v", coutbuf.f.Name(), err)
+ }
+}
+
+func Cpos() int64 {
+ off, err := coutbuf.f.Seek(0, 1)
+ if err != nil {
+ Exitf("seeking in output [0, 1]: %v", err)
+ }
+ return off + int64(coutbuf.Buffered())
+}
+
+// Cseek flushes buffered output and repositions the output file to
+// absolute offset p (whence 0, i.e. relative to the start of the file).
+func Cseek(p int64) {
+	Cflush()
+	if _, err := coutbuf.f.Seek(p, 0); err != nil {
+		Exitf("seeking in output to %d: %v", p, err)
+	}
+}
+
+func Cwrite(p []byte) {
+ coutbuf.Write(p)
+}
+
+func Cput(c uint8) {
+ coutbuf.WriteByte(c)
+}
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: link [options] main.o\n")
+ obj.Flagprint(2)
+ Exit(2)
+}
+
+// setheadtype records the object header type selected by the -H flag,
+// exiting with a diagnostic if the name is not recognized.
+func setheadtype(s string) {
+	h := headtype(s)
+	if h < 0 {
+		Exitf("unknown header type -H %s", s)
+	}
+	headstring = s
+	HEADTYPE = int32(h) // reuse the validated result; no second headtype call
+}
+
+func setinterp(s string) {
+ Debug['I'] = 1 // denote cmdline interpreter override
+ interpreter = s
+}
+
+func doversion() {
+ Exitf("version %s", obj.Getgoversion())
+}
+
+func genasmsym(put func(*LSym, string, int, int64, int64, int, *LSym)) {
+ // These symbols won't show up in the first loop below because we
+ // skip STEXT symbols. Normal STEXT symbols are emitted by walking textp.
+ s := Linklookup(Ctxt, "runtime.text", 0)
+
+ if s.Type == obj.STEXT {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
+ }
+ s = Linklookup(Ctxt, "runtime.etext", 0)
+ if s.Type == obj.STEXT {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), nil)
+ }
+
+ for s := Ctxt.Allsym; s != nil; s = s.Allsym {
+ if s.Hide != 0 || ((s.Name == "" || s.Name[0] == '.') && s.Version == 0 && s.Name != ".rathole" && s.Name != ".TOC.") {
+ continue
+ }
+ switch s.Type & obj.SMASK {
+ case obj.SCONST,
+ obj.SRODATA,
+ obj.SSYMTAB,
+ obj.SPCLNTAB,
+ obj.SINITARR,
+ obj.SDATA,
+ obj.SNOPTRDATA,
+ obj.SELFROSECT,
+ obj.SMACHOGOT,
+ obj.STYPE,
+ obj.SSTRING,
+ obj.SGOSTRING,
+ obj.SGOFUNC,
+ obj.SGCBITS,
+ obj.STYPERELRO,
+ obj.SSTRINGRELRO,
+ obj.SGOSTRINGRELRO,
+ obj.SGOFUNCRELRO,
+ obj.SGCBITSRELRO,
+ obj.SRODATARELRO,
+ obj.STYPELINK,
+ obj.SWINDOWS:
+ if !s.Reachable {
+ continue
+ }
+ put(s, s.Name, 'D', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+
+ case obj.SBSS, obj.SNOPTRBSS:
+ if !s.Reachable {
+ continue
+ }
+ if len(s.P) > 0 {
+ Diag("%s should not be bss (size=%d type=%d special=%d)", s.Name, int(len(s.P)), s.Type, s.Special)
+ }
+ put(s, s.Name, 'B', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+
+ case obj.SFILE:
+ put(nil, s.Name, 'f', s.Value, 0, int(s.Version), nil)
+
+ case obj.SHOSTOBJ:
+ if HEADTYPE == obj.Hwindows || Iself {
+ put(s, s.Name, 'U', s.Value, 0, int(s.Version), nil)
+ }
+
+ case obj.SDYNIMPORT:
+ if !s.Reachable {
+ continue
+ }
+ put(s, s.Extname, 'U', 0, 0, int(s.Version), nil)
+
+ case obj.STLSBSS:
+ if Linkmode == LinkExternal && HEADTYPE != obj.Hopenbsd {
+ put(s, s.Name, 't', Symaddr(s), s.Size, int(s.Version), s.Gotype)
+ }
+ }
+ }
+
+ var a *Auto
+ var off int32
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ put(s, s.Name, 'T', s.Value, s.Size, int(s.Version), s.Gotype)
+
+ // NOTE(ality): acid can't produce a stack trace without .frame symbols
+ put(nil, ".frame", 'm', int64(s.Locals)+int64(Thearch.Ptrsize), 0, 0, nil)
+
+ for a = s.Autom; a != nil; a = a.Link {
+ // Emit a or p according to actual offset, even if label is wrong.
+ // This avoids negative offsets, which cannot be encoded.
+ if a.Name != obj.A_AUTO && a.Name != obj.A_PARAM {
+ continue
+ }
+
+ // compute offset relative to FP
+ if a.Name == obj.A_PARAM {
+ off = a.Aoffset
+ } else {
+ off = a.Aoffset - int32(Thearch.Ptrsize)
+ }
+
+ // FP
+ if off >= 0 {
+ put(nil, a.Asym.Name, 'p', int64(off), 0, 0, a.Gotype)
+ continue
+ }
+
+ // SP
+ if off <= int32(-Thearch.Ptrsize) {
+ put(nil, a.Asym.Name, 'a', -(int64(off) + int64(Thearch.Ptrsize)), 0, 0, a.Gotype)
+ continue
+ }
+ }
+ }
+
+	// (Any auto offset not emitted by the loop above falls on the saved
+	// program counter slot; that is suspect, so it is deliberately skipped.)
+ if Debug['v'] != 0 || Debug['n'] != 0 {
+ fmt.Fprintf(&Bso, "%5.2f symsize = %d\n", obj.Cputime(), uint32(Symsize))
+ }
+ Bso.Flush()
+}
+
+func Symaddr(s *LSym) int64 {
+ if !s.Reachable {
+ Diag("unreachable symbol in symaddr - %s", s.Name)
+ }
+ return s.Value
+}
+
+func xdefine(p string, t int, v int64) {
+ s := Linklookup(Ctxt, p, 0)
+ s.Type = int16(t)
+ s.Value = v
+ s.Reachable = true
+ s.Special = 1
+ s.Local = true
+}
+
+func datoff(addr int64) int64 {
+ if uint64(addr) >= Segdata.Vaddr {
+ return int64(uint64(addr) - Segdata.Vaddr + Segdata.Fileoff)
+ }
+ if uint64(addr) >= Segtext.Vaddr {
+ return int64(uint64(addr) - Segtext.Vaddr + Segtext.Fileoff)
+ }
+ Diag("datoff %#x", addr)
+ return 0
+}
+
+func Entryvalue() int64 {
+ a := INITENTRY
+ if a[0] >= '0' && a[0] <= '9' {
+ return atolwhex(a)
+ }
+ s := Linklookup(Ctxt, a, 0)
+ if s.Type == 0 {
+ return INITTEXT
+ }
+ if s.Type != obj.STEXT {
+ Diag("entry not text: %s", s.Name)
+ }
+ return s.Value
+}
+
+func undefsym(s *LSym) {
+ var r *Reloc
+
+ Ctxt.Cursym = s
+ for i := 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil { // happens for some external ARM relocs
+ continue
+ }
+ if r.Sym.Type == obj.Sxxx || r.Sym.Type == obj.SXREF {
+ Diag("undefined: %s", r.Sym.Name)
+ }
+ if !r.Sym.Reachable {
+ Diag("use of unreachable symbol: %s", r.Sym.Name)
+ }
+ }
+}
+
+func undef() {
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ undefsym(s)
+ }
+ for s := datap; s != nil; s = s.Next {
+ undefsym(s)
+ }
+ if nerrors > 0 {
+ errorexit()
+ }
+}
+
+func callgraph() {
+ if Debug['c'] == 0 {
+ return
+ }
+
+ var i int
+ var r *Reloc
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM || r.Type == obj.R_CALLPOWER || r.Type == obj.R_CALLMIPS) && r.Sym.Type == obj.STEXT {
+ fmt.Fprintf(&Bso, "%s calls %s\n", s.Name, r.Sym.Name)
+ }
+ }
+ }
+}
+
+func Diag(format string, args ...interface{}) {
+ tn := ""
+ sep := ""
+ if Ctxt.Cursym != nil {
+ tn = Ctxt.Cursym.Name
+ sep = ": "
+ }
+ fmt.Printf("%s%s%s\n", tn, sep, fmt.Sprintf(format, args...))
+ nerrors++
+ if Debug['h'] != 0 {
+ panic("error")
+ }
+ if nerrors > 20 {
+ Exitf("too many errors")
+ }
+}
+
+func checkgo() {
+ if Debug['C'] == 0 {
+ return
+ }
+
+ // TODO(rsc,khr): Eventually we want to get to no Go-called C functions at all,
+ // which would simplify this logic quite a bit.
+
+ // Mark every Go-called C function with cfunc=2, recursively.
+ var changed int
+ var i int
+ var r *Reloc
+ var s *LSym
+ for {
+ changed = 0
+ for s = Ctxt.Textp; s != nil; s = s.Next {
+ if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
+ if r.Sym.Cfunc == 1 {
+ changed = 1
+ r.Sym.Cfunc = 2
+ }
+ }
+ }
+ }
+ }
+ if changed == 0 {
+ break
+ }
+ }
+
+ // Complain about Go-called C functions that can split the stack
+ // (that can be preempted for garbage collection or trigger a stack copy).
+ for s := Ctxt.Textp; s != nil; s = s.Next {
+ if s.Cfunc == 0 || (s.Cfunc == 2 && s.Nosplit != 0) {
+ for i = 0; i < len(s.R); i++ {
+ r = &s.R[i]
+ if r.Sym == nil {
+ continue
+ }
+ if (r.Type == obj.R_CALL || r.Type == obj.R_CALLARM) && r.Sym.Type == obj.STEXT {
+ if s.Cfunc == 0 && r.Sym.Cfunc == 2 && r.Sym.Nosplit == 0 {
+ fmt.Printf("Go %s calls C %s\n", s.Name, r.Sym.Name)
+ } else if s.Cfunc == 2 && s.Nosplit != 0 && r.Sym.Nosplit == 0 {
+ fmt.Printf("Go calls C %s calls %s\n", s.Name, r.Sym.Name)
+ }
+ }
+ }
+ }
+ }
+}
+
+func Rnd(v int64, r int64) int64 {
+ if r <= 0 {
+ return v
+ }
+ v += r - 1
+ c := v % r
+ if c < 0 {
+ c += r
+ }
+ v -= c
+ return v
+}
diff -pruN 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep/dep.go 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep/dep.go
--- 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep/dep.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep/dep.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,13 @@
+package dep
+
+var V int = 1
+
+var HasMask []string = []string{"hi"}
+
+type HasProg struct {
+ array [1024]*byte
+}
+
+func F() int {
+ return V
+}
diff -pruN 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep2/dep2.go 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep2/dep2.go
--- 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep2/dep2.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/misc/cgo/testshared/src/dep2/dep2.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,11 @@
+package dep2
+
+import "dep"
+
+var W int = 1
+
+var hasProg dep.HasProg
+
+func G() int {
+ return dep.F() + 1
+}
diff -pruN 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/src/cmd/compile/internal/gc/subr.go 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/src/cmd/compile/internal/gc/subr.go
--- 1.6.3-1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/src/cmd/compile/internal/gc/subr.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch/src/cmd/compile/internal/gc/subr.go 2016-07-18 16:24:06.000000000 +0000
@@ -0,0 +1,3451 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+type Error struct {
+ lineno int
+ seq int
+ msg string
+}
+
+var errors []Error
+
+func errorexit() {
+ Flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ os.Exit(2)
+}
+
+func parserline() int {
+ return int(lineno)
+}
+
+func adderrorname(n *Node) {
+ if n.Op != ODOT {
+ return
+ }
+ old := fmt.Sprintf("%v: undefined: %v\n", n.Line(), n.Left)
+ if len(errors) > 0 && int32(errors[len(errors)-1].lineno) == n.Lineno && errors[len(errors)-1].msg == old {
+ errors[len(errors)-1].msg = fmt.Sprintf("%v: undefined: %v in %v\n", n.Line(), n.Left, n)
+ }
+}
+
+func adderr(line int, format string, args ...interface{}) {
+ errors = append(errors, Error{
+ seq: len(errors),
+ lineno: line,
+ msg: fmt.Sprintf("%v: %s\n", Ctxt.Line(line), fmt.Sprintf(format, args...)),
+ })
+}
+
+// errcmp sorts errors by line, then seq, then message.
+type errcmp []Error
+
+func (x errcmp) Len() int { return len(x) }
+func (x errcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x errcmp) Less(i, j int) bool {
+ a := &x[i]
+ b := &x[j]
+ if a.lineno != b.lineno {
+ return a.lineno < b.lineno
+ }
+ if a.seq != b.seq {
+ return a.seq < b.seq
+ }
+ return a.msg < b.msg
+}
+
+func Flusherrors() {
+ bstdout.Flush()
+ if len(errors) == 0 {
+ return
+ }
+ sort.Sort(errcmp(errors))
+ for i := 0; i < len(errors); i++ {
+ if i == 0 || errors[i].msg != errors[i-1].msg {
+ fmt.Printf("%s", errors[i].msg)
+ }
+ }
+ errors = errors[:0]
+}
+
+func hcrash() {
+ if Debug['h'] != 0 {
+ Flusherrors()
+ if outfile != "" {
+ os.Remove(outfile)
+ }
+ var x *int
+ *x = 0
+ }
+}
+
+func yyerrorl(line int, format string, args ...interface{}) {
+ adderr(line, format, args...)
+
+ hcrash()
+ nerrors++
+ if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+ Flusherrors()
+ fmt.Printf("%v: too many errors\n", Ctxt.Line(line))
+ errorexit()
+ }
+}
+
+var yyerror_lastsyntax int
+
+func Yyerror(format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ if strings.HasPrefix(msg, "syntax error") {
+ nsyntaxerrors++
+
+ // An unexpected EOF caused a syntax error. Use the previous
+ // line number since getc generated a fake newline character.
+ if curio.eofnl {
+ lexlineno = prevlineno
+ }
+
+ // only one syntax error per line
+ if int32(yyerror_lastsyntax) == lexlineno {
+ return
+ }
+ yyerror_lastsyntax = int(lexlineno)
+
+ // plain "syntax error" gets "near foo" added
+ if msg == "syntax error" {
+ yyerrorl(int(lexlineno), "syntax error near %s", lexbuf.String())
+ return
+ }
+
+ yyerrorl(int(lexlineno), "%s", msg)
+ return
+ }
+
+ adderr(parserline(), "%s", msg)
+
+ hcrash()
+ nerrors++
+ if nsavederrors+nerrors >= 10 && Debug['e'] == 0 {
+ Flusherrors()
+ fmt.Printf("%v: too many errors\n", Ctxt.Line(parserline()))
+ errorexit()
+ }
+}
+
+func Warn(fmt_ string, args ...interface{}) {
+ adderr(parserline(), fmt_, args...)
+
+ hcrash()
+}
+
+func Warnl(line int, fmt_ string, args ...interface{}) {
+ adderr(line, fmt_, args...)
+ if Debug['m'] != 0 {
+ Flusherrors()
+ }
+}
+
+func Fatalf(fmt_ string, args ...interface{}) {
+ Flusherrors()
+
+ fmt.Printf("%v: internal compiler error: ", Ctxt.Line(int(lineno)))
+ fmt.Printf(fmt_, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if strings.HasPrefix(obj.Getgoversion(), "release") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://golang.org/issue/new\n")
+ }
+
+ hcrash()
+ errorexit()
+}
+
+func linehistpragma(file string) {
+ if Debug['i'] != 0 {
+ fmt.Printf("pragma %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ }
+ Ctxt.AddImport(file)
+}
+
+func linehistpush(file string) {
+ if Debug['i'] != 0 {
+ fmt.Printf("import %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ }
+ Ctxt.LineHist.Push(int(lexlineno), file)
+}
+
+func linehistpop() {
+ if Debug['i'] != 0 {
+ fmt.Printf("end of import at line %v\n", Ctxt.Line(int(lexlineno)))
+ }
+ Ctxt.LineHist.Pop(int(lexlineno))
+}
+
+func linehistupdate(file string, off int) {
+ if Debug['i'] != 0 {
+ fmt.Printf("line %s at line %v\n", file, Ctxt.Line(int(lexlineno)))
+ }
+ Ctxt.LineHist.Update(int(lexlineno), file, off)
+}
+
+func setlineno(n *Node) int32 {
+ lno := lineno
+ if n != nil {
+ switch n.Op {
+ case ONAME, OTYPE, OPACK:
+ break
+
+ case OLITERAL:
+ if n.Sym != nil {
+ break
+ }
+ fallthrough
+
+ default:
+ lineno = n.Lineno
+ if lineno == 0 {
+ if Debug['K'] != 0 {
+ Warn("setlineno: line 0")
+ }
+ lineno = lno
+ }
+ }
+ }
+
+ return lno
+}
+
+func Lookup(name string) *Sym {
+ return localpkg.Lookup(name)
+}
+
+func Lookupf(format string, a ...interface{}) *Sym {
+ return Lookup(fmt.Sprintf(format, a...))
+}
+
+func LookupBytes(name []byte) *Sym {
+ return localpkg.LookupBytes(name)
+}
+
+var initSyms []*Sym
+
+var nopkg = &Pkg{
+ Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[name]; s != nil {
+ return s
+ }
+
+ s := &Sym{
+ Name: name,
+ Pkg: pkg,
+ Lexical: LNAME,
+ }
+ if name == "init" {
+ initSyms = append(initSyms, s)
+ }
+ pkg.Syms[name] = s
+ return s
+}
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[string(name)]; s != nil {
+ return s
+ }
+ str := internString(name)
+ return pkg.Lookup(str)
+}
+
+func Pkglookup(name string, pkg *Pkg) *Sym {
+ return pkg.Lookup(name)
+}
+
+func restrictlookup(name string, pkg *Pkg) *Sym {
+ if !exportname(name) && pkg != localpkg {
+ Yyerror("cannot refer to unexported name %s.%s", pkg.Name, name)
+ }
+ return Pkglookup(name, pkg)
+}
+
+// find all the exported symbols in package opkg
+// and make them available in the current package
+func importdot(opkg *Pkg, pack *Node) {
+ var s1 *Sym
+ var pkgerror string
+
+ n := 0
+ for _, s := range opkg.Syms {
+ if s.Def == nil {
+ continue
+ }
+ if !exportname(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+ continue
+ }
+ s1 = Lookup(s.Name)
+ if s1.Def != nil {
+ pkgerror = fmt.Sprintf("during import %q", opkg.Path)
+ redeclare(s1, pkgerror)
+ continue
+ }
+
+ s1.Def = s.Def
+ s1.Block = s.Block
+ if s1.Def.Name == nil {
+ Dump("s1def", s1.Def)
+ Fatalf("missing Name")
+ }
+ s1.Def.Name.Pack = pack
+ s1.Origpkg = opkg
+ n++
+ }
+
+ if n == 0 {
+ // can't possibly be used - there were no symbols
+ yyerrorl(int(pack.Lineno), "imported and not used: %q", opkg.Path)
+ }
+}
+
+func Nod(op Op, nleft *Node, nright *Node) *Node {
+ n := new(Node)
+ n.Op = op
+ n.Left = nleft
+ n.Right = nright
+ n.Lineno = int32(parserline())
+ n.Xoffset = BADWIDTH
+ n.Orig = n
+ switch op {
+ case OCLOSURE, ODCLFUNC:
+ n.Func = new(Func)
+ n.Func.FCurfn = Curfn
+ case ONAME:
+ n.Name = new(Name)
+ n.Name.Param = new(Param)
+ case OLABEL, OPACK:
+ n.Name = new(Name)
+ case ODCLFIELD:
+ if nleft != nil {
+ n.Name = nleft.Name
+ } else {
+ n.Name = new(Name)
+ n.Name.Param = new(Param)
+ }
+ }
+ if n.Name != nil {
+ n.Name.Curfn = Curfn
+ }
+ return n
+}
+
+func saveorignode(n *Node) {
+ if n.Orig != nil {
+ return
+ }
+ norig := Nod(n.Op, nil, nil)
+ *norig = *n
+ n.Orig = norig
+}
+
+// ispaddedfield reports whether the given field
+// is followed by padding. For the case where t is
+// the last field, total gives the size of the enclosing struct.
+func ispaddedfield(t *Type, total int64) bool {
+ if t.Etype != TFIELD {
+ Fatalf("ispaddedfield called non-field %v", t)
+ }
+ if t.Down == nil {
+ return t.Width+t.Type.Width != total
+ }
+ return t.Width+t.Type.Width != t.Down.Width
+}
+
+func algtype1(t *Type, bad **Type) int {
+ if bad != nil {
+ *bad = nil
+ }
+ if t.Broke {
+ return AMEM
+ }
+ if t.Noalg {
+ return ANOEQ
+ }
+
+ switch t.Etype {
+ // will be defined later.
+ case TANY, TFORW:
+ *bad = t
+
+ return -1
+
+ case TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TINT,
+ TUINT,
+ TUINTPTR,
+ TBOOL,
+ TPTR32,
+ TPTR64,
+ TCHAN,
+ TUNSAFEPTR:
+ return AMEM
+
+ case TFUNC, TMAP:
+ if bad != nil {
+ *bad = t
+ }
+ return ANOEQ
+
+ case TFLOAT32:
+ return AFLOAT32
+
+ case TFLOAT64:
+ return AFLOAT64
+
+ case TCOMPLEX64:
+ return ACPLX64
+
+ case TCOMPLEX128:
+ return ACPLX128
+
+ case TSTRING:
+ return ASTRING
+
+ case TINTER:
+ if isnilinter(t) {
+ return ANILINTER
+ }
+ return AINTER
+
+ case TARRAY:
+ if Isslice(t) {
+ if bad != nil {
+ *bad = t
+ }
+ return ANOEQ
+ }
+
+ a := algtype1(t.Type, bad)
+ if a == ANOEQ || a == AMEM {
+ if a == ANOEQ && bad != nil {
+ *bad = t
+ }
+ return a
+ }
+
+ return -1 // needs special compare
+
+ case TSTRUCT:
+ if t.Type != nil && t.Type.Down == nil && !isblanksym(t.Type.Sym) {
+ // One-field struct is same as that one field alone.
+ return algtype1(t.Type.Type, bad)
+ }
+
+ ret := AMEM
+ var a int
+ for t1 := t.Type; t1 != nil; t1 = t1.Down {
+ // All fields must be comparable.
+ a = algtype1(t1.Type, bad)
+
+ if a == ANOEQ {
+ return ANOEQ
+ }
+
+ // Blank fields, padded fields, fields with non-memory
+ // equality need special compare.
+ if a != AMEM || isblanksym(t1.Sym) || ispaddedfield(t1, t.Width) {
+ ret = -1
+ continue
+ }
+ }
+
+ return ret
+ }
+
+ Fatalf("algtype1: unexpected type %v", t)
+ return 0
+}
+
+func algtype(t *Type) int {
+ a := algtype1(t, nil)
+ if a == AMEM || a == ANOEQ {
+ if Isslice(t) {
+ return ASLICE
+ }
+ switch t.Width {
+ case 0:
+ return a + AMEM0 - AMEM
+
+ case 1:
+ return a + AMEM8 - AMEM
+
+ case 2:
+ return a + AMEM16 - AMEM
+
+ case 4:
+ return a + AMEM32 - AMEM
+
+ case 8:
+ return a + AMEM64 - AMEM
+
+ case 16:
+ return a + AMEM128 - AMEM
+ }
+ }
+
+ return a
+}
+
+func maptype(key *Type, val *Type) *Type {
+ if key != nil {
+ var bad *Type
+ atype := algtype1(key, &bad)
+ var mtype EType
+ if bad == nil {
+ mtype = key.Etype
+ } else {
+ mtype = bad.Etype
+ }
+ switch mtype {
+ default:
+ if atype == ANOEQ {
+ Yyerror("invalid map key type %v", key)
+ }
+
+ // will be resolved later.
+ case TANY:
+ break
+
+ // map[key] used during definition of key.
+ // postpone check until key is fully defined.
+ // if there are multiple uses of map[key]
+ // before key is fully defined, the error
+ // will only be printed for the first one.
+ // good enough.
+ case TFORW:
+ if key.Maplineno == 0 {
+ key.Maplineno = lineno
+ }
+ }
+ }
+
+ t := typ(TMAP)
+ t.Down = key
+ t.Type = val
+ return t
+}
+
+func typ(et EType) *Type {
+ t := new(Type)
+ t.Etype = et
+ t.Width = BADWIDTH
+ t.Lineno = int(lineno)
+ t.Orig = t
+ return t
+}
+
+// methcmp sorts by symbol, then by package path for unexported symbols.
+type methcmp []*Type
+
+func (x methcmp) Len() int { return len(x) }
+func (x methcmp) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x methcmp) Less(i, j int) bool {
+ a := x[i]
+ b := x[j]
+ if a.Sym == nil && b.Sym == nil {
+ return false
+ }
+ if a.Sym == nil {
+ return true
+ }
+ if b.Sym == nil {
+ return false
+ }
+ if a.Sym.Name != b.Sym.Name {
+ return a.Sym.Name < b.Sym.Name
+ }
+ if !exportname(a.Sym.Name) {
+ if a.Sym.Pkg.Path != b.Sym.Pkg.Path {
+ return a.Sym.Pkg.Path < b.Sym.Pkg.Path
+ }
+ }
+
+ return false
+}
+
+func sortinter(t *Type) *Type {
+ if t.Type == nil || t.Type.Down == nil {
+ return t
+ }
+
+ var a []*Type
+ for f := t.Type; f != nil; f = f.Down {
+ a = append(a, f)
+ }
+ sort.Sort(methcmp(a))
+
+ n := len(a) // n > 0 due to initial conditions.
+ for i := 0; i < n-1; i++ {
+ a[i].Down = a[i+1]
+ }
+ a[n-1].Down = nil
+
+ t.Type = a[0]
+ return t
+}
+
+func Nodintconst(v int64) *Node {
+ c := Nod(OLITERAL, nil, nil)
+ c.Addable = true
+ c.SetVal(Val{new(Mpint)})
+ Mpmovecfix(c.Val().U.(*Mpint), v)
+ c.Type = Types[TIDEAL]
+ ullmancalc(c)
+ return c
+}
+
+func nodfltconst(v *Mpflt) *Node {
+ c := Nod(OLITERAL, nil, nil)
+ c.Addable = true
+ c.SetVal(Val{newMpflt()})
+ mpmovefltflt(c.Val().U.(*Mpflt), v)
+ c.Type = Types[TIDEAL]
+ ullmancalc(c)
+ return c
+}
+
+func Nodconst(n *Node, t *Type, v int64) {
+ *n = Node{}
+ n.Op = OLITERAL
+ n.Addable = true
+ ullmancalc(n)
+ n.SetVal(Val{new(Mpint)})
+ Mpmovecfix(n.Val().U.(*Mpint), v)
+ n.Type = t
+
+ if Isfloat[t.Etype] {
+ Fatalf("nodconst: bad type %v", t)
+ }
+}
+
+func nodnil() *Node {
+ c := Nodintconst(0)
+ c.SetVal(Val{new(NilVal)})
+ c.Type = Types[TNIL]
+ return c
+}
+
+func Nodbool(b bool) *Node {
+ c := Nodintconst(0)
+ c.SetVal(Val{b})
+ c.Type = idealbool
+ return c
+}
+
+func aindex(b *Node, t *Type) *Type {
+ bound := int64(-1) // open bound
+ typecheck(&b, Erv)
+ if b != nil {
+ switch consttype(b) {
+ default:
+ Yyerror("array bound must be an integer expression")
+
+ case CTINT, CTRUNE:
+ bound = Mpgetfix(b.Val().U.(*Mpint))
+ if bound < 0 {
+ Yyerror("array bound must be non negative")
+ }
+ }
+ }
+
+ // fixed array
+ r := typ(TARRAY)
+
+ r.Type = t
+ r.Bound = bound
+ return r
+}
+
+// treecopy recursively copies n, with the exception of
+// ONAME, OLITERAL, OTYPE, and non-iota ONONAME leaves.
+// Copies of iota ONONAME nodes are assigned the current
+// value of iota_. If lineno != 0, it sets the line number
+// of newly allocated nodes to lineno.
+func treecopy(n *Node, lineno int32) *Node {
+ if n == nil {
+ return nil
+ }
+
+ var m *Node
+ switch n.Op {
+ default:
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ m.Orig = m
+ m.Left = treecopy(n.Left, lineno)
+ m.Right = treecopy(n.Right, lineno)
+ m.List = listtreecopy(n.List, lineno)
+ if lineno != 0 {
+ m.Lineno = lineno
+ }
+ if m.Name != nil && n.Op != ODCLFIELD {
+ Dump("treecopy", n)
+ Fatalf("treecopy Name")
+ }
+
+ case ONONAME:
+ if n.Sym == Lookup("iota") {
+ // Not sure yet whether this is the real iota,
+ // but make a copy of the Node* just in case,
+ // so that all the copies of this const definition
+ // don't have the same iota value.
+ m = Nod(OXXX, nil, nil)
+ *m = *n
+ if lineno != 0 {
+ m.Lineno = lineno
+ }
+ m.Name = new(Name)
+ *m.Name = *n.Name
+ m.Name.Iota = iota_
+ break
+ }
+ fallthrough
+
+ case ONAME, OLITERAL, OTYPE:
+ m = n
+ }
+
+ return m
+}
+
+// isnil reports whether n represents the universal untyped zero value "nil".
+func isnil(n *Node) bool {
+ // Check n.Orig because constant propagation may produce typed nil constants,
+ // which don't exist in the Go spec.
+ return Isconst(n.Orig, CTNIL)
+}
+
+func isptrto(t *Type, et EType) bool {
+ if t == nil {
+ return false
+ }
+ if !Isptr[t.Etype] {
+ return false
+ }
+ t = t.Type
+ if t == nil {
+ return false
+ }
+ if t.Etype != et {
+ return false
+ }
+ return true
+}
+
+func Istype(t *Type, et EType) bool {
+ return t != nil && t.Etype == et
+}
+
+func Isfixedarray(t *Type) bool {
+ return t != nil && t.Etype == TARRAY && t.Bound >= 0
+}
+
+func Isslice(t *Type) bool {
+ return t != nil && t.Etype == TARRAY && t.Bound < 0
+}
+
+func isblank(n *Node) bool {
+ if n == nil {
+ return false
+ }
+ return isblanksym(n.Sym)
+}
+
+func isblanksym(s *Sym) bool {
+ return s != nil && s.Name == "_"
+}
+
+func Isinter(t *Type) bool {
+ return t != nil && t.Etype == TINTER
+}
+
+func isnilinter(t *Type) bool {
+ if !Isinter(t) {
+ return false
+ }
+ if t.Type != nil {
+ return false
+ }
+ return true
+}
+
+func isideal(t *Type) bool {
+ if t == nil {
+ return false
+ }
+ if t == idealstring || t == idealbool {
+ return true
+ }
+ switch t.Etype {
+ case TNIL, TIDEAL:
+ return true
+ }
+
+ return false
+}
+
+// given receiver of type t (t == r or t == *r)
+// return type to hang methods off (r).
+func methtype(t *Type, mustname int) *Type {
+ if t == nil {
+ return nil
+ }
+
+ // strip away pointer if it's there
+ if Isptr[t.Etype] {
+ if t.Sym != nil {
+ return nil
+ }
+ t = t.Type
+ if t == nil {
+ return nil
+ }
+ }
+
+ // need a type name
+ if t.Sym == nil && (mustname != 0 || t.Etype != TSTRUCT) {
+ return nil
+ }
+
+ // check types
+ if !issimple[t.Etype] {
+ switch t.Etype {
+ default:
+ return nil
+
+ case TSTRUCT,
+ TARRAY,
+ TMAP,
+ TCHAN,
+ TSTRING,
+ TFUNC:
+ break
+ }
+ }
+
+ return t
+}
+
+func cplxsubtype(et EType) EType {
+ switch et {
+ case TCOMPLEX64:
+ return TFLOAT32
+
+ case TCOMPLEX128:
+ return TFLOAT64
+ }
+
+ Fatalf("cplxsubtype: %v\n", Econv(et))
+ return 0
+}
+
+func eqnote(a, b *string) bool {
+ return a == b || a != nil && b != nil && *a == *b
+}
+
+type TypePairList struct {
+ t1 *Type
+ t2 *Type
+ next *TypePairList
+}
+
+func onlist(l *TypePairList, t1 *Type, t2 *Type) bool {
+ for ; l != nil; l = l.next {
+ if (l.t1 == t1 && l.t2 == t2) || (l.t1 == t2 && l.t2 == t1) {
+ return true
+ }
+ }
+ return false
+}
+
+// Return 1 if t1 and t2 are identical, following the spec rules.
+//
+// Any cyclic type must go through a named type, and if one is
+// named, it is only identical to the other if they are the same
+// pointer (t1 == t2), so there's no chance of chasing cycles
+// ad infinitum, so no need for a depth counter.
+func Eqtype(t1 *Type, t2 *Type) bool {
+ return eqtype1(t1, t2, nil)
+}
+
+func eqtype1(t1 *Type, t2 *Type, assumed_equal *TypePairList) bool {
+ if t1 == t2 {
+ return true
+ }
+ if t1 == nil || t2 == nil || t1.Etype != t2.Etype {
+ return false
+ }
+ if t1.Sym != nil || t2.Sym != nil {
+ // Special case: we keep byte and uint8 separate
+ // for error messages. Treat them as equal.
+ switch t1.Etype {
+ case TUINT8:
+ if (t1 == Types[TUINT8] || t1 == bytetype) && (t2 == Types[TUINT8] || t2 == bytetype) {
+ return true
+ }
+
+ case TINT, TINT32:
+ if (t1 == Types[runetype.Etype] || t1 == runetype) && (t2 == Types[runetype.Etype] || t2 == runetype) {
+ return true
+ }
+ }
+
+ return false
+ }
+
+ if onlist(assumed_equal, t1, t2) {
+ return true
+ }
+ var l TypePairList
+ l.next = assumed_equal
+ l.t1 = t1
+ l.t2 = t2
+
+ switch t1.Etype {
+ case TINTER, TSTRUCT:
+ t1 = t1.Type
+ t2 = t2.Type
+ for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+ if t1.Etype != TFIELD || t2.Etype != TFIELD {
+ Fatalf("struct/interface missing field: %v %v", t1, t2)
+ }
+ if t1.Sym != t2.Sym || t1.Embedded != t2.Embedded || !eqtype1(t1.Type, t2.Type, &l) || !eqnote(t1.Note, t2.Note) {
+ return false
+ }
+ }
+
+ if t1 == nil && t2 == nil {
+ return true
+ }
+ return false
+
+ // Loop over structs: receiver, in, out.
+ case TFUNC:
+ t1 = t1.Type
+ t2 = t2.Type
+ for ; t1 != nil && t2 != nil; t1, t2 = t1.Down, t2.Down {
+ if t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+ Fatalf("func missing struct: %v %v", t1, t2)
+ }
+
+ // Loop over fields in structs, ignoring argument names.
+ ta := t1.Type
+ tb := t2.Type
+ for ; ta != nil && tb != nil; ta, tb = ta.Down, tb.Down {
+ if ta.Etype != TFIELD || tb.Etype != TFIELD {
+ Fatalf("func struct missing field: %v %v", ta, tb)
+ }
+ if ta.Isddd != tb.Isddd || !eqtype1(ta.Type, tb.Type, &l) {
+ return false
+ }
+ }
+
+ if ta != nil || tb != nil {
+ return false
+ }
+ }
+
+ if t1 == nil && t2 == nil {
+ return true
+ }
+ return false
+
+ case TARRAY:
+ if t1.Bound != t2.Bound {
+ return false
+ }
+
+ case TCHAN:
+ if t1.Chan != t2.Chan {
+ return false
+ }
+ }
+
+ if eqtype1(t1.Down, t2.Down, &l) && eqtype1(t1.Type, t2.Type, &l) {
+ return true
+ }
+ return false
+}
+
+// Are t1 and t2 equal struct types when field names are ignored?
+// For deciding whether the result struct from g can be copied
+// directly when compiling f(g()).
+func eqtypenoname(t1 *Type, t2 *Type) bool {
+ if t1 == nil || t2 == nil || t1.Etype != TSTRUCT || t2.Etype != TSTRUCT {
+ return false
+ }
+
+ t1 = t1.Type
+ t2 = t2.Type
+ for {
+ if !Eqtype(t1, t2) {
+ return false
+ }
+ if t1 == nil {
+ return true
+ }
+ t1 = t1.Down
+ t2 = t2.Down
+ }
+}
+
+// Is type src assignment compatible to type dst?
+// If so, return op code to use in conversion.
+// If not, return 0.
+//
+// If why is non-nil, *why receives a human-readable explanation
+// (or "") suitable for appending to a "cannot use ..." error.
+// The numbered comments below follow the assignability rules of
+// the Go specification.
+func assignop(src *Type, dst *Type, why *string) Op {
+	if why != nil {
+		*why = ""
+	}
+
+	// TODO(rsc,lvd): This behaves poorly in the presence of inlining.
+	// https://golang.org/issue/2795
+	if safemode != 0 && importpkg == nil && src != nil && src.Etype == TUNSAFEPTR {
+		Yyerror("cannot use unsafe.Pointer")
+		errorexit()
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil || src.Etype == TFORW || dst.Etype == TFORW || src.Orig == nil || dst.Orig == nil {
+		return 0
+	}
+
+	// 1. src type is identical to dst.
+	if Eqtype(src, dst) {
+		return OCONVNOP
+	}
+
+	// 2. src and dst have identical underlying types
+	// and either src or dst is not a named type or
+	// both are empty interface types.
+	// For assignable but different non-empty interface types,
+	// we want to recompute the itab.
+	if Eqtype(src.Orig, dst.Orig) && (src.Sym == nil || dst.Sym == nil || isnilinter(src)) {
+		return OCONVNOP
+	}
+
+	// 3. dst is an interface type and src implements dst.
+	if dst.Etype == TINTER && src.Etype != TNIL {
+		var missing *Type
+		var ptr int
+		var have *Type
+		if implements(src, dst, &missing, &have, &ptr) {
+			return OCONVIFACE
+		}
+
+		// we'll have complained about this method anyway, suppress spurious messages.
+		if have != nil && have.Sym == missing.Sym && (have.Type.Broke || missing.Type.Broke) {
+			return OCONVIFACE
+		}
+
+		// Build the most specific diagnosis available: pointer-to-interface,
+		// nointerface mark, wrong method type, pointer receiver, or simply missing.
+		if why != nil {
+			if isptrto(src, TINTER) {
+				*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+			} else if have != nil && have.Sym == missing.Sym && have.Nointerface {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+			} else if have != nil && have.Sym == missing.Sym {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", src, dst, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else if ptr != 0 {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+			} else if have != nil {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+"\t\thave %v%v\n\t\twant %v%v", src, dst, missing.Sym, have.Sym, Tconv(have.Type, obj.FmtShort|obj.FmtByte), missing.Sym, Tconv(missing.Type, obj.FmtShort|obj.FmtByte))
+			} else {
+				*why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+			}
+		}
+
+		return 0
+	}
+
+	if isptrto(dst, TINTER) {
+		if why != nil {
+			*why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+		}
+		return 0
+	}
+
+	// Interface-to-concrete needs an explicit type assertion; note the
+	// reversed implements() check is only for producing a better message.
+	if src.Etype == TINTER && dst.Etype != TBLANK {
+		var have *Type
+		var ptr int
+		var missing *Type
+		if why != nil && implements(dst, src, &missing, &have, &ptr) {
+			*why = ": need type assertion"
+		}
+		return 0
+	}
+
+	// 4. src is a bidirectional channel value, dst is a channel type,
+	// src and dst have identical element types, and
+	// either src or dst is not a named type.
+	if src.Etype == TCHAN && src.Chan == Cboth && dst.Etype == TCHAN {
+		if Eqtype(src.Type, dst.Type) && (src.Sym == nil || dst.Sym == nil) {
+			return OCONVNOP
+		}
+	}
+
+	// 5. src is the predeclared identifier nil and dst is a nillable type.
+	if src.Etype == TNIL {
+		switch dst.Etype {
+		case TARRAY:
+			if dst.Bound != -100 { // not slice
+				break
+			}
+			fallthrough
+
+		case TPTR32,
+			TPTR64,
+			TFUNC,
+			TMAP,
+			TCHAN,
+			TINTER:
+			return OCONVNOP
+		}
+	}
+
+	// 6. rule about untyped constants - already converted by defaultlit.
+
+	// 7. Any typed value can be assigned to the blank identifier.
+	if dst.Etype == TBLANK {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+// Can we convert a value of type src to a value of type dst?
+// If so, return op code to use in conversion (maybe OCONVNOP).
+// If not, return 0.
+//
+// why, when non-nil, receives the diagnostic produced by assignop;
+// it is cleared again unless interfaces are involved.
+func convertop(src *Type, dst *Type, why *string) Op {
+	if why != nil {
+		*why = ""
+	}
+
+	if src == dst {
+		return OCONVNOP
+	}
+	if src == nil || dst == nil {
+		return 0
+	}
+
+	// 1. src can be assigned to dst.
+	op := assignop(src, dst, why)
+	if op != 0 {
+		return op
+	}
+
+	// The rules for interfaces are no different in conversions
+	// than assignments. If interfaces are involved, stop now
+	// with the good message from assignop.
+	// Otherwise clear the error.
+	if src.Etype == TINTER || dst.Etype == TINTER {
+		return 0
+	}
+	if why != nil {
+		*why = ""
+	}
+
+	// 2. src and dst have identical underlying types.
+	if Eqtype(src.Orig, dst.Orig) {
+		return OCONVNOP
+	}
+
+	// 3. src and dst are unnamed pointer types
+	// and their base types have identical underlying types.
+	if Isptr[src.Etype] && Isptr[dst.Etype] && src.Sym == nil && dst.Sym == nil {
+		if Eqtype(src.Type.Orig, dst.Type.Orig) {
+			return OCONVNOP
+		}
+	}
+
+	// 4. src and dst are both integer or floating point types.
+	if (Isint[src.Etype] || Isfloat[src.Etype]) && (Isint[dst.Etype] || Isfloat[dst.Etype]) {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 5. src and dst are both complex types.
+	if Iscomplex[src.Etype] && Iscomplex[dst.Etype] {
+		if Simtype[src.Etype] == Simtype[dst.Etype] {
+			return OCONVNOP
+		}
+		return OCONV
+	}
+
+	// 6. src is an integer or has type []byte or []rune
+	// and dst is a string type.
+	if Isint[src.Etype] && dst.Etype == TSTRING {
+		return ORUNESTR
+	}
+
+	if Isslice(src) && dst.Etype == TSTRING {
+		if src.Type.Etype == bytetype.Etype {
+			return OARRAYBYTESTR
+		}
+		if src.Type.Etype == runetype.Etype {
+			return OARRAYRUNESTR
+		}
+	}
+
+	// 7. src is a string and dst is []byte or []rune.
+	// String to slice.
+	if src.Etype == TSTRING && Isslice(dst) {
+		if dst.Type.Etype == bytetype.Etype {
+			return OSTRARRAYBYTE
+		}
+		if dst.Type.Etype == runetype.Etype {
+			return OSTRARRAYRUNE
+		}
+	}
+
+	// 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+	if (Isptr[src.Etype] || src.Etype == TUINTPTR) && dst.Etype == TUNSAFEPTR {
+		return OCONVNOP
+	}
+
+	// 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+	if src.Etype == TUNSAFEPTR && (Isptr[dst.Etype] || dst.Etype == TUINTPTR) {
+		return OCONVNOP
+	}
+
+	return 0
+}
+
+// assignconv converts node n for assignment to type t,
+// using the fixed string context in any error message.
+func assignconv(n *Node, t *Type, context string) *Node {
+	return assignconvfn(n, t, func() string { return context })
+}
+
+// Convert node n for assignment to type t.
+// context is called lazily to produce the error-message context
+// only if an error is actually reported.
+// On failure a diagnostic is issued and an OCONV node is still
+// returned so typechecking can continue.
+func assignconvfn(n *Node, t *Type, context func() string) *Node {
+	if n == nil || n.Type == nil || n.Type.Broke {
+		return n
+	}
+
+	if t.Etype == TBLANK && n.Type.Etype == TNIL {
+		Yyerror("use of untyped nil")
+	}
+
+	// Temporarily raise Diag on the original node so defaultlit
+	// does not emit errors about it; we issue our own error below.
+	old := n
+	old.Diag++ // silence errors about n; we'll issue one below
+	defaultlit(&n, t)
+	old.Diag--
+	if t.Etype == TBLANK {
+		return n
+	}
+
+	// Convert ideal bool from comparison to plain bool
+	// if the next step is non-bool (like interface{}).
+	if n.Type == idealbool && t.Etype != TBOOL {
+		if n.Op == ONAME || n.Op == OLITERAL {
+			r := Nod(OCONVNOP, n, nil)
+			r.Type = Types[TBOOL]
+			r.Typecheck = 1
+			r.Implicit = true
+			n = r
+		}
+	}
+
+	if Eqtype(n.Type, t) {
+		return n
+	}
+
+	var why string
+	op := assignop(n.Type, t, &why)
+	if op == 0 {
+		Yyerror("cannot use %v as type %v in %s%s", Nconv(n, obj.FmtLong), t, context(), why)
+		op = OCONV
+	}
+
+	// Wrap n in the conversion node; keep Orig so error messages
+	// can refer to the original expression.
+	r := Nod(op, n, nil)
+	r.Type = t
+	r.Typecheck = 1
+	r.Implicit = true
+	r.Orig = n.Orig
+	return r
+}
+
+// substArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// It is a fatal error to supply more types than there are
+// placeholders; substAny reports the converse (too few).
+func substArgTypes(n *Node, types ...*Type) {
+	for _, t := range types {
+		dowidth(t)
+	}
+	substAny(&n.Type, &types)
+	if len(types) > 0 {
+		Fatalf("substArgTypes: too many argument types")
+	}
+}
+
+// substAny walks *tp, replacing instances of "any" with successive
+// elements removed from types.
+// The outer for loop plus `tp = ...; continue` is a hand-rolled
+// tail recursion into the single element type of pointer, channel,
+// array and map types.
+func substAny(tp **Type, types *[]*Type) {
+	for {
+		t := *tp
+		if t == nil {
+			return
+		}
+		if t.Etype == TANY && t.Copyany {
+			if len(*types) == 0 {
+				Fatalf("substArgTypes: not enough argument types")
+			}
+			*tp = (*types)[0]
+			*types = (*types)[1:]
+		}
+
+		switch t.Etype {
+		case TPTR32, TPTR64, TCHAN, TARRAY:
+			tp = &t.Type
+			continue
+
+		case TMAP:
+			substAny(&t.Down, types)
+			tp = &t.Type
+			continue
+
+		// Function layout (see getthis/Getoutarg/getinarg below):
+		// Type = receiver, Type.Down = out args, Type.Down.Down = in args.
+		// Visit receiver, then in args, then out args, so placeholders
+		// are consumed in source order.
+		case TFUNC:
+			substAny(&t.Type, types)
+			substAny(&t.Type.Down.Down, types)
+			substAny(&t.Type.Down, types)
+
+		case TSTRUCT:
+			for t = t.Type; t != nil; t = t.Down {
+				substAny(&t.Type, types)
+			}
+		}
+		return
+	}
+}
+
+// Is this a 64-bit type?
+// Decided on the simplified (Simtype) representation, so named
+// types and platform-dependent int/uintptr map to their machine type.
+func Is64(t *Type) bool {
+	if t == nil {
+		return false
+	}
+	switch Simtype[t.Etype] {
+	case TINT64, TUINT64, TPTR64:
+		return true
+	}
+
+	return false
+}
+
+// Is a conversion between t1 and t2 a no-op?
+// True when both simplified types share the same size and class
+// (e.g. int8<->uint8); floats are only no-ops against themselves.
+func Noconv(t1 *Type, t2 *Type) bool {
+	e1 := Simtype[t1.Etype]
+	e2 := Simtype[t2.Etype]
+
+	switch e1 {
+	case TINT8, TUINT8:
+		return e2 == TINT8 || e2 == TUINT8
+
+	case TINT16, TUINT16:
+		return e2 == TINT16 || e2 == TUINT16
+
+	case TINT32, TUINT32, TPTR32:
+		return e2 == TINT32 || e2 == TUINT32 || e2 == TPTR32
+
+	case TINT64, TUINT64, TPTR64:
+		return e2 == TINT64 || e2 == TUINT64 || e2 == TPTR64
+
+	case TFLOAT32:
+		return e2 == TFLOAT32
+
+	case TFLOAT64:
+		return e2 == TFLOAT64
+	}
+
+	return false
+}
+
+// shallow returns a one-level copy of t. Child types are shared
+// with the original. If t was its own Orig, the copy points at
+// itself too, so it remains self-original.
+func shallow(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+	nt := typ(0)
+	*nt = *t
+	if t.Orig == t {
+		nt.Orig = nt
+	}
+	return nt
+}
+
+// deep returns a copy of t with the container structure (pointers,
+// channels, arrays, maps, funcs, structs) duplicated and TANY nodes
+// marked Copyany so substAny will replace them. Leaf types are
+// shared with the original.
+func deep(t *Type) *Type {
+	if t == nil {
+		return nil
+	}
+
+	var nt *Type
+	switch t.Etype {
+	default:
+		nt = t // share from here down
+
+	case TANY:
+		nt = shallow(t)
+		nt.Copyany = true
+
+	case TPTR32, TPTR64, TCHAN, TARRAY:
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+
+	case TMAP:
+		nt = shallow(t)
+		nt.Down = deep(t.Down)
+		nt.Type = deep(t.Type)
+
+	// Copy receiver, out-args and in-args lists of a function type.
+	case TFUNC:
+		nt = shallow(t)
+		nt.Type = deep(t.Type)
+		nt.Type.Down = deep(t.Type.Down)
+		nt.Type.Down.Down = deep(t.Type.Down.Down)
+
+	// Duplicate the TFIELD chain, deep-copying each field's type.
+	case TSTRUCT:
+		nt = shallow(t)
+		nt.Type = shallow(t.Type)
+		xt := nt.Type
+
+		for t = t.Type; t != nil; t = t.Down {
+			xt.Type = deep(t.Type)
+			xt.Down = shallow(t.Down)
+			xt = xt.Down
+		}
+	}
+
+	return nt
+}
+
+// syslook returns the definition node for the runtime function or
+// variable with the given name. With copy != 0 it returns a fresh
+// node whose type has been deep-copied, so TANY placeholders can be
+// substituted (see substArgTypes) without mutating the original.
+func syslook(name string, copy int) *Node {
+	s := Pkglookup(name, Runtimepkg)
+	if s == nil || s.Def == nil {
+		Fatalf("syslook: can't find runtime.%s", name)
+	}
+
+	if copy == 0 {
+		return s.Def
+	}
+
+	n := Nod(0, nil, nil)
+	*n = *s.Def
+	n.Type = deep(s.Def.Type)
+
+	return n
+}
+
+// compute a hash value for type t.
+// if t is a method type, ignore the receiver
+// so that the hash can be used in interface checks.
+// %T already contains
+// all the necessary logic to generate a representation
+// of the type that completely describes it.
+// using smprint here avoids duplicating that code.
+// using md5 here is overkill, but i got tired of
+// accidental collisions making the runtime think
+// two types are equal when they really aren't.
+func typehash(t *Type) uint32 {
+	var p string
+
+	if t.Thistuple != 0 {
+		// hide method receiver from Tpretty
+		// (temporarily clear Thistuple, format, then restore).
+		t.Thistuple = 0
+
+		p = Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+		t.Thistuple = 1
+	} else {
+		p = Tconv(t, obj.FmtLeft|obj.FmtUnsigned)
+	}
+
+	//print("typehash: %s\n", p);
+	// First 4 bytes of the MD5 of the printed form, little-endian.
+	h := md5.Sum([]byte(p))
+	return binary.LittleEndian.Uint32(h[:4])
+}
+
+// Lazily-built cache of pointer types for the most common element
+// types, to reduce allocations in Ptrto.
+var initPtrtoDone bool
+
+var (
+	ptrToUint8  *Type
+	ptrToAny    *Type
+	ptrToString *Type
+	ptrToBool   *Type
+	ptrToInt32  *Type
+)
+
+// initPtrto populates the cached pointer types used by Ptrto.
+// Called once, guarded by initPtrtoDone.
+func initPtrto() {
+	ptrToUint8 = ptrto1(Types[TUINT8])
+	ptrToAny = ptrto1(Types[TANY])
+	ptrToString = ptrto1(Types[TSTRING])
+	ptrToBool = ptrto1(Types[TBOOL])
+	ptrToInt32 = ptrto1(Types[TINT32])
+}
+
+// ptrto1 builds a new pointer-to-t type with pointer width and
+// alignment for the target architecture.
+func ptrto1(t *Type) *Type {
+	t1 := typ(Tptr)
+	t1.Type = t
+	t1.Width = int64(Widthptr)
+	t1.Align = uint8(Widthptr)
+	return t1
+}
+
+// Ptrto returns the Type *t.
+// The returned struct must not be modified.
+// Common element types return a shared cached pointer type;
+// everything else gets a freshly allocated one.
+func Ptrto(t *Type) *Type {
+	if Tptr == 0 {
+		Fatalf("ptrto: no tptr")
+	}
+	// Reduce allocations by pre-creating common cases.
+	if !initPtrtoDone {
+		initPtrto()
+		initPtrtoDone = true
+	}
+	switch t {
+	case Types[TUINT8]:
+		return ptrToUint8
+	case Types[TINT32]:
+		return ptrToInt32
+	case Types[TANY]:
+		return ptrToAny
+	case Types[TSTRING]:
+		return ptrToString
+	case Types[TBOOL]:
+		return ptrToBool
+	}
+	return ptrto1(t)
+}
+
+// frame prints a debugging dump of declarations: the external
+// (package-level) frame when context != 0, otherwise the local
+// declarations of the function currently being compiled.
+func frame(context int) {
+	if context != 0 {
+		fmt.Printf("--- external frame ---\n")
+		for _, n := range externdcl {
+			printframenode(n)
+		}
+		return
+	}
+
+	if Curfn != nil {
+		fmt.Printf("--- %v frame ---\n", Curfn.Func.Nname.Sym)
+		for l := Curfn.Func.Dcl; l != nil; l = l.Next {
+			printframenode(l.N)
+		}
+	}
+}
+
+// printframenode prints one declaration for frame: names with their
+// generation number and width, types with their width. Other ops
+// are silently skipped. Width is -1 when the type is unknown.
+func printframenode(n *Node) {
+	w := int64(-1)
+	if n.Type != nil {
+		w = n.Type.Width
+	}
+	switch n.Op {
+	case ONAME:
+		fmt.Printf("%v %v G%d %v width=%d\n", Oconv(int(n.Op), 0), n.Sym, n.Name.Vargen, n.Type, w)
+	case OTYPE:
+		fmt.Printf("%v %v width=%d\n", Oconv(int(n.Op), 0), n.Type, w)
+	}
+}
+
+// calculate sethi/ullman number
+// roughly how many registers needed to
+// compile a node. used to compile the
+// hardest side first to minimize registers.
+// Stores the result (clamped to 200) in n.Ullman.
+func ullmancalc(n *Node) {
+	if n == nil {
+		return
+	}
+
+	var ul int
+	var ur int
+	// Nodes with init statements are treated as maximally expensive.
+	if n.Ninit != nil {
+		ul = UINF
+		goto out
+	}
+
+	switch n.Op {
+	case OREGISTER, OLITERAL, ONAME:
+		ul = 1
+		if n.Class == PPARAMREF || (n.Class&PHEAP != 0) {
+			ul++
+		}
+		goto out
+
+	case OCALL, OCALLFUNC, OCALLMETH, OCALLINTER, OASWB:
+		ul = UINF
+		goto out
+
+	// hard with instrumented code
+	case OANDAND, OOROR:
+		if instrumenting {
+			ul = UINF
+			goto out
+		}
+	}
+
+	// General case: max(left, right), plus one when both sides
+	// are equally expensive (a register must hold one result
+	// while the other is computed).
+	ul = 1
+	if n.Left != nil {
+		ul = int(n.Left.Ullman)
+	}
+	ur = 1
+	if n.Right != nil {
+		ur = int(n.Right.Ullman)
+	}
+	if ul == ur {
+		ul += 1
+	}
+	if ur > ul {
+		ul = ur
+	}
+
+out:
+	if ul > 200 {
+		ul = 200 // clamp to uchar with room to grow
+	}
+	n.Ullman = uint8(ul)
+}
+
+// badtype reports an "illegal types for operand" error for op,
+// listing the offending operand types and adding a hint for the
+// common *struct vs *interface confusion.
+func badtype(op Op, tl *Type, tr *Type) {
+	fmt_ := ""
+	if tl != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", tl)
+	}
+	if tr != nil {
+		fmt_ += fmt.Sprintf("\n\t%v", tr)
+	}
+
+	// common mistake: *struct and *interface.
+	if tl != nil && tr != nil && Isptr[tl.Etype] && Isptr[tr.Etype] {
+		if tl.Type.Etype == TSTRUCT && tr.Type.Etype == TINTER {
+			fmt_ += "\n\t(*struct vs *interface)"
+		} else if tl.Type.Etype == TINTER && tr.Type.Etype == TSTRUCT {
+			fmt_ += "\n\t(*interface vs *struct)"
+		}
+	}
+
+	s := fmt_
+	Yyerror("illegal types for operand: %v%s", Oconv(int(op), 0), s)
+}
+
+// iterator to walk a structure declaration
+// Structfirst initializes s over the field list of *nn (a struct,
+// interface or func type) and returns the first TFIELD, or nil if
+// the list is empty. Continue with structnext.
+func Structfirst(s *Iter, nn **Type) *Type {
+	var t *Type
+
+	n := *nn
+	if n == nil {
+		goto bad
+	}
+
+	switch n.Etype {
+	default:
+		goto bad
+
+	case TSTRUCT, TINTER, TFUNC:
+		break
+	}
+
+	t = n.Type
+	if t == nil {
+		return nil
+	}
+
+	if t.Etype != TFIELD {
+		Fatalf("structfirst: not field %v", t)
+	}
+
+	s.T = t
+	return t
+
+bad:
+	Fatalf("structfirst: not struct %v", n)
+
+	return nil
+}
+
+// structnext advances the iterator started by Structfirst and
+// returns the next TFIELD, or nil at the end of the list.
+func structnext(s *Iter) *Type {
+	n := s.T
+	t := n.Down
+	if t == nil {
+		return nil
+	}
+
+	if t.Etype != TFIELD {
+		Fatalf("structnext: not struct %v", n)
+
+		return nil
+	}
+
+	s.T = t
+	return t
+}
+
+// iterator to this and inargs in a function
+// funcfirst yields the receiver field first (if any), then the
+// input parameters; s.Done tracks which list is being walked.
+// Continue with funcnext.
+func funcfirst(s *Iter, t *Type) *Type {
+	var fp *Type
+
+	if t == nil {
+		goto bad
+	}
+
+	if t.Etype != TFUNC {
+		goto bad
+	}
+
+	s.Tfunc = t
+	s.Done = 0
+	fp = Structfirst(s, getthis(t))
+	if fp == nil {
+		// No receiver: fall through to the input-argument list.
+		s.Done = 1
+		fp = Structfirst(s, getinarg(t))
+	}
+
+	return fp
+
+bad:
+	Fatalf("funcfirst: not func %v", t)
+	return nil
+}
+
+// funcnext advances the iterator started by funcfirst, switching
+// from the receiver list to the input-argument list when the
+// former is exhausted. Returns nil when both lists are done.
+func funcnext(s *Iter) *Type {
+	fp := structnext(s)
+	if fp == nil && s.Done == 0 {
+		s.Done = 1
+		fp = Structfirst(s, getinarg(s.Tfunc))
+	}
+
+	return fp
+}
+
+// Accessors for the three parameter lists hanging off a TFUNC:
+// receiver (Type), results (Type.Down), inputs (Type.Down.Down).
+// The pointer-returning forms allow in-place substitution; the
+// -x forms dereference for convenience.
+func getthis(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatalf("getthis: not a func %v", t)
+	}
+	return &t.Type
+}
+
+func Getoutarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatalf("getoutarg: not a func %v", t)
+	}
+	return &t.Type.Down
+}
+
+func getinarg(t *Type) **Type {
+	if t.Etype != TFUNC {
+		Fatalf("getinarg: not a func %v", t)
+	}
+	return &t.Type.Down.Down
+}
+
+func getthisx(t *Type) *Type {
+	return *getthis(t)
+}
+
+func getoutargx(t *Type) *Type {
+	return *Getoutarg(t)
+}
+
+func getinargx(t *Type) *Type {
+	return *getinarg(t)
+}
+
+// Brcom returns !(op) — the logical complement of a comparison.
+// For example, Brcom(==) is !=.
+func Brcom(op Op) Op {
+	switch op {
+	case OEQ:
+		return ONE
+	case ONE:
+		return OEQ
+	case OLT:
+		return OGE
+	case OGT:
+		return OLE
+	case OLE:
+		return OGT
+	case OGE:
+		return OLT
+	}
+	Fatalf("brcom: no com for %v\n", Oconv(int(op), 0))
+	return op
+}
+
+// Brrev returns reverse(op) — the comparison with swapped operands.
+// For example, Brrev(<) is >. Note == and != are their own reverse.
+func Brrev(op Op) Op {
+	switch op {
+	case OEQ:
+		return OEQ
+	case ONE:
+		return ONE
+	case OLT:
+		return OGT
+	case OGT:
+		return OLT
+	case OLE:
+		return OGE
+	case OGE:
+		return OLE
+	}
+	Fatalf("brrev: no rev for %v\n", Oconv(int(op), 0))
+	return op
+}
+
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
+// Works by recursively making operands safe and rebuilding the
+// node only when an operand actually changed.
+func safeexpr(n *Node, init **NodeList) *Node {
+	if n == nil {
+		return nil
+	}
+
+	// Hoist any init statements out of n into the caller's list.
+	if n.Ninit != nil {
+		walkstmtlist(n.Ninit)
+		*init = concat(*init, n.Ninit)
+		n.Ninit = nil
+	}
+
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+
+	case ODOT, OLEN, OCAP:
+		l := safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		r := Nod(OXXX, nil, nil)
+		*r = *n
+		r.Left = l
+		typecheck(&r, Erv)
+		walkexpr(&r, init)
+		return r
+
+	case ODOTPTR, OIND:
+		l := safeexpr(n.Left, init)
+		if l == n.Left {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		walkexpr(&a, init)
+		return a
+
+	case OINDEX, OINDEXMAP:
+		l := safeexpr(n.Left, init)
+		r := safeexpr(n.Right, init)
+		if l == n.Left && r == n.Right {
+			return n
+		}
+		a := Nod(OXXX, nil, nil)
+		*a = *n
+		a.Left = l
+		a.Right = r
+		walkexpr(&a, init)
+		return a
+	}
+
+	// make a copy; must not be used as an lvalue
+	if islvalue(n) {
+		Fatalf("missing lvalue case in safeexpr: %v", n)
+	}
+	return cheapexpr(n, init)
+}
+
+// copyexpr assigns n to a fresh temporary of type t, appending the
+// assignment to init, and returns the temporary.
+func copyexpr(n *Node, t *Type, init **NodeList) *Node {
+	l := temp(t)
+	a := Nod(OAS, l, n)
+	typecheck(&a, Etop)
+	walkexpr(&a, init)
+	*init = list(*init, a)
+	return l
+}
+
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
+// Names and literals are already cheap; anything else is copied
+// into a temporary.
+func cheapexpr(n *Node, init **NodeList) *Node {
+	switch n.Op {
+	case ONAME, OLITERAL:
+		return n
+	}
+
+	return copyexpr(n, n.Type, init)
+}
+
+// Setmaxarg grows Maxarg (the maximum outgoing-argument frame size
+// seen so far) to cover a call with argument area t plus extra
+// bytes, checking for overflow against the architecture limit.
+func Setmaxarg(t *Type, extra int32) {
+	dowidth(t)
+	w := t.Argwid
+	if w >= Thearch.MAXWIDTH {
+		Fatalf("bad argwid %v", t)
+	}
+	w += int64(extra)
+	if w >= Thearch.MAXWIDTH {
+		Fatalf("bad argwid %d + %v", extra, t)
+	}
+	if w > Maxarg {
+		Maxarg = w
+	}
+}
+
+// unicode-aware case-insensitive strcmp
+
+// code to resolve elided DOTs
+// in embedded types
+
+// search depth 0 --
+// return count of fields+methods
+// found with a given name
+// When save is non-nil, *save receives the last match found.
+// ignorecase != 0 enables case-folded matching (methods only have
+// the extra TFUNC/Thistuple guard on the field list).
+func lookdot0(s *Sym, t *Type, save **Type, ignorecase int) int {
+	u := t
+	if Isptr[u.Etype] {
+		u = u.Type
+	}
+
+	c := 0
+	if u.Etype == TSTRUCT || u.Etype == TINTER {
+		for f := u.Type; f != nil; f = f.Down {
+			if f.Sym == s || (ignorecase != 0 && f.Type.Etype == TFUNC && f.Type.Thistuple > 0 && strings.EqualFold(f.Sym.Name, s.Name)) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	// Also count methods declared on the base method type.
+	u = methtype(t, 0)
+	if u != nil {
+		for f := u.Method; f != nil; f = f.Down {
+			if f.Embedded == 0 && (f.Sym == s || (ignorecase != 0 && strings.EqualFold(f.Sym.Name, s.Name))) {
+				if save != nil {
+					*save = f
+				}
+				c++
+			}
+		}
+	}
+
+	return c
+}
+
+// search depth d for field/method s --
+// return count of fields+methods
+// found at search depth.
+// answer is in dotlist array and
+// count of number of ways is returned.
+// Trecur guards against cycles through recursive embedded types.
+func adddot1(s *Sym, t *Type, d int, save **Type, ignorecase int) int {
+	if t.Trecur != 0 {
+		return 0
+	}
+	t.Trecur = 1
+
+	var c int
+	var u *Type
+	var a int
+	if d == 0 {
+		c = lookdot0(s, t, save, ignorecase)
+		goto out
+	}
+
+	c = 0
+	u = t
+	if Isptr[u.Etype] {
+		u = u.Type
+	}
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	// Recurse one level into each embedded field; record the path
+	// in dotlist only for the first (and, if unique, only) hit.
+	d--
+	for f := u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		a = adddot1(s, f.Type, d, save, ignorecase)
+		if a != 0 && c == 0 {
+			dotlist[d].field = f
+		}
+		c += a
+	}
+
+out:
+	t.Trecur = 0
+	return c
+}
+
+// in T.field
+// find missing fields that
+// will give shortest unique addressing.
+// modify the tree with missing type names.
+// Searches breadth-first by increasing embedding depth; an
+// ambiguous selector at the shallowest matching depth is an error.
+func adddot(n *Node) *Node {
+	typecheck(&n.Left, Etype|Erv)
+	n.Diag |= n.Left.Diag
+	t := n.Left.Type
+	if t == nil {
+		return n
+	}
+
+	if n.Left.Op == OTYPE {
+		return n
+	}
+
+	if n.Right.Op != ONAME {
+		return n
+	}
+	s := n.Right.Sym
+	if s == nil {
+		return n
+	}
+
+	var c int
+	for d := 0; d < len(dotlist); d++ {
+		c = adddot1(s, t, d, nil, 0)
+		if c > 0 {
+			if c > 1 {
+				Yyerror("ambiguous selector %v", n)
+				n.Left = nil
+				return n
+			}
+
+			// rebuild elided dots
+			for c := d - 1; c >= 0; c-- {
+				n.Left = Nod(ODOT, n.Left, newname(dotlist[c].field.Sym))
+				n.Left.Implicit = true
+			}
+
+			return n
+		}
+	}
+
+	return n
+}
+
+// code to help generate trampoline
+// functions for methods on embedded
+// subtypes.
+// these are approx the same as
+// the corresponding adddot routines
+// except that they expect to be called
+// with unique tasks and they return
+// the actual methods.
+//
+// Symlink is one candidate method discovered during expansion;
+// slist accumulates them as a singly-linked list.
+type Symlink struct {
+	field     *Type
+	link      *Symlink
+	good      bool
+	followptr bool
+}
+
+var slist *Symlink
+
+// expand0 records on slist every not-yet-seen method of t (interface
+// methods or methods from t's base method type). SymUniq marks
+// symbols already collected; followptr notes whether a pointer was
+// traversed to reach t.
+func expand0(t *Type, followptr bool) {
+	u := t
+	if Isptr[u.Etype] {
+		followptr = true
+		u = u.Type
+	}
+
+	if u.Etype == TINTER {
+		var sl *Symlink
+		for f := u.Type; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = followptr
+			slist = sl
+		}
+
+		return
+	}
+
+	u = methtype(t, 0)
+	if u != nil {
+		var sl *Symlink
+		for f := u.Method; f != nil; f = f.Down {
+			if f.Sym.Flags&SymUniq != 0 {
+				continue
+			}
+			f.Sym.Flags |= SymUniq
+			sl = new(Symlink)
+			sl.field = f
+			sl.link = slist
+			sl.followptr = followptr
+			slist = sl
+		}
+	}
+}
+
+// expand1 walks embedded fields of t to depth d, calling expand0 at
+// every level except the outermost, collecting candidate promoted
+// methods on slist. Trecur guards against recursive types.
+func expand1(t *Type, d int, followptr bool) {
+	if t.Trecur != 0 {
+		return
+	}
+	if d == 0 {
+		return
+	}
+	t.Trecur = 1
+
+	// Skip expand0 at the top level: those methods were pre-marked
+	// SymUniq by expandmeth.
+	if d != len(dotlist)-1 {
+		expand0(t, followptr)
+	}
+
+	u := t
+	if Isptr[u.Etype] {
+		followptr = true
+		u = u.Type
+	}
+
+	if u.Etype != TSTRUCT && u.Etype != TINTER {
+		goto out
+	}
+
+	for f := u.Type; f != nil; f = f.Down {
+		if f.Embedded == 0 {
+			continue
+		}
+		if f.Sym == nil {
+			continue
+		}
+		expand1(f.Type, d-1, followptr)
+	}
+
+out:
+	t.Trecur = 0
+}
+
+// expandmeth computes t.Xmethod, the expanded method set of t:
+// its own methods plus methods uniquely promoted from embedded
+// fields. Promoted entries are copies marked Embedded (1, or 2
+// when reached through a pointer) so trampolines get generated.
+// Idempotent: returns early if Xmethod is already set.
+func expandmeth(t *Type) {
+	if t == nil || t.Xmethod != nil {
+		return
+	}
+
+	// mark top-level method symbols
+	// so that expand1 doesn't consider them.
+	var f *Type
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags |= SymUniq
+	}
+
+	// generate all reachable methods
+	slist = nil
+
+	expand1(t, len(dotlist)-1, false)
+
+	// check each method to be uniquely reachable
+	var c int
+	var d int
+	for sl := slist; sl != nil; sl = sl.link {
+		sl.field.Sym.Flags &^= SymUniq
+		// Find the shallowest depth with any hit; accept only if
+		// unique (c == 1) at that depth.
+		for d = 0; d < len(dotlist); d++ {
+			c = adddot1(sl.field.Sym, t, d, &f, 0)
+			if c == 0 {
+				continue
+			}
+			if c == 1 {
+				// addot1 may have dug out arbitrary fields, we only want methods.
+				if f.Type.Etype == TFUNC && f.Type.Thistuple > 0 {
+					sl.good = true
+					sl.field = f
+				}
+			}
+
+			break
+		}
+	}
+
+	for f = t.Method; f != nil; f = f.Down {
+		f.Sym.Flags &^= SymUniq
+	}
+
+	t.Xmethod = t.Method
+	for sl := slist; sl != nil; sl = sl.link {
+		if sl.good {
+			// add it to the base type method list
+			f = typ(TFIELD)
+
+			*f = *sl.field
+			f.Embedded = 1 // needs a trampoline
+			if sl.followptr {
+				f.Embedded = 2
+			}
+			f.Down = t.Xmethod
+			t.Xmethod = f
+		}
+	}
+}
+
+// Given funarg struct list, return list of ODCLFIELD Node fn args.
+// With mustname != 0, unnamed or blank parameters are given
+// invented ".anonN" names so the generated trampoline can refer
+// to them. Isddd (variadic) markers are carried over.
+func structargs(tl **Type, mustname int) *NodeList {
+	var savet Iter
+	var a *Node
+	var n *Node
+	var buf string
+
+	var args *NodeList
+	gen := 0
+	for t := Structfirst(&savet, tl); t != nil; t = structnext(&savet) {
+		n = nil
+		if mustname != 0 && (t.Sym == nil || t.Sym.Name == "_") {
+			// invent a name so that we can refer to it in the trampoline
+			buf = fmt.Sprintf(".anon%d", gen)
+			gen++
+
+			n = newname(Lookup(buf))
+		} else if t.Sym != nil {
+			n = newname(t.Sym)
+		}
+		a = Nod(ODCLFIELD, n, typenod(t.Type))
+		a.Isddd = t.Isddd
+		if n != nil {
+			n.Isddd = t.Isddd
+		}
+		args = list(args, a)
+	}
+
+	return args
+}
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+//	func (t T) M() {
+//		...
+//	}
+//
+// already exists; this function generates
+//
+//	func (u U) M() {
+//		u.M()
+//	}
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+//	rcvr - U
+//	method - M func (t T)(), a TFIELD type struct
+//	newnam - the eventual mangled name of this function
+
+var genwrapper_linehistdone int = 0
+
+func genwrapper(rcvr *Type, method *Type, newnam *Sym, iface int) {
+	if false && Debug['r'] != 0 {
+		fmt.Printf("genwrapper rcvrtype=%v method=%v newnam=%v\n", rcvr, method, newnam)
+	}
+
+	lexlineno++
+	lineno = lexlineno
+	if genwrapper_linehistdone == 0 {
+		// All the wrappers can share the same linehist entry.
+		linehistpush("")
+
+		genwrapper_linehistdone = 1
+	}
+
+	dclcontext = PEXTERN
+	markdcl()
+
+	// Build the wrapper's signature: receiver ".this", then the
+	// wrapped method's in/out parameter lists.
+	this := Nod(ODCLFIELD, newname(Lookup(".this")), typenod(rcvr))
+	this.Left.Name.Param.Ntype = this.Right
+	in := structargs(getinarg(method.Type), 1)
+	out := structargs(Getoutarg(method.Type), 0)
+
+	t := Nod(OTFUNC, nil, nil)
+	l := list1(this)
+	if iface != 0 && rcvr.Width < Types[Tptr].Width {
+		// Building method for interface table and receiver
+		// is smaller than the single pointer-sized word
+		// that the interface call will pass in.
+		// Add a dummy padding argument after the
+		// receiver to make up the difference.
+		tpad := typ(TARRAY)
+
+		tpad.Type = Types[TUINT8]
+		tpad.Bound = Types[Tptr].Width - rcvr.Width
+		pad := Nod(ODCLFIELD, newname(Lookup(".pad")), typenod(tpad))
+		l = list(l, pad)
+	}
+
+	t.List = concat(l, in)
+	t.Rlist = out
+
+	fn := Nod(ODCLFUNC, nil, nil)
+	fn.Func.Nname = newname(newnam)
+	fn.Func.Nname.Name.Defn = fn
+	fn.Func.Nname.Name.Param.Ntype = t
+	declare(fn.Func.Nname, PFUNC)
+	funchdr(fn)
+
+	// arg list
+	var args *NodeList
+
+	isddd := false
+	for l := in; l != nil; l = l.Next {
+		args = list(args, l.N.Left)
+		isddd = l.N.Left.Isddd
+	}
+
+	methodrcvr := getthisx(method.Type).Type.Type
+
+	// generate nil pointer check for better error
+	if Isptr[rcvr.Etype] && rcvr.Type == methodrcvr {
+		// generating wrapper from *T to T.
+		n := Nod(OIF, nil, nil)
+
+		n.Left = Nod(OEQ, this.Left, nodnil())
+
+		// these strings are already in the reflect tables,
+		// so no space cost to use them here.
+		var l *NodeList
+
+		var v Val
+		v.U = rcvr.Type.Sym.Pkg.Name // package name
+		l = list(l, nodlit(v))
+		v.U = rcvr.Type.Sym.Name // type name
+		l = list(l, nodlit(v))
+		v.U = method.Sym.Name
+		l = list(l, nodlit(v)) // method name
+		call := Nod(OCALL, syslook("panicwrap", 0), nil)
+		call.List = l
+		n.Nbody = list1(call)
+		fn.Nbody = list(fn.Nbody, n)
+	}
+
+	dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
+
+	// generate call
+	if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
+		// generate tail call: adjust pointer receiver and jump to embedded method.
+		dot = dot.Left // skip final .M
+		if !Isptr[dotlist[0].field.Type.Etype] {
+			dot = Nod(OADDR, dot, nil)
+		}
+		as := Nod(OAS, this.Left, Nod(OCONVNOP, dot, nil))
+		as.Right.Type = rcvr
+		fn.Nbody = list(fn.Nbody, as)
+		n := Nod(ORETJMP, nil, nil)
+		n.Left = newname(methodsym(method.Sym, methodrcvr, 0))
+		fn.Nbody = list(fn.Nbody, n)
+	} else {
+		fn.Func.Wrapper = true // ignore frame for panic+recover matching
+		call := Nod(OCALL, dot, nil)
+		call.List = args
+		call.Isddd = isddd
+		if method.Type.Outtuple > 0 {
+			n := Nod(ORETURN, nil, nil)
+			n.List = list1(call)
+			call = n
+		}
+
+		fn.Nbody = list(fn.Nbody, call)
+	}
+
+	if false && Debug['r'] != 0 {
+		dumplist("genwrapper body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+
+	// wrappers where T is anonymous (struct or interface) can be duplicated.
+	if rcvr.Etype == TSTRUCT || rcvr.Etype == TINTER || Isptr[rcvr.Etype] && rcvr.Type.Etype == TSTRUCT {
+		fn.Func.Dupok = true
+	}
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+
+	inlcalls(fn)
+	escAnalyze([]*Node{fn}, false)
+
+	Curfn = nil
+	funccompile(fn)
+}
+
+// hashmem returns a node referencing runtime.memhash with the
+// signature func(*T, uintptr, uintptr) uintptr, for hashing a raw
+// run of memory holding a value of type t.
+func hashmem(t *Type) *Node {
+	sym := Pkglookup("memhash", Runtimepkg)
+
+	n := newname(sym)
+	n.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	typecheck(&tfn, Etype)
+	n.Type = tfn.Type
+	return n
+}
+
+// hashfor returns a node referencing the hash function for type t
+// with signature func(*T, uintptr) uintptr: a runtime specialized
+// hash when the algorithm kind has one, otherwise the generated
+// per-type ".hash" symbol. AMEM types must go through hashmem.
+func hashfor(t *Type) *Node {
+	var sym *Sym
+
+	a := algtype1(t, nil)
+	switch a {
+	case AMEM:
+		Fatalf("hashfor with AMEM type")
+
+	case AINTER:
+		sym = Pkglookup("interhash", Runtimepkg)
+
+	case ANILINTER:
+		sym = Pkglookup("nilinterhash", Runtimepkg)
+
+	case ASTRING:
+		sym = Pkglookup("strhash", Runtimepkg)
+
+	case AFLOAT32:
+		sym = Pkglookup("f32hash", Runtimepkg)
+
+	case AFLOAT64:
+		sym = Pkglookup("f64hash", Runtimepkg)
+
+	case ACPLX64:
+		sym = Pkglookup("c64hash", Runtimepkg)
+
+	case ACPLX128:
+		sym = Pkglookup("c128hash", Runtimepkg)
+
+	default:
+		sym = typesymprefix(".hash", t)
+	}
+
+	n := newname(sym)
+	n.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Ptrto(t))))
+	tfn.List = list(tfn.List, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	tfn.Rlist = list(tfn.Rlist, Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])))
+	typecheck(&tfn, Etype)
+	n.Type = tfn.Type
+	return n
+}
+
+// Generate a helper function to compute the hash of a value of type t.
+// Emits and compiles func sym(p *T, h uintptr) uintptr. Only called
+// for arrays and structs whose hashing cannot be done by the
+// standard memory-based algorithms.
+func genhash(sym *Sym, t *Type) {
+	if Debug['r'] != 0 {
+		fmt.Printf("genhash %v %v\n", sym, t)
+	}
+
+	lineno = 1 // less confusing than end of input
+	dclcontext = PEXTERN
+	markdcl()
+
+	// func sym(p *T, h uintptr) uintptr
+	fn := Nod(ODCLFUNC, nil, nil)
+
+	fn.Func.Nname = newname(sym)
+	fn.Func.Nname.Class = PFUNC
+	tfn := Nod(OTFUNC, nil, nil)
+	fn.Func.Nname.Name.Param.Ntype = tfn
+
+	n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+	tfn.List = list(tfn.List, n)
+	np := n.Left
+	n = Nod(ODCLFIELD, newname(Lookup("h")), typenod(Types[TUINTPTR]))
+	tfn.List = list(tfn.List, n)
+	nh := n.Left
+	n = Nod(ODCLFIELD, nil, typenod(Types[TUINTPTR])) // return value
+	tfn.Rlist = list(tfn.Rlist, n)
+
+	funchdr(fn)
+	typecheck(&fn.Func.Nname.Name.Param.Ntype, Etype)
+
+	// genhash is only called for types that have equality but
+	// cannot be handled by the standard algorithms,
+	// so t must be either an array or a struct.
+	switch t.Etype {
+	default:
+		Fatalf("genhash %v", t)
+
+	case TARRAY:
+		if Isslice(t) {
+			Fatalf("genhash %v", t)
+		}
+
+		// An array of pure memory would be handled by the
+		// standard algorithm, so the element type must not be
+		// pure memory.
+		hashel := hashfor(t.Type)
+
+		// for i := range *p { h = hashel(&p[i], h) }
+		n := Nod(ORANGE, nil, Nod(OIND, np, nil))
+		ni := newname(Lookup("i"))
+		ni.Type = Types[TINT]
+		n.List = list1(ni)
+		n.Colas = true
+		colasdefn(n.List, n)
+		ni = n.List.N
+
+		// h = hashel(&p[i], h)
+		call := Nod(OCALL, hashel, nil)
+
+		nx := Nod(OINDEX, np, ni)
+		nx.Bounded = true
+		na := Nod(OADDR, nx, nil)
+		na.Etype = 1 // no escape to heap
+		call.List = list(call.List, na)
+		call.List = list(call.List, nh)
+		n.Nbody = list(n.Nbody, Nod(OAS, nh, call))
+
+		fn.Nbody = list(fn.Nbody, n)
+
+	// Walk the struct using memhash for runs of AMEM
+	// and calling specific hash functions for the others.
+	case TSTRUCT:
+		var first *Type
+
+		offend := int64(0)
+		var size int64
+		var call *Node
+		var nx *Node
+		var na *Node
+		var hashel *Node
+		// first marks the start of the current run of plain-memory
+		// fields; offend tracks the byte offset just past the run.
+		// The loop deliberately iterates once more with t1 == nil
+		// to flush a trailing run.
+		for t1 := t.Type; ; t1 = t1.Down {
+			if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+				offend = t1.Width + t1.Type.Width
+				if first == nil {
+					first = t1
+				}
+
+				// If it's a memory field but it's padded, stop here.
+				if ispaddedfield(t1, t.Width) {
+					t1 = t1.Down
+				} else {
+					continue
+				}
+			}
+
+			// Run memhash for fields up to this one.
+			if first != nil {
+				size = offend - first.Width // first->width is offset
+				hashel = hashmem(first.Type)
+
+				// h = hashel(&p.first, size, h)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(first.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(size))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+
+				first = nil
+			}
+
+			if t1 == nil {
+				break
+			}
+			if isblanksym(t1.Sym) {
+				continue
+			}
+
+			// Run hash for this field.
+			if algtype1(t1.Type, nil) == AMEM {
+				hashel = hashmem(t1.Type)
+
+				// h = memhash(&p.t1, h, size)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				call.List = list(call.List, Nodintconst(t1.Type.Width))
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			} else {
+				hashel = hashfor(t1.Type)
+
+				// h = hashel(&p.t1, h)
+				call = Nod(OCALL, hashel, nil)
+
+				nx = Nod(OXDOT, np, newname(t1.Sym)) // TODO: fields from other packages?
+				na = Nod(OADDR, nx, nil)
+				na.Etype = 1 // no escape to heap
+				call.List = list(call.List, na)
+				call.List = list(call.List, nh)
+				fn.Nbody = list(fn.Nbody, Nod(OAS, nh, call))
+			}
+		}
+	}
+
+	r := Nod(ORETURN, nil, nil)
+	r.List = list(r.List, nh)
+	fn.Nbody = list(fn.Nbody, r)
+
+	if Debug['r'] != 0 {
+		dumplist("genhash body", fn.Nbody)
+	}
+
+	funcbody(fn)
+	Curfn = fn
+	fn.Func.Dupok = true
+	typecheck(&fn, Etop)
+	typechecklist(fn.Nbody, Etop)
+	Curfn = nil
+
+	// Disable safemode while compiling this code: the code we
+	// generate internally can refer to unsafe.Pointer.
+	// In this case it can happen if we need to generate an ==
+	// for a struct containing a reflect.Value, which itself has
+	// an unexported field of type unsafe.Pointer.
+	old_safemode := safemode
+
+	safemode = 0
+	funccompile(fn)
+	safemode = old_safemode
+}
+
+// Return node for
+// if p.field != q.field { return false }
+func eqfield(p *Node, q *Node, field *Node) *Node {
+ nx := Nod(OXDOT, p, field)
+ ny := Nod(OXDOT, q, field)
+ nif := Nod(OIF, nil, nil)
+ nif.Left = Nod(ONE, nx, ny)
+ r := Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(false))
+ nif.Nbody = list(nif.Nbody, r)
+ return nif
+}
+
+func eqmemfunc(size int64, type_ *Type, needsize *int) *Node {
+ var fn *Node
+
+ switch size {
+ default:
+ fn = syslook("memequal", 1)
+ *needsize = 1
+
+ case 1, 2, 4, 8, 16:
+ buf := fmt.Sprintf("memequal%d", int(size)*8)
+ fn = syslook(buf, 1)
+ *needsize = 0
+ }
+
+ substArgTypes(fn, type_, type_)
+ return fn
+}
+
+// Return node for
+// if !memequal(&p.field, &q.field [, size]) { return false }
+func eqmem(p *Node, q *Node, field *Node, size int64) *Node {
+ var needsize int
+
+ nx := Nod(OADDR, Nod(OXDOT, p, field), nil)
+ nx.Etype = 1 // does not escape
+ ny := Nod(OADDR, Nod(OXDOT, q, field), nil)
+ ny.Etype = 1 // does not escape
+ typecheck(&nx, Erv)
+ typecheck(&ny, Erv)
+
+ call := Nod(OCALL, eqmemfunc(size, nx.Type.Type, &needsize), nil)
+ call.List = list(call.List, nx)
+ call.List = list(call.List, ny)
+ if needsize != 0 {
+ call.List = list(call.List, Nodintconst(size))
+ }
+
+ nif := Nod(OIF, nil, nil)
+ nif.Left = Nod(ONOT, call, nil)
+ r := Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(false))
+ nif.Nbody = list(nif.Nbody, r)
+ return nif
+}
+
+// Generate a helper function to check equality of two values of type t.
+func geneq(sym *Sym, t *Type) {
+ if Debug['r'] != 0 {
+ fmt.Printf("geneq %v %v\n", sym, t)
+ }
+
+ lineno = 1 // less confusing than end of input
+ dclcontext = PEXTERN
+ markdcl()
+
+ // func sym(p, q *T) bool
+ fn := Nod(ODCLFUNC, nil, nil)
+
+ fn.Func.Nname = newname(sym)
+ fn.Func.Nname.Class = PFUNC
+ tfn := Nod(OTFUNC, nil, nil)
+ fn.Func.Nname.Name.Param.Ntype = tfn
+
+ n := Nod(ODCLFIELD, newname(Lookup("p")), typenod(Ptrto(t)))
+ tfn.List = list(tfn.List, n)
+ np := n.Left
+ n = Nod(ODCLFIELD, newname(Lookup("q")), typenod(Ptrto(t)))
+ tfn.List = list(tfn.List, n)
+ nq := n.Left
+ n = Nod(ODCLFIELD, nil, typenod(Types[TBOOL]))
+ tfn.Rlist = list(tfn.Rlist, n)
+
+ funchdr(fn)
+
+ // geneq is only called for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Etype {
+ default:
+ Fatalf("geneq %v", t)
+
+ case TARRAY:
+ if Isslice(t) {
+ Fatalf("geneq %v", t)
+ }
+
+ // An array of pure memory would be handled by the
+ // standard memequal, so the element type must not be
+ // pure memory. Even if we unrolled the range loop,
+ // each iteration would be a function call, so don't bother
+ // unrolling.
+ nrange := Nod(ORANGE, nil, Nod(OIND, np, nil))
+
+ ni := newname(Lookup("i"))
+ ni.Type = Types[TINT]
+ nrange.List = list1(ni)
+ nrange.Colas = true
+ colasdefn(nrange.List, nrange)
+ ni = nrange.List.N
+
+ // if p[i] != q[i] { return false }
+ nx := Nod(OINDEX, np, ni)
+
+ nx.Bounded = true
+ ny := Nod(OINDEX, nq, ni)
+ ny.Bounded = true
+
+ nif := Nod(OIF, nil, nil)
+ nif.Left = Nod(ONE, nx, ny)
+ r := Nod(ORETURN, nil, nil)
+ r.List = list(r.List, Nodbool(false))
+ nif.Nbody = list(nif.Nbody, r)
+ nrange.Nbody = list(nrange.Nbody, nif)
+ fn.Nbody = list(fn.Nbody, nrange)
+
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ // Skip blank-named fields.
+ case TSTRUCT:
+ var first *Type
+
+ offend := int64(0)
+ var size int64
+ for t1 := t.Type; ; t1 = t1.Down {
+ if t1 != nil && algtype1(t1.Type, nil) == AMEM && !isblanksym(t1.Sym) {
+ offend = t1.Width + t1.Type.Width
+ if first == nil {
+ first = t1
+ }
+
+ // If it's a memory field but it's padded, stop here.
+ if ispaddedfield(t1, t.Width) {
+ t1 = t1.Down
+ } else {
+ continue
+ }
+ }
+
+ // Run memequal for fields up to this one.
+ // TODO(rsc): All the calls to newname are wrong for
+ // cross-package unexported fields.
+ if first != nil {
+ if first.Down == t1 {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ } else if first.Down.Down == t1 {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ first = first.Down
+ if !isblanksym(first.Sym) {
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(first.Sym)))
+ }
+ } else {
+ // More than two fields: use memequal.
+ size = offend - first.Width // first.Width is the field's offset
+ fn.Nbody = list(fn.Nbody, eqmem(np, nq, newname(first.Sym), size))
+ }
+
+ first = nil
+ }
+
+ if t1 == nil {
+ break
+ }
+ if isblanksym(t1.Sym) {
+ continue
+ }
+
+ // Check this field, which is not just memory.
+ fn.Nbody = list(fn.Nbody, eqfield(np, nq, newname(t1.Sym)))
+ }
+ }
+
+ // return true
+ r := Nod(ORETURN, nil, nil)
+
+ r.List = list(r.List, Nodbool(true))
+ fn.Nbody = list(fn.Nbody, r)
+
+ if Debug['r'] != 0 {
+ dumplist("geneq body", fn.Nbody)
+ }
+
+ funcbody(fn)
+ Curfn = fn
+ fn.Func.Dupok = true
+ typecheck(&fn, Etop)
+ typechecklist(fn.Nbody, Etop)
+ Curfn = nil
+
+ // Disable safemode while compiling this code: the code we
+ // generate internally can refer to unsafe.Pointer.
+ // In this case it can happen if we need to generate an ==
+ // for a struct containing a reflect.Value, which itself has
+ // an unexported field of type unsafe.Pointer.
+ old_safemode := safemode
+
+ safemode = 0
+ funccompile(fn)
+ safemode = old_safemode
+}
+
+func ifacelookdot(s *Sym, t *Type, followptr *bool, ignorecase int) *Type {
+ *followptr = false
+
+ if t == nil {
+ return nil
+ }
+
+ var m *Type
+ var i int
+ var c int
+ for d := 0; d < len(dotlist); d++ {
+ c = adddot1(s, t, d, &m, ignorecase)
+ if c > 1 {
+ Yyerror("%v.%v is ambiguous", t, s)
+ return nil
+ }
+
+ if c == 1 {
+ for i = 0; i < d; i++ {
+ if Isptr[dotlist[i].field.Type.Etype] {
+ *followptr = true
+ break
+ }
+ }
+
+ if m.Type.Etype != TFUNC || m.Type.Thistuple == 0 {
+ Yyerror("%v.%v is a field, not a method", t, s)
+ return nil
+ }
+
+ return m
+ }
+ }
+
+ return nil
+}
+
+func implements(t *Type, iface *Type, m **Type, samename **Type, ptr *int) bool {
+ t0 := t
+ if t == nil {
+ return false
+ }
+
+ // if this is too slow,
+ // could sort these first
+ // and then do one loop.
+
+ if t.Etype == TINTER {
+ var tm *Type
+ for im := iface.Type; im != nil; im = im.Down {
+ for tm = t.Type; tm != nil; tm = tm.Down {
+ if tm.Sym == im.Sym {
+ if Eqtype(tm.Type, im.Type) {
+ goto found
+ }
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 0
+ return false
+ found:
+ }
+
+ return true
+ }
+
+ t = methtype(t, 0)
+ if t != nil {
+ expandmeth(t)
+ }
+ var tm *Type
+ var imtype *Type
+ var followptr bool
+ var rcvr *Type
+ for im := iface.Type; im != nil; im = im.Down {
+ if im.Broke {
+ continue
+ }
+ imtype = methodfunc(im.Type, nil)
+ tm = ifacelookdot(im.Sym, t, &followptr, 0)
+ if tm == nil || tm.Nointerface || !Eqtype(methodfunc(tm.Type, nil), imtype) {
+ if tm == nil {
+ tm = ifacelookdot(im.Sym, t, &followptr, 1)
+ }
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+
+ // if pointer receiver in method,
+ // the method does not exist for value types.
+ rcvr = getthisx(tm.Type).Type.Type
+
+ if Isptr[rcvr.Etype] && !Isptr[t0.Etype] && !followptr && !isifacemethod(tm.Type) {
+ if false && Debug['r'] != 0 {
+ Yyerror("interface pointer mismatch")
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 1
+ return false
+ }
+ }
+
+ return true
+}
+
+// even simpler simtype; get rid of ptr, bool.
+// assuming that the front end has rejected
+// all the invalid conversions (like ptr -> bool)
+func Simsimtype(t *Type) EType {
+ if t == nil {
+ return 0
+ }
+
+ et := Simtype[t.Etype]
+ switch et {
+ case TPTR32:
+ et = TUINT32
+
+ case TPTR64:
+ et = TUINT64
+
+ case TBOOL:
+ et = TUINT8
+ }
+
+ return et
+}
+
+func listtreecopy(l *NodeList, lineno int32) *NodeList {
+ var out *NodeList
+ for ; l != nil; l = l.Next {
+ out = list(out, treecopy(l.N, lineno))
+ }
+ return out
+}
+
+func liststmt(l *NodeList) *Node {
+ n := Nod(OBLOCK, nil, nil)
+ n.List = l
+ if l != nil {
+ n.Lineno = l.N.Lineno
+ }
+ return n
+}
+
+// return nelem of list
+func structcount(t *Type) int {
+ var s Iter
+
+ v := 0
+ for t = Structfirst(&s, &t); t != nil; t = structnext(&s) {
+ v++
+ }
+ return v
+}
+
+// return power of 2 of the constant
+// operand. -1 if it is not a power of 2.
+// 1000+ if it is a -(power of 2)
+func powtwo(n *Node) int {
+ if n == nil || n.Op != OLITERAL || n.Type == nil {
+ return -1
+ }
+ if !Isint[n.Type.Etype] {
+ return -1
+ }
+
+ v := uint64(Mpgetfix(n.Val().U.(*Mpint)))
+ b := uint64(1)
+ for i := 0; i < 64; i++ {
+ if b == v {
+ return i
+ }
+ b = b << 1
+ }
+
+ if !Issigned[n.Type.Etype] {
+ return -1
+ }
+
+ v = -v
+ b = 1
+ for i := 0; i < 64; i++ {
+ if b == v {
+ return i + 1000
+ }
+ b = b << 1
+ }
+
+ return -1
+}
+
+// return the unsigned type for
+// a signed integer type.
+// returns nil if input is not a
+// signed integer type.
+func tounsigned(t *Type) *Type {
+ // this is types[et+1], but not sure
+ // that this relation is immutable
+ switch t.Etype {
+ default:
+ fmt.Printf("tounsigned: unknown type %v\n", t)
+ t = nil
+
+ case TINT:
+ t = Types[TUINT]
+
+ case TINT8:
+ t = Types[TUINT8]
+
+ case TINT16:
+ t = Types[TUINT16]
+
+ case TINT32:
+ t = Types[TUINT32]
+
+ case TINT64:
+ t = Types[TUINT64]
+ }
+
+ return t
+}
+
+// magic number for signed division
+// see hacker's delight chapter 10
+func Smagic(m *Magic) {
+ var mask uint64
+
+ m.Bad = 0
+ switch m.W {
+ default:
+ m.Bad = 1
+ return
+
+ case 8:
+ mask = 0xff
+
+ case 16:
+ mask = 0xffff
+
+ case 32:
+ mask = 0xffffffff
+
+ case 64:
+ mask = 0xffffffffffffffff
+ }
+
+ two31 := mask ^ (mask >> 1)
+
+ p := m.W - 1
+ ad := uint64(m.Sd)
+ if m.Sd < 0 {
+ ad = -uint64(m.Sd)
+ }
+
+ // bad denominators
+ if ad == 0 || ad == 1 || ad == two31 {
+ m.Bad = 1
+ return
+ }
+
+ t := two31
+ ad &= mask
+
+ anc := t - 1 - t%ad
+ anc &= mask
+
+ q1 := two31 / anc
+ r1 := two31 - q1*anc
+ q1 &= mask
+ r1 &= mask
+
+ q2 := two31 / ad
+ r2 := two31 - q2*ad
+ q2 &= mask
+ r2 &= mask
+
+ var delta uint64
+ for {
+ p++
+ q1 <<= 1
+ r1 <<= 1
+ q1 &= mask
+ r1 &= mask
+ if r1 >= anc {
+ q1++
+ r1 -= anc
+ q1 &= mask
+ r1 &= mask
+ }
+
+ q2 <<= 1
+ r2 <<= 1
+ q2 &= mask
+ r2 &= mask
+ if r2 >= ad {
+ q2++
+ r2 -= ad
+ q2 &= mask
+ r2 &= mask
+ }
+
+ delta = ad - r2
+ delta &= mask
+ if q1 < delta || (q1 == delta && r1 == 0) {
+ continue
+ }
+
+ break
+ }
+
+ m.Sm = int64(q2 + 1)
+ if uint64(m.Sm)&two31 != 0 {
+ m.Sm |= ^int64(mask)
+ }
+ m.S = p - m.W
+}
+
+// magic number for unsigned division
+// see hacker's delight chapter 10
+func Umagic(m *Magic) {
+ var mask uint64
+
+ m.Bad = 0
+ m.Ua = 0
+
+ switch m.W {
+ default:
+ m.Bad = 1
+ return
+
+ case 8:
+ mask = 0xff
+
+ case 16:
+ mask = 0xffff
+
+ case 32:
+ mask = 0xffffffff
+
+ case 64:
+ mask = 0xffffffffffffffff
+ }
+
+ two31 := mask ^ (mask >> 1)
+
+ m.Ud &= mask
+ if m.Ud == 0 || m.Ud == two31 {
+ m.Bad = 1
+ return
+ }
+
+ nc := mask - (-m.Ud&mask)%m.Ud
+ p := m.W - 1
+
+ q1 := two31 / nc
+ r1 := two31 - q1*nc
+ q1 &= mask
+ r1 &= mask
+
+ q2 := (two31 - 1) / m.Ud
+ r2 := (two31 - 1) - q2*m.Ud
+ q2 &= mask
+ r2 &= mask
+
+ var delta uint64
+ for {
+ p++
+ if r1 >= nc-r1 {
+ q1 <<= 1
+ q1++
+ r1 <<= 1
+ r1 -= nc
+ } else {
+ q1 <<= 1
+ r1 <<= 1
+ }
+
+ q1 &= mask
+ r1 &= mask
+ if r2+1 >= m.Ud-r2 {
+ if q2 >= two31-1 {
+ m.Ua = 1
+ }
+
+ q2 <<= 1
+ q2++
+ r2 <<= 1
+ r2++
+ r2 -= m.Ud
+ } else {
+ if q2 >= two31 {
+ m.Ua = 1
+ }
+
+ q2 <<= 1
+ r2 <<= 1
+ r2++
+ }
+
+ q2 &= mask
+ r2 &= mask
+
+ delta = m.Ud - 1 - r2
+ delta &= mask
+
+ if p < m.W+m.W {
+ if q1 < delta || (q1 == delta && r1 == 0) {
+ continue
+ }
+ }
+
+ break
+ }
+
+ m.Um = q2 + 1
+ m.S = p - m.W
+}
+
+func ngotype(n *Node) *Sym {
+ if n.Type != nil {
+ return typenamesym(n.Type)
+ }
+ return nil
+}
+
+// Convert raw string to the prefix that will be used in the symbol
+// table. All control characters, space, '%' and '"', as well as
+// non-7-bit clean bytes turn into %xx. The period needs escaping
+// only in the last segment of the path, and it makes for happier
+// users if we escape that as little as possible.
+//
+// If you edit this, edit ../../debug/goobj/read.go:/importPathToPrefix too.
+func pathtoprefix(s string) string {
+ slash := strings.LastIndex(s, "/")
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ var buf bytes.Buffer
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if c <= ' ' || i >= slash && c == '.' || c == '%' || c == '"' || c >= 0x7F {
+ fmt.Fprintf(&buf, "%%%02x", c)
+ continue
+ }
+ buf.WriteByte(c)
+ }
+ return buf.String()
+ }
+ }
+ return s
+}
+
+var pkgMap = make(map[string]*Pkg)
+var pkgs []*Pkg
+
+func mkpkg(path string) *Pkg {
+ if p := pkgMap[path]; p != nil {
+ return p
+ }
+
+ p := new(Pkg)
+ p.Path = path
+ p.Prefix = pathtoprefix(path)
+ p.Syms = make(map[string]*Sym)
+ pkgMap[path] = p
+ pkgs = append(pkgs, p)
+ return p
+}
+
+func addinit(np **Node, init *NodeList) {
+ if init == nil {
+ return
+ }
+
+ n := *np
+ switch n.Op {
+ // There may be multiple refs to this node;
+ // introduce OCONVNOP to hold init list.
+ case ONAME, OLITERAL:
+ n = Nod(OCONVNOP, n, nil)
+
+ n.Type = n.Left.Type
+ n.Typecheck = 1
+ *np = n
+ }
+
+ n.Ninit = concat(init, n.Ninit)
+ n.Ullman = UINF
+}
+
+var reservedimports = []string{
+ "go",
+ "type",
+}
+
+func isbadimport(path string) bool {
+ if strings.Contains(path, "\x00") {
+ Yyerror("import path contains NUL")
+ return true
+ }
+
+ for _, ri := range reservedimports {
+ if path == ri {
+ Yyerror("import path %q is reserved and cannot be used", path)
+ return true
+ }
+ }
+
+ for _, r := range path {
+ if r == utf8.RuneError {
+ Yyerror("import path contains invalid UTF-8 sequence: %q", path)
+ return true
+ }
+
+ if r < 0x20 || r == 0x7f {
+ Yyerror("import path contains control character: %q", path)
+ return true
+ }
+
+ if r == '\\' {
+ Yyerror("import path contains backslash; use slash: %q", path)
+ return true
+ }
+
+ if unicode.IsSpace(rune(r)) {
+ Yyerror("import path contains space character: %q", path)
+ return true
+ }
+
+ if strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r) {
+ Yyerror("import path contains invalid character '%c': %q", r, path)
+ return true
+ }
+ }
+
+ return false
+}
+
+func checknil(x *Node, init **NodeList) {
+ if Isinter(x.Type) {
+ x = Nod(OITAB, x, nil)
+ typecheck(&x, Erv)
+ }
+
+ n := Nod(OCHECKNIL, x, nil)
+ n.Typecheck = 1
+ *init = list(*init, n)
+}
+
+// Can this type be stored directly in an interface word?
+// Yes, if the representation is a single pointer.
+func isdirectiface(t *Type) bool {
+ switch t.Etype {
+ case TPTR32,
+ TPTR64,
+ TCHAN,
+ TMAP,
+ TFUNC,
+ TUNSAFEPTR:
+ return true
+
+ // Array of 1 direct iface type can be direct.
+ case TARRAY:
+ return t.Bound == 1 && isdirectiface(t.Type)
+
+ // Struct with 1 field of direct iface type can be direct.
+ case TSTRUCT:
+ return t.Type != nil && t.Type.Down == nil && isdirectiface(t.Type.Type)
+ }
+
+ return false
+}
+
+// type2IET returns "T" if t is a concrete type,
+// "I" if t is an interface type, and "E" if t is an empty interface type.
+// It is used to build calls to the conv* and assert* runtime routines.
+func type2IET(t *Type) string {
+ if isnilinter(t) {
+ return "E"
+ }
+ if Isinter(t) {
+ return "I"
+ }
+ return "T"
+}
diff -pruN 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/misc/cgo/testshared/src/dep/dep.go 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/misc/cgo/testshared/src/dep/dep.go
--- 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/misc/cgo/testshared/src/dep/dep.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/misc/cgo/testshared/src/dep/dep.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,20 @@
+package dep
+
+var V int = 1
+
+var HasMask []string = []string{"hi"}
+
+type HasProg struct {
+ array [1024]*byte
+}
+
+type Dep struct {
+ X int
+}
+
+func (d *Dep) Method() {
+}
+
+func F() int {
+ return V
+}
diff -pruN 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/go/build.go 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/go/build.go
--- 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/go/build.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/go/build.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,3522 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "container/heap"
+ "debug/elf"
+ "errors"
+ "flag"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "os/exec"
+ "path"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var cmdBuild = &Command{
+ UsageLine: "build [-o output] [-i] [build flags] [packages]",
+ Short: "compile packages and dependencies",
+ Long: `
+Build compiles the packages named by the import paths,
+along with their dependencies, but it does not install the results.
+
+If the arguments to build are a list of .go files, build treats
+them as a list of source files specifying a single package.
+
+When compiling a single main package, build writes
+the resulting executable to an output file named after
+the first source file ('go build ed.go rx.go' writes 'ed' or 'ed.exe')
+or the source code directory ('go build unix/sam' writes 'sam' or 'sam.exe').
+The '.exe' suffix is added when writing a Windows executable.
+
+When compiling multiple packages or a single non-main package,
+build compiles the packages but discards the resulting object,
+serving only as a check that the packages can be built.
+
+The -o flag, only allowed when compiling a single package,
+forces build to write the resulting executable or object
+to the named output file, instead of the default behavior described
+in the last two paragraphs.
+
+The -i flag installs the packages that are dependencies of the target.
+
+The build flags are shared by the build, clean, get, install, list, run,
+and test commands:
+
+ -a
+ force rebuilding of packages that are already up-to-date.
+ -n
+ print the commands but do not run them.
+ -p n
+ the number of programs, such as build commands or
+ test binaries, that can be run in parallel.
+ The default is the number of CPUs available, except
+ on darwin/arm which defaults to 1.
+ -race
+ enable data race detection.
+ Supported only on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64.
+ -msan
+ enable interoperation with memory sanitizer.
+ Supported only on linux/amd64,
+ and only with Clang/LLVM as the host C compiler.
+ -v
+ print the names of packages as they are compiled.
+ -work
+ print the name of the temporary work directory and
+ do not delete it when exiting.
+ -x
+ print the commands.
+
+ -asmflags 'flag list'
+ arguments to pass on each go tool asm invocation.
+ -buildmode mode
+ build mode to use. See 'go help buildmode' for more.
+ -compiler name
+ name of compiler to use, as in runtime.Compiler (gccgo or gc).
+ -gccgoflags 'arg list'
+ arguments to pass on each gccgo compiler/linker invocation.
+ -gcflags 'arg list'
+ arguments to pass on each go tool compile invocation.
+ -installsuffix suffix
+ a suffix to use in the name of the package installation directory,
+ in order to keep output separate from default builds.
+ If using the -race flag, the install suffix is automatically set to race
+ or, if set explicitly, has _race appended to it. Likewise for the -msan
+ flag. Using a -buildmode option that requires non-default compile flags
+ has a similar effect.
+ -ldflags 'flag list'
+ arguments to pass on each go tool link invocation.
+ -linkshared
+ link against shared libraries previously created with
+ -buildmode=shared.
+ -pkgdir dir
+ install and load all packages from dir instead of the usual locations.
+ For example, when building with a non-standard configuration,
+ use -pkgdir to keep generated packages in a separate location.
+ -tags 'tag list'
+ a list of build tags to consider satisfied during the build.
+ For more information about build tags, see the description of
+ build constraints in the documentation for the go/build package.
+ -toolexec 'cmd args'
+ a program to use to invoke toolchain programs like vet and asm.
+ For example, instead of running asm, the go command will run
+ 'cmd args /path/to/asm <arguments for asm>'.
+
+The list flags accept a space-separated list of strings. To embed spaces
+in an element in the list, surround it with either single or double quotes.
+
+For more about specifying packages, see 'go help packages'.
+For more about where packages and binaries are installed,
+run 'go help gopath'.
+For more about calling between Go and C/C++, run 'go help c'.
+
+Note: Build adheres to certain conventions such as those described
+by 'go help gopath'. Not all projects can follow these conventions,
+however. Installations that have their own conventions or that use
+a separate software build system may choose to use lower-level
+invocations such as 'go tool compile' and 'go tool link' to avoid
+some of the overheads and design decisions of the build tool.
+
+See also: go install, go get, go clean.
+ `,
+}
+
+func init() {
+ // break init cycle
+ cmdBuild.Run = runBuild
+ cmdInstall.Run = runInstall
+
+ cmdBuild.Flag.BoolVar(&buildI, "i", false, "")
+
+ addBuildFlags(cmdBuild)
+ addBuildFlags(cmdInstall)
+
+ if buildContext.GOOS == "darwin" {
+ switch buildContext.GOARCH {
+ case "arm", "arm64":
+ // darwin/arm cannot run multiple tests simultaneously.
+ // Parallelism is limited in go_darwin_arm_exec, but
+ // also needs to be limited here so go test std does not
+ // time out tests that are waiting to run.
+ buildP = 1
+ }
+ }
+}
+
+// Flags set by multiple commands.
+var buildA bool // -a flag
+var buildN bool // -n flag
+var buildP = runtime.NumCPU() // -p flag
+var buildV bool // -v flag
+var buildX bool // -x flag
+var buildI bool // -i flag
+var buildO = cmdBuild.Flag.String("o", "", "output file")
+var buildWork bool // -work flag
+var buildAsmflags []string // -asmflags flag
+var buildGcflags []string // -gcflags flag
+var buildLdflags []string // -ldflags flag
+var buildGccgoflags []string // -gccgoflags flag
+var buildRace bool // -race flag
+var buildMSan bool // -msan flag
+var buildToolExec []string // -toolexec flag
+var buildBuildmode string // -buildmode flag
+var buildLinkshared bool // -linkshared flag
+var buildPkgdir string // -pkgdir flag
+
+var buildContext = build.Default
+var buildToolchain toolchain = noToolchain{}
+var ldBuildmode string
+
+// buildCompiler implements flag.Var.
+// It implements Set by updating both
+// buildToolchain and buildContext.Compiler.
+type buildCompiler struct{}
+
+func (c buildCompiler) Set(value string) error {
+ switch value {
+ case "gc":
+ buildToolchain = gcToolchain{}
+ case "gccgo":
+ buildToolchain = gccgoToolchain{}
+ default:
+ return fmt.Errorf("unknown compiler %q", value)
+ }
+ buildContext.Compiler = value
+ return nil
+}
+
+func (c buildCompiler) String() string {
+ return buildContext.Compiler
+}
+
+func init() {
+ switch build.Default.Compiler {
+ case "gc":
+ buildToolchain = gcToolchain{}
+ case "gccgo":
+ buildToolchain = gccgoToolchain{}
+ }
+}
+
+// addBuildFlags adds the flags common to the build, clean, get,
+// install, list, run, and test commands.
+func addBuildFlags(cmd *Command) {
+ cmd.Flag.BoolVar(&buildA, "a", false, "")
+ cmd.Flag.BoolVar(&buildN, "n", false, "")
+ cmd.Flag.IntVar(&buildP, "p", buildP, "")
+ cmd.Flag.BoolVar(&buildV, "v", false, "")
+ cmd.Flag.BoolVar(&buildX, "x", false, "")
+
+ cmd.Flag.Var((*stringsFlag)(&buildAsmflags), "asmflags", "")
+ cmd.Flag.Var(buildCompiler{}, "compiler", "")
+ cmd.Flag.StringVar(&buildBuildmode, "buildmode", "default", "")
+ cmd.Flag.Var((*stringsFlag)(&buildGcflags), "gcflags", "")
+ cmd.Flag.Var((*stringsFlag)(&buildGccgoflags), "gccgoflags", "")
+ cmd.Flag.StringVar(&buildContext.InstallSuffix, "installsuffix", "", "")
+ cmd.Flag.Var((*stringsFlag)(&buildLdflags), "ldflags", "")
+ cmd.Flag.BoolVar(&buildLinkshared, "linkshared", false, "")
+ cmd.Flag.StringVar(&buildPkgdir, "pkgdir", "", "")
+ cmd.Flag.BoolVar(&buildRace, "race", false, "")
+ cmd.Flag.BoolVar(&buildMSan, "msan", false, "")
+ cmd.Flag.Var((*stringsFlag)(&buildContext.BuildTags), "tags", "")
+ cmd.Flag.Var((*stringsFlag)(&buildToolExec), "toolexec", "")
+ cmd.Flag.BoolVar(&buildWork, "work", false, "")
+}
+
+func addBuildFlagsNX(cmd *Command) {
+ cmd.Flag.BoolVar(&buildN, "n", false, "")
+ cmd.Flag.BoolVar(&buildX, "x", false, "")
+}
+
+func isSpaceByte(c byte) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+// fileExtSplit expects a filename and returns the name
+// and ext (without the dot). If the file has no
+// extension, ext will be empty.
+func fileExtSplit(file string) (name, ext string) {
+ dotExt := filepath.Ext(file)
+ name = file[:len(file)-len(dotExt)]
+ if dotExt != "" {
+ ext = dotExt[1:]
+ }
+ return
+}
+
+type stringsFlag []string
+
+func (v *stringsFlag) Set(s string) error {
+ var err error
+ *v, err = splitQuotedFields(s)
+ if *v == nil {
+ *v = []string{}
+ }
+ return err
+}
+
+func splitQuotedFields(s string) ([]string, error) {
+ // Split fields allowing '' or "" around elements.
+ // Quotes further inside the string do not count.
+ var f []string
+ for len(s) > 0 {
+ for len(s) > 0 && isSpaceByte(s[0]) {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ break
+ }
+ // Accept a quoted string. No unescaping inside.
+ if s[0] == '"' || s[0] == '\'' {
+ quote := s[0]
+ s = s[1:]
+ i := 0
+ for i < len(s) && s[i] != quote {
+ i++
+ }
+ if i >= len(s) {
+ return nil, fmt.Errorf("unterminated %c string", quote)
+ }
+ f = append(f, s[:i])
+ s = s[i+1:]
+ continue
+ }
+ i := 0
+ for i < len(s) && !isSpaceByte(s[i]) {
+ i++
+ }
+ f = append(f, s[:i])
+ s = s[i:]
+ }
+ return f, nil
+}
+
+func (v *stringsFlag) String() string {
+ return ""
+}
+
+func pkgsMain(pkgs []*Package) (res []*Package) {
+ for _, p := range pkgs {
+ if p.Name == "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+func pkgsNotMain(pkgs []*Package) (res []*Package) {
+ for _, p := range pkgs {
+ if p.Name != "main" {
+ res = append(res, p)
+ }
+ }
+ return res
+}
+
+var pkgsFilter = func(pkgs []*Package) []*Package { return pkgs }
+
+func buildModeInit() {
+ _, gccgo := buildToolchain.(gccgoToolchain)
+ var codegenArg string
+ platform := goos + "/" + goarch
+ switch buildBuildmode {
+ case "archive":
+ pkgsFilter = pkgsNotMain
+ case "c-archive":
+ pkgsFilter = func(p []*Package) []*Package {
+ if len(p) != 1 || p[0].Name != "main" {
+ fatalf("-buildmode=c-archive requires exactly one main package")
+ }
+ return p
+ }
+ exeSuffix = ".a"
+ ldBuildmode = "c-archive"
+ case "c-shared":
+ pkgsFilter = pkgsMain
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/amd64", "linux/arm", "linux/arm64", "linux/386",
+ "android/amd64", "android/arm", "android/arm64", "android/386":
+ codegenArg = "-shared"
+ case "darwin/amd64", "darwin/386":
+ default:
+ fatalf("-buildmode=c-shared not supported on %s\n", platform)
+ }
+ }
+ ldBuildmode = "c-shared"
+ case "default":
+ switch platform {
+ case "android/arm", "android/arm64", "android/amd64", "android/386":
+ codegenArg = "-shared"
+ ldBuildmode = "pie"
+ default:
+ ldBuildmode = "exe"
+ }
+ case "exe":
+ pkgsFilter = pkgsMain
+ ldBuildmode = "exe"
+ case "pie":
+ if gccgo {
+ fatalf("-buildmode=pie not supported by gccgo")
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
+ "android/amd64", "android/arm", "android/arm64", "android/386":
+ codegenArg = "-shared"
+ default:
+ fatalf("-buildmode=pie not supported on %s\n", platform)
+ }
+ }
+ ldBuildmode = "pie"
+ case "shared":
+ pkgsFilter = pkgsNotMain
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+ default:
+ fatalf("-buildmode=shared not supported on %s\n", platform)
+ }
+ codegenArg = "-dynlink"
+ }
+ if *buildO != "" {
+ fatalf("-buildmode=shared and -o not supported together")
+ }
+ ldBuildmode = "shared"
+ default:
+ fatalf("buildmode=%s not supported", buildBuildmode)
+ }
+ if buildLinkshared {
+ if gccgo {
+ codegenArg = "-fPIC"
+ } else {
+ switch platform {
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
+ buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
+ default:
+ fatalf("-linkshared not supported on %s\n", platform)
+ }
+ codegenArg = "-dynlink"
+ // TODO(mwhudson): remove -w when that gets fixed in linker.
+ buildLdflags = append(buildLdflags, "-linkshared", "-w")
+ }
+ }
+ if codegenArg != "" {
+ if gccgo {
+ buildGccgoflags = append(buildGccgoflags, codegenArg)
+ } else {
+ buildAsmflags = append(buildAsmflags, codegenArg)
+ buildGcflags = append(buildGcflags, codegenArg)
+ }
+ if buildContext.InstallSuffix != "" {
+ buildContext.InstallSuffix += "_"
+ }
+ buildContext.InstallSuffix += codegenArg[1:]
+ }
+}
+
+func runBuild(cmd *Command, args []string) {
+ instrumentInit()
+ buildModeInit()
+ var b builder
+ b.init()
+
+ pkgs := packagesForBuild(args)
+
+ if len(pkgs) == 1 && pkgs[0].Name == "main" && *buildO == "" {
+ _, *buildO = path.Split(pkgs[0].ImportPath)
+ *buildO += exeSuffix
+ }
+
+ // sanity check some often mis-used options
+ switch buildContext.Compiler {
+ case "gccgo":
+ if len(buildGcflags) != 0 {
+ fmt.Println("go build: when using gccgo toolchain, please pass compiler flags using -gccgoflags, not -gcflags")
+ }
+ if len(buildLdflags) != 0 {
+ fmt.Println("go build: when using gccgo toolchain, please pass linker flags using -gccgoflags, not -ldflags")
+ }
+ case "gc":
+ if len(buildGccgoflags) != 0 {
+ fmt.Println("go build: when using gc toolchain, please pass compile flags using -gcflags, and linker flags using -ldflags")
+ }
+ }
+
+ depMode := modeBuild
+ if buildI {
+ depMode = modeInstall
+ }
+
+ if *buildO != "" {
+ if len(pkgs) > 1 {
+ fatalf("go build: cannot use -o with multiple packages")
+ } else if len(pkgs) == 0 {
+ fatalf("no packages to build")
+ }
+ p := pkgs[0]
+ p.target = *buildO
+ p.Stale = true // must build - not up to date
+ a := b.action(modeInstall, depMode, p)
+ b.do(a)
+ return
+ }
+
+ var a *action
+ if buildBuildmode == "shared" {
+ pkgs := pkgsFilter(packages(args))
+ if libName, err := libname(args, pkgs); err != nil {
+ fatalf("%s", err.Error())
+ } else {
+ a = b.libaction(libName, pkgs, modeBuild, depMode)
+ }
+ } else {
+ a = &action{}
+ for _, p := range pkgsFilter(packages(args)) {
+ a.deps = append(a.deps, b.action(modeBuild, depMode, p))
+ }
+ }
+ b.do(a)
+}
+
+var cmdInstall = &Command{
+ UsageLine: "install [build flags] [packages]",
+ Short: "compile and install packages and dependencies",
+ Long: `
+Install compiles and installs the packages named by the import paths,
+along with their dependencies.
+
+For more about the build flags, see 'go help build'.
+For more about specifying packages, see 'go help packages'.
+
+See also: go build, go get, go clean.
+ `,
+}
+
+// isMetaPackage checks if name is a reserved package name that expands to multiple packages
+func isMetaPackage(name string) bool {
+ return name == "std" || name == "cmd" || name == "all"
+}
+
+// libname returns the filename to use for the shared library when using
+// -buildmode=shared. The rules we use are:
+// Use arguments for special 'meta' packages:
+// std --> libstd.so
+// std cmd --> libstd,cmd.so
+// A single non-meta argument with trailing "/..." is special cased:
+// foo/... --> libfoo.so
+// (A relative path like "./..." expands the "." first)
+// Use import paths for other cases, changing '/' to '-':
+// somelib --> libsubdir-somelib.so
+// ./ or ../ --> libsubdir-somelib.so
+// gopkg.in/tomb.v2 -> libgopkg.in-tomb.v2.so
+// a/... b/... ---> liba/c,b/d.so - all matching import paths
+// Name parts are joined with ','.
+func libname(args []string, pkgs []*Package) (string, error) {
+ var libname string
+ appendName := func(arg string) {
+ if libname == "" {
+ libname = arg
+ } else {
+ libname += "," + arg
+ }
+ }
+ var haveNonMeta bool
+ for _, arg := range args {
+ if isMetaPackage(arg) {
+ appendName(arg)
+ } else {
+ haveNonMeta = true
+ }
+ }
+ if len(libname) == 0 { // non-meta packages only. use import paths
+ if len(args) == 1 && strings.HasSuffix(args[0], "/...") {
+ // Special case of "foo/..." as mentioned above.
+ arg := strings.TrimSuffix(args[0], "/...")
+ if build.IsLocalImport(arg) {
+ cwd, _ := os.Getwd()
+ bp, _ := buildContext.ImportDir(filepath.Join(cwd, arg), build.FindOnly)
+ if bp.ImportPath != "" && bp.ImportPath != "." {
+ arg = bp.ImportPath
+ }
+ }
+ appendName(strings.Replace(arg, "/", "-", -1))
+ } else {
+ for _, pkg := range pkgs {
+ appendName(strings.Replace(pkg.ImportPath, "/", "-", -1))
+ }
+ }
+ } else if haveNonMeta { // have both meta package and a non-meta one
+ return "", errors.New("mixing of meta and non-meta packages is not allowed")
+ }
+ // TODO(mwhudson): Needs to change for platforms that use different naming
+ // conventions...
+ return "lib" + libname + ".so", nil
+}
+
+func runInstall(cmd *Command, args []string) {
+ if gobin != "" && !filepath.IsAbs(gobin) {
+ fatalf("cannot install, GOBIN must be an absolute path")
+ }
+
+ instrumentInit()
+ buildModeInit()
+ pkgs := pkgsFilter(packagesForBuild(args))
+
+ for _, p := range pkgs {
+ if p.Target == "" && (!p.Standard || p.ImportPath != "unsafe") {
+ switch {
+ case p.gobinSubdir:
+ errorf("go install: cannot install cross-compiled binaries when GOBIN is set")
+ case p.cmdline:
+ errorf("go install: no install location for .go files listed on command line (GOBIN not set)")
+ case p.ConflictDir != "":
+ errorf("go install: no install location for %s: hidden by %s", p.Dir, p.ConflictDir)
+ default:
+ errorf("go install: no install location for directory %s outside GOPATH\n"+
+ "\tFor more details see: go help gopath", p.Dir)
+ }
+ }
+ }
+ exitIfErrors()
+
+ var b builder
+ b.init()
+ var a *action
+ if buildBuildmode == "shared" {
+ if libName, err := libname(args, pkgs); err != nil {
+ fatalf("%s", err.Error())
+ } else {
+ a = b.libaction(libName, pkgs, modeInstall, modeInstall)
+ }
+ } else {
+ a = &action{}
+ var tools []*action
+ for _, p := range pkgs {
+ // If p is a tool, delay the installation until the end of the build.
+ // This avoids installing assemblers/compilers that are being executed
+ // by other steps in the build.
+ // cmd/cgo is handled specially in b.action, so that we can
+ // both build and use it in the same 'go install'.
+ action := b.action(modeInstall, modeInstall, p)
+ if goTools[p.ImportPath] == toTool && p.ImportPath != "cmd/cgo" {
+ a.deps = append(a.deps, action.deps...)
+ action.deps = append(action.deps, a)
+ tools = append(tools, action)
+ continue
+ }
+ a.deps = append(a.deps, action)
+ }
+ if len(tools) > 0 {
+ a = &action{
+ deps: tools,
+ }
+ }
+ }
+ b.do(a)
+ exitIfErrors()
+
+ // Success. If this command is 'go install' with no arguments
+ // and the current directory (the implicit argument) is a command,
+ // remove any leftover command binary from a previous 'go build'.
+ // The binary is installed; it's not needed here anymore.
+ // And worse it might be a stale copy, which you don't want to find
+ // instead of the installed one if $PATH contains dot.
+ // One way to view this behavior is that it is as if 'go install' first
+	// runs 'go build' and then moves the generated file to the install dir.
+ // See issue 9645.
+ if len(args) == 0 && len(pkgs) == 1 && pkgs[0].Name == "main" {
+ // Compute file 'go build' would have created.
+ // If it exists and is an executable file, remove it.
+ _, targ := filepath.Split(pkgs[0].ImportPath)
+ targ += exeSuffix
+ if filepath.Join(pkgs[0].Dir, targ) != pkgs[0].Target { // maybe $GOBIN is the current directory
+ fi, err := os.Stat(targ)
+ if err == nil {
+ m := fi.Mode()
+ if m.IsRegular() {
+ if m&0111 != 0 || goos == "windows" { // windows never sets executable bit
+ os.Remove(targ)
+ }
+ }
+ }
+ }
+ }
+}
+
+// Global build parameters (used during package load)
+var (
+ goarch string
+ goos string
+ exeSuffix string
+ gopath []string
+)
+
+func init() {
+ goarch = buildContext.GOARCH
+ goos = buildContext.GOOS
+ if goos == "windows" {
+ exeSuffix = ".exe"
+ }
+ gopath = filepath.SplitList(buildContext.GOPATH)
+}
+
+// A builder holds global state about a build.
+// It does not hold per-package state, because we
+// build packages in parallel, and the builder is shared.
+type builder struct {
+ work string // the temporary work directory (ends in filepath.Separator)
+ actionCache map[cacheKey]*action // a cache of already-constructed actions
+ mkdirCache map[string]bool // a cache of created directories
+ print func(args ...interface{}) (int, error)
+
+ output sync.Mutex
+ scriptDir string // current directory in printed script
+
+ exec sync.Mutex
+ readySema chan bool
+ ready actionQueue
+}
+
+// An action represents a single action in the action graph.
+type action struct {
+ p *Package // the package this action works on
+ deps []*action // actions that must happen before this one
+ triggers []*action // inverse of deps
+ cgo *action // action for cgo binary if needed
+ args []string // additional args for runProgram
+ testOutput *bytes.Buffer // test output buffer
+
+ f func(*builder, *action) error // the action itself (nil = no-op)
+ ignoreFail bool // whether to run f even if dependencies fail
+
+ // Generated files, directories.
+ link bool // target is executable, not just package
+ pkgdir string // the -I or -L argument to use when importing this package
+ objdir string // directory for intermediate objects
+ objpkg string // the intermediate package .a file created during the action
+ target string // goal of the action: the created package or executable
+
+ // Execution state.
+ pending int // number of deps yet to complete
+ priority int // relative execution priority
+ failed bool // whether the action failed
+}
+
+// cacheKey is the key for the action cache.
+type cacheKey struct {
+ mode buildMode
+ p *Package
+ shlib string
+}
+
+// buildMode specifies the build mode:
+// are we just building things or also installing the results?
+type buildMode int
+
+const (
+ modeBuild buildMode = iota
+ modeInstall
+)
+
+var (
+ goroot = filepath.Clean(runtime.GOROOT())
+ gobin = os.Getenv("GOBIN")
+ gorootBin = filepath.Join(goroot, "bin")
+ gorootPkg = filepath.Join(goroot, "pkg")
+ gorootSrc = filepath.Join(goroot, "src")
+)
+
+func (b *builder) init() {
+ var err error
+ b.print = func(a ...interface{}) (int, error) {
+ return fmt.Fprint(os.Stderr, a...)
+ }
+ b.actionCache = make(map[cacheKey]*action)
+ b.mkdirCache = make(map[string]bool)
+
+ if buildN {
+ b.work = "$WORK"
+ } else {
+ b.work, err = ioutil.TempDir("", "go-build")
+ if err != nil {
+ fatalf("%s", err)
+ }
+ if buildX || buildWork {
+ fmt.Fprintf(os.Stderr, "WORK=%s\n", b.work)
+ }
+ if !buildWork {
+ workdir := b.work
+ atexit(func() { os.RemoveAll(workdir) })
+ }
+ }
+}
+
+// goFilesPackage creates a package for building a collection of Go files
+// (typically named on the command line). The target is named p.a for
+// package p or named after the first Go file for package main.
+func goFilesPackage(gofiles []string) *Package {
+ // TODO: Remove this restriction.
+ for _, f := range gofiles {
+ if !strings.HasSuffix(f, ".go") {
+ fatalf("named files must be .go files")
+ }
+ }
+
+ var stk importStack
+ ctxt := buildContext
+ ctxt.UseAllFiles = true
+
+ // Synthesize fake "directory" that only shows the named files,
+ // to make it look like this is a standard package or
+ // command directory. So that local imports resolve
+ // consistently, the files must all be in the same directory.
+ var dirent []os.FileInfo
+ var dir string
+ for _, file := range gofiles {
+ fi, err := os.Stat(file)
+ if err != nil {
+ fatalf("%s", err)
+ }
+ if fi.IsDir() {
+ fatalf("%s is a directory, should be a Go file", file)
+ }
+ dir1, _ := filepath.Split(file)
+ if dir1 == "" {
+ dir1 = "./"
+ }
+ if dir == "" {
+ dir = dir1
+ } else if dir != dir1 {
+ fatalf("named files must all be in one directory; have %s and %s", dir, dir1)
+ }
+ dirent = append(dirent, fi)
+ }
+ ctxt.ReadDir = func(string) ([]os.FileInfo, error) { return dirent, nil }
+
+ var err error
+ if dir == "" {
+ dir = cwd
+ }
+ dir, err = filepath.Abs(dir)
+ if err != nil {
+ fatalf("%s", err)
+ }
+
+ bp, err := ctxt.ImportDir(dir, 0)
+ pkg := new(Package)
+ pkg.local = true
+ pkg.cmdline = true
+ stk.push("main")
+ pkg.load(&stk, bp, err)
+ stk.pop()
+ pkg.localPrefix = dirToImportPath(dir)
+ pkg.ImportPath = "command-line-arguments"
+ pkg.target = ""
+
+ if pkg.Name == "main" {
+ _, elem := filepath.Split(gofiles[0])
+ exe := elem[:len(elem)-len(".go")] + exeSuffix
+ if *buildO == "" {
+ *buildO = exe
+ }
+ if gobin != "" {
+ pkg.target = filepath.Join(gobin, exe)
+ }
+ }
+
+ pkg.Target = pkg.target
+ pkg.Stale = true
+
+ computeStale(pkg)
+ return pkg
+}
+
+// readpkglist returns the list of packages that were built into the shared library
+// at shlibpath. For the native toolchain this list is stored, newline separated, in
+// an ELF note with name "Go\x00\x00" and type 1. For GCCGO it is extracted from the
+// .go_export section.
+func readpkglist(shlibpath string) (pkgs []*Package) {
+ var stk importStack
+ if _, gccgo := buildToolchain.(gccgoToolchain); gccgo {
+ f, _ := elf.Open(shlibpath)
+ sect := f.Section(".go_export")
+ data, _ := sect.Data()
+ scanner := bufio.NewScanner(bytes.NewBuffer(data))
+ for scanner.Scan() {
+ t := scanner.Text()
+ if strings.HasPrefix(t, "pkgpath ") {
+ t = strings.TrimPrefix(t, "pkgpath ")
+ t = strings.TrimSuffix(t, ";")
+ pkgs = append(pkgs, loadPackage(t, &stk))
+ }
+ }
+ } else {
+ pkglistbytes, err := readELFNote(shlibpath, "Go\x00\x00", 1)
+ if err != nil {
+ fatalf("readELFNote failed: %v", err)
+ }
+ scanner := bufio.NewScanner(bytes.NewBuffer(pkglistbytes))
+ for scanner.Scan() {
+ t := scanner.Text()
+ pkgs = append(pkgs, loadPackage(t, &stk))
+ }
+ }
+ return
+}
+
+// action returns the action for applying the given operation (mode) to the package.
+// depMode is the action to use when building dependencies.
+// action never looks for p in a shared library, but may find p's dependencies in a
+// shared library if buildLinkshared is true.
+func (b *builder) action(mode buildMode, depMode buildMode, p *Package) *action {
+ return b.action1(mode, depMode, p, false, "")
+}
+
+// action1 returns the action for applying the given operation (mode) to the package.
+// depMode is the action to use when building dependencies.
+// action1 will look for p in a shared library if lookshared is true.
+// forShlib is the shared library that p will become part of, if any.
+func (b *builder) action1(mode buildMode, depMode buildMode, p *Package, lookshared bool, forShlib string) *action {
+ shlib := ""
+ if lookshared {
+ shlib = p.Shlib
+ }
+ key := cacheKey{mode, p, shlib}
+
+ a := b.actionCache[key]
+ if a != nil {
+ return a
+ }
+ if shlib != "" {
+ key2 := cacheKey{modeInstall, nil, shlib}
+ a = b.actionCache[key2]
+ if a != nil {
+ b.actionCache[key] = a
+ return a
+ }
+ pkgs := readpkglist(shlib)
+ a = b.libaction(filepath.Base(shlib), pkgs, modeInstall, depMode)
+ b.actionCache[key2] = a
+ b.actionCache[key] = a
+ return a
+ }
+
+ a = &action{p: p, pkgdir: p.build.PkgRoot}
+ if p.pkgdir != "" { // overrides p.t
+ a.pkgdir = p.pkgdir
+ }
+ b.actionCache[key] = a
+
+ for _, p1 := range p.imports {
+ if forShlib != "" {
+ // p is part of a shared library.
+ if p1.Shlib != "" && p1.Shlib != forShlib {
+ // p1 is explicitly part of a different shared library.
+ // Put the action for that shared library into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, true, p1.Shlib))
+ } else {
+ // p1 is (implicitly or not) part of this shared library.
+ // Put the action for p1 into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, false, forShlib))
+ }
+ } else {
+ // p is not part of a shared library.
+ // If p1 is in a shared library, put the action for that into
+ // a.deps, otherwise put the action for p1 into a.deps.
+ a.deps = append(a.deps, b.action1(depMode, depMode, p1, buildLinkshared, p1.Shlib))
+ }
+ }
+
+ // If we are not doing a cross-build, then record the binary we'll
+ // generate for cgo as a dependency of the build of any package
+ // using cgo, to make sure we do not overwrite the binary while
+ // a package is using it. If this is a cross-build, then the cgo we
+ // are writing is not the cgo we need to use.
+ if goos == runtime.GOOS && goarch == runtime.GOARCH && !buildRace && !buildMSan {
+ if (len(p.CgoFiles) > 0 || p.Standard && p.ImportPath == "runtime/cgo") && !buildLinkshared && buildBuildmode != "shared" {
+ var stk importStack
+ p1 := loadPackage("cmd/cgo", &stk)
+ if p1.Error != nil {
+ fatalf("load cmd/cgo: %v", p1.Error)
+ }
+ a.cgo = b.action(depMode, depMode, p1)
+ a.deps = append(a.deps, a.cgo)
+ }
+ }
+
+ if p.Standard {
+ switch p.ImportPath {
+ case "builtin", "unsafe":
+ // Fake packages - nothing to build.
+ return a
+ }
+ // gccgo standard library is "fake" too.
+ if _, ok := buildToolchain.(gccgoToolchain); ok {
+ // the target name is needed for cgo.
+ a.target = p.target
+ return a
+ }
+ }
+
+ if !p.Stale && p.target != "" {
+ // p.Stale==false implies that p.target is up-to-date.
+ // Record target name for use by actions depending on this one.
+ a.target = p.target
+ return a
+ }
+
+ if p.local && p.target == "" {
+ // Imported via local path. No permanent target.
+ mode = modeBuild
+ }
+ work := p.pkgdir
+ if work == "" {
+ work = b.work
+ }
+ a.objdir = filepath.Join(work, a.p.ImportPath, "_obj") + string(filepath.Separator)
+ a.objpkg = buildToolchain.pkgpath(work, a.p)
+ a.link = p.Name == "main"
+
+ switch mode {
+ case modeInstall:
+ a.f = (*builder).install
+ a.deps = []*action{b.action1(modeBuild, depMode, p, lookshared, forShlib)}
+ a.target = a.p.target
+
+ // Install header for cgo in c-archive and c-shared modes.
+ if p.usesCgo() && (buildBuildmode == "c-archive" || buildBuildmode == "c-shared") {
+ hdrTarget := a.target[:len(a.target)-len(filepath.Ext(a.target))] + ".h"
+ if buildContext.Compiler == "gccgo" {
+ // For the header file, remove the "lib"
+ // added by go/build, so we generate pkg.h
+ // rather than libpkg.h.
+ dir, file := filepath.Split(hdrTarget)
+ file = strings.TrimPrefix(file, "lib")
+ hdrTarget = filepath.Join(dir, file)
+ }
+ ah := &action{
+ p: a.p,
+ deps: []*action{a.deps[0]},
+ f: (*builder).installHeader,
+ pkgdir: a.pkgdir,
+ objdir: a.objdir,
+ target: hdrTarget,
+ }
+ a.deps = append(a.deps, ah)
+ }
+
+ case modeBuild:
+ a.f = (*builder).build
+ a.target = a.objpkg
+ if a.link {
+ // An executable file. (This is the name of a temporary file.)
+ // Because we run the temporary file in 'go run' and 'go test',
+ // the name will show up in ps listings. If the caller has specified
+ // a name, use that instead of a.out. The binary is generated
+ // in an otherwise empty subdirectory named exe to avoid
+ // naming conflicts. The only possible conflict is if we were
+ // to create a top-level package named exe.
+ name := "a.out"
+ if p.exeName != "" {
+ name = p.exeName
+ } else if goos == "darwin" && buildBuildmode == "c-shared" && p.target != "" {
+ // On OS X, the linker output name gets recorded in the
+ // shared library's LC_ID_DYLIB load command.
+ // The code invoking the linker knows to pass only the final
+ // path element. Arrange that the path element matches what
+ // we'll install it as; otherwise the library is only loadable as "a.out".
+ _, name = filepath.Split(p.target)
+ }
+ a.target = a.objdir + filepath.Join("exe", name) + exeSuffix
+ }
+ }
+
+ return a
+}
+
+func (b *builder) libaction(libname string, pkgs []*Package, mode, depMode buildMode) *action {
+ a := &action{}
+ switch mode {
+ default:
+ fatalf("unrecognized mode %v", mode)
+
+ case modeBuild:
+ a.f = (*builder).linkShared
+ a.target = filepath.Join(b.work, libname)
+ for _, p := range pkgs {
+ if p.target == "" {
+ continue
+ }
+ a.deps = append(a.deps, b.action(depMode, depMode, p))
+ }
+
+ case modeInstall:
+ // Currently build mode shared forces external linking mode, and
+ // external linking mode forces an import of runtime/cgo (and
+ // math on arm). So if it was not passed on the command line and
+ // it is not present in another shared library, add it here.
+ _, gccgo := buildToolchain.(gccgoToolchain)
+ if !gccgo {
+ seencgo := false
+ for _, p := range pkgs {
+ seencgo = seencgo || (p.Standard && p.ImportPath == "runtime/cgo")
+ }
+ if !seencgo {
+ var stk importStack
+ p := loadPackage("runtime/cgo", &stk)
+ if p.Error != nil {
+ fatalf("load runtime/cgo: %v", p.Error)
+ }
+ computeStale(p)
+ // If runtime/cgo is in another shared library, then that's
+ // also the shared library that contains runtime, so
+ // something will depend on it and so runtime/cgo's staleness
+ // will be checked when processing that library.
+ if p.Shlib == "" || p.Shlib == libname {
+ pkgs = append([]*Package{}, pkgs...)
+ pkgs = append(pkgs, p)
+ }
+ }
+ if goarch == "arm" {
+ seenmath := false
+ for _, p := range pkgs {
+ seenmath = seenmath || (p.Standard && p.ImportPath == "math")
+ }
+ if !seenmath {
+ var stk importStack
+ p := loadPackage("math", &stk)
+ if p.Error != nil {
+ fatalf("load math: %v", p.Error)
+ }
+ computeStale(p)
+ // If math is in another shared library, then that's
+ // also the shared library that contains runtime, so
+ // something will depend on it and so math's staleness
+ // will be checked when processing that library.
+ if p.Shlib == "" || p.Shlib == libname {
+ pkgs = append([]*Package{}, pkgs...)
+ pkgs = append(pkgs, p)
+ }
+ }
+ }
+ }
+
+ // Figure out where the library will go.
+ var libdir string
+ for _, p := range pkgs {
+ plibdir := p.build.PkgTargetRoot
+ if gccgo {
+ plibdir = filepath.Join(plibdir, "shlibs")
+ }
+ if libdir == "" {
+ libdir = plibdir
+ } else if libdir != plibdir {
+ fatalf("multiple roots %s & %s", libdir, plibdir)
+ }
+ }
+ a.target = filepath.Join(libdir, libname)
+
+ // Now we can check whether we need to rebuild it.
+ stale := false
+ var built time.Time
+ if fi, err := os.Stat(a.target); err == nil {
+ built = fi.ModTime()
+ }
+ for _, p := range pkgs {
+ if p.target == "" {
+ continue
+ }
+ stale = stale || p.Stale
+ lstat, err := os.Stat(p.target)
+ if err != nil || lstat.ModTime().After(built) {
+ stale = true
+ }
+ a.deps = append(a.deps, b.action1(depMode, depMode, p, false, a.target))
+ }
+
+ if stale {
+ a.f = (*builder).install
+ buildAction := b.libaction(libname, pkgs, modeBuild, depMode)
+ a.deps = []*action{buildAction}
+ for _, p := range pkgs {
+ if p.target == "" {
+ continue
+ }
+ shlibnameaction := &action{}
+ shlibnameaction.f = (*builder).installShlibname
+ shlibnameaction.target = p.target[:len(p.target)-2] + ".shlibname"
+ a.deps = append(a.deps, shlibnameaction)
+ shlibnameaction.deps = append(shlibnameaction.deps, buildAction)
+ }
+ }
+ }
+ return a
+}
+
+// actionList returns the list of actions in the dag rooted at root
+// as visited in a depth-first post-order traversal.
+func actionList(root *action) []*action {
+ seen := map[*action]bool{}
+ all := []*action{}
+ var walk func(*action)
+ walk = func(a *action) {
+ if seen[a] {
+ return
+ }
+ seen[a] = true
+ for _, a1 := range a.deps {
+ walk(a1)
+ }
+ all = append(all, a)
+ }
+ walk(root)
+ return all
+}
+
+// allArchiveActions returns a list of the archive dependencies of root.
+// This is needed because if package p depends on package q that is in libr.so, the
+// action graph looks like p->libr.so->q and so just scanning through p's
+// dependencies does not find the import dir for q.
+func allArchiveActions(root *action) []*action {
+ seen := map[*action]bool{}
+ r := []*action{}
+ var walk func(*action)
+ walk = func(a *action) {
+ if seen[a] {
+ return
+ }
+ seen[a] = true
+ if strings.HasSuffix(a.target, ".so") || a == root {
+ for _, a1 := range a.deps {
+ walk(a1)
+ }
+ } else if strings.HasSuffix(a.target, ".a") {
+ r = append(r, a)
+ }
+ }
+ walk(root)
+ return r
+}
+
+// do runs the action graph rooted at root.
+func (b *builder) do(root *action) {
+ // Build list of all actions, assigning depth-first post-order priority.
+ // The original implementation here was a true queue
+ // (using a channel) but it had the effect of getting
+ // distracted by low-level leaf actions to the detriment
+ // of completing higher-level actions. The order of
+ // work does not matter much to overall execution time,
+ // but when running "go test std" it is nice to see each test
+ // results as soon as possible. The priorities assigned
+ // ensure that, all else being equal, the execution prefers
+ // to do what it would have done first in a simple depth-first
+ // dependency order traversal.
+ all := actionList(root)
+ for i, a := range all {
+ a.priority = i
+ }
+
+ b.readySema = make(chan bool, len(all))
+
+ // Initialize per-action execution state.
+ for _, a := range all {
+ for _, a1 := range a.deps {
+ a1.triggers = append(a1.triggers, a)
+ }
+ a.pending = len(a.deps)
+ if a.pending == 0 {
+ b.ready.push(a)
+ b.readySema <- true
+ }
+ }
+
+ // Handle runs a single action and takes care of triggering
+ // any actions that are runnable as a result.
+ handle := func(a *action) {
+ var err error
+ if a.f != nil && (!a.failed || a.ignoreFail) {
+ err = a.f(b, a)
+ }
+
+ // The actions run in parallel but all the updates to the
+ // shared work state are serialized through b.exec.
+ b.exec.Lock()
+ defer b.exec.Unlock()
+
+ if err != nil {
+ if err == errPrintedOutput {
+ setExitStatus(2)
+ } else {
+ errorf("%s", err)
+ }
+ a.failed = true
+ }
+
+ for _, a0 := range a.triggers {
+ if a.failed {
+ a0.failed = true
+ }
+ if a0.pending--; a0.pending == 0 {
+ b.ready.push(a0)
+ b.readySema <- true
+ }
+ }
+
+ if a == root {
+ close(b.readySema)
+ }
+ }
+
+ var wg sync.WaitGroup
+
+ // Kick off goroutines according to parallelism.
+ // If we are using the -n flag (just printing commands)
+ // drop the parallelism to 1, both to make the output
+ // deterministic and because there is no real work anyway.
+ par := buildP
+ if buildN {
+ par = 1
+ }
+ for i := 0; i < par; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case _, ok := <-b.readySema:
+ if !ok {
+ return
+ }
+ // Receiving a value from b.readySema entitles
+ // us to take from the ready queue.
+ b.exec.Lock()
+ a := b.ready.pop()
+ b.exec.Unlock()
+ handle(a)
+ case <-interrupted:
+ setExitStatus(1)
+ return
+ }
+ }
+ }()
+ }
+
+ wg.Wait()
+}
+
+// hasString reports whether s appears in the list of strings.
+func hasString(strings []string, s string) bool {
+ for _, t := range strings {
+ if s == t {
+ return true
+ }
+ }
+ return false
+}
+
+// build is the action for building a single package or command.
+func (b *builder) build(a *action) (err error) {
+ // Return an error if the package has CXX files but it's not using
+ // cgo nor SWIG, since the CXX files can only be processed by cgo
+ // and SWIG.
+ if len(a.p.CXXFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() {
+ return fmt.Errorf("can't build package %s because it contains C++ files (%s) but it's not using cgo nor SWIG",
+ a.p.ImportPath, strings.Join(a.p.CXXFiles, ","))
+ }
+ // Same as above for Objective-C files
+ if len(a.p.MFiles) > 0 && !a.p.usesCgo() && !a.p.usesSwig() {
+ return fmt.Errorf("can't build package %s because it contains Objective-C files (%s) but it's not using cgo nor SWIG",
+ a.p.ImportPath, strings.Join(a.p.MFiles, ","))
+ }
+ defer func() {
+ if err != nil && err != errPrintedOutput {
+ err = fmt.Errorf("go build %s: %v", a.p.ImportPath, err)
+ }
+ }()
+ if buildN {
+ // In -n mode, print a banner between packages.
+ // The banner is five lines so that when changes to
+ // different sections of the bootstrap script have to
+ // be merged, the banners give patch something
+ // to use to find its context.
+ b.print("\n#\n# " + a.p.ImportPath + "\n#\n\n")
+ }
+
+ if buildV {
+ b.print(a.p.ImportPath + "\n")
+ }
+
+ // Make build directory.
+ obj := a.objdir
+ if err := b.mkdir(obj); err != nil {
+ return err
+ }
+
+ // make target directory
+ dir, _ := filepath.Split(a.target)
+ if dir != "" {
+ if err := b.mkdir(dir); err != nil {
+ return err
+ }
+ }
+
+ var gofiles, cgofiles, cfiles, sfiles, cxxfiles, objects, cgoObjects, pcCFLAGS, pcLDFLAGS []string
+
+ gofiles = append(gofiles, a.p.GoFiles...)
+ cgofiles = append(cgofiles, a.p.CgoFiles...)
+ cfiles = append(cfiles, a.p.CFiles...)
+ sfiles = append(sfiles, a.p.SFiles...)
+ cxxfiles = append(cxxfiles, a.p.CXXFiles...)
+
+ if a.p.usesCgo() || a.p.usesSwig() {
+ if pcCFLAGS, pcLDFLAGS, err = b.getPkgConfigFlags(a.p); err != nil {
+ return
+ }
+ }
+
+ // Run SWIG on each .swig and .swigcxx file.
+ // Each run will generate two files, a .go file and a .c or .cxx file.
+ // The .go file will use import "C" and is to be processed by cgo.
+ if a.p.usesSwig() {
+ outGo, outC, outCXX, err := b.swig(a.p, obj, pcCFLAGS)
+ if err != nil {
+ return err
+ }
+ cgofiles = append(cgofiles, outGo...)
+ cfiles = append(cfiles, outC...)
+ cxxfiles = append(cxxfiles, outCXX...)
+ }
+
+ // Run cgo.
+ if a.p.usesCgo() || a.p.usesSwig() {
+ // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc.
+ // There is one exception: runtime/cgo's job is to bridge the
+ // cgo and non-cgo worlds, so it necessarily has files in both.
+ // In that case gcc only gets the gcc_* files.
+ var gccfiles []string
+ if a.p.Standard && a.p.ImportPath == "runtime/cgo" {
+ filter := func(files, nongcc, gcc []string) ([]string, []string) {
+ for _, f := range files {
+ if strings.HasPrefix(f, "gcc_") {
+ gcc = append(gcc, f)
+ } else {
+ nongcc = append(nongcc, f)
+ }
+ }
+ return nongcc, gcc
+ }
+ cfiles, gccfiles = filter(cfiles, cfiles[:0], gccfiles)
+ sfiles, gccfiles = filter(sfiles, sfiles[:0], gccfiles)
+ } else {
+ gccfiles = append(cfiles, sfiles...)
+ cfiles = nil
+ sfiles = nil
+ }
+
+ cgoExe := tool("cgo")
+ if a.cgo != nil && a.cgo.target != "" {
+ cgoExe = a.cgo.target
+ }
+ outGo, outObj, err := b.cgo(a.p, cgoExe, obj, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, cxxfiles, a.p.MFiles)
+ if err != nil {
+ return err
+ }
+ cgoObjects = append(cgoObjects, outObj...)
+ gofiles = append(gofiles, outGo...)
+ }
+
+ if len(gofiles) == 0 {
+ return &build.NoGoError{Dir: a.p.Dir}
+ }
+
+ // If we're doing coverage, preprocess the .go files and put them in the work directory
+ if a.p.coverMode != "" {
+ for i, file := range gofiles {
+ var sourceFile string
+ var coverFile string
+ var key string
+ if strings.HasSuffix(file, ".cgo1.go") {
+ // cgo files have absolute paths
+ base := filepath.Base(file)
+ sourceFile = file
+ coverFile = filepath.Join(obj, base)
+ key = strings.TrimSuffix(base, ".cgo1.go") + ".go"
+ } else {
+ sourceFile = filepath.Join(a.p.Dir, file)
+ coverFile = filepath.Join(obj, file)
+ key = file
+ }
+ cover := a.p.coverVars[key]
+ if cover == nil || isTestFile(file) {
+ // Not covering this file.
+ continue
+ }
+ if err := b.cover(a, coverFile, sourceFile, 0666, cover.Var); err != nil {
+ return err
+ }
+ gofiles[i] = coverFile
+ }
+ }
+
+ // Prepare Go import path list.
+ inc := b.includeArgs("-I", allArchiveActions(a))
+
+ // Compile Go.
+ ofile, out, err := buildToolchain.gc(b, a.p, a.objpkg, obj, len(sfiles) > 0, inc, gofiles)
+ if len(out) > 0 {
+ b.showOutput(a.p.Dir, a.p.ImportPath, b.processOutput(out))
+ if err != nil {
+ return errPrintedOutput
+ }
+ }
+ if err != nil {
+ return err
+ }
+ if ofile != a.objpkg {
+ objects = append(objects, ofile)
+ }
+
+ // Copy .h files named for goos or goarch or goos_goarch
+ // to names using GOOS and GOARCH.
+ // For example, defs_linux_amd64.h becomes defs_GOOS_GOARCH.h.
+ _goos_goarch := "_" + goos + "_" + goarch
+ _goos := "_" + goos
+ _goarch := "_" + goarch
+ for _, file := range a.p.HFiles {
+ name, ext := fileExtSplit(file)
+ switch {
+ case strings.HasSuffix(name, _goos_goarch):
+ targ := file[:len(name)-len(_goos_goarch)] + "_GOOS_GOARCH." + ext
+ if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ case strings.HasSuffix(name, _goarch):
+ targ := file[:len(name)-len(_goarch)] + "_GOARCH." + ext
+ if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ case strings.HasSuffix(name, _goos):
+ targ := file[:len(name)-len(_goos)] + "_GOOS." + ext
+ if err := b.copyFile(a, obj+targ, filepath.Join(a.p.Dir, file), 0666, true); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, file := range cfiles {
+ out := file[:len(file)-len(".c")] + ".o"
+ if err := buildToolchain.cc(b, a.p, obj, obj+out, file); err != nil {
+ return err
+ }
+ objects = append(objects, out)
+ }
+
+ // Assemble .s files.
+ for _, file := range sfiles {
+ out := file[:len(file)-len(".s")] + ".o"
+ if err := buildToolchain.asm(b, a.p, obj, obj+out, file); err != nil {
+ return err
+ }
+ objects = append(objects, out)
+ }
+
+ // NOTE(rsc): On Windows, it is critically important that the
+ // gcc-compiled objects (cgoObjects) be listed after the ordinary
+ // objects in the archive. I do not know why this is.
+ // https://golang.org/issue/2601
+ objects = append(objects, cgoObjects...)
+
+ // Add system object files.
+ for _, syso := range a.p.SysoFiles {
+ objects = append(objects, filepath.Join(a.p.Dir, syso))
+ }
+
+ // Pack into archive in obj directory.
+ // If the Go compiler wrote an archive, we only need to add the
+ // object files for non-Go sources to the archive.
+ // If the Go compiler wrote an archive and the package is entirely
+ // Go sources, there is no pack to execute at all.
+ if len(objects) > 0 {
+ if err := buildToolchain.pack(b, a.p, obj, a.objpkg, objects); err != nil {
+ return err
+ }
+ }
+
+ // Link if needed.
+ if a.link {
+ // The compiler only cares about direct imports, but the
+ // linker needs the whole dependency tree.
+ all := actionList(a)
+ all = all[:len(all)-1] // drop a
+ if err := buildToolchain.ld(b, a, a.target, all, a.objpkg, objects); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// Calls pkg-config if needed and returns the cflags/ldflags needed to build the package.
+// If pkg-config fails, its output is shown to the user and the sentinel
+// errPrintedOutput is returned so callers do not print the error again.
+func (b *builder) getPkgConfigFlags(p *Package) (cflags, ldflags []string, err error) {
+	if pkgs := p.CgoPkgConfig; len(pkgs) > 0 {
+		var out []byte
+		out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--cflags", pkgs)
+		if err != nil {
+			b.showOutput(p.Dir, "pkg-config --cflags "+strings.Join(pkgs, " "), string(out))
+			b.print(err.Error() + "\n")
+			err = errPrintedOutput
+			return
+		}
+		if len(out) > 0 {
+			cflags = strings.Fields(string(out))
+		}
+		out, err = b.runOut(p.Dir, p.ImportPath, nil, "pkg-config", "--libs", pkgs)
+		if err != nil {
+			b.showOutput(p.Dir, "pkg-config --libs "+strings.Join(pkgs, " "), string(out))
+			b.print(err.Error() + "\n")
+			err = errPrintedOutput
+			return
+		}
+		if len(out) > 0 {
+			ldflags = strings.Fields(string(out))
+		}
+	}
+	return
+}
+
+// installShlibname writes, to a.target, the base name of the shared
+// library built by the action's first dependency, followed by a newline.
+// With -x it also echoes the equivalent shell command.
+func (b *builder) installShlibname(a *action) error {
+	a1 := a.deps[0]
+	err := ioutil.WriteFile(a.target, []byte(filepath.Base(a1.target)+"\n"), 0666)
+	if err != nil {
+		return err
+	}
+	if buildX {
+		b.showcmd("", "echo '%s' > %s # internal", filepath.Base(a1.target), a.target)
+	}
+	return nil
+}
+
+// linkShared links the top-level actions in a.deps into a shared library
+// at a.target. The full dependency tree (minus a itself) is passed so the
+// linker can resolve everything.
+func (b *builder) linkShared(a *action) (err error) {
+	allactions := actionList(a)
+	allactions = allactions[:len(allactions)-1]
+	return buildToolchain.ldShared(b, a.deps, a.target, allactions)
+}
+
+// install is the action for installing a single package or executable.
+// It moves (or copies) the built artifact from the first dependency's
+// target into its final install location and then cleans up the object
+// directory.
+func (b *builder) install(a *action) (err error) {
+	defer func() {
+		if err != nil && err != errPrintedOutput {
+			err = fmt.Errorf("go install %s: %v", a.p.ImportPath, err)
+		}
+	}()
+	a1 := a.deps[0]
+	// Linked binaries are installed executable (0777 before umask),
+	// except for c-archive/c-shared outputs, which are not run directly.
+	perm := os.FileMode(0666)
+	if a1.link {
+		switch buildBuildmode {
+		case "c-archive", "c-shared":
+		default:
+			perm = 0777
+		}
+	}
+
+	// make target directory
+	dir, _ := filepath.Split(a.target)
+	if dir != "" {
+		if err := b.mkdir(dir); err != nil {
+			return err
+		}
+	}
+
+	// remove object dir to keep the amount of
+	// garbage down in a large build. On an operating system
+	// with aggressive buffering, cleaning incrementally like
+	// this keeps the intermediate objects from hitting the disk.
+	if !buildWork {
+		defer os.RemoveAll(a1.objdir)
+		defer os.Remove(a1.target)
+	}
+
+	return b.moveOrCopyFile(a, a.target, a1.target, perm, false)
+}
+
+// includeArgs returns the -I or -L directory list for access
+// to the results of the list of actions.
+// flag is the literal option ("-I" or "-L"); each selected directory is
+// appended as a flag/dir pair. incMap suppresses duplicates and a few
+// directories handled specially.
+func (b *builder) includeArgs(flag string, all []*action) []string {
+	inc := []string{}
+	incMap := map[string]bool{
+		b.work:    true, // handled later
+		gorootPkg: true,
+		"":        true, // ignore empty strings
+	}
+
+	// Look in the temporary space for results of test-specific actions.
+	// This is the $WORK/my/package/_test directory for the
+	// package being built, so there are few of these.
+	for _, a1 := range all {
+		if a1.p == nil {
+			continue
+		}
+		if dir := a1.pkgdir; dir != a1.p.build.PkgRoot && !incMap[dir] {
+			incMap[dir] = true
+			inc = append(inc, flag, dir)
+		}
+	}
+
+	// Also look in $WORK for any non-test packages that have
+	// been built but not installed.
+	inc = append(inc, flag, b.work)
+
+	// Finally, look in the installed package directories for each action.
+	// First add the package dirs corresponding to GOPATH entries
+	// in the original GOPATH order.
+	need := map[string]*build.Package{}
+	for _, a1 := range all {
+		if a1.p != nil && a1.pkgdir == a1.p.build.PkgRoot {
+			need[a1.p.build.Root] = a1.p.build
+		}
+	}
+	for _, root := range gopath {
+		if p := need[root]; p != nil && !incMap[p.PkgRoot] {
+			incMap[p.PkgRoot] = true
+			inc = append(inc, flag, p.PkgTargetRoot)
+		}
+	}
+
+	// Then add anything that's left.
+	for _, a1 := range all {
+		if a1.p == nil {
+			continue
+		}
+		if dir := a1.pkgdir; dir == a1.p.build.PkgRoot && !incMap[dir] {
+			incMap[dir] = true
+			inc = append(inc, flag, a1.p.build.PkgTargetRoot)
+		}
+	}
+
+	return inc
+}
+
+// moveOrCopyFile is like 'mv src dst' or 'cp src dst'.
+// It prefers an os.Rename (cheap, atomic on the same filesystem) and
+// falls back to copyFile when chmod or rename fails.
+// perm is the desired permission before umask; force is passed through
+// to copyFile to allow overwriting non-object files.
+func (b *builder) moveOrCopyFile(a *action, dst, src string, perm os.FileMode, force bool) error {
+	if buildN {
+		b.showcmd("", "mv %s %s", src, dst)
+		return nil
+	}
+
+	// If we can update the mode and rename to the dst, do it.
+	// Otherwise fall back to standard copy.
+
+	// The perm argument is meant to be adjusted according to umask,
+	// but we don't know what the umask is.
+	// Create a dummy file to find out.
+	// This avoids build tags and works even on systems like Plan 9
+	// where the file mask computation incorporates other information.
+	mode := perm
+	f, err := os.OpenFile(filepath.Clean(dst)+"-go-tmp-umask", os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm)
+	if err == nil {
+		fi, err := f.Stat()
+		if err == nil {
+			mode = fi.Mode() & 0777
+		}
+		name := f.Name()
+		f.Close()
+		os.Remove(name)
+	}
+
+	if err := os.Chmod(src, mode); err == nil {
+		if err := os.Rename(src, dst); err == nil {
+			if buildX {
+				b.showcmd("", "mv %s %s", src, dst)
+			}
+			return nil
+		}
+	}
+
+	return b.copyFile(a, dst, src, perm, force)
+}
+
+// copyFile is like 'cp src dst'.
+// Unless force is true, it refuses to overwrite an existing regular file
+// that does not look like a build object (see isObject), to avoid
+// clobbering unrelated user files.
+func (b *builder) copyFile(a *action, dst, src string, perm os.FileMode, force bool) error {
+	if buildN || buildX {
+		b.showcmd("", "cp %s %s", src, dst)
+		if buildN {
+			return nil
+		}
+	}
+
+	sf, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer sf.Close()
+
+	// Be careful about removing/overwriting dst.
+	// Do not remove/overwrite if dst exists and is a directory
+	// or a non-object file.
+	if fi, err := os.Stat(dst); err == nil {
+		if fi.IsDir() {
+			return fmt.Errorf("build output %q already exists and is a directory", dst)
+		}
+		if !force && fi.Mode().IsRegular() && !isObject(dst) {
+			return fmt.Errorf("build output %q already exists and is not an object file", dst)
+		}
+	}
+
+	// On Windows, remove lingering ~ file from last attempt.
+	if toolIsWindows {
+		if _, err := os.Stat(dst + "~"); err == nil {
+			os.Remove(dst + "~")
+		}
+	}
+
+	mayberemovefile(dst)
+	df, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil && toolIsWindows {
+		// Windows does not allow deletion of a binary file
+		// while it is executing. Try to move it out of the way.
+		// If the move fails, which is likely, we'll try again the
+		// next time we do an install of this binary.
+		if err := os.Rename(dst, dst+"~"); err == nil {
+			os.Remove(dst + "~")
+		}
+		df, err = os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	}
+	if err != nil {
+		return err
+	}
+
+	_, err = io.Copy(df, sf)
+	df.Close()
+	if err != nil {
+		// Remove the partially written file so a later build does not
+		// mistake it for a valid output.
+		mayberemovefile(dst)
+		return fmt.Errorf("copying %s to %s: %v", src, dst, err)
+	}
+	return nil
+}
+
+// Install the cgo export header file, if there is one.
+// The header is produced by cgo at objdir/_cgo_install.h; absence simply
+// means the package exports no functions and there is nothing to install.
+func (b *builder) installHeader(a *action) error {
+	src := a.objdir + "_cgo_install.h"
+	if _, err := os.Stat(src); os.IsNotExist(err) {
+		// If the file does not exist, there are no exported
+		// functions, and we do not install anything.
+		return nil
+	}
+
+	dir, _ := filepath.Split(a.target)
+	if dir != "" {
+		if err := b.mkdir(dir); err != nil {
+			return err
+		}
+	}
+
+	// force=true: headers are plain text, not objects, so allow overwrite.
+	return b.moveOrCopyFile(a, a.target, src, 0666, true)
+}
+
+// cover runs, in effect,
+//	go tool cover -mode=b.coverMode -var="varName" -o dst.go src.go
+// NOTE(review): the dst/src/perm parameter trio mirrors copyFile, but
+// perm is not used by this implementation.
+func (b *builder) cover(a *action, dst, src string, perm os.FileMode, varName string) error {
+	return b.run(a.objdir, "cover "+a.p.ImportPath, nil,
+		buildToolExec,
+		tool("cover"),
+		"-mode", a.p.coverMode,
+		"-var", varName,
+		"-o", dst,
+		src)
+}
+
+// objectMagic lists the magic-number prefixes of every file format the
+// build considers a legitimate build output; isObject checks against it
+// before copyFile will overwrite an existing file.
+var objectMagic = [][]byte{
+	{'!', '<', 'a', 'r', 'c', 'h', '>', '\n'}, // Package archive
+	{'\x7F', 'E', 'L', 'F'},                   // ELF
+	{0xFE, 0xED, 0xFA, 0xCE},                  // Mach-O big-endian 32-bit
+	{0xFE, 0xED, 0xFA, 0xCF},                  // Mach-O big-endian 64-bit
+	{0xCE, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 32-bit
+	{0xCF, 0xFA, 0xED, 0xFE},                  // Mach-O little-endian 64-bit
+	{0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00},      // PE (Windows) as generated by 6l/8l and gcc
+	{0x00, 0x00, 0x01, 0xEB},                  // Plan 9 i386
+	{0x00, 0x00, 0x8a, 0x97},                  // Plan 9 amd64
+	{0x00, 0x00, 0x06, 0x47},                  // Plan 9 arm
+}
+
+// isObject reports whether the file at s begins with one of the known
+// object-file magic numbers in objectMagic. Any open or read error is
+// treated as "not an object"; a short read is fine because HasPrefix
+// only inspects the bytes actually present.
+func isObject(s string) bool {
+	f, err := os.Open(s)
+	if err != nil {
+		return false
+	}
+	defer f.Close()
+	buf := make([]byte, 64)
+	io.ReadFull(f, buf)
+	for _, magic := range objectMagic {
+		if bytes.HasPrefix(buf, magic) {
+			return true
+		}
+	}
+	return false
+}
+
+// mayberemovefile removes a file only if it is a regular file
+// When running as a user with sufficient privileges, we may delete
+// even device files, for example, which is not intended.
+// Lstat (not Stat) is used so a symlink itself may be removed without
+// inspecting its target.
+func mayberemovefile(s string) {
+	if fi, err := os.Lstat(s); err == nil && !fi.Mode().IsRegular() {
+		return
+	}
+	os.Remove(s)
+}
+
+// fmtcmd formats a command in the manner of fmt.Sprintf but also:
+//
+//	If dir is non-empty and the script is not in dir right now,
+//	fmtcmd inserts "cd dir\n" before the command.
+//
+//	fmtcmd replaces the value of b.work with $WORK.
+//	fmtcmd replaces the value of goroot with $GOROOT.
+//	fmtcmd replaces the value of b.gobin with $GOBIN.
+//
+//	fmtcmd replaces the name of the current directory with dot (.)
+//	but only when it is at the beginning of a space-separated token.
+//
+func (b *builder) fmtcmd(dir string, format string, args ...interface{}) string {
+	cmd := fmt.Sprintf(format, args...)
+	if dir != "" && dir != "/" {
+		// Prepend a space so the replacement also matches dir at the
+		// very start of the command, then strip it again.
+		cmd = strings.Replace(" "+cmd, " "+dir, " .", -1)[1:]
+		if b.scriptDir != dir {
+			b.scriptDir = dir
+			cmd = "cd " + dir + "\n" + cmd
+		}
+	}
+	if b.work != "" {
+		cmd = strings.Replace(cmd, b.work, "$WORK", -1)
+	}
+	return cmd
+}
+
+// showcmd prints the given command to standard output
+// for the implementation of -n or -x.
+// b.output is held so concurrent actions do not interleave their lines.
+func (b *builder) showcmd(dir string, format string, args ...interface{}) {
+	b.output.Lock()
+	defer b.output.Unlock()
+	b.print(b.fmtcmd(dir, format, args...) + "\n")
+}
+
+// showOutput prints "# desc" followed by the given output.
+// The output is expected to contain references to 'dir', usually
+// the source directory for the package that has failed to build.
+// showOutput rewrites mentions of dir with a relative path to dir
+// when the relative path is shorter. This is usually more pleasant.
+// For example, if fmt doesn't compile and we are in src/html,
+// the output is
+//
+//	$ go build
+//	# fmt
+//	../fmt/print.go:1090: undefined: asdf
+//	$
+//
+// instead of
+//
+//	$ go build
+//	# fmt
+//	/usr/gopher/go/src/fmt/print.go:1090: undefined: asdf
+//	$
+//
+// showOutput also replaces references to the work directory with $WORK.
+//
+func (b *builder) showOutput(dir, desc, out string) {
+	prefix := "# " + desc
+	suffix := "\n" + out
+	if reldir := shortPath(dir); reldir != dir {
+		// Only rewrite dir when preceded by a space or newline, so a
+		// longer path that merely contains dir is left alone.
+		suffix = strings.Replace(suffix, " "+dir, " "+reldir, -1)
+		suffix = strings.Replace(suffix, "\n"+dir, "\n"+reldir, -1)
+	}
+	suffix = strings.Replace(suffix, " "+b.work, " $WORK", -1)
+
+	b.output.Lock()
+	defer b.output.Unlock()
+	b.print(prefix, suffix)
+}
+
+// shortPath returns an absolute or relative name for path, whatever is shorter.
+// The relative form is computed against the package-level cwd.
+func shortPath(path string) string {
+	if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
+		return rel
+	}
+	return path
+}
+
+// relPaths returns a copy of paths with absolute paths
+// made relative to the current directory if they would be shorter.
+// Unlike shortPath it queries os.Getwd directly; a Getwd failure leaves
+// pwd empty and the paths are returned unchanged.
+func relPaths(paths []string) []string {
+	var out []string
+	pwd, _ := os.Getwd()
+	for _, p := range paths {
+		rel, err := filepath.Rel(pwd, p)
+		if err == nil && len(rel) < len(p) {
+			p = rel
+		}
+		out = append(out, p)
+	}
+	return out
+}
+
+// errPrintedOutput is a special error indicating that a command failed
+// but that it generated output as well, and that output has already
+// been printed, so there's no point showing 'exit status 1' or whatever
+// the wait status was. The main executor, builder.do, knows not to
+// print this error.
+var errPrintedOutput = errors.New("already printed output - no need to show error")
+
+// cgoLine matches a bracketed cgo-generated file:line reference such as
+// [/tmp/.../x.cgo1.go:18]; cgoTypeSigRe matches the _Ctype_ prefix of
+// cgo type names. Both are used by processOutput to clean up messages.
+var cgoLine = regexp.MustCompile(`\[[^\[\]]+\.cgo1\.go:[0-9]+\]`)
+var cgoTypeSigRe = regexp.MustCompile(`\b_Ctype_\B`)
+
+// run runs the command given by cmdline in the directory dir.
+// If the command fails, run prints information about the failure
+// and returns a non-nil error.
+// When the command produced output, the output is shown (under desc, or
+// a formatted command line if desc is empty) and any error is collapsed
+// to errPrintedOutput so callers do not print it twice.
+func (b *builder) run(dir string, desc string, env []string, cmdargs ...interface{}) error {
+	out, err := b.runOut(dir, desc, env, cmdargs...)
+	if len(out) > 0 {
+		if desc == "" {
+			desc = b.fmtcmd(dir, "%s", strings.Join(stringList(cmdargs...), " "))
+		}
+		b.showOutput(dir, desc, b.processOutput(out))
+		if err != nil {
+			err = errPrintedOutput
+		}
+	}
+	return err
+}
+
+// processOutput prepares the output of runOut to be output to the console.
+// It guarantees a trailing newline and, unless -x is in effect, rewrites
+// cgo-generated references to be more readable.
+func (b *builder) processOutput(out []byte) string {
+	// Guard the index: out[len(out)-1] would panic on empty output.
+	// The caller in run only passes non-empty output, but this method
+	// should be safe on its own.
+	if len(out) > 0 && out[len(out)-1] != '\n' {
+		out = append(out, '\n')
+	}
+	messages := string(out)
+	// Fix up output referring to cgo-generated code to be more readable.
+	// Replace x.go:19[/tmp/.../x.cgo1.go:18] with x.go:19.
+	// Replace *[100]_Ctype_foo with *[100]C.foo.
+	// If we're using -x, assume we're debugging and want the full dump, so disable the rewrite.
+	if !buildX && cgoLine.MatchString(messages) {
+		messages = cgoLine.ReplaceAllString(messages, "")
+		messages = cgoTypeSigRe.ReplaceAllString(messages, "C.")
+	}
+	return messages
+}
+
+// runOut runs the command given by cmdline in the directory dir.
+// It returns the command output and any errors that occurred.
+// env contains extra environment entries merged over the inherited
+// environment. desc is accepted for symmetry with run but is not used
+// here.
+func (b *builder) runOut(dir string, desc string, env []string, cmdargs ...interface{}) ([]byte, error) {
+	cmdline := stringList(cmdargs...)
+	if buildN || buildX {
+		var envcmdline string
+		for i := range env {
+			envcmdline += env[i]
+			envcmdline += " "
+		}
+		envcmdline += joinUnambiguously(cmdline)
+		b.showcmd(dir, "%s", envcmdline)
+		if buildN {
+			return nil, nil
+		}
+	}
+
+	// nbusy counts "text file busy" retries; see the long comment below.
+	nbusy := 0
+	for {
+		var buf bytes.Buffer
+		cmd := exec.Command(cmdline[0], cmdline[1:]...)
+		cmd.Stdout = &buf
+		cmd.Stderr = &buf
+		cmd.Dir = dir
+		cmd.Env = mergeEnvLists(env, envForDir(cmd.Dir, os.Environ()))
+		err := cmd.Run()
+
+		// cmd.Run will fail on Unix if some other process has the binary
+		// we want to run open for writing. This can happen here because
+		// we build and install the cgo command and then run it.
+		// If another command was kicked off while we were writing the
+		// cgo binary, the child process for that command may be holding
+		// a reference to the fd, keeping us from running exec.
+		//
+		// But, you might reasonably wonder, how can this happen?
+		// The cgo fd, like all our fds, is close-on-exec, so that we need
+		// not worry about other processes inheriting the fd accidentally.
+		// The answer is that running a command is fork and exec.
+		// A child forked while the cgo fd is open inherits that fd.
+		// Until the child has called exec, it holds the fd open and the
+		// kernel will not let us run cgo. Even if the child were to close
+		// the fd explicitly, it would still be open from the time of the fork
+		// until the time of the explicit close, and the race would remain.
+		//
+		// On Unix systems, this results in ETXTBSY, which formats
+		// as "text file busy". Rather than hard-code specific error cases,
+		// we just look for that string. If this happens, sleep a little
+		// and try again. We let this happen three times, with increasing
+		// sleep lengths: 100+200+400 ms = 0.7 seconds.
+		//
+		// An alternate solution might be to split the cmd.Run into
+		// separate cmd.Start and cmd.Wait, and then use an RWLock
+		// to make sure that copyFile only executes when no cmd.Start
+		// call is in progress. However, cmd.Start (really syscall.forkExec)
+		// only guarantees that when it returns, the exec is committed to
+		// happen and succeed. It uses a close-on-exec file descriptor
+		// itself to determine this, so we know that when cmd.Start returns,
+		// at least one close-on-exec file descriptor has been closed.
+		// However, we cannot be sure that all of them have been closed,
+		// so the program might still encounter ETXTBSY even with such
+		// an RWLock. The race window would be smaller, perhaps, but not
+		// guaranteed to be gone.
+		//
+		// Sleeping when we observe the race seems to be the most reliable
+		// option we have.
+		//
+		// https://golang.org/issue/3001
+		//
+		if err != nil && nbusy < 3 && strings.Contains(err.Error(), "text file busy") {
+			time.Sleep(100 * time.Millisecond << uint(nbusy))
+			nbusy++
+			continue
+		}
+
+		// err can be something like 'exit status 1'.
+		// Add information about what program was running.
+		// Note that if buf.Bytes() is non-empty, the caller usually
+		// shows buf.Bytes() and does not print err at all, so the
+		// prefix here does not make most output any more verbose.
+		if err != nil {
+			err = errors.New(cmdline[0] + ": " + err.Error())
+		}
+		return buf.Bytes(), err
+	}
+}
+
+// joinUnambiguously prints the slice, quoting where necessary to make the
+// output unambiguous.
+// An element is quoted if it is empty, contains a space, or if quoting
+// changes it beyond just adding the surrounding quotes (i.e. it contains
+// characters strconv.Quote must escape).
+// TODO: See issue 5279. The printing of commands needs a complete redo.
+func joinUnambiguously(a []string) string {
+	var buf bytes.Buffer
+	for i, s := range a {
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		q := strconv.Quote(s)
+		if s == "" || strings.Contains(s, " ") || len(q) > len(s)+2 {
+			buf.WriteString(q)
+		} else {
+			buf.WriteString(s)
+		}
+	}
+	return buf.String()
+}
+
+// mkdir makes the named directory.
+// Calls are serialized on b.exec and memoized in b.mkdirCache.
+// NOTE(review): the cache entry is recorded before MkdirAll runs, so a
+// failed creation is remembered as done and a retry in the same build
+// will silently succeed — confirm this is the intended trade-off.
+func (b *builder) mkdir(dir string) error {
+	b.exec.Lock()
+	defer b.exec.Unlock()
+	// We can be a little aggressive about being
+	// sure directories exist. Skip repeated calls.
+	if b.mkdirCache[dir] {
+		return nil
+	}
+	b.mkdirCache[dir] = true
+
+	if buildN || buildX {
+		b.showcmd("", "mkdir -p %s", dir)
+		if buildN {
+			return nil
+		}
+	}
+
+	if err := os.MkdirAll(dir, 0777); err != nil {
+		return err
+	}
+	return nil
+}
+
+// mkAbs returns an absolute path corresponding to
+// evaluating f in the directory dir.
+// We always pass absolute paths of source files so that
+// the error messages will include the full path to a file
+// in need of attention.
+func mkAbs(dir, f string) string {
+	// Leave absolute paths alone.
+	// Also, during -n mode we use the pseudo-directory $WORK
+	// instead of creating an actual work directory that won't be used.
+	// Leave paths beginning with $WORK alone too.
+	if filepath.IsAbs(f) || strings.HasPrefix(f, "$WORK") {
+		return f
+	}
+	return filepath.Join(dir, f)
+}
+
+// toolchain is the interface satisfied by each supported compiler
+// toolchain (gc, gccgo, or the noToolchain stub).
+type toolchain interface {
+	// gc runs the compiler in a specific directory on a set of files
+	// and returns the name of the generated output file.
+	// The compiler runs in the directory dir.
+	gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error)
+	// cc runs the toolchain's C compiler in a directory on a C file
+	// to produce an output file.
+	cc(b *builder, p *Package, objdir, ofile, cfile string) error
+	// asm runs the assembler in a specific directory on a specific file
+	// to generate the named output file.
+	asm(b *builder, p *Package, obj, ofile, sfile string) error
+	// pkgpath builds an appropriate path for a temporary package file.
+	pkgpath(basedir string, p *Package) string
+	// pack runs the archive packer in a specific directory to create
+	// an archive from a set of object files.
+	// typically it is run in the object directory.
+	pack(b *builder, p *Package, objDir, afile string, ofiles []string) error
+	// ld runs the linker to create an executable starting at mainpkg.
+	ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error
+	// ldShared runs the linker to create a shared library containing the pkgs built by toplevelactions
+	ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error
+
+	// compiler and linker report the paths of the toolchain's
+	// compiler and linker binaries.
+	compiler() string
+	linker() string
+}
+
+// noToolchain is the stub toolchain installed when the configured
+// compiler is not recognized; every method aborts via noCompiler.
+type noToolchain struct{}
+
+// noCompiler aborts the process with a fatal "unknown compiler" error.
+// The nil return exists only to satisfy error-returning call sites;
+// log.Fatalf does not return.
+func noCompiler() error {
+	log.Fatalf("unknown compiler %q", buildContext.Compiler)
+	return nil
+}
+
+func (noToolchain) compiler() string {
+	noCompiler()
+	return ""
+}
+
+func (noToolchain) linker() string {
+	noCompiler()
+	return ""
+}
+
+func (noToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, out []byte, err error) {
+	return "", nil, noCompiler()
+}
+
+func (noToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+	return noCompiler()
+}
+
+func (noToolchain) pkgpath(basedir string, p *Package) string {
+	noCompiler()
+	return ""
+}
+
+func (noToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+	return noCompiler()
+}
+
+func (noToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+	return noCompiler()
+}
+
+func (noToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+	return noCompiler()
+}
+
+func (noToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+	return noCompiler()
+}
+
+// The Go toolchain.
+type gcToolchain struct{}
+
+// compiler returns the path of the gc compiler binary ("compile").
+func (gcToolchain) compiler() string {
+	return tool("compile")
+}
+
+// linker returns the path of the gc linker binary ("link").
+func (gcToolchain) linker() string {
+	return tool("link")
+}
+
+// gc runs the gc compiler on gofiles, writing either into archive (when
+// non-empty, with -pack) or into obj/_go_.o. It assembles the compiler
+// command line (-p, -complete, -installsuffix, -buildid, vendor import
+// maps) and returns the output file name together with any compiler
+// output.
+func (gcToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+	if archive != "" {
+		ofile = archive
+	} else {
+		out := "_go_.o"
+		ofile = obj + out
+	}
+
+	gcargs := []string{"-p", p.ImportPath}
+	if p.Name == "main" {
+		gcargs[1] = "main"
+	}
+	if p.Standard && (p.ImportPath == "runtime" || strings.HasPrefix(p.ImportPath, "runtime/internal")) {
+		// runtime compiles with a special gc flag to emit
+		// additional reflect type data.
+		gcargs = append(gcargs, "-+")
+	}
+
+	// If we're giving the compiler the entire package (no C etc files), tell it that,
+	// so that it can give good error messages about forward declarations.
+	// Exceptions: a few standard packages have forward declarations for
+	// pieces supplied behind-the-scenes by package runtime.
+	extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles)
+	if p.Standard {
+		switch p.ImportPath {
+		case "bytes", "net", "os", "runtime/pprof", "sync", "time":
+			extFiles++
+		}
+	}
+	if extFiles == 0 {
+		gcargs = append(gcargs, "-complete")
+	}
+	if buildContext.InstallSuffix != "" {
+		gcargs = append(gcargs, "-installsuffix", buildContext.InstallSuffix)
+	}
+	if p.buildID != "" {
+		gcargs = append(gcargs, "-buildid", p.buildID)
+	}
+
+	// Map vendored import paths back to their unvendored names so the
+	// compiler records the short form in export data.
+	for _, path := range p.Imports {
+		if i := strings.LastIndex(path, "/vendor/"); i >= 0 {
+			gcargs = append(gcargs, "-importmap", path[i+len("/vendor/"):]+"="+path)
+		} else if strings.HasPrefix(path, "vendor/") {
+			gcargs = append(gcargs, "-importmap", path[len("vendor/"):]+"="+path)
+		}
+	}
+
+	args := []interface{}{buildToolExec, tool("compile"), "-o", ofile, "-trimpath", b.work, buildGcflags, gcargs, "-D", p.localPrefix, importArgs}
+	if ofile == archive {
+		args = append(args, "-pack")
+	}
+	if asmhdr {
+		args = append(args, "-asmhdr", obj+"go_asm.h")
+	}
+	for _, f := range gofiles {
+		args = append(args, mkAbs(p.Dir, f))
+	}
+
+	output, err = b.runOut(p.Dir, p.ImportPath, nil, args...)
+	return ofile, output, err
+}
+
+// asm assembles sfile into ofile with the gc assembler, defining
+// GOOS_<goos> and GOARCH_<goarch> symbols for conditional assembly.
+func (gcToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+	// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
+	inc := filepath.Join(goroot, "pkg", "include")
+	sfile = mkAbs(p.Dir, sfile)
+	args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile}
+	if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
+		return err
+	}
+	return nil
+}
+
+// toolVerify checks that the command line args writes the same output file
+// if run using newTool instead.
+// Unused now but kept around for future use.
+// args[1] is assumed to be the tool path and args[3] the -o output file,
+// matching the argument layout built by gc/asm above.
+func toolVerify(b *builder, p *Package, newTool string, ofile string, args []interface{}) error {
+	newArgs := make([]interface{}, len(args))
+	copy(newArgs, args)
+	newArgs[1] = tool(newTool)
+	newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
+	if err := b.run(p.Dir, p.ImportPath, nil, newArgs...); err != nil {
+		return err
+	}
+	data1, err := ioutil.ReadFile(ofile)
+	if err != nil {
+		return err
+	}
+	data2, err := ioutil.ReadFile(ofile + ".new")
+	if err != nil {
+		return err
+	}
+	if !bytes.Equal(data1, data2) {
+		return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(stringList(args...), " "), strings.Join(stringList(newArgs...), " "))
+	}
+	os.Remove(ofile + ".new")
+	return nil
+}
+
+// pkgpath returns basedir/<import path>.a, the archive location for p.
+func (gcToolchain) pkgpath(basedir string, p *Package) string {
+	end := filepath.FromSlash(p.ImportPath + ".a")
+	return filepath.Join(basedir, end)
+}
+
+// pack appends ofiles to the archive afile, which the compiler must
+// already have created (see the Stat check). The append is done in
+// process by packInternal rather than by running the pack tool.
+func (gcToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+	var absOfiles []string
+	for _, f := range ofiles {
+		absOfiles = append(absOfiles, mkAbs(objDir, f))
+	}
+	absAfile := mkAbs(objDir, afile)
+
+	// The archive file should have been created by the compiler.
+	// Since it used to not work that way, verify.
+	if _, err := os.Stat(absAfile); err != nil {
+		fatalf("os.Stat of archive file failed: %v", err)
+	}
+
+	if buildN || buildX {
+		cmdline := stringList("pack", "r", absAfile, absOfiles)
+		b.showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline))
+	}
+	if buildN {
+		return nil
+	}
+	if err := packInternal(b, absAfile, absOfiles); err != nil {
+		b.showOutput(p.Dir, p.ImportPath, err.Error()+"\n")
+		return errPrintedOutput
+	}
+	return nil
+}
+
+// packInternal appends each of ofiles to the existing archive afile in
+// Unix ar format: a 60-byte header (16-byte name, then mtime/uid/gid/
+// mode/size fields) followed by the file contents, padded to an even
+// length.
+func packInternal(b *builder, afile string, ofiles []string) error {
+	dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0)
+	if err != nil {
+		return err
+	}
+	defer dst.Close() // only for error returns or panics
+	w := bufio.NewWriter(dst)
+
+	for _, ofile := range ofiles {
+		src, err := os.Open(ofile)
+		if err != nil {
+			return err
+		}
+		fi, err := src.Stat()
+		if err != nil {
+			src.Close()
+			return err
+		}
+		// Note: Not using %-16.16s format because we care
+		// about bytes, not runes.
+		name := fi.Name()
+		if len(name) > 16 {
+			name = name[:16]
+		} else {
+			name += strings.Repeat(" ", 16-len(name))
+		}
+		size := fi.Size()
+		fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n",
+			name, 0, 0, 0, 0644, size)
+		n, err := io.Copy(w, src)
+		src.Close()
+		// Detect files that changed size between Stat and Copy; the
+		// header already promised exactly `size` bytes.
+		if err == nil && n < size {
+			err = io.ErrUnexpectedEOF
+		} else if err == nil && n > size {
+			err = fmt.Errorf("file larger than size reported by stat")
+		}
+		if err != nil {
+			return fmt.Errorf("copying %s to %s: %v", ofile, afile, err)
+		}
+		// ar entries are 2-byte aligned; pad odd-sized members.
+		if size&1 != 0 {
+			w.WriteByte(0)
+		}
+	}
+
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	return dst.Close()
+}
+
+// setextld sets the appropriate linker flags for the specified compiler.
+// compiler[0] becomes -extld; any remaining words are prepended to an
+// existing -extldflags value or added as a new one. A user-supplied
+// -extld always wins.
+func setextld(ldflags []string, compiler []string) []string {
+	for _, f := range ldflags {
+		if f == "-extld" || strings.HasPrefix(f, "-extld=") {
+			// don't override -extld if supplied
+			return ldflags
+		}
+	}
+	ldflags = append(ldflags, "-extld="+compiler[0])
+	if len(compiler) > 1 {
+		extldflags := false
+		add := strings.Join(compiler[1:], " ")
+		for i, f := range ldflags {
+			if f == "-extldflags" && i+1 < len(ldflags) {
+				ldflags[i+1] = add + " " + ldflags[i+1]
+				extldflags = true
+				break
+			} else if strings.HasPrefix(f, "-extldflags=") {
+				ldflags[i] = "-extldflags=" + add + " " + ldflags[i][len("-extldflags="):]
+				extldflags = true
+				break
+			}
+		}
+		if !extldflags {
+			ldflags = append(ldflags, "-extldflags="+add)
+		}
+	}
+	return ldflags
+}
+
+// ld links mainpkg into the executable out using the gc linker,
+// assembling -L include paths from allactions and the ldflags implied
+// by the build configuration (install suffix, DWARF omission, external
+// linker choice, buildmode, build ID).
+func (gcToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+	importArgs := b.includeArgs("-L", allactions)
+	// C++ anywhere in the dependency tree forces the C++ compiler as
+	// the external linker driver.
+	cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
+	for _, a := range allactions {
+		if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
+			cxx = true
+		}
+	}
+	var ldflags []string
+	if buildContext.InstallSuffix != "" {
+		ldflags = append(ldflags, "-installsuffix", buildContext.InstallSuffix)
+	}
+	if root.p.omitDWARF {
+		ldflags = append(ldflags, "-w")
+	}
+
+	// If the user has not specified the -extld option, then specify the
+	// appropriate linker. In case of C++ code, use the compiler named
+	// by the CXX environment variable or defaultCXX if CXX is not set.
+	// Else, use the CC environment variable and defaultCC as fallback.
+	var compiler []string
+	if cxx {
+		compiler = envList("CXX", defaultCXX)
+	} else {
+		compiler = envList("CC", defaultCC)
+	}
+	ldflags = setextld(ldflags, compiler)
+	ldflags = append(ldflags, "-buildmode="+ldBuildmode)
+	if root.p.buildID != "" {
+		ldflags = append(ldflags, "-buildid="+root.p.buildID)
+	}
+	ldflags = append(ldflags, buildLdflags...)
+
+	// On OS X when using external linking to build a shared library,
+	// the argument passed here to -o ends up recorded in the final
+	// shared library in the LC_ID_DYLIB load command.
+	// To avoid putting the temporary output directory name there
+	// (and making the resulting shared library useless),
+	// run the link in the output directory so that -o can name
+	// just the final path element.
+	dir := "."
+	if goos == "darwin" && buildBuildmode == "c-shared" {
+		dir, out = filepath.Split(out)
+	}
+
+	return b.run(dir, root.p.ImportPath, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags, mainpkg)
+}
+
+// ldShared links the packages built by toplevelactions into the shared
+// library out with -buildmode=shared, passing importpath=archive pairs
+// for each top-level package archive.
+func (gcToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+	importArgs := b.includeArgs("-L", allactions)
+	ldflags := []string{"-installsuffix", buildContext.InstallSuffix}
+	ldflags = append(ldflags, "-buildmode=shared")
+	ldflags = append(ldflags, buildLdflags...)
+	cxx := false
+	for _, a := range allactions {
+		if a.p != nil && (len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0) {
+			cxx = true
+		}
+	}
+	// If the user has not specified the -extld option, then specify the
+	// appropriate linker. In case of C++ code, use the compiler named
+	// by the CXX environment variable or defaultCXX if CXX is not set.
+	// Else, use the CC environment variable and defaultCC as fallback.
+	var compiler []string
+	if cxx {
+		compiler = envList("CXX", defaultCXX)
+	} else {
+		compiler = envList("CC", defaultCC)
+	}
+	ldflags = setextld(ldflags, compiler)
+	for _, d := range toplevelactions {
+		if !strings.HasSuffix(d.target, ".a") { // omit unsafe etc and actions for other shared libraries
+			continue
+		}
+		ldflags = append(ldflags, d.p.ImportPath+"="+d.target)
+	}
+	return b.run(".", out, nil, buildToolExec, tool("link"), "-o", out, importArgs, ldflags)
+}
+
+// cc reports an error: the gc toolchain compiles C only through cgo.
+func (gcToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+	return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(p.Dir, cfile))
+}
+
+// The Gccgo toolchain.
+type gccgoToolchain struct{}
+
+var gccgoName, gccgoBin string
+
+func init() {
+ gccgoName = os.Getenv("GCCGO")
+ if gccgoName == "" {
+ gccgoName = "gccgo"
+ }
+ gccgoBin, _ = exec.LookPath(gccgoName)
+}
+
+func (gccgoToolchain) compiler() string {
+ return gccgoBin
+}
+
+func (gccgoToolchain) linker() string {
+ return gccgoBin
+}
+
+// gc compiles the package's Go files with gccgo into a single object
+// file obj/_go_.o, returning the object path and the combined
+// compiler output.
+func (tools gccgoToolchain) gc(b *builder, p *Package, archive, obj string, asmhdr bool, importArgs []string, gofiles []string) (ofile string, output []byte, err error) {
+	out := "_go_.o"
+	ofile = obj + out
+	gcargs := []string{"-g"}
+	gcargs = append(gcargs, b.gccArchArgs()...)
+	// Embed the package path so symbols from different packages
+	// don't collide.
+	if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+		gcargs = append(gcargs, "-fgo-pkgpath="+pkgpath)
+	}
+	if p.localPrefix != "" {
+		gcargs = append(gcargs, "-fgo-relative-import-path="+p.localPrefix)
+	}
+	args := stringList(tools.compiler(), importArgs, "-c", gcargs, "-o", ofile, buildGccgoflags)
+	for _, f := range gofiles {
+		args = append(args, mkAbs(p.Dir, f))
+	}
+
+	output, err = b.runOut(p.Dir, p.ImportPath, nil, args)
+	return ofile, output, err
+}
+
+// asm assembles a single .s file with gccgo, preprocessing it with cpp
+// so GOOS_/GOARCH_/GOPKGPATH defines are available.
+func (tools gccgoToolchain) asm(b *builder, p *Package, obj, ofile, sfile string) error {
+	sfile = mkAbs(p.Dir, sfile)
+	defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
+	if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
+		defs = append(defs, `-D`, `GOPKGPATH=`+pkgpath)
+	}
+	defs = tools.maybePIC(defs)
+	defs = append(defs, b.gccArchArgs()...)
+	return b.run(p.Dir, p.ImportPath, nil, tools.compiler(), "-xassembler-with-cpp", "-I", obj, "-c", "-o", ofile, defs, sfile)
+}
+
+// pkgpath returns the gccgo archive path for package p under basedir,
+// e.g. basedir/dir/libname.a for import path dir/name.
+func (gccgoToolchain) pkgpath(basedir string, p *Package) string {
+	end := filepath.FromSlash(p.ImportPath + ".a")
+	afile := filepath.Join(basedir, end)
+	// add "lib" to the final element
+	return filepath.Join(filepath.Dir(afile), "lib"+filepath.Base(afile))
+}
+
+// pack archives the given object files into afile using the system "ar".
+func (gccgoToolchain) pack(b *builder, p *Package, objDir, afile string, ofiles []string) error {
+	var absOfiles []string
+	for _, f := range ofiles {
+		absOfiles = append(absOfiles, mkAbs(objDir, f))
+	}
+	return b.run(p.Dir, p.ImportPath, nil, "ar", "rc", mkAbs(objDir, afile), absOfiles)
+}
+
+// ld links root's package and all of its dependencies into out using
+// gccgo, honoring ldBuildmode (exe, c-archive, c-shared).
+func (tools gccgoToolchain) ld(b *builder, root *action, out string, allactions []*action, mainpkg string, ofiles []string) error {
+	// gccgo needs explicit linking with all package dependencies,
+	// and all LDFLAGS from cgo dependencies.
+	apackagesSeen := make(map[*Package]bool)
+	afiles := []string{}
+	shlibs := []string{}
+	xfiles := []string{}
+	ldflags := b.gccArchArgs()
+	cgoldflags := []string{}
+	usesCgo := false
+	cxx := len(root.p.CXXFiles) > 0 || len(root.p.SwigCXXFiles) > 0
+	objc := len(root.p.MFiles) > 0
+
+	actionsSeen := make(map[*action]bool)
+	// Make a pre-order depth-first traversal of the action graph, taking note of
+	// whether a shared library action has been seen on the way to an action (the
+	// construction of the graph means that if any path to a node passes through
+	// a shared library action, they all do).
+	var walk func(a *action, seenShlib bool)
+	walk = func(a *action, seenShlib bool) {
+		if actionsSeen[a] {
+			return
+		}
+		actionsSeen[a] = true
+		if a.p != nil && !seenShlib {
+			if a.p.Standard {
+				return
+			}
+			// We record the target of the first time we see a .a file
+			// for a package to make sure that we prefer the 'install'
+			// rather than the 'build' location (which may not exist any
+			// more). We still need to traverse the dependencies of the
+			// build action though so saying
+			// if apackagesSeen[a.p] { return }
+			// doesn't work.
+			if !apackagesSeen[a.p] {
+				apackagesSeen[a.p] = true
+				if a.p.fake && a.p.external {
+					// external _tests, if present must come before
+					// internal _tests. Store these on a separate list
+					// and place them at the head after this loop.
+					xfiles = append(xfiles, a.target)
+				} else if a.p.fake {
+					// move _test files to the top of the link order
+					afiles = append([]string{a.target}, afiles...)
+				} else {
+					afiles = append(afiles, a.target)
+				}
+			}
+		}
+		if strings.HasSuffix(a.target, ".so") {
+			shlibs = append(shlibs, a.target)
+			seenShlib = true
+		}
+		for _, a1 := range a.deps {
+			walk(a1, seenShlib)
+		}
+	}
+	for _, a1 := range root.deps {
+		walk(a1, false)
+	}
+	// External-test archives go first in the link order.
+	afiles = append(xfiles, afiles...)
+
+	for _, a := range allactions {
+		// Gather CgoLDFLAGS, but not from standard packages.
+		// The go tool can dig up runtime/cgo from GOROOT and
+		// think that it should use its CgoLDFLAGS, but gccgo
+		// doesn't use runtime/cgo.
+		if a.p == nil {
+			continue
+		}
+		if !a.p.Standard {
+			cgoldflags = append(cgoldflags, a.p.CgoLDFLAGS...)
+		}
+		if len(a.p.CgoFiles) > 0 {
+			usesCgo = true
+		}
+		if a.p.usesSwig() {
+			usesCgo = true
+		}
+		if len(a.p.CXXFiles) > 0 || len(a.p.SwigCXXFiles) > 0 {
+			cxx = true
+		}
+		if len(a.p.MFiles) > 0 {
+			objc = true
+		}
+	}
+
+	// Pull the package archives in whole, so otherwise-unreferenced
+	// init code and metadata are not dropped by the linker.
+	ldflags = append(ldflags, "-Wl,--whole-archive")
+	ldflags = append(ldflags, afiles...)
+	ldflags = append(ldflags, "-Wl,--no-whole-archive")
+
+	ldflags = append(ldflags, cgoldflags...)
+	ldflags = append(ldflags, envList("CGO_LDFLAGS", "")...)
+	ldflags = append(ldflags, root.p.CgoLDFLAGS...)
+
+	// Group the libraries so mutually-dependent archives resolve
+	// regardless of command-line order.
+	ldflags = stringList("-Wl,-(", ldflags, "-Wl,-)")
+
+	for _, shlib := range shlibs {
+		ldflags = append(
+			ldflags,
+			"-L"+filepath.Dir(shlib),
+			"-Wl,-rpath="+filepath.Dir(shlib),
+			"-l"+strings.TrimSuffix(
+				strings.TrimPrefix(filepath.Base(shlib), "lib"),
+				".so"))
+	}
+
+	var realOut string
+	switch ldBuildmode {
+	case "exe":
+		if usesCgo && goos == "linux" {
+			// Export symbols from the executable for cgo.
+			ldflags = append(ldflags, "-Wl,-E")
+		}
+
+	case "c-archive":
+		// Link the Go files into a single .o, and also link
+		// in -lgolibbegin.
+		//
+		// We need to use --whole-archive with -lgolibbegin
+		// because it doesn't define any symbols that will
+		// cause the contents to be pulled in; it's just
+		// initialization code.
+		//
+		// The user remains responsible for linking against
+		// -lgo -lpthread -lm in the final link. We can't use
+		// -r to pick them up because we can't combine
+		// split-stack and non-split-stack code in a single -r
+		// link, and libgo picks up non-split-stack code from
+		// libffi.
+		ldflags = append(ldflags, "-Wl,-r", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive")
+
+		if b.gccSupportsNoPie() {
+			ldflags = append(ldflags, "-no-pie")
+		}
+
+		// We are creating an object file, so we don't want a build ID.
+		ldflags = b.disableBuildID(ldflags)
+
+		// Produce the partial link as out.o, then "ar" it into the
+		// real destination after the link step below.
+		realOut = out
+		out = out + ".o"
+
+	case "c-shared":
+		ldflags = append(ldflags, "-shared", "-nostdlib", "-Wl,--whole-archive", "-lgolibbegin", "-Wl,--no-whole-archive", "-lgo", "-lgcc_s", "-lgcc", "-lc", "-lgcc")
+
+	default:
+		fatalf("-buildmode=%s not supported for gccgo", ldBuildmode)
+	}
+
+	switch ldBuildmode {
+	case "exe", "c-shared":
+		if cxx {
+			ldflags = append(ldflags, "-lstdc++")
+		}
+		if objc {
+			ldflags = append(ldflags, "-lobjc")
+		}
+	}
+
+	if err := b.run(".", root.p.ImportPath, nil, tools.linker(), "-o", out, ofiles, ldflags, buildGccgoflags); err != nil {
+		return err
+	}
+
+	switch ldBuildmode {
+	case "c-archive":
+		if err := b.run(".", root.p.ImportPath, nil, "ar", "rc", realOut, out); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ldShared links the top-level package archives into a Go shared
+// library with gccgo, pulling in any shared libraries that appear in
+// the action graph via -L/-rpath/-l flags.
+func (tools gccgoToolchain) ldShared(b *builder, toplevelactions []*action, out string, allactions []*action) error {
+	args := []string{"-o", out, "-shared", "-nostdlib", "-zdefs", "-Wl,--whole-archive"}
+	for _, a := range toplevelactions {
+		args = append(args, a.target)
+	}
+	args = append(args, "-Wl,--no-whole-archive", "-shared", "-nostdlib", "-lgo", "-lgcc_s", "-lgcc", "-lc")
+	shlibs := []string{}
+	for _, a := range allactions {
+		if strings.HasSuffix(a.target, ".so") {
+			shlibs = append(shlibs, a.target)
+		}
+	}
+	for _, shlib := range shlibs {
+		// Turn /dir/libfoo.so into -L/dir -lfoo with a matching rpath.
+		args = append(
+			args,
+			"-L"+filepath.Dir(shlib),
+			"-Wl,-rpath="+filepath.Dir(shlib),
+			"-l"+strings.TrimSuffix(
+				strings.TrimPrefix(filepath.Base(shlib), "lib"),
+				".so"))
+	}
+	return b.run(".", out, nil, tools.linker(), args, buildGccgoflags)
+}
+
+// cc compiles a single C file for a gccgo build using the CC compiler
+// (defaultCC fallback), with GOOS_/GOARCH_/GOPKGPATH defines and Go's
+// pkg/include directory on the include path.
+func (tools gccgoToolchain) cc(b *builder, p *Package, objdir, ofile, cfile string) error {
+	inc := filepath.Join(goroot, "pkg", "include")
+	cfile = mkAbs(p.Dir, cfile)
+	defs := []string{"-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch}
+	defs = append(defs, b.gccArchArgs()...)
+	if pkgpath := gccgoCleanPkgpath(p); pkgpath != "" {
+		defs = append(defs, `-D`, `GOPKGPATH="`+pkgpath+`"`)
+	}
+	// Split stacks are only supported on x86 here.
+	switch goarch {
+	case "386", "amd64":
+		defs = append(defs, "-fsplit-stack")
+	}
+	defs = tools.maybePIC(defs)
+	return b.run(p.Dir, p.ImportPath, nil, envList("CC", defaultCC), "-Wall", "-g",
+		"-I", objdir, "-I", inc, "-o", ofile, defs, "-c", cfile)
+}
+
+// maybePIC adds -fPIC to the list of arguments if needed
+// (i.e. when building position-independent output).
+func (tools gccgoToolchain) maybePIC(args []string) []string {
+	switch buildBuildmode {
+	case "c-shared", "shared":
+		args = append(args, "-fPIC")
+	}
+	return args
+}
+
+// gccgoPkgpath returns the -fgo-pkgpath value for p: its import path,
+// or "" for commands (package main) not forced to build as libraries.
+func gccgoPkgpath(p *Package) string {
+	if p.build.IsCommand() && !p.forceLibrary {
+		return ""
+	}
+	return p.ImportPath
+}
+
+// gccgoCleanPkgpath returns p's pkgpath with every character outside
+// [A-Za-z0-9] replaced by '_', making it safe to embed in a C define.
+func gccgoCleanPkgpath(p *Package) string {
+	clean := func(r rune) rune {
+		switch {
+		case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z',
+			'0' <= r && r <= '9':
+			return r
+		}
+		return '_'
+	}
+	return strings.Map(clean, gccgoPkgpath(p))
+}
+
+// gcc runs the gcc C compiler to create an object from a single C file.
+func (b *builder) gcc(p *Package, out string, flags []string, cfile string) error {
+	return b.ccompile(p, out, flags, cfile, b.gccCmd(p.Dir))
+}
+
+// gxx runs the g++ C++ compiler to create an object from a single C++ file.
+func (b *builder) gxx(p *Package, out string, flags []string, cxxfile string) error {
+	return b.ccompile(p, out, flags, cxxfile, b.gxxCmd(p.Dir))
+}
+
+// ccompile runs the given C or C++ compiler and creates an object from a single source file.
+// file is resolved relative to p.Dir if it is not already absolute.
+func (b *builder) ccompile(p *Package, out string, flags []string, file string, compiler []string) error {
+	file = mkAbs(p.Dir, file)
+	return b.run(p.Dir, p.ImportPath, nil, compiler, flags, "-o", out, "-c", file)
+}
+
+// gccld runs the gcc linker to create an executable from a set of object files.
+// The C++ driver is used when the package contains any C++ sources so the
+// C++ runtime is linked correctly.
+func (b *builder) gccld(p *Package, out string, flags []string, obj []string) error {
+	var cmd []string
+	if len(p.CXXFiles) > 0 || len(p.SwigCXXFiles) > 0 {
+		cmd = b.gxxCmd(p.Dir)
+	} else {
+		cmd = b.gccCmd(p.Dir)
+	}
+	return b.run(p.Dir, p.ImportPath, nil, cmd, "-o", out, obj, flags)
+}
+
+// gccCmd returns a gcc command line prefix
+// defaultCC is defined in zdefaultcc.go, written by cmd/dist.
+func (b *builder) gccCmd(objdir string) []string {
+	return b.ccompilerCmd("CC", defaultCC, objdir)
+}
+
+// gxxCmd returns a g++ command line prefix
+// defaultCXX is defined in zdefaultcc.go, written by cmd/dist.
+func (b *builder) gxxCmd(objdir string) []string {
+	return b.ccompilerCmd("CXX", defaultCXX, objdir)
+}
+
+// ccompilerCmd returns a command line prefix for the given environment
+// variable and using the default command when the variable is empty.
+// It appends platform-specific flags (PIC, threading, arch, diagnostics).
+func (b *builder) ccompilerCmd(envvar, defcmd, objdir string) []string {
+	// NOTE: env.go's mkEnv knows that the first three
+	// strings returned are "gcc", "-I", objdir (and cuts them off).
+
+	compiler := envList(envvar, defcmd)
+	a := []string{compiler[0], "-I", objdir}
+	a = append(a, compiler[1:]...)
+
+	// Definitely want -fPIC but on Windows gcc complains
+	// "-fPIC ignored for target (all code is position independent)"
+	if goos != "windows" {
+		a = append(a, "-fPIC")
+	}
+	a = append(a, b.gccArchArgs()...)
+	// gcc-4.5 and beyond require explicit "-pthread" flag
+	// for multithreading with pthread library.
+	if buildContext.CgoEnabled {
+		switch goos {
+		case "windows":
+			a = append(a, "-mthreads")
+		default:
+			a = append(a, "-pthread")
+		}
+	}
+
+	if strings.Contains(a[0], "clang") {
+		// disable ASCII art in clang errors, if possible
+		a = append(a, "-fno-caret-diagnostics")
+		// clang is too smart about command-line arguments
+		a = append(a, "-Qunused-arguments")
+	}
+
+	// disable word wrapping in error messages
+	a = append(a, "-fmessage-length=0")
+
+	// On OS X, some of the compilers behave as if -fno-common
+	// is always set, and the Mach-O linker in 6l/8l assumes this.
+	// See https://golang.org/issue/3253.
+	if goos == "darwin" {
+		a = append(a, "-fno-common")
+	}
+
+	return a
+}
+
+// On systems with PIE (position independent executables) enabled by default,
+// -no-pie must be passed when doing a partial link with -Wl,-r. But -no-pie is
+// not supported by all compilers.
+//
+// gccSupportsNoPie probes support by compiling an empty C file with
+// -no-pie; it reports true only when the compile succeeds without the
+// word "unrecognized" in the output.
+func (b *builder) gccSupportsNoPie() bool {
+	if goos != "linux" {
+		// On some BSD platforms, error messages from the
+		// compiler make it to the console despite cmd.Std*
+		// all being nil. As -no-pie is only required on linux
+		// systems so far, we only test there.
+		return false
+	}
+	src := filepath.Join(b.work, "trivial.c")
+	if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
+		return false
+	}
+	cmdArgs := b.gccCmd(b.work)
+	cmdArgs = append(cmdArgs, "-no-pie", "-c", "trivial.c")
+	// Under -n (dry run) never execute; under -x just show the command.
+	if buildN || buildX {
+		b.showcmd(b.work, "%s", joinUnambiguously(cmdArgs))
+		if buildN {
+			return false
+		}
+	}
+	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
+	cmd.Dir = b.work
+	cmd.Env = envForDir(cmd.Dir, os.Environ())
+	out, err := cmd.CombinedOutput()
+	return err == nil && !bytes.Contains(out, []byte("unrecognized"))
+}
+
+// gccArchArgs returns arguments to pass to gcc based on the architecture.
+// Architectures not listed need no extra flags.
+func (b *builder) gccArchArgs() []string {
+	switch goarch {
+	case "386":
+		return []string{"-m32"}
+	case "amd64", "amd64p32":
+		return []string{"-m64"}
+	case "arm":
+		return []string{"-marm"} // not thumb
+	case "s390x":
+		return []string{"-m64", "-march=z196"}
+	}
+	return nil
+}
+
+// envList returns the value of the given environment variable broken
+// into fields, using the default value when the variable is empty.
+// Note that the default is also field-split.
+func envList(key, def string) []string {
+	v := os.Getenv(key)
+	if v == "" {
+		v = def
+	}
+	return strings.Fields(v)
+}
+
+// Return the flags to use when invoking the C or C++ compilers, or cgo.
+// Each result combines the corresponding CGO_* environment variable with
+// the package's own Cgo* directives; def selects whether "-g -O2" is the
+// environment default.
+func (b *builder) cflags(p *Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
+	var defaults string
+	if def {
+		defaults = "-g -O2"
+	}
+
+	cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
+	cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
+	cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
+	ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
+	return
+}
+
+// cgoRe matches path characters that must be mangled to '_' when
+// deriving generated-file names from source paths.
+var cgoRe = regexp.MustCompile(`[/\\:]`)
+
+// cgo runs the cgo tool over the package's cgo files, compiles the
+// generated and accompanying C/C++/ObjC sources, and returns the Go
+// files (outGo) and object files (outObj) to add to the package build.
+func (b *builder) cgo(p *Package, cgoExe, obj string, pcCFLAGS, pcLDFLAGS, cgofiles, gccfiles, gxxfiles, mfiles []string) (outGo, outObj []string, err error) {
+	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, cgoLDFLAGS := b.cflags(p, true)
+	_, cgoexeCFLAGS, _, _ := b.cflags(p, false)
+	cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
+	cgoLDFLAGS = append(cgoLDFLAGS, pcLDFLAGS...)
+	// If we are compiling Objective-C code, then we need to link against libobjc
+	if len(mfiles) > 0 {
+		cgoLDFLAGS = append(cgoLDFLAGS, "-lobjc")
+	}
+
+	if buildMSan && p.ImportPath != "runtime/cgo" {
+		cgoCFLAGS = append([]string{"-fsanitize=memory"}, cgoCFLAGS...)
+		cgoLDFLAGS = append([]string{"-fsanitize=memory"}, cgoLDFLAGS...)
+	}
+
+	// Allows including _cgo_export.h from .[ch] files in the package.
+	cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", obj)
+
+	// cgo
+	// TODO: CGO_FLAGS?
+	gofiles := []string{obj + "_cgo_gotypes.go"}
+	cfiles := []string{"_cgo_main.c", "_cgo_export.c"}
+	for _, fn := range cgofiles {
+		f := cgoRe.ReplaceAllString(fn[:len(fn)-2], "_")
+		gofiles = append(gofiles, obj+f+"cgo1.go")
+		cfiles = append(cfiles, f+"cgo2.c")
+	}
+	defunC := obj + "_cgo_defun.c"
+
+	cgoflags := []string{}
+	// TODO: make cgo not depend on $GOARCH?
+
+	if p.Standard && p.ImportPath == "runtime/cgo" {
+		cgoflags = append(cgoflags, "-import_runtime_cgo=false")
+	}
+	if p.Standard && (p.ImportPath == "runtime/race" || p.ImportPath == "runtime/msan" || p.ImportPath == "runtime/cgo") {
+		cgoflags = append(cgoflags, "-import_syscall=false")
+	}
+
+	// Update $CGO_LDFLAGS with p.CgoLDFLAGS.
+	var cgoenv []string
+	if len(cgoLDFLAGS) > 0 {
+		flags := make([]string, len(cgoLDFLAGS))
+		for i, f := range cgoLDFLAGS {
+			flags[i] = strconv.Quote(f)
+		}
+		cgoenv = []string{"CGO_LDFLAGS=" + strings.Join(flags, " ")}
+	}
+
+	if _, ok := buildToolchain.(gccgoToolchain); ok {
+		switch goarch {
+		case "386", "amd64":
+			cgoCFLAGS = append(cgoCFLAGS, "-fsplit-stack")
+		}
+		cgoflags = append(cgoflags, "-gccgo")
+		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+			cgoflags = append(cgoflags, "-gccgopkgpath="+pkgpath)
+		}
+	}
+
+	switch buildBuildmode {
+	case "c-archive", "c-shared":
+		// Tell cgo that if there are any exported functions
+		// it should generate a header file that C code can
+		// #include.
+		cgoflags = append(cgoflags, "-exportheader="+obj+"_cgo_install.h")
+	}
+
+	if err := b.run(p.Dir, p.ImportPath, cgoenv, buildToolExec, cgoExe, "-objdir", obj, "-importpath", p.ImportPath, cgoflags, "--", cgoCPPFLAGS, cgoexeCFLAGS, cgofiles); err != nil {
+		return nil, nil, err
+	}
+	outGo = append(outGo, gofiles...)
+
+	// cc _cgo_defun.c
+	_, gccgo := buildToolchain.(gccgoToolchain)
+	if gccgo {
+		defunObj := obj + "_cgo_defun.o"
+		if err := buildToolchain.cc(b, p, obj, defunObj, defunC); err != nil {
+			return nil, nil, err
+		}
+		outObj = append(outObj, defunObj)
+	}
+
+	// gcc
+	var linkobj []string
+
+	var bareLDFLAGS []string
+	// When linking relocatable objects, various flags need to be
+	// filtered out as they are inapplicable and can cause some linkers
+	// to fail.
+	for i := 0; i < len(cgoLDFLAGS); i++ {
+		f := cgoLDFLAGS[i]
+		switch {
+		// skip "-lc" or "-l somelib"
+		case strings.HasPrefix(f, "-l"):
+			if f == "-l" {
+				i++
+			}
+		// skip "-framework X" on Darwin
+		case goos == "darwin" && f == "-framework":
+			i++
+		// skip "*.{dylib,so,dll}"
+		case strings.HasSuffix(f, ".dylib"),
+			strings.HasSuffix(f, ".so"),
+			strings.HasSuffix(f, ".dll"):
+		// Remove any -fsanitize=foo flags.
+		// Otherwise the compiler driver thinks that we are doing final link
+		// and links sanitizer runtime into the object file. But we are not doing
+		// the final link, we will link the resulting object file again. And
+		// so the program ends up with two copies of sanitizer runtime.
+		// See issue 8788 for details.
+		case strings.HasPrefix(f, "-fsanitize="):
+			continue
+		// runpath flags not applicable unless building a shared
+		// object or executable; see issue 12115 for details. This
+		// is necessary as Go currently does not offer a way to
+		// specify the set of LDFLAGS that only apply to shared
+		// objects.
+		case strings.HasPrefix(f, "-Wl,-rpath"):
+			if f == "-Wl,-rpath" || f == "-Wl,-rpath-link" {
+				// Skip following argument to -rpath* too.
+				i++
+			}
+		default:
+			bareLDFLAGS = append(bareLDFLAGS, f)
+		}
+	}
+
+	var staticLibs []string
+	if goos == "windows" {
+		// libmingw32 and libmingwex have some inter-dependencies,
+		// so must use linker groups.
+		staticLibs = []string{"-Wl,--start-group", "-lmingwex", "-lmingw32", "-Wl,--end-group"}
+	}
+
+	cflags := stringList(cgoCPPFLAGS, cgoCFLAGS)
+	for _, cfile := range cfiles {
+		ofile := obj + cfile[:len(cfile)-1] + "o"
+		if err := b.gcc(p, ofile, cflags, obj+cfile); err != nil {
+			return nil, nil, err
+		}
+		linkobj = append(linkobj, ofile)
+		// _cgo_main.o is only used for the dynimport link below,
+		// not in the final package objects.
+		if !strings.HasSuffix(ofile, "_cgo_main.o") {
+			outObj = append(outObj, ofile)
+		}
+	}
+
+	for _, file := range gccfiles {
+		ofile := obj + cgoRe.ReplaceAllString(file[:len(file)-1], "_") + "o"
+		if err := b.gcc(p, ofile, cflags, file); err != nil {
+			return nil, nil, err
+		}
+		linkobj = append(linkobj, ofile)
+		outObj = append(outObj, ofile)
+	}
+
+	cxxflags := stringList(cgoCPPFLAGS, cgoCXXFLAGS)
+	for _, file := range gxxfiles {
+		// Append .o to the file, just in case the pkg has file.c and file.cpp
+		ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
+		if err := b.gxx(p, ofile, cxxflags, file); err != nil {
+			return nil, nil, err
+		}
+		linkobj = append(linkobj, ofile)
+		outObj = append(outObj, ofile)
+	}
+
+	for _, file := range mfiles {
+		// Append .o to the file, just in case the pkg has file.c and file.m
+		ofile := obj + cgoRe.ReplaceAllString(file, "_") + ".o"
+		if err := b.gcc(p, ofile, cflags, file); err != nil {
+			return nil, nil, err
+		}
+		linkobj = append(linkobj, ofile)
+		outObj = append(outObj, ofile)
+	}
+
+	linkobj = append(linkobj, p.SysoFiles...)
+	dynobj := obj + "_cgo_.o"
+	pie := (goarch == "arm" && goos == "linux") || goos == "android"
+	if pie { // we need to use -pie for Linux/ARM to get accurate imported sym
+		cgoLDFLAGS = append(cgoLDFLAGS, "-pie")
+	}
+	if err := b.gccld(p, dynobj, cgoLDFLAGS, linkobj); err != nil {
+		return nil, nil, err
+	}
+	if pie { // but we don't need -pie for normal cgo programs
+		cgoLDFLAGS = cgoLDFLAGS[0 : len(cgoLDFLAGS)-1]
+	}
+
+	if _, ok := buildToolchain.(gccgoToolchain); ok {
+		// we don't use dynimport when using gccgo.
+		return outGo, outObj, nil
+	}
+
+	// cgo -dynimport
+	importGo := obj + "_cgo_import.go"
+	cgoflags = []string{}
+	if p.Standard && p.ImportPath == "runtime/cgo" {
+		cgoflags = append(cgoflags, "-dynlinker") // record path to dynamic linker
+	}
+	if err := b.run(p.Dir, p.ImportPath, nil, buildToolExec, cgoExe, "-objdir", obj, "-dynpackage", p.Name, "-dynimport", dynobj, "-dynout", importGo, cgoflags); err != nil {
+		return nil, nil, err
+	}
+	outGo = append(outGo, importGo)
+
+	// Partially link all gcc-produced objects into a single _all.o.
+	ofile := obj + "_all.o"
+	var gccObjs, nonGccObjs []string
+	for _, f := range outObj {
+		if strings.HasSuffix(f, ".o") {
+			gccObjs = append(gccObjs, f)
+		} else {
+			nonGccObjs = append(nonGccObjs, f)
+		}
+	}
+	ldflags := stringList(bareLDFLAGS, "-Wl,-r", "-nostdlib", staticLibs)
+
+	if b.gccSupportsNoPie() {
+		ldflags = append(ldflags, "-no-pie")
+	}
+
+	// We are creating an object file, so we don't want a build ID.
+	ldflags = b.disableBuildID(ldflags)
+
+	if err := b.gccld(p, ofile, ldflags, gccObjs); err != nil {
+		return nil, nil, err
+	}
+
+	// NOTE(rsc): The importObj is a 5c/6c/8c object and on Windows
+	// must be processed before the gcc-generated objects.
+	// Put it first. https://golang.org/issue/2601
+	outObj = stringList(nonGccObjs, ofile)
+
+	return outGo, outObj, nil
+}
+
+// Run SWIG on all SWIG input files.
+// TODO: Don't build a shared library, once SWIG emits the necessary
+// pragmas for external linking.
+//
+// swig returns the generated Go files plus the generated C and C++
+// wrapper files, for .swig and .swigcxx inputs respectively.
+func (b *builder) swig(p *Package, obj string, pcCFLAGS []string) (outGo, outC, outCXX []string, err error) {
+	if err := b.swigVersionCheck(); err != nil {
+		return nil, nil, nil, err
+	}
+
+	intgosize, err := b.swigIntSize(obj)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	for _, f := range p.SwigFiles {
+		goFile, cFile, err := b.swigOne(p, f, obj, pcCFLAGS, false, intgosize)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		if goFile != "" {
+			outGo = append(outGo, goFile)
+		}
+		if cFile != "" {
+			outC = append(outC, cFile)
+		}
+	}
+	for _, f := range p.SwigCXXFiles {
+		goFile, cxxFile, err := b.swigOne(p, f, obj, pcCFLAGS, true, intgosize)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		if goFile != "" {
+			outGo = append(outGo, goFile)
+		}
+		if cxxFile != "" {
+			outCXX = append(outCXX, cxxFile)
+		}
+	}
+	return outGo, outC, outCXX, nil
+}
+
+// Make sure SWIG is new enough.
+// The version check runs at most once per process; swigCheck caches
+// its result.
+var (
+	swigCheckOnce sync.Once
+	swigCheck     error
+)
+
+// swigDoVersionCheck runs "swig -version" and returns an error if the
+// reported version is older than 3.0.6. If no version number can be
+// parsed, it optimistically returns nil.
+func (b *builder) swigDoVersionCheck() error {
+	out, err := b.runOut("", "", nil, "swig", "-version")
+	if err != nil {
+		return err
+	}
+	re := regexp.MustCompile(`[vV]ersion +([\d]+)([.][\d]+)?([.][\d]+)?`)
+	matches := re.FindSubmatch(out)
+	if matches == nil {
+		// Can't find version number; hope for the best.
+		return nil
+	}
+
+	major, err := strconv.Atoi(string(matches[1]))
+	if err != nil {
+		// Can't find version number; hope for the best.
+		return nil
+	}
+	const errmsg = "must have SWIG version >= 3.0.6"
+	if major < 3 {
+		return errors.New(errmsg)
+	}
+	if major > 3 {
+		// 4.0 or later
+		return nil
+	}
+
+	// We have SWIG version 3.x.
+	if len(matches[2]) > 0 {
+		// matches[2] includes the leading '.'; strip it before parsing.
+		minor, err := strconv.Atoi(string(matches[2][1:]))
+		if err != nil {
+			return nil
+		}
+		if minor > 0 {
+			// 3.1 or later
+			return nil
+		}
+	}
+
+	// We have SWIG version 3.0.x.
+	if len(matches[3]) > 0 {
+		patch, err := strconv.Atoi(string(matches[3][1:]))
+		if err != nil {
+			return nil
+		}
+		if patch < 6 {
+			// Before 3.0.6.
+			return errors.New(errmsg)
+		}
+	}
+
+	return nil
+}
+
+// swigVersionCheck performs the SWIG version check once per process
+// and returns the cached result on subsequent calls.
+func (b *builder) swigVersionCheck() error {
+	swigCheckOnce.Do(func() {
+		swigCheck = b.swigDoVersionCheck()
+	})
+	return swigCheck
+}
+
+// This code fails to build if sizeof(int) <= 32
+// (the untyped constant 1<<32 overflows a 32-bit int).
+const swigIntSizeCode = `
+package main
+const i int = 1 << 32
+`
+
+// Determine the size of int on the target system for the -intgosize option
+// of swig >= 2.0.9
+// The probe compiles swigIntSizeCode: a failure means int is 32 bits.
+func (b *builder) swigIntSize(obj string) (intsize string, err error) {
+	if buildN {
+		// Dry run: emit a placeholder instead of compiling.
+		return "$INTBITS", nil
+	}
+	src := filepath.Join(b.work, "swig_intsize.go")
+	if err = ioutil.WriteFile(src, []byte(swigIntSizeCode), 0666); err != nil {
+		return
+	}
+	srcs := []string{src}
+
+	p := goFilesPackage(srcs)
+
+	if _, _, e := buildToolchain.gc(b, p, "", obj, false, nil, srcs); e != nil {
+		return "32", nil
+	}
+	return "64", nil
+}
+
+// Run SWIG on one SWIG input file.
+// It returns the generated Go file and the generated C or C++ wrapper
+// file (outC), both under obj.
+func (b *builder) swigOne(p *Package, file, obj string, pcCFLAGS []string, cxx bool, intgosize string) (outGo, outC string, err error) {
+	cgoCPPFLAGS, cgoCFLAGS, cgoCXXFLAGS, _ := b.cflags(p, true)
+	var cflags []string
+	if cxx {
+		cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCXXFLAGS)
+	} else {
+		cflags = stringList(cgoCPPFLAGS, pcCFLAGS, cgoCFLAGS)
+	}
+
+	n := 5 // length of ".swig"
+	if cxx {
+		n = 8 // length of ".swigcxx"
+	}
+	base := file[:len(file)-n]
+	goFile := base + ".go"
+	gccBase := base + "_wrap."
+	gccExt := "c"
+	if cxx {
+		gccExt = "cxx"
+	}
+
+	_, gccgo := buildToolchain.(gccgoToolchain)
+
+	// swig
+	args := []string{
+		"-go",
+		"-cgo",
+		"-intgosize", intgosize,
+		"-module", base,
+		"-o", obj + gccBase + gccExt,
+		"-outdir", obj,
+	}
+
+	// Pass through only -I flags; other cflags are not SWIG options.
+	for _, f := range cflags {
+		if len(f) > 3 && f[:2] == "-I" {
+			args = append(args, f)
+		}
+	}
+
+	if gccgo {
+		args = append(args, "-gccgo")
+		if pkgpath := gccgoPkgpath(p); pkgpath != "" {
+			args = append(args, "-go-pkgpath", pkgpath)
+		}
+	}
+	if cxx {
+		args = append(args, "-c++")
+	}
+
+	out, err := b.runOut(p.Dir, p.ImportPath, nil, "swig", args, file)
+	if err != nil {
+		if len(out) > 0 {
+			// An old SWIG rejects the -intgosize/-cgo options;
+			// translate that into a version error.
+			if bytes.Contains(out, []byte("-intgosize")) || bytes.Contains(out, []byte("-cgo")) {
+				return "", "", errors.New("must have SWIG version >= 3.0.6")
+			}
+			b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig error
+			return "", "", errPrintedOutput
+		}
+		return "", "", err
+	}
+	if len(out) > 0 {
+		b.showOutput(p.Dir, p.ImportPath, b.processOutput(out)) // swig warning
+	}
+
+	return obj + goFile, obj + gccBase + gccExt, nil
+}
+
+// disableBuildID adjusts a linker command line to avoid creating a
+// build ID when creating an object file rather than an executable or
+// shared library. Some systems, such as Ubuntu, always add
+// --build-id to every link, but we don't want a build ID when we are
+// producing an object file. On some of those system a plain -r (not
+// -Wl,-r) will turn off --build-id, but clang 3.0 doesn't support a
+// plain -r. I don't know how to turn off --build-id when using clang
+// other than passing a trailing --build-id=none. So that is what we
+// do, but only on systems likely to support it, which is to say,
+// systems that normally use gold or the GNU linker.
+func (b *builder) disableBuildID(ldflags []string) []string {
+	switch goos {
+	case "android", "dragonfly", "linux", "netbsd":
+		ldflags = append(ldflags, "-Wl,--build-id=none")
+	}
+	return ldflags
+}
+
+// An actionQueue is a priority queue of actions.
+// Lower priority values pop first.
+type actionQueue []*action
+
+// Implement heap.Interface
+func (q *actionQueue) Len() int           { return len(*q) }
+func (q *actionQueue) Swap(i, j int)      { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
+func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
+func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*action)) }
+func (q *actionQueue) Pop() interface{} {
+	n := len(*q) - 1
+	x := (*q)[n]
+	*q = (*q)[:n]
+	return x
+}
+
+// push adds a to the queue, maintaining heap order.
+func (q *actionQueue) push(a *action) {
+	heap.Push(q, a)
+}
+
+// pop removes and returns the lowest-priority action.
+func (q *actionQueue) pop() *action {
+	return heap.Pop(q).(*action)
+}
+
+// instrumentInit sets up the build for -race or -msan instrumentation:
+// it validates the flag combination and platform, then adds the
+// compiler/linker flags, install suffix, and build tag. It exits the
+// process with status 2 on invalid usage.
+func instrumentInit() {
+	if !buildRace && !buildMSan {
+		return
+	}
+	if buildRace && buildMSan {
+		// Fix: the message was missing its trailing newline,
+		// unlike the other diagnostics below.
+		fmt.Fprintf(os.Stderr, "go %s: may not use -race and -msan simultaneously\n", flag.Args()[0])
+		os.Exit(2)
+	}
+	if goarch != "amd64" || goos != "linux" && goos != "freebsd" && goos != "darwin" && goos != "windows" {
+		fmt.Fprintf(os.Stderr, "go %s: -race and -msan are only supported on linux/amd64, freebsd/amd64, darwin/amd64 and windows/amd64\n", flag.Args()[0])
+		os.Exit(2)
+	}
+	if !buildContext.CgoEnabled {
+		// Fix: this path is reached for -msan too, so don't claim
+		// only -race needs cgo.
+		fmt.Fprintf(os.Stderr, "go %s: -race and -msan require cgo; enable cgo by setting CGO_ENABLED=1\n", flag.Args()[0])
+		os.Exit(2)
+	}
+	if buildRace {
+		buildGcflags = append(buildGcflags, "-race")
+		buildLdflags = append(buildLdflags, "-race")
+	} else {
+		buildGcflags = append(buildGcflags, "-msan")
+		buildLdflags = append(buildLdflags, "-msan")
+	}
+	// Keep instrumented packages separate from normal ones, e.g.
+	// pkg/linux_amd64_race instead of pkg/linux_amd64.
+	if buildContext.InstallSuffix != "" {
+		buildContext.InstallSuffix += "_"
+	}
+
+	if buildRace {
+		buildContext.InstallSuffix += "race"
+		buildContext.BuildTags = append(buildContext.BuildTags, "race")
+	} else {
+		buildContext.InstallSuffix += "msan"
+		buildContext.BuildTags = append(buildContext.BuildTags, "msan")
+	}
+}
diff -pruN 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/internal/obj/x86/obj6.go 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/internal/obj/x86/obj6.go
--- 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/internal/obj/x86/obj6.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/cmd/internal/obj/x86/obj6.go 2016-07-18 16:24:07.000000000 +0000
@@ -0,0 +1,1440 @@
+// Inferno utils/6l/pass.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6l/pass.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package x86
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "log"
+ "math"
+)
+
+func canuse1insntls(ctxt *obj.Link) bool {
+ if isAndroid {
+ // For android, we use a disgusting hack that assumes
+ // the thread-local storage slot for g is allocated
+ // using pthread_key_create with a fixed offset
+ // (see src/runtime/cgo/gcc_android_amd64.c).
+ // This makes access to the TLS storage (for g) doable
+ // with 1 instruction.
+ return true
+ }
+
+ if ctxt.Arch.Regsize == 4 {
+ switch ctxt.Headtype {
+ case obj.Hlinux,
+ obj.Hnacl,
+ obj.Hplan9,
+ obj.Hwindows:
+ return false
+ }
+
+ return true
+ }
+
+ switch ctxt.Headtype {
+ case obj.Hplan9,
+ obj.Hwindows:
+ return false
+ case obj.Hlinux:
+ return ctxt.Flag_shared == 0
+ }
+
+ return true
+}
+
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+ // Maintain information about code generation mode.
+ if ctxt.Mode == 0 {
+ ctxt.Mode = ctxt.Arch.Regsize * 8
+ }
+ p.Mode = int8(ctxt.Mode)
+
+ switch p.As {
+ case AMODE:
+ if p.From.Type == obj.TYPE_CONST || (p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_NONE) {
+ switch int(p.From.Offset) {
+ case 16, 32, 64:
+ ctxt.Mode = int(p.From.Offset)
+ }
+ }
+ obj.Nopout(p)
+ }
+
+ // Thread-local storage references use the TLS pseudo-register.
+ // As a register, TLS refers to the thread-local storage base, and it
+ // can only be loaded into another register:
+ //
+ // MOVQ TLS, AX
+ //
+ // An offset from the thread-local storage base is written off(reg)(TLS*1).
+ // Semantically it is off(reg), but the (TLS*1) annotation marks this as
+ // indexing from the loaded TLS base. This emits a relocation so that
+ // if the linker needs to adjust the offset, it can. For example:
+ //
+ // MOVQ TLS, AX
+ // MOVQ 0(AX)(TLS*1), CX // load g into CX
+ //
+ // On systems that support direct access to the TLS memory, this
+ // pair of instructions can be reduced to a direct TLS memory reference:
+ //
+ // MOVQ 0(TLS), CX // load g into CX
+ //
+ // The 2-instruction and 1-instruction forms correspond to the two code
+ // sequences for loading a TLS variable in the local exec model given in "ELF
+ // Handling For Thread-Local Storage".
+ //
+ // We apply this rewrite on systems that support the 1-instruction form.
+ // The decision is made using only the operating system and the -shared flag,
+ // not the link mode. If some link modes on a particular operating system
+ // require the 2-instruction form, then all builds for that operating system
+ // will use the 2-instruction form, so that the link mode decision can be
+ // delayed to link time.
+ //
+ // In this way, all supported systems use identical instructions to
+ // access TLS, and they are rewritten appropriately first here in
+ // liblink and then finally using relocations in the linker.
+ //
+ // When -shared is passed, we leave the code in the 2-instruction form but
+ // assemble (and relocate) them in different ways to generate the initial
+ // exec code sequence. It's a bit of a fluke that this is possible without
+// rewriting the instructions more comprehensively, and it only works because
+ // we only support a single TLS variable (g).
+
+ if canuse1insntls(ctxt) {
+ // Reduce 2-instruction sequence to 1-instruction sequence.
+ // Sequences like
+ // MOVQ TLS, BX
+ // ... off(BX)(TLS*1) ...
+ // become
+ // NOP
+ // ... off(TLS) ...
+ //
+ // TODO(rsc): Remove the Hsolaris special case. It exists only to
+ // guarantee we are producing byte-identical binaries as before this code.
+ // But it should be unnecessary.
+ if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_REG && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 && ctxt.Headtype != obj.Hsolaris {
+ obj.Nopout(p)
+ }
+ if p.From.Type == obj.TYPE_MEM && p.From.Index == REG_TLS && REG_AX <= p.From.Reg && p.From.Reg <= REG_R15 {
+ p.From.Reg = REG_TLS
+ p.From.Scale = 0
+ p.From.Index = REG_NONE
+ }
+
+ if p.To.Type == obj.TYPE_MEM && p.To.Index == REG_TLS && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
+ p.To.Reg = REG_TLS
+ p.To.Scale = 0
+ p.To.Index = REG_NONE
+ }
+ } else {
+ // load_g_cx, below, always inserts the 1-instruction sequence. Rewrite it
+ // as the 2-instruction sequence if necessary.
+ // MOVQ 0(TLS), BX
+ // becomes
+ // MOVQ TLS, BX
+ // MOVQ 0(BX)(TLS*1), BX
+ if (p.As == AMOVQ || p.As == AMOVL) && p.From.Type == obj.TYPE_MEM && p.From.Reg == REG_TLS && p.To.Type == obj.TYPE_REG && REG_AX <= p.To.Reg && p.To.Reg <= REG_R15 {
+ q := obj.Appendp(ctxt, p)
+ q.As = p.As
+ q.From = p.From
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = p.To.Reg
+ q.From.Index = REG_TLS
+ q.From.Scale = 2 // TODO: use 1
+ q.To = p.To
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_TLS
+ p.From.Index = REG_NONE
+ p.From.Offset = 0
+ }
+ }
+
+ // TODO: Remove.
+ if ctxt.Headtype == obj.Hwindows && p.Mode == 64 || ctxt.Headtype == obj.Hplan9 {
+ if p.From.Scale == 1 && p.From.Index == REG_TLS {
+ p.From.Scale = 2
+ }
+ if p.To.Scale == 1 && p.To.Index == REG_TLS {
+ p.To.Scale = 2
+ }
+ }
+
+	// Rewrite 0 to $0 in 3rd argument to CMPPS etc.
+ // That's what the tables expect.
+ switch p.As {
+ case ACMPPD, ACMPPS, ACMPSD, ACMPSS:
+ if p.To.Type == obj.TYPE_MEM && p.To.Name == obj.NAME_NONE && p.To.Reg == REG_NONE && p.To.Index == REG_NONE && p.To.Sym == nil {
+ p.To.Type = obj.TYPE_CONST
+ }
+ }
+
+ // Rewrite CALL/JMP/RET to symbol as TYPE_BRANCH.
+ switch p.As {
+ case obj.ACALL, obj.AJMP, obj.ARET:
+ if p.To.Type == obj.TYPE_MEM && (p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC) && p.To.Sym != nil {
+ p.To.Type = obj.TYPE_BRANCH
+ }
+ }
+
+ // Rewrite MOVL/MOVQ $XXX(FP/SP) as LEAL/LEAQ.
+ if p.From.Type == obj.TYPE_ADDR && (ctxt.Arch.Thechar == '6' || p.From.Name != obj.NAME_EXTERN && p.From.Name != obj.NAME_STATIC) {
+ switch p.As {
+ case AMOVL:
+ p.As = ALEAL
+ p.From.Type = obj.TYPE_MEM
+ case AMOVQ:
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ }
+ }
+
+ if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
+ if p.From3 != nil {
+ nacladdr(ctxt, p, p.From3)
+ }
+ nacladdr(ctxt, p, &p.From)
+ nacladdr(ctxt, p, &p.To)
+ }
+
+ // Rewrite float constants to values stored in memory.
+ switch p.As {
+ // Convert AMOVSS $(0), Xx to AXORPS Xx, Xx
+ case AMOVSS:
+ if p.From.Type == obj.TYPE_FCONST {
+ if p.From.Val.(float64) == 0 {
+ if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
+ p.As = AXORPS
+ p.From = p.To
+ break
+ }
+ }
+ }
+ fallthrough
+
+ case AFMOVF,
+ AFADDF,
+ AFSUBF,
+ AFSUBRF,
+ AFMULF,
+ AFDIVF,
+ AFDIVRF,
+ AFCOMF,
+ AFCOMFP,
+ AADDSS,
+ ASUBSS,
+ AMULSS,
+ ADIVSS,
+ ACOMISS,
+ AUCOMISS:
+ if p.From.Type == obj.TYPE_FCONST {
+ f32 := float32(p.From.Val.(float64))
+ i32 := math.Float32bits(f32)
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = s
+ p.From.Sym.Local = true
+ p.From.Offset = 0
+ }
+
+ case AMOVSD:
+ // Convert AMOVSD $(0), Xx to AXORPS Xx, Xx
+ if p.From.Type == obj.TYPE_FCONST {
+ if p.From.Val.(float64) == 0 {
+ if p.To.Type == obj.TYPE_REG && REG_X0 <= p.To.Reg && p.To.Reg <= REG_X15 {
+ p.As = AXORPS
+ p.From = p.To
+ break
+ }
+ }
+ }
+ fallthrough
+
+ case AFMOVD,
+ AFADDD,
+ AFSUBD,
+ AFSUBRD,
+ AFMULD,
+ AFDIVD,
+ AFDIVRD,
+ AFCOMD,
+ AFCOMDP,
+ AADDSD,
+ ASUBSD,
+ AMULSD,
+ ADIVSD,
+ ACOMISD,
+ AUCOMISD:
+ if p.From.Type == obj.TYPE_FCONST {
+ i64 := math.Float64bits(p.From.Val.(float64))
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Sym = s
+ p.From.Sym.Local = true
+ p.From.Offset = 0
+ }
+ }
+
+ if ctxt.Flag_dynlink {
+ rewriteToUseGot(ctxt, p)
+ }
+
+ if ctxt.Flag_shared != 0 && p.Mode == 32 {
+ rewriteToPcrel(ctxt, p)
+ }
+}
+
+// Rewrite p, if necessary, to access global data via the global offset table.
+func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
+ var add, lea, mov, reg int16
+ if p.Mode == 64 {
+ add = AADDQ
+ lea = ALEAQ
+ mov = AMOVQ
+ reg = REG_R15
+ } else {
+ add = AADDL
+ lea = ALEAL
+ mov = AMOVL
+ reg = REG_CX
+ }
+
+ if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+ // ADUFFxxx $offset
+ // becomes
+ // $MOV runtime.duffxxx@GOT, $reg
+ // $ADD $offset, $reg
+ // CALL $reg
+ var sym *obj.LSym
+ if p.As == obj.ADUFFZERO {
+ sym = obj.Linklookup(ctxt, "runtime.duffzero", 0)
+ } else {
+ sym = obj.Linklookup(ctxt, "runtime.duffcopy", 0)
+ }
+ offset := p.To.Offset
+ p.As = mov
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_GOTREF
+ p.From.Sym = sym
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ p.To.Offset = 0
+ p.To.Sym = nil
+ p1 := obj.Appendp(ctxt, p)
+ p1.As = add
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = offset
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = reg
+ p2 := obj.Appendp(ctxt, p1)
+ p2.As = obj.ACALL
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = reg
+ }
+
+ // We only care about global data: NAME_EXTERN means a global
+ // symbol in the Go sense, and p.Sym.Local is true for a few
+ // internally defined symbols.
+ if p.As == lea && p.From.Type == obj.TYPE_MEM && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
+ // $LEA sym, Rx becomes $MOV $sym, Rx which will be rewritten below
+ p.As = mov
+ p.From.Type = obj.TYPE_ADDR
+ }
+ if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
+ // $MOV $sym, Rx becomes $MOV sym@GOT, Rx
+ // $MOV $sym+, Rx becomes $MOV sym@GOT, Rx; $ADD , Rx
+ // On 386 only, more complicated things like PUSHL $sym become $MOV sym@GOT, CX; PUSHL CX
+ cmplxdest := false
+ pAs := p.As
+ var dest obj.Addr
+ if p.To.Type != obj.TYPE_REG || pAs != mov {
+ if p.Mode == 64 {
+ ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
+ }
+ cmplxdest = true
+ dest = p.To
+ p.As = mov
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_CX
+ p.To.Sym = nil
+ p.To.Name = obj.NAME_NONE
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_GOTREF
+ q := p
+ if p.From.Offset != 0 {
+ q = obj.Appendp(ctxt, p)
+ q.As = add
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = p.From.Offset
+ q.To = p.To
+ p.From.Offset = 0
+ }
+ if cmplxdest {
+ q = obj.Appendp(ctxt, q)
+ q.As = pAs
+ q.To = dest
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_CX
+ }
+ }
+ if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
+ ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ var source *obj.Addr
+ // MOVx sym, Ry becomes $MOV sym@GOT, R15; MOVx (R15), Ry
+ // MOVx Ry, sym becomes $MOV sym@GOT, R15; MOVx Ry, (R15)
+ // An addition may be inserted between the two MOVs if there is an offset.
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
+ if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
+ ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
+ }
+ source = &p.From
+ } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
+ source = &p.To
+ } else {
+ return
+ }
+ if p.As == obj.ACALL {
+ // When dynlinking on 386, almost any call might end up being a call
+ // to a PLT, so make sure the GOT pointer is loaded into BX.
+ // RegTo2 is set on the replacement call insn to stop it being
+ // processed when it is in turn passed to progedit.
+ if p.Mode == 64 || (p.To.Sym != nil && p.To.Sym.Local) || p.RegTo2 != 0 {
+ return
+ }
+ p1 := obj.Appendp(ctxt, p)
+ p2 := obj.Appendp(ctxt, p1)
+
+ p1.As = ALEAL
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Name = obj.NAME_STATIC
+ p1.From.Sym = obj.Linklookup(ctxt, "_GLOBAL_OFFSET_TABLE_", 0)
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = REG_BX
+
+ p2.As = p.As
+ p2.Scond = p.Scond
+ p2.From = p.From
+ p2.From3 = p.From3
+ p2.Reg = p.Reg
+ p2.To = p.To
+ // p.To.Type was set to TYPE_BRANCH above, but that makes checkaddr
+ // in ../pass.go complain, so set it back to TYPE_MEM here, until p2
+ // itself gets passed to progedit.
+ p2.To.Type = obj.TYPE_MEM
+ p2.RegTo2 = 1
+
+ obj.Nopout(p)
+ return
+
+ }
+ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ARET || p.As == obj.AJMP {
+ return
+ }
+ if source.Type != obj.TYPE_MEM {
+ ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ p1 := obj.Appendp(ctxt, p)
+ p2 := obj.Appendp(ctxt, p1)
+
+ p1.As = mov
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Sym = source.Sym
+ p1.From.Name = obj.NAME_GOTREF
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = reg
+
+ p2.As = p.As
+ p2.From = p.From
+ p2.To = p.To
+ if p.From.Name == obj.NAME_EXTERN {
+ p2.From.Reg = reg
+ p2.From.Name = obj.NAME_NONE
+ p2.From.Sym = nil
+ } else if p.To.Name == obj.NAME_EXTERN {
+ p2.To.Reg = reg
+ p2.To.Name = obj.NAME_NONE
+ p2.To.Sym = nil
+ } else {
+ return
+ }
+ obj.Nopout(p)
+}
+
+func rewriteToPcrel(ctxt *obj.Link, p *obj.Prog) {
+ // RegTo2 is set on the instructions we insert here so they don't get
+ // processed twice.
+ if p.RegTo2 != 0 {
+ return
+ }
+ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+ return
+ }
+ // Any Prog (aside from the above special cases) with an Addr with Name ==
+ // NAME_EXTERN, NAME_STATIC or NAME_GOTREF has a CALL __x86.get_pc_thunk.cx
+ // inserted before it.
+ isName := func(a *obj.Addr) bool {
+ if a.Sym == nil || (a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR) || a.Reg != 0 {
+ return false
+ }
+ if a.Sym.Type == obj.STLSBSS {
+ return false
+ }
+ return a.Name == obj.NAME_EXTERN || a.Name == obj.NAME_STATIC || a.Name == obj.NAME_GOTREF
+ }
+
+ if isName(&p.From) && p.From.Type == obj.TYPE_ADDR {
+ // Handle things like "MOVL $sym, (SP)" or "PUSHL $sym" by rewriting
+ // to "MOVL $sym, CX; MOVL CX, (SP)" or "MOVL $sym, CX; PUSHL CX"
+ // respectively.
+ if p.To.Type != obj.TYPE_REG {
+ q := obj.Appendp(ctxt, p)
+ q.As = p.As
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_CX
+ q.To = p.To
+ p.As = AMOVL
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_CX
+ p.To.Sym = nil
+ p.To.Name = obj.NAME_NONE
+ }
+ }
+
+ if !isName(&p.From) && !isName(&p.To) && (p.From3 == nil || !isName(p.From3)) {
+ return
+ }
+ q := obj.Appendp(ctxt, p)
+ q.RegTo2 = 1
+ r := obj.Appendp(ctxt, q)
+ r.RegTo2 = 1
+ q.As = obj.ACALL
+ q.To.Sym = obj.Linklookup(ctxt, "__x86.get_pc_thunk.cx", 0)
+ q.To.Type = obj.TYPE_MEM
+ q.To.Name = obj.NAME_EXTERN
+ q.To.Sym.Local = true
+ r.As = p.As
+ r.Scond = p.Scond
+ r.From = p.From
+ r.From3 = p.From3
+ r.Reg = p.Reg
+ r.To = p.To
+ obj.Nopout(p)
+}
+
+func nacladdr(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ if p.As == ALEAL || p.As == ALEAQ {
+ return
+ }
+
+ if a.Reg == REG_BP {
+ ctxt.Diag("invalid address: %v", p)
+ return
+ }
+
+ if a.Reg == REG_TLS {
+ a.Reg = REG_BP
+ }
+ if a.Type == obj.TYPE_MEM && a.Name == obj.NAME_NONE {
+ switch a.Reg {
+ // all ok
+ case REG_BP, REG_SP, REG_R15:
+ break
+
+ default:
+ if a.Index != REG_NONE {
+ ctxt.Diag("invalid address %v", p)
+ }
+ a.Index = a.Reg
+ if a.Index != REG_NONE {
+ a.Scale = 1
+ }
+ a.Reg = REG_R15
+ }
+ }
+}
+
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+ if ctxt.Headtype == obj.Hplan9 && ctxt.Plan9privates == nil {
+ ctxt.Plan9privates = obj.Linklookup(ctxt, "_privates", 0)
+ }
+
+ ctxt.Cursym = cursym
+
+ if cursym.Text == nil || cursym.Text.Link == nil {
+ return
+ }
+
+ p := cursym.Text
+ autoffset := int32(p.To.Offset)
+ if autoffset < 0 {
+ autoffset = 0
+ }
+
+ var bpsize int
+ if p.Mode == 64 && obj.Framepointer_enabled != 0 && autoffset > 0 {
+		// Make room to save a base pointer. If autoffset == 0,
+ // this might do something special like a tail jump to
+ // another function, so in that case we omit this.
+ bpsize = ctxt.Arch.Ptrsize
+
+ autoffset += int32(bpsize)
+ p.To.Offset += int64(bpsize)
+ } else {
+ bpsize = 0
+ }
+
+ textarg := int64(p.To.Val.(int32))
+ cursym.Args = int32(textarg)
+ cursym.Locals = int32(p.To.Offset)
+
+ // TODO(rsc): Remove.
+ if p.Mode == 32 && cursym.Locals < 0 {
+ cursym.Locals = 0
+ }
+
+ // TODO(rsc): Remove 'p.Mode == 64 &&'.
+ if p.Mode == 64 && autoffset < obj.StackSmall && p.From3Offset()&obj.NOSPLIT == 0 {
+ for q := p; q != nil; q = q.Link {
+ if q.As == obj.ACALL {
+ goto noleaf
+ }
+ if (q.As == obj.ADUFFCOPY || q.As == obj.ADUFFZERO) && autoffset >= obj.StackSmall-8 {
+ goto noleaf
+ }
+ }
+
+ p.From3.Offset |= obj.NOSPLIT
+ noleaf:
+ }
+
+ if p.From3Offset()&obj.NOSPLIT == 0 || p.From3Offset()&obj.WRAPPER != 0 {
+ p = obj.Appendp(ctxt, p)
+ p = load_g_cx(ctxt, p) // load g into CX
+ }
+
+ if cursym.Text.From3Offset()&obj.NOSPLIT == 0 {
+ p = stacksplit(ctxt, p, autoffset, int32(textarg)) // emit split check
+ }
+
+ if autoffset != 0 {
+ if autoffset%int32(ctxt.Arch.Regsize) != 0 {
+ ctxt.Diag("unaligned stack size %d", autoffset)
+ }
+ p = obj.Appendp(ctxt, p)
+ p.As = AADJSP
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autoffset)
+ p.Spadj = autoffset
+ } else {
+ // zero-byte stack adjustment.
+ // Insert a fake non-zero adjustment so that stkcheck can
+ // recognize the end of the stack-splitting prolog.
+ p = obj.Appendp(ctxt, p)
+
+ p.As = obj.ANOP
+ p.Spadj = int32(-ctxt.Arch.Ptrsize)
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ANOP
+ p.Spadj = int32(ctxt.Arch.Ptrsize)
+ }
+
+ deltasp := autoffset
+
+ if bpsize > 0 {
+ // Save caller's BP
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_BP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_SP
+ p.To.Scale = 1
+ p.To.Offset = int64(autoffset) - int64(bpsize)
+
+ // Move current frame to BP
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Scale = 1
+ p.From.Offset = int64(autoffset) - int64(bpsize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BP
+ }
+
+ if cursym.Text.From3Offset()&obj.WRAPPER != 0 {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVQ g_panic(CX), BX
+ // TESTQ BX, BX
+ // JEQ end
+ // LEAQ (autoffset+8)(SP), DI
+ // CMPQ panic_argp(BX), DI
+ // JNE end
+ // MOVQ SP, panic_argp(BX)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not an x86 NOP: it encodes to 0 instruction bytes.
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_CX
+ p.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BX
+ if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
+ p.As = AMOVL
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_R15
+ p.From.Scale = 1
+ p.From.Index = REG_CX
+ }
+ if p.Mode == 32 {
+ p.As = AMOVL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ATESTQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_BX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BX
+ if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
+ p.As = ATESTL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJEQ
+ p.To.Type = obj.TYPE_BRANCH
+ p1 := p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ALEAQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = int64(autoffset) + int64(ctxt.Arch.Regsize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_DI
+ if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
+ p.As = ALEAL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ACMPQ
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_BX
+ p.From.Offset = 0 // Panic.argp
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_DI
+ if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
+ p.As = ACMPL
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_R15
+ p.From.Scale = 1
+ p.From.Index = REG_BX
+ }
+ if p.Mode == 32 {
+ p.As = ACMPL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJNE
+ p.To.Type = obj.TYPE_BRANCH
+ p2 := p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVQ
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_BX
+ p.To.Offset = 0 // Panic.argp
+ if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
+ p.As = AMOVL
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REG_R15
+ p.To.Scale = 1
+ p.To.Index = REG_BX
+ }
+ if p.Mode == 32 {
+ p.As = AMOVL
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ANOP
+ p1.Pcond = p
+ p2.Pcond = p
+ }
+
+ var a int
+ var pcsize int
+ for ; p != nil; p = p.Link {
+ pcsize = int(p.Mode) / 8
+ a = int(p.From.Name)
+ if a == obj.NAME_AUTO {
+ p.From.Offset += int64(deltasp) - int64(bpsize)
+ }
+ if a == obj.NAME_PARAM {
+ p.From.Offset += int64(deltasp) + int64(pcsize)
+ }
+ if p.From3 != nil {
+ a = int(p.From3.Name)
+ if a == obj.NAME_AUTO {
+ p.From3.Offset += int64(deltasp) - int64(bpsize)
+ }
+ if a == obj.NAME_PARAM {
+ p.From3.Offset += int64(deltasp) + int64(pcsize)
+ }
+ }
+ a = int(p.To.Name)
+ if a == obj.NAME_AUTO {
+ p.To.Offset += int64(deltasp) - int64(bpsize)
+ }
+ if a == obj.NAME_PARAM {
+ p.To.Offset += int64(deltasp) + int64(pcsize)
+ }
+
+ switch p.As {
+ default:
+ continue
+
+ case APUSHL, APUSHFL:
+ deltasp += 4
+ p.Spadj = 4
+ continue
+
+ case APUSHQ, APUSHFQ:
+ deltasp += 8
+ p.Spadj = 8
+ continue
+
+ case APUSHW, APUSHFW:
+ deltasp += 2
+ p.Spadj = 2
+ continue
+
+ case APOPL, APOPFL:
+ deltasp -= 4
+ p.Spadj = -4
+ continue
+
+ case APOPQ, APOPFQ:
+ deltasp -= 8
+ p.Spadj = -8
+ continue
+
+ case APOPW, APOPFW:
+ deltasp -= 2
+ p.Spadj = -2
+ continue
+
+ case obj.ARET:
+ break
+ }
+
+ if autoffset != deltasp {
+ ctxt.Diag("unbalanced PUSH/POP")
+ }
+
+ if autoffset != 0 {
+ if bpsize > 0 {
+ // Restore caller's BP
+ p.As = AMOVQ
+
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Scale = 1
+ p.From.Offset = int64(autoffset) - int64(bpsize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_BP
+ p = obj.Appendp(ctxt, p)
+ }
+
+ p.As = AADJSP
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-autoffset)
+ p.Spadj = -autoffset
+ p = obj.Appendp(ctxt, p)
+ p.As = obj.ARET
+
+ // If there are instructions following
+ // this ARET, they come from a branch
+ // with the same stackframe, so undo
+ // the cleanup.
+ p.Spadj = +autoffset
+ }
+
+ if p.To.Sym != nil { // retjmp
+ p.As = obj.AJMP
+ }
+ }
+}
+
+func indir_cx(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) {
+ if ctxt.Headtype == obj.Hnacl && p.Mode == 64 {
+ a.Type = obj.TYPE_MEM
+ a.Reg = REG_R15
+ a.Index = REG_CX
+ a.Scale = 1
+ return
+ }
+
+ a.Type = obj.TYPE_MEM
+ a.Reg = REG_CX
+}
+
+// Append code to p to load g into cx.
+// Overwrites p with the first instruction (no first appendp).
+// Overwriting p is unusual but it lets us use this in both the
+// prologue (caller must call appendp first) and in the epilogue.
+// Returns last new instruction.
+func load_g_cx(ctxt *obj.Link, p *obj.Prog) *obj.Prog {
+ p.As = AMOVQ
+ if ctxt.Arch.Ptrsize == 4 {
+ p.As = AMOVL
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_TLS
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_CX
+
+ next := p.Link
+ progedit(ctxt, p)
+ for p.Link != next {
+ p = p.Link
+ }
+
+ if p.From.Index == REG_TLS {
+ p.From.Scale = 2
+ }
+
+ return p
+}
+
+// Append code to p to check for stack split.
+// Appends to (does not overwrite) p.
+// Assumes g is in CX.
+// Returns last new instruction.
+func stacksplit(ctxt *obj.Link, p *obj.Prog, framesize int32, textarg int32) *obj.Prog {
+ cmp := ACMPQ
+ lea := ALEAQ
+ mov := AMOVQ
+ sub := ASUBQ
+
+ if ctxt.Headtype == obj.Hnacl || p.Mode == 32 {
+ cmp = ACMPL
+ lea = ALEAL
+ mov = AMOVL
+ sub = ASUBL
+ }
+
+ var q1 *obj.Prog
+ if framesize <= obj.StackSmall {
+ // small stack: SP <= stackguard
+ // CMPQ SP, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SP
+ indir_cx(ctxt, p, &p.To)
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else if framesize <= obj.StackBig {
+ // large stack: SP-framesize <= stackguard-StackSmall
+ // LEAQ -xxx(SP), AX
+ // CMPQ AX, stackguard
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(lea)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = -(int64(framesize) - obj.StackSmall)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_AX
+ indir_cx(ctxt, p, &p.To)
+ p.To.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ } else {
+ // Such a large stack we need to protect against wraparound.
+ // If SP is close to zero:
+ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+ // The +StackGuard on both sides is required to keep the left side positive:
+ // SP is allowed to be slightly below stackguard. See stack.h.
+ //
+ // Preemption sets stackguard to StackPreempt, a very large value.
+ // That breaks the math above, so we have to check for that explicitly.
+ // MOVQ stackguard, CX
+ // CMPQ CX, $StackPreempt
+ // JEQ label-of-call-to-morestack
+ // LEAQ StackGuard(SP), AX
+ // SUBQ CX, AX
+ // CMPQ AX, $(framesize+(StackGuard-StackSmall))
+
+ p = obj.Appendp(ctxt, p)
+
+ p.As = int16(mov)
+ indir_cx(ctxt, p, &p.From)
+ p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_SI
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SI
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = obj.StackPreempt
+ if p.Mode == 32 {
+ p.To.Offset = int64(uint32(obj.StackPreempt & (1<<32 - 1)))
+ }
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AJEQ
+ p.To.Type = obj.TYPE_BRANCH
+ q1 = p
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(lea)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REG_SP
+ p.From.Offset = obj.StackGuard
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(sub)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_SI
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_AX
+
+ p = obj.Appendp(ctxt, p)
+ p.As = int16(cmp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_AX
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(framesize) + (obj.StackGuard - obj.StackSmall)
+ }
+
+ // common
+ jls := obj.Appendp(ctxt, p)
+ jls.As = AJLS
+ jls.To.Type = obj.TYPE_BRANCH
+
+ var last *obj.Prog
+ for last = ctxt.Cursym.Text; last.Link != nil; last = last.Link {
+ }
+
+ spfix := obj.Appendp(ctxt, last)
+ spfix.As = obj.ANOP
+ spfix.Spadj = -framesize
+
+ call := obj.Appendp(ctxt, spfix)
+ call.Lineno = ctxt.Cursym.Text.Lineno
+ call.Mode = ctxt.Cursym.Text.Mode
+ call.As = obj.ACALL
+ call.To.Type = obj.TYPE_BRANCH
+ morestack := "runtime.morestack"
+ switch {
+ case ctxt.Cursym.Cfunc != 0:
+ morestack = "runtime.morestackc"
+ case ctxt.Cursym.Text.From3Offset()&obj.NEEDCTXT == 0:
+ morestack = "runtime.morestack_noctxt"
+ }
+ call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
+
+ jmp := obj.Appendp(ctxt, call)
+ jmp.As = obj.AJMP
+ jmp.To.Type = obj.TYPE_BRANCH
+ jmp.Pcond = ctxt.Cursym.Text.Link
+ jmp.Spadj = +framesize
+
+ jls.Pcond = call
+ if q1 != nil {
+ q1.Pcond = call
+ }
+
+ return jls
+}
+
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ ctxt.Cursym = s
+
+ firstp := ctxt.NewProg()
+ lastp := firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+func nofollow(a int) bool {
+ switch a {
+ case obj.AJMP,
+ obj.ARET,
+ AIRETL,
+ AIRETQ,
+ AIRETW,
+ ARETFL,
+ ARETFQ,
+ ARETFW,
+ obj.AUNDEF:
+ return true
+ }
+
+ return false
+}
+
+func pushpop(a int) bool {
+ switch a {
+ case APUSHL,
+ APUSHFL,
+ APUSHQ,
+ APUSHFQ,
+ APUSHW,
+ APUSHFW,
+ APOPL,
+ APOPFL,
+ APOPQ,
+ APOPFQ,
+ APOPW,
+ APOPFW:
+ return true
+ }
+
+ return false
+}
+
+func relinv(a int16) int16 {
+ switch a {
+ case AJEQ:
+ return AJNE
+ case AJNE:
+ return AJEQ
+ case AJLE:
+ return AJGT
+ case AJLS:
+ return AJHI
+ case AJLT:
+ return AJGE
+ case AJMI:
+ return AJPL
+ case AJGE:
+ return AJLT
+ case AJPL:
+ return AJMI
+ case AJGT:
+ return AJLE
+ case AJHI:
+ return AJLS
+ case AJCS:
+ return AJCC
+ case AJCC:
+ return AJCS
+ case AJPS:
+ return AJPC
+ case AJPC:
+ return AJPS
+ case AJOS:
+ return AJOC
+ case AJOC:
+ return AJOS
+ }
+
+ log.Fatalf("unknown relation: %s", obj.Aconv(int(a)))
+ return 0
+}
+
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+ var q *obj.Prog
+ var i int
+ var a int
+
+loop:
+ if p == nil {
+ return
+ }
+ if p.As == obj.AJMP {
+ q = p.Pcond
+ if q != nil && q.As != obj.ATEXT {
+ /* mark instruction as done and continue layout at target of jump */
+ p.Mark = 1
+
+ p = q
+ if p.Mark == 0 {
+ goto loop
+ }
+ }
+ }
+
+ if p.Mark != 0 {
+ /*
+ * p goes here, but already used it elsewhere.
+ * copy up to 4 instructions or else branch to other copy.
+ */
+ i = 0
+ q = p
+ for ; i < 4; i, q = i+1, q.Link {
+ if q == nil {
+ break
+ }
+ if q == *last {
+ break
+ }
+ a = int(q.As)
+ if a == obj.ANOP {
+ i--
+ continue
+ }
+
+ if nofollow(a) || pushpop(a) {
+ break // NOTE(rsc): arm does goto copy
+ }
+ if q.Pcond == nil || q.Pcond.Mark != 0 {
+ continue
+ }
+ if a == obj.ACALL || a == ALOOP {
+ continue
+ }
+ for {
+ if p.As == obj.ANOP {
+ p = p.Link
+ continue
+ }
+
+ q = obj.Copyp(ctxt, p)
+ p = p.Link
+ q.Mark = 1
+ (*last).Link = q
+ *last = q
+ if int(q.As) != a || q.Pcond == nil || q.Pcond.Mark != 0 {
+ continue
+ }
+
+ q.As = relinv(q.As)
+ p = q.Pcond
+ q.Pcond = q.Link
+ q.Link = p
+ xfol(ctxt, q.Link, last)
+ p = q.Link
+ if p.Mark != 0 {
+ return
+ }
+ goto loop
+ /* */
+ }
+ }
+ q = ctxt.NewProg()
+ q.As = obj.AJMP
+ q.Lineno = p.Lineno
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Offset = p.Pc
+ q.Pcond = p
+ p = q
+ }
+
+ /* emit p */
+ p.Mark = 1
+
+ (*last).Link = p
+ *last = p
+ a = int(p.As)
+
+ /* continue loop with what comes after p */
+ if nofollow(a) {
+ return
+ }
+ if p.Pcond != nil && a != obj.ACALL {
+ /*
+ * some kind of conditional branch.
+ * recurse to follow one path.
+ * continue loop on the other.
+ */
+ q = obj.Brchain(ctxt, p.Pcond)
+ if q != nil {
+ p.Pcond = q
+ }
+ q = obj.Brchain(ctxt, p.Link)
+ if q != nil {
+ p.Link = q
+ }
+ if p.From.Type == obj.TYPE_CONST {
+ if p.From.Offset == 1 {
+ /*
+ * expect conditional jump to be taken.
+ * rewrite so that's the fall-through case.
+ */
+ p.As = relinv(int16(a))
+
+ q = p.Link
+ p.Link = p.Pcond
+ p.Pcond = q
+ }
+ } else {
+ q = p.Link
+ if q.Mark != 0 {
+ if a != ALOOP {
+ p.As = relinv(int16(a))
+ p.Link = p.Pcond
+ p.Pcond = q
+ }
+ }
+ }
+
+ xfol(ctxt, p.Link, last)
+ if p.Pcond.Mark != 0 {
+ return
+ }
+ p = p.Pcond
+ goto loop
+ }
+
+ p = p.Link
+ goto loop
+}
+
+var unaryDst = map[int]bool{
+ ABSWAPL: true,
+ ABSWAPQ: true,
+ ACMPXCHG8B: true,
+ ADECB: true,
+ ADECL: true,
+ ADECQ: true,
+ ADECW: true,
+ AINCB: true,
+ AINCL: true,
+ AINCQ: true,
+ AINCW: true,
+ ANEGB: true,
+ ANEGL: true,
+ ANEGQ: true,
+ ANEGW: true,
+ ANOTB: true,
+ ANOTL: true,
+ ANOTQ: true,
+ ANOTW: true,
+ APOPL: true,
+ APOPQ: true,
+ APOPW: true,
+ ASETCC: true,
+ ASETCS: true,
+ ASETEQ: true,
+ ASETGE: true,
+ ASETGT: true,
+ ASETHI: true,
+ ASETLE: true,
+ ASETLS: true,
+ ASETLT: true,
+ ASETMI: true,
+ ASETNE: true,
+ ASETOC: true,
+ ASETOS: true,
+ ASETPC: true,
+ ASETPL: true,
+ ASETPS: true,
+ AFFREE: true,
+ AFLDENV: true,
+ AFSAVE: true,
+ AFSTCW: true,
+ AFSTENV: true,
+ AFSTSW: true,
+ AFXSAVE: true,
+ AFXSAVE64: true,
+ ASTMXCSR: true,
+}
+
+var Linkamd64 = obj.LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "amd64",
+ Thechar: '6',
+ Preprocess: preprocess,
+ Assemble: span6,
+ Follow: follow,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ Minlc: 1,
+ Ptrsize: 8,
+ Regsize: 8,
+}
+
+var Linkamd64p32 = obj.LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "amd64p32",
+ Thechar: '6',
+ Preprocess: preprocess,
+ Assemble: span6,
+ Follow: follow,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ Minlc: 1,
+ Ptrsize: 4,
+ Regsize: 8,
+}
+
+var Link386 = obj.LinkArch{
+ ByteOrder: binary.LittleEndian,
+ Name: "386",
+ Thechar: '8',
+ Preprocess: preprocess,
+ Assemble: span6,
+ Follow: follow,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ Minlc: 1,
+ Ptrsize: 4,
+ Regsize: 4,
+}
diff -pruN 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/runtime/asm_386.s 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/runtime/asm_386.s
--- 1.6.3-1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/runtime/asm_386.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/.pc/0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch/src/runtime/asm_386.s 2016-07-18 16:24:08.000000000 +0000
@@ -0,0 +1,1612 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+TEXT runtime·rt0_go(SB),NOSPLIT,$0
+ // copy arguments forward on an even stack
+ MOVL argc+0(FP), AX
+ MOVL argv+4(FP), BX
+ SUBL $128, SP // plenty of scratch
+ ANDL $~15, SP
+ MOVL AX, 120(SP) // save argc, argv away
+ MOVL BX, 124(SP)
+
+ // set default stack bounds.
+ // _cgo_init may update stackguard.
+ MOVL $runtime·g0(SB), BP
+ LEAL (-64*1024+104)(SP), BX
+ MOVL BX, g_stackguard0(BP)
+ MOVL BX, g_stackguard1(BP)
+ MOVL BX, (g_stack+stack_lo)(BP)
+ MOVL SP, (g_stack+stack_hi)(BP)
+
+ // find out information about the processor we're on
+#ifdef GOOS_nacl // NaCl doesn't like PUSHFL/POPFL
+ JMP has_cpuid
+#else
+ // first see if CPUID instruction is supported.
+ PUSHFL
+ PUSHFL
+ XORL $(1<<21), 0(SP) // flip ID bit
+ POPFL
+ PUSHFL
+ POPL AX
+ XORL 0(SP), AX
+ POPFL // restore EFLAGS
+ TESTL $(1<<21), AX
+ JNE has_cpuid
+#endif
+
+bad_proc: // show that the program requires MMX.
+ MOVL $2, 0(SP)
+ MOVL $bad_proc_msg<>(SB), 4(SP)
+ MOVL $0x3d, 8(SP)
+ CALL runtime·write(SB)
+ MOVL $1, 0(SP)
+ CALL runtime·exit(SB)
+ INT $3
+
+has_cpuid:
+ MOVL $0, AX
+ CPUID
+ CMPL AX, $0
+ JE nocpuinfo
+
+ // Figure out how to serialize RDTSC.
+ // On Intel processors LFENCE is enough. AMD requires MFENCE.
+ // Don't know about the rest, so let's do MFENCE.
+ CMPL BX, $0x756E6547 // "Genu"
+ JNE notintel
+ CMPL DX, $0x49656E69 // "ineI"
+ JNE notintel
+ CMPL CX, $0x6C65746E // "ntel"
+ JNE notintel
+ MOVB $1, runtime·lfenceBeforeRdtsc(SB)
+notintel:
+
+ MOVL $1, AX
+ CPUID
+ MOVL CX, AX // Move to global variable clobbers CX when generating PIC
+ MOVL AX, runtime·cpuid_ecx(SB)
+ MOVL DX, runtime·cpuid_edx(SB)
+
+ // Check for MMX support
+ TESTL $(1<<23), DX // MMX
+ JZ bad_proc
+
+nocpuinfo:
+
+ // if there is an _cgo_init, call it to let it
+ // initialize and to set up GS. if not,
+ // we set up GS ourselves.
+ MOVL _cgo_init(SB), AX
+ TESTL AX, AX
+ JZ needtls
+ MOVL $setg_gcc<>(SB), BX
+ MOVL BX, 4(SP)
+ MOVL BP, 0(SP)
+ CALL AX
+
+ // update stackguard after _cgo_init
+ MOVL $runtime·g0(SB), CX
+ MOVL (g_stack+stack_lo)(CX), AX
+ ADDL $const__StackGuard, AX
+ MOVL AX, g_stackguard0(CX)
+ MOVL AX, g_stackguard1(CX)
+
+#ifndef GOOS_windows
+ // skip runtime·ldt0setup(SB) and tls test after _cgo_init for non-windows
+ JMP ok
+#endif
+needtls:
+#ifdef GOOS_plan9
+ // skip runtime·ldt0setup(SB) and tls test on Plan 9 in all cases
+ JMP ok
+#endif
+
+ // set up %gs
+ CALL runtime·ldt0setup(SB)
+
+ // store through it, to make sure it works
+ get_tls(BX)
+ MOVL $0x123, g(BX)
+ MOVL runtime·m0+m_tls(SB), AX
+ CMPL AX, $0x123
+ JEQ ok
+ MOVL AX, 0 // abort
+ok:
+ // set up m and g "registers"
+ get_tls(BX)
+ LEAL runtime·g0(SB), DX
+ MOVL DX, g(BX)
+ LEAL runtime·m0(SB), AX
+
+ // save m->g0 = g0
+ MOVL DX, m_g0(AX)
+ // save g0->m = m0
+ MOVL AX, g_m(DX)
+
+ CALL runtime·emptyfunc(SB) // fault if stack check is wrong
+
+ // convention is D is always cleared
+ CLD
+
+ CALL runtime·check(SB)
+
+ // saved argc, argv
+ MOVL 120(SP), AX
+ MOVL AX, 0(SP)
+ MOVL 124(SP), AX
+ MOVL AX, 4(SP)
+ CALL runtime·args(SB)
+ CALL runtime·osinit(SB)
+ CALL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ PUSHL $runtime·mainPC(SB) // entry
+ PUSHL $0 // arg size
+ CALL runtime·newproc(SB)
+ POPL AX
+ POPL AX
+
+ // start this M
+ CALL runtime·mstart(SB)
+
+ INT $3
+ RET
+
+DATA bad_proc_msg<>+0x00(SB)/8, $"This pro"
+DATA bad_proc_msg<>+0x08(SB)/8, $"gram can"
+DATA bad_proc_msg<>+0x10(SB)/8, $" only be"
+DATA bad_proc_msg<>+0x18(SB)/8, $" run on "
+DATA bad_proc_msg<>+0x20(SB)/8, $"processe"
+DATA bad_proc_msg<>+0x28(SB)/8, $"rs with "
+DATA bad_proc_msg<>+0x30(SB)/8, $"MMX supp"
+DATA bad_proc_msg<>+0x38(SB)/4, $"ort."
+DATA bad_proc_msg<>+0x3c(SB)/1, $0xa
+GLOBL bad_proc_msg<>(SB), RODATA, $0x3d
+
+DATA runtime·mainPC+0(SB)/4,$runtime·main(SB)
+GLOBL runtime·mainPC(SB),RODATA,$4
+
+TEXT runtime·breakpoint(SB),NOSPLIT,$0-0
+ INT $3
+ RET
+
+TEXT runtime·asminit(SB),NOSPLIT,$0-0
+ // Linux and MinGW start the FPU in extended double precision.
+ // Other operating systems use double precision.
+ // Change to double precision to match them,
+ // and to match other hardware that only has double.
+ PUSHL $0x27F
+ FLDCW 0(SP)
+ POPL AX
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), NOSPLIT, $0-4
+ MOVL buf+0(FP), AX // gobuf
+ LEAL buf+0(FP), BX // caller's SP
+ MOVL BX, gobuf_sp(AX)
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, gobuf_pc(AX)
+ MOVL $0, gobuf_ret(AX)
+ MOVL $0, gobuf_ctxt(AX)
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL BX, gobuf_g(AX)
+ RET
+
+// void gogo(Gobuf*)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT, $0-4
+ MOVL buf+0(FP), BX // gobuf
+ MOVL gobuf_g(BX), DX
+ MOVL 0(DX), CX // make sure g != nil
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL gobuf_sp(BX), SP // restore SP
+ MOVL gobuf_ret(BX), AX
+ MOVL gobuf_ctxt(BX), DX
+ MOVL $0, gobuf_sp(BX) // clear to help garbage collector
+ MOVL $0, gobuf_ret(BX)
+ MOVL $0, gobuf_ctxt(BX)
+ MOVL gobuf_pc(BX), BX
+ JMP BX
+
+// func mcall(fn func(*g))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), NOSPLIT, $0-4
+ MOVL fn+0(FP), DI
+
+ get_tls(DX)
+ MOVL g(DX), AX // save state in g->sched
+ MOVL 0(SP), BX // caller's PC
+ MOVL BX, (g_sched+gobuf_pc)(AX)
+ LEAL fn+0(FP), BX // caller's SP
+ MOVL BX, (g_sched+gobuf_sp)(AX)
+ MOVL AX, (g_sched+gobuf_g)(AX)
+
+ // switch to m->g0 & its stack, call fn
+ MOVL g(DX), BX
+ MOVL g_m(BX), BX
+ MOVL m_g0(BX), SI
+ CMPL SI, AX // if g == m->g0 call badmcall
+ JNE 3(PC)
+ MOVL $runtime·badmcall(SB), AX
+ JMP AX
+ MOVL SI, g(DX) // g = m->g0
+ MOVL (g_sched+gobuf_sp)(SI), SP // sp = m->g0->sched.sp
+ PUSHL AX
+ MOVL DI, DX
+ MOVL 0(DI), DI
+ CALL DI
+ POPL AX
+ MOVL $runtime·badmcall2(SB), AX
+ JMP AX
+ RET
+
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
+// of the G stack. We need to distinguish the routine that
+// lives at the bottom of the G stack from the one that lives
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
+ RET
+
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-4
+ MOVL fn+0(FP), DI // DI = fn
+ get_tls(CX)
+ MOVL g(CX), AX // AX = g
+ MOVL g_m(AX), BX // BX = m
+
+ MOVL m_gsignal(BX), DX // DX = gsignal
+ CMPL AX, DX
+ JEQ noswitch
+
+ MOVL m_g0(BX), DX // DX = g0
+ CMPL AX, DX
+ JEQ noswitch
+
+ MOVL m_curg(BX), BP
+ CMPL AX, BP
+ JEQ switch
+
+ // Bad: g is not gsignal, not g0, not curg. What is it?
+ // Hide call from linker nosplit analysis.
+ MOVL $runtime·badsystemstack(SB), AX
+ CALL AX
+
+switch:
+ // save our state in g->sched. Pretend to
+ // be systemstack_switch if the G stack is scanned.
+ MOVL $runtime·systemstack_switch(SB), (g_sched+gobuf_pc)(AX)
+ MOVL SP, (g_sched+gobuf_sp)(AX)
+ MOVL AX, (g_sched+gobuf_g)(AX)
+
+ // switch to g0
+ get_tls(CX)
+ MOVL DX, g(CX)
+ MOVL (g_sched+gobuf_sp)(DX), BX
+ // make it look like mstart called systemstack on g0, to stop traceback
+ SUBL $4, BX
+ MOVL $runtime·mstart(SB), DX
+ MOVL DX, 0(BX)
+ MOVL BX, SP
+
+ // call target function
+ MOVL DI, DX
+ MOVL 0(DI), DI
+ CALL DI
+
+ // switch back to g
+ get_tls(CX)
+ MOVL g(CX), AX
+ MOVL g_m(AX), BX
+ MOVL m_curg(BX), AX
+ MOVL AX, g(CX)
+ MOVL (g_sched+gobuf_sp)(AX), SP
+ MOVL $0, (g_sched+gobuf_sp)(AX)
+ RET
+
+noswitch:
+ // already on system stack, just call directly
+ MOVL DI, DX
+ MOVL 0(DI), DI
+ CALL DI
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT,$0-0
+ // Cannot grow scheduler stack (m->g0).
+ get_tls(CX)
+ MOVL g(CX), BX
+ MOVL g_m(BX), BX
+ MOVL m_g0(BX), SI
+ CMPL g(CX), SI
+ JNE 2(PC)
+ INT $3
+
+ // Cannot grow signal stack.
+ MOVL m_gsignal(BX), SI
+ CMPL g(CX), SI
+ JNE 2(PC)
+ INT $3
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVL 4(SP), DI // f's caller's PC
+ MOVL DI, (m_morebuf+gobuf_pc)(BX)
+ LEAL 8(SP), CX // f's caller's SP
+ MOVL CX, (m_morebuf+gobuf_sp)(BX)
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL SI, (m_morebuf+gobuf_g)(BX)
+
+ // Set g->sched to context in f.
+ MOVL 0(SP), AX // f's PC
+ MOVL AX, (g_sched+gobuf_pc)(SI)
+ MOVL SI, (g_sched+gobuf_g)(SI)
+ LEAL 4(SP), AX // f's SP
+ MOVL AX, (g_sched+gobuf_sp)(SI)
+ MOVL DX, (g_sched+gobuf_ctxt)(SI)
+
+ // Call newstack on m->g0's stack.
+ MOVL m_g0(BX), BP
+ MOVL BP, g(CX)
+ MOVL (g_sched+gobuf_sp)(BP), AX
+ MOVL -4(AX), BX // fault if CALL would, before smashing SP
+ MOVL AX, SP
+ CALL runtime·newstack(SB)
+ MOVL $0, 0x1003 // crash if newstack returns
+ RET
+
+TEXT runtime·morestack_noctxt(SB),NOSPLIT,$0-0
+ MOVL $0, DX
+ JMP runtime·morestack(SB)
+
+TEXT runtime·stackBarrier(SB),NOSPLIT,$0
+ // We came here via a RET to an overwritten return PC.
+ // AX may be live. Other registers are available.
+
+ // Get the original return PC, g.stkbar[g.stkbarPos].savedLRVal.
+ get_tls(CX)
+ MOVL g(CX), CX
+ MOVL (g_stkbar+slice_array)(CX), DX
+ MOVL g_stkbarPos(CX), BX
+ IMULL $stkbar__size, BX // Too big for SIB.
+ MOVL stkbar_savedLRVal(DX)(BX*1), BX
+ // Record that this stack barrier was hit.
+ ADDL $1, g_stkbarPos(CX)
+ // Jump to the original return PC.
+ JMP BX
+
+// reflectcall: call a function with the given argument list
+// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ CMPL CX, $MAXSIZE; \
+ JA 3(PC); \
+ MOVL $NAME(SB), AX; \
+ JMP AX
+// Note: can't just "JMP NAME(SB)" - bad inlining results.
+
+TEXT reflect·call(SB), NOSPLIT, $0-0
+ JMP ·reflectcall(SB)
+
+TEXT ·reflectcall(SB), NOSPLIT, $0-20
+ MOVL argsize+12(FP), CX
+ DISPATCH(runtime·call16, 16)
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOVL $runtime·badreflectcall(SB), AX
+ JMP AX
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-20; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOVL argptr+8(FP), SI; \
+ MOVL argsize+12(FP), CX; \
+ MOVL SP, DI; \
+ REP;MOVSB; \
+ /* call function */ \
+ MOVL f+4(FP), DX; \
+ MOVL (DX), AX; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ CALL AX; \
+ /* copy return values back */ \
+ MOVL argptr+8(FP), DI; \
+ MOVL argsize+12(FP), CX; \
+ MOVL retoffset+16(FP), BX; \
+ MOVL SP, SI; \
+ ADDL BX, DI; \
+ ADDL BX, SI; \
+ SUBL BX, CX; \
+ REP;MOVSB; \
+ /* execute write barrier updates */ \
+ MOVL argtype+0(FP), DX; \
+ MOVL argptr+8(FP), DI; \
+ MOVL argsize+12(FP), CX; \
+ MOVL retoffset+16(FP), BX; \
+ MOVL DX, 0(SP); \
+ MOVL DI, 4(SP); \
+ MOVL CX, 8(SP); \
+ MOVL BX, 12(SP); \
+ CALL runtime·callwritebarrier(SB); \
+ RET
+
+CALLFN(·call16, 16)
+CALLFN(·call32, 32)
+CALLFN(·call64, 64)
+CALLFN(·call128, 128)
+CALLFN(·call256, 256)
+CALLFN(·call512, 512)
+CALLFN(·call1024, 1024)
+CALLFN(·call2048, 2048)
+CALLFN(·call4096, 4096)
+CALLFN(·call8192, 8192)
+CALLFN(·call16384, 16384)
+CALLFN(·call32768, 32768)
+CALLFN(·call65536, 65536)
+CALLFN(·call131072, 131072)
+CALLFN(·call262144, 262144)
+CALLFN(·call524288, 524288)
+CALLFN(·call1048576, 1048576)
+CALLFN(·call2097152, 2097152)
+CALLFN(·call4194304, 4194304)
+CALLFN(·call8388608, 8388608)
+CALLFN(·call16777216, 16777216)
+CALLFN(·call33554432, 33554432)
+CALLFN(·call67108864, 67108864)
+CALLFN(·call134217728, 134217728)
+CALLFN(·call268435456, 268435456)
+CALLFN(·call536870912, 536870912)
+CALLFN(·call1073741824, 1073741824)
+
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ MOVL cycles+0(FP), AX
+again:
+ PAUSE
+ SUBL $1, AX
+ JNZ again
+ RET
+
+TEXT ·publicationBarrier(SB),NOSPLIT,$0-0
+ // Stores are already ordered on x86, so this is just a
+ // compile barrier.
+ RET
+
+// void jmpdefer(fn, sp);
+// called from deferreturn.
+// 1. pop the caller
+// 2. sub 5 bytes from the callers return
+// 3. jmp to the argument
+TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
+ MOVL fv+0(FP), DX // fn
+ MOVL argp+4(FP), BX // caller sp
+ LEAL -4(BX), SP // caller sp after CALL
+ SUBL $5, (SP) // return to CALL again
+ MOVL 0(DX), BX
+ JMP BX // but first run the deferred function
+
+// Save state of caller into g->sched.
+TEXT gosave<>(SB),NOSPLIT,$0
+ PUSHL AX
+ PUSHL BX
+ get_tls(BX)
+ MOVL g(BX), BX
+ LEAL arg+0(FP), AX
+ MOVL AX, (g_sched+gobuf_sp)(BX)
+ MOVL -4(AX), AX
+ MOVL AX, (g_sched+gobuf_pc)(BX)
+ MOVL $0, (g_sched+gobuf_ret)(BX)
+ MOVL $0, (g_sched+gobuf_ctxt)(BX)
+ POPL BX
+ POPL AX
+ RET
+
+// func asmcgocall(fn, arg unsafe.Pointer) int32
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.go for more details.
+TEXT ·asmcgocall(SB),NOSPLIT,$0-12
+ MOVL fn+0(FP), AX
+ MOVL arg+4(FP), BX
+
+ MOVL SP, DX
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already.
+ get_tls(CX)
+ MOVL g(CX), BP
+ MOVL g_m(BP), BP
+ MOVL m_g0(BP), SI
+ MOVL g(CX), DI
+ CMPL SI, DI
+ JEQ noswitch
+ CALL gosave<>(SB)
+ get_tls(CX)
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+
+noswitch:
+ // Now on a scheduling stack (a pthread-created stack).
+ SUBL $32, SP
+ ANDL $~15, SP // alignment, perhaps unnecessary
+ MOVL DI, 8(SP) // save g
+ MOVL (g_stack+stack_hi)(DI), DI
+ SUBL DX, DI
+ MOVL DI, 4(SP) // save depth in stack (can't just save SP, as stack might be copied during a callback)
+ MOVL BX, 0(SP) // first argument in x86-32 ABI
+ CALL AX
+
+ // Restore registers, g, stack pointer.
+ get_tls(CX)
+ MOVL 8(SP), DI
+ MOVL (g_stack+stack_hi)(DI), SI
+ SUBL 4(SP), SI
+ MOVL DI, g(CX)
+ MOVL SI, SP
+
+ MOVL AX, ret+8(FP)
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// Turn the fn into a Go func (by taking its address) and call
+// cgocallback_gofunc.
+TEXT runtime·cgocallback(SB),NOSPLIT,$12-12
+ LEAL fn+0(FP), AX
+ MOVL AX, 0(SP)
+ MOVL frame+4(FP), AX
+ MOVL AX, 4(SP)
+ MOVL framesize+8(FP), AX
+ MOVL AX, 8(SP)
+ MOVL $runtime·cgocallback_gofunc(SB), AX
+ CALL AX
+ RET
+
+// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
+// See cgocall.go for more details.
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$12-12
+ NO_LOCAL_POINTERS
+
+ // If g is nil, Go did not create the current thread.
+ // Call needm to obtain one for temporary use.
+ // In this case, we're running on the thread stack, so there's
+ // lots of space, but the linker doesn't know. Hide the call from
+ // the linker analysis by using an indirect call through AX.
+ get_tls(CX)
+#ifdef GOOS_windows
+ MOVL $0, BP
+ CMPL CX, $0
+ JEQ 2(PC) // TODO
+#endif
+ MOVL g(CX), BP
+ CMPL BP, $0
+ JEQ needm
+ MOVL g_m(BP), BP
+ MOVL BP, DX // saved copy of oldm
+ JMP havem
+needm:
+ MOVL $0, 0(SP)
+ MOVL $runtime·needm(SB), AX
+ CALL AX
+ MOVL 0(SP), DX
+ get_tls(CX)
+ MOVL g(CX), BP
+ MOVL g_m(BP), BP
+
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then systemstack will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVL m_g0(BP), SI
+ MOVL SP, (g_sched+gobuf_sp)(SI)
+
+havem:
+ // Now there's a valid m, and we're running on its m->g0.
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ // Save current sp in m->g0->sched.sp in preparation for
+ // switch back to m->curg stack.
+ // NOTE: unwindm knows that the saved g->sched.sp is at 0(SP).
+ MOVL m_g0(BP), SI
+ MOVL (g_sched+gobuf_sp)(SI), AX
+ MOVL AX, 0(SP)
+ MOVL SP, (g_sched+gobuf_sp)(SI)
+
+ // Switch to m->curg stack and call runtime.cgocallbackg.
+ // Because we are taking over the execution of m->curg
+ // but *not* resuming what had been running, we need to
+ // save that information (m->curg->sched) so we can restore it.
+ // We can restore m->curg->sched.sp easily, because calling
+ // runtime.cgocallbackg leaves SP unchanged upon return.
+ // To save m->curg->sched.pc, we push it onto the stack.
+ // This has the added benefit that it looks to the traceback
+ // routine like cgocallbackg is going to return to that
+ // PC (because the frame we allocate below has the same
+ // size as cgocallback_gofunc's frame declared above)
+ // so that the traceback will seamlessly trace back into
+ // the earlier calls.
+ //
+ // In the new goroutine, 0(SP) holds the saved oldm (DX) register.
+ // 4(SP) and 8(SP) are unused.
+ MOVL m_curg(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), DI // prepare stack as DI
+ MOVL (g_sched+gobuf_pc)(SI), BP
+ MOVL BP, -4(DI)
+ LEAL -(4+12)(DI), SP
+ MOVL DX, 0(SP)
+ CALL runtime·cgocallbackg(SB)
+ MOVL 0(SP), DX
+
+ // Restore g->sched (== m->curg->sched) from saved values.
+ get_tls(CX)
+ MOVL g(CX), SI
+ MOVL 12(SP), BP
+ MOVL BP, (g_sched+gobuf_pc)(SI)
+ LEAL (12+4)(SP), DI
+ MOVL DI, (g_sched+gobuf_sp)(SI)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVL g(CX), BP
+ MOVL g_m(BP), BP
+ MOVL m_g0(BP), SI
+ MOVL SI, g(CX)
+ MOVL (g_sched+gobuf_sp)(SI), SP
+ MOVL 0(SP), AX
+ MOVL AX, (g_sched+gobuf_sp)(SI)
+
+ // If the m on entry was nil, we called needm above to borrow an m
+ // for the duration of the call. Since the call is over, return it with dropm.
+ CMPL DX, $0
+ JNE 3(PC)
+ MOVL $runtime·dropm(SB), AX
+ CALL AX
+
+ // Done!
+ RET
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-4
+ MOVL gg+0(FP), BX
+#ifdef GOOS_windows
+ CMPL BX, $0
+ JNE settls
+ MOVL $0, 0x14(FS)
+ RET
+settls:
+ MOVL g_m(BX), AX
+ LEAL m_tls(AX), AX
+ MOVL AX, 0x14(FS)
+#endif
+ get_tls(CX)
+ MOVL BX, g(CX)
+ RET
+
+// void setg_gcc(G*); set g. for use by gcc
+TEXT setg_gcc<>(SB), NOSPLIT, $0
+ get_tls(AX)
+ MOVL gg+0(FP), DX
+ MOVL DX, g(AX)
+ RET
+
+// check that SP is in range [g->stack.lo, g->stack.hi)
+TEXT runtime·stackcheck(SB), NOSPLIT, $0-0
+ get_tls(CX)
+ MOVL g(CX), AX
+ CMPL (g_stack+stack_hi)(AX), SP
+ JHI 2(PC)
+ INT $3
+ CMPL SP, (g_stack+stack_lo)(AX)
+ JHI 2(PC)
+ INT $3
+ RET
+
+TEXT runtime·getcallerpc(SB),NOSPLIT,$4-8
+ MOVL argp+0(FP),AX // addr of first arg
+ MOVL -4(AX),AX // get calling pc
+ CMPL AX, runtime·stackBarrierPC(SB)
+ JNE nobar
+ // Get original return PC.
+ CALL runtime·nextBarrierPC(SB)
+ MOVL 0(SP), AX
+nobar:
+ MOVL AX, ret+4(FP)
+ RET
+
+TEXT runtime·setcallerpc(SB),NOSPLIT,$4-8
+ MOVL argp+0(FP),AX // addr of first arg
+ MOVL pc+4(FP), BX
+ MOVL -4(AX), DX
+ CMPL DX, runtime·stackBarrierPC(SB)
+ JEQ setbar
+ MOVL BX, -4(AX) // set calling pc
+ RET
+setbar:
+ // Set the stack barrier return PC.
+ MOVL BX, 0(SP)
+ CALL runtime·setNextBarrierPC(SB)
+ RET
+
+TEXT runtime·getcallersp(SB), NOSPLIT, $0-8
+ MOVL argp+0(FP), AX
+ MOVL AX, ret+4(FP)
+ RET
+
+// func cputicks() int64
+TEXT runtime·cputicks(SB),NOSPLIT,$0-8
+ TESTL $0x4000000, runtime·cpuid_edx(SB) // no sse2, no mfence
+ JEQ done
+ CMPB runtime·lfenceBeforeRdtsc(SB), $1
+ JNE mfence
+ BYTE $0x0f; BYTE $0xae; BYTE $0xe8 // LFENCE
+ JMP done
+mfence:
+ BYTE $0x0f; BYTE $0xae; BYTE $0xf0 // MFENCE
+done:
+ RDTSC
+ MOVL AX, ret_lo+0(FP)
+ MOVL DX, ret_hi+4(FP)
+ RET
+
+TEXT runtime·ldt0setup(SB),NOSPLIT,$16-0
+ // set up ldt 7 to point at m0.tls
+ // ldt 1 would be fine on Linux, but on OS X, 7 is as low as we can go.
+ // the entry number is just a hint. setldt will set up GS with what it used.
+ MOVL $7, 0(SP)
+ LEAL runtime·m0+m_tls(SB), AX
+ MOVL AX, 4(SP)
+ MOVL $32, 8(SP) // sizeof(tls array)
+ CALL runtime·setldt(SB)
+ RET
+
+TEXT runtime·emptyfunc(SB),0,$0-0
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT,$0-0
+ INT $0x3
+
+// memhash_varlen(p unsafe.Pointer, h seed) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$16-12
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVL p+0(FP), AX
+ MOVL h+4(FP), BX
+ MOVL 4(DX), CX
+ MOVL AX, 0(SP)
+ MOVL BX, 4(SP)
+ MOVL CX, 8(SP)
+ CALL runtime·memhash(SB)
+ MOVL 12(SP), AX
+ MOVL AX, ret+8(FP)
+ RET
+
+// hash function using AES hardware instructions
+TEXT runtime·aeshash(SB),NOSPLIT,$0-16
+ MOVL p+0(FP), AX // ptr to data
+ MOVL s+8(FP), BX // size
+ LEAL ret+12(FP), DX
+ JMP runtime·aeshashbody(SB)
+
+TEXT runtime·aeshashstr(SB),NOSPLIT,$0-12
+ MOVL p+0(FP), AX // ptr to string object
+ MOVL 4(AX), BX // length of string
+ MOVL (AX), AX // string data
+ LEAL ret+8(FP), DX
+ JMP runtime·aeshashbody(SB)
+
+// AX: data
+// BX: length
+// DX: address to put return value
+TEXT runtime·aeshashbody(SB),NOSPLIT,$0-0
+ MOVL h+4(FP), X0 // 32 bits of per-table hash seed
+ PINSRW $4, BX, X0 // 16 bits of length
+ PSHUFHW $0, X0, X0 // replace size with its low 2 bytes repeated 4 times
+ MOVO X0, X1 // save unscrambled seed
+ PXOR runtime·aeskeysched(SB), X0 // xor in per-process seed
+ AESENC X0, X0 // scramble seed
+
+ CMPL BX, $16
+ JB aes0to15
+ JE aes16
+ CMPL BX, $32
+ JBE aes17to32
+ CMPL BX, $64
+ JBE aes33to64
+ JMP aes65plus
+
+aes0to15:
+ TESTL BX, BX
+ JE aes0
+
+ ADDL $16, AX
+ TESTW $0xff0, AX
+ JE endofpage
+
+ // 16 bytes loaded at this address won't cross
+ // a page boundary, so we can load it directly.
+ MOVOU -16(AX), X1
+ ADDL BX, BX
+ PAND masks<>(SB)(BX*8), X1
+
+final1:
+ AESENC X0, X1 // scramble input, xor in seed
+ AESENC X1, X1 // scramble combo 2 times
+ AESENC X1, X1
+ MOVL X1, (DX)
+ RET
+
+endofpage:
+ // address ends in 1111xxxx. Might be up against
+ // a page boundary, so load ending at last byte.
+ // Then shift bytes down using pshufb.
+ MOVOU -32(AX)(BX*1), X1
+ ADDL BX, BX
+ PSHUFB shifts<>(SB)(BX*8), X1
+ JMP final1
+
+aes0:
+ // Return scrambled input seed
+ AESENC X0, X0
+ MOVL X0, (DX)
+ RET
+
+aes16:
+ MOVOU (AX), X1
+ JMP final1
+
+aes17to32:
+ // make second starting seed
+ PXOR runtime·aeskeysched+16(SB), X1
+ AESENC X1, X1
+
+ // load data to be hashed
+ MOVOU (AX), X2
+ MOVOU -16(AX)(BX*1), X3
+
+ // scramble 3 times
+ AESENC X0, X2
+ AESENC X1, X3
+ AESENC X2, X2
+ AESENC X3, X3
+ AESENC X2, X2
+ AESENC X3, X3
+
+ // combine results
+ PXOR X3, X2
+ MOVL X2, (DX)
+ RET
+
+aes33to64:
+ // make 3 more starting seeds
+ MOVO X1, X2
+ MOVO X1, X3
+ PXOR runtime·aeskeysched+16(SB), X1
+ PXOR runtime·aeskeysched+32(SB), X2
+ PXOR runtime·aeskeysched+48(SB), X3
+ AESENC X1, X1
+ AESENC X2, X2
+ AESENC X3, X3
+
+ MOVOU (AX), X4
+ MOVOU 16(AX), X5
+ MOVOU -32(AX)(BX*1), X6
+ MOVOU -16(AX)(BX*1), X7
+
+ AESENC X0, X4
+ AESENC X1, X5
+ AESENC X2, X6
+ AESENC X3, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ PXOR X6, X4
+ PXOR X7, X5
+ PXOR X5, X4
+ MOVL X4, (DX)
+ RET
+
+aes65plus:
+ // make 3 more starting seeds
+ MOVO X1, X2
+ MOVO X1, X3
+ PXOR runtime·aeskeysched+16(SB), X1
+ PXOR runtime·aeskeysched+32(SB), X2
+ PXOR runtime·aeskeysched+48(SB), X3
+ AESENC X1, X1
+ AESENC X2, X2
+ AESENC X3, X3
+
+ // start with last (possibly overlapping) block
+ MOVOU -64(AX)(BX*1), X4
+ MOVOU -48(AX)(BX*1), X5
+ MOVOU -32(AX)(BX*1), X6
+ MOVOU -16(AX)(BX*1), X7
+
+ // scramble state once
+ AESENC X0, X4
+ AESENC X1, X5
+ AESENC X2, X6
+ AESENC X3, X7
+
+ // compute number of remaining 64-byte blocks
+ DECL BX
+ SHRL $6, BX
+
+aesloop:
+ // scramble state, xor in a block
+ MOVOU (AX), X0
+ MOVOU 16(AX), X1
+ MOVOU 32(AX), X2
+ MOVOU 48(AX), X3
+ AESENC X0, X4
+ AESENC X1, X5
+ AESENC X2, X6
+ AESENC X3, X7
+
+ // scramble state
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ ADDL $64, AX
+ DECL BX
+ JNE aesloop
+
+ // 2 more scrambles to finish
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ AESENC X4, X4
+ AESENC X5, X5
+ AESENC X6, X6
+ AESENC X7, X7
+
+ PXOR X6, X4
+ PXOR X7, X5
+ PXOR X5, X4
+ MOVL X4, (DX)
+ RET
+
+TEXT runtime·aeshash32(SB),NOSPLIT,$0-12
+ MOVL p+0(FP), AX // ptr to data
+ MOVL h+4(FP), X0 // seed
+ PINSRD $1, (AX), X0 // data
+ AESENC runtime·aeskeysched+0(SB), X0
+ AESENC runtime·aeskeysched+16(SB), X0
+ AESENC runtime·aeskeysched+32(SB), X0
+ MOVL X0, ret+8(FP)
+ RET
+
+TEXT runtime·aeshash64(SB),NOSPLIT,$0-12
+ MOVL p+0(FP), AX // ptr to data
+ MOVQ (AX), X0 // data
+ PINSRD $2, h+4(FP), X0 // seed
+ AESENC runtime·aeskeysched+0(SB), X0
+ AESENC runtime·aeskeysched+16(SB), X0
+ AESENC runtime·aeskeysched+32(SB), X0
+ MOVL X0, ret+8(FP)
+ RET
+
+// simple mask to get rid of data in the high part of the register.
+DATA masks<>+0x00(SB)/4, $0x00000000
+DATA masks<>+0x04(SB)/4, $0x00000000
+DATA masks<>+0x08(SB)/4, $0x00000000
+DATA masks<>+0x0c(SB)/4, $0x00000000
+
+DATA masks<>+0x10(SB)/4, $0x000000ff
+DATA masks<>+0x14(SB)/4, $0x00000000
+DATA masks<>+0x18(SB)/4, $0x00000000
+DATA masks<>+0x1c(SB)/4, $0x00000000
+
+DATA masks<>+0x20(SB)/4, $0x0000ffff
+DATA masks<>+0x24(SB)/4, $0x00000000
+DATA masks<>+0x28(SB)/4, $0x00000000
+DATA masks<>+0x2c(SB)/4, $0x00000000
+
+DATA masks<>+0x30(SB)/4, $0x00ffffff
+DATA masks<>+0x34(SB)/4, $0x00000000
+DATA masks<>+0x38(SB)/4, $0x00000000
+DATA masks<>+0x3c(SB)/4, $0x00000000
+
+DATA masks<>+0x40(SB)/4, $0xffffffff
+DATA masks<>+0x44(SB)/4, $0x00000000
+DATA masks<>+0x48(SB)/4, $0x00000000
+DATA masks<>+0x4c(SB)/4, $0x00000000
+
+DATA masks<>+0x50(SB)/4, $0xffffffff
+DATA masks<>+0x54(SB)/4, $0x000000ff
+DATA masks<>+0x58(SB)/4, $0x00000000
+DATA masks<>+0x5c(SB)/4, $0x00000000
+
+DATA masks<>+0x60(SB)/4, $0xffffffff
+DATA masks<>+0x64(SB)/4, $0x0000ffff
+DATA masks<>+0x68(SB)/4, $0x00000000
+DATA masks<>+0x6c(SB)/4, $0x00000000
+
+DATA masks<>+0x70(SB)/4, $0xffffffff
+DATA masks<>+0x74(SB)/4, $0x00ffffff
+DATA masks<>+0x78(SB)/4, $0x00000000
+DATA masks<>+0x7c(SB)/4, $0x00000000
+
+DATA masks<>+0x80(SB)/4, $0xffffffff
+DATA masks<>+0x84(SB)/4, $0xffffffff
+DATA masks<>+0x88(SB)/4, $0x00000000
+DATA masks<>+0x8c(SB)/4, $0x00000000
+
+DATA masks<>+0x90(SB)/4, $0xffffffff
+DATA masks<>+0x94(SB)/4, $0xffffffff
+DATA masks<>+0x98(SB)/4, $0x000000ff
+DATA masks<>+0x9c(SB)/4, $0x00000000
+
+DATA masks<>+0xa0(SB)/4, $0xffffffff
+DATA masks<>+0xa4(SB)/4, $0xffffffff
+DATA masks<>+0xa8(SB)/4, $0x0000ffff
+DATA masks<>+0xac(SB)/4, $0x00000000
+
+DATA masks<>+0xb0(SB)/4, $0xffffffff
+DATA masks<>+0xb4(SB)/4, $0xffffffff
+DATA masks<>+0xb8(SB)/4, $0x00ffffff
+DATA masks<>+0xbc(SB)/4, $0x00000000
+
+DATA masks<>+0xc0(SB)/4, $0xffffffff
+DATA masks<>+0xc4(SB)/4, $0xffffffff
+DATA masks<>+0xc8(SB)/4, $0xffffffff
+DATA masks<>+0xcc(SB)/4, $0x00000000
+
+DATA masks<>+0xd0(SB)/4, $0xffffffff
+DATA masks<>+0xd4(SB)/4, $0xffffffff
+DATA masks<>+0xd8(SB)/4, $0xffffffff
+DATA masks<>+0xdc(SB)/4, $0x000000ff
+
+DATA masks<>+0xe0(SB)/4, $0xffffffff
+DATA masks<>+0xe4(SB)/4, $0xffffffff
+DATA masks<>+0xe8(SB)/4, $0xffffffff
+DATA masks<>+0xec(SB)/4, $0x0000ffff
+
+DATA masks<>+0xf0(SB)/4, $0xffffffff
+DATA masks<>+0xf4(SB)/4, $0xffffffff
+DATA masks<>+0xf8(SB)/4, $0xffffffff
+DATA masks<>+0xfc(SB)/4, $0x00ffffff
+
+GLOBL masks<>(SB),RODATA,$256
+
+// these are arguments to pshufb. They move data down from
+// the high bytes of the register to the low bytes of the register.
+// index is how many bytes to move.
+DATA shifts<>+0x00(SB)/4, $0x00000000
+DATA shifts<>+0x04(SB)/4, $0x00000000
+DATA shifts<>+0x08(SB)/4, $0x00000000
+DATA shifts<>+0x0c(SB)/4, $0x00000000
+
+DATA shifts<>+0x10(SB)/4, $0xffffff0f
+DATA shifts<>+0x14(SB)/4, $0xffffffff
+DATA shifts<>+0x18(SB)/4, $0xffffffff
+DATA shifts<>+0x1c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x20(SB)/4, $0xffff0f0e
+DATA shifts<>+0x24(SB)/4, $0xffffffff
+DATA shifts<>+0x28(SB)/4, $0xffffffff
+DATA shifts<>+0x2c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x30(SB)/4, $0xff0f0e0d
+DATA shifts<>+0x34(SB)/4, $0xffffffff
+DATA shifts<>+0x38(SB)/4, $0xffffffff
+DATA shifts<>+0x3c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x40(SB)/4, $0x0f0e0d0c
+DATA shifts<>+0x44(SB)/4, $0xffffffff
+DATA shifts<>+0x48(SB)/4, $0xffffffff
+DATA shifts<>+0x4c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x50(SB)/4, $0x0e0d0c0b
+DATA shifts<>+0x54(SB)/4, $0xffffff0f
+DATA shifts<>+0x58(SB)/4, $0xffffffff
+DATA shifts<>+0x5c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x60(SB)/4, $0x0d0c0b0a
+DATA shifts<>+0x64(SB)/4, $0xffff0f0e
+DATA shifts<>+0x68(SB)/4, $0xffffffff
+DATA shifts<>+0x6c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x70(SB)/4, $0x0c0b0a09
+DATA shifts<>+0x74(SB)/4, $0xff0f0e0d
+DATA shifts<>+0x78(SB)/4, $0xffffffff
+DATA shifts<>+0x7c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x80(SB)/4, $0x0b0a0908
+DATA shifts<>+0x84(SB)/4, $0x0f0e0d0c
+DATA shifts<>+0x88(SB)/4, $0xffffffff
+DATA shifts<>+0x8c(SB)/4, $0xffffffff
+
+DATA shifts<>+0x90(SB)/4, $0x0a090807
+DATA shifts<>+0x94(SB)/4, $0x0e0d0c0b
+DATA shifts<>+0x98(SB)/4, $0xffffff0f
+DATA shifts<>+0x9c(SB)/4, $0xffffffff
+
+DATA shifts<>+0xa0(SB)/4, $0x09080706
+DATA shifts<>+0xa4(SB)/4, $0x0d0c0b0a
+DATA shifts<>+0xa8(SB)/4, $0xffff0f0e
+DATA shifts<>+0xac(SB)/4, $0xffffffff
+
+DATA shifts<>+0xb0(SB)/4, $0x08070605
+DATA shifts<>+0xb4(SB)/4, $0x0c0b0a09
+DATA shifts<>+0xb8(SB)/4, $0xff0f0e0d
+DATA shifts<>+0xbc(SB)/4, $0xffffffff
+
+DATA shifts<>+0xc0(SB)/4, $0x07060504
+DATA shifts<>+0xc4(SB)/4, $0x0b0a0908
+DATA shifts<>+0xc8(SB)/4, $0x0f0e0d0c
+DATA shifts<>+0xcc(SB)/4, $0xffffffff
+
+DATA shifts<>+0xd0(SB)/4, $0x06050403
+DATA shifts<>+0xd4(SB)/4, $0x0a090807
+DATA shifts<>+0xd8(SB)/4, $0x0e0d0c0b
+DATA shifts<>+0xdc(SB)/4, $0xffffff0f
+
+DATA shifts<>+0xe0(SB)/4, $0x05040302
+DATA shifts<>+0xe4(SB)/4, $0x09080706
+DATA shifts<>+0xe8(SB)/4, $0x0d0c0b0a
+DATA shifts<>+0xec(SB)/4, $0xffff0f0e
+
+DATA shifts<>+0xf0(SB)/4, $0x04030201
+DATA shifts<>+0xf4(SB)/4, $0x08070605
+DATA shifts<>+0xf8(SB)/4, $0x0c0b0a09
+DATA shifts<>+0xfc(SB)/4, $0xff0f0e0d
+
+GLOBL shifts<>(SB),RODATA,$256
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ // check that masks<>(SB) and shifts<>(SB) are aligned to 16-byte
+ MOVL $masks<>(SB), AX
+ MOVL $shifts<>(SB), BX
+ ORL BX, AX
+ TESTL $15, AX
+ SETEQ ret+0(FP)
+ RET
+
+TEXT runtime·memeq(SB),NOSPLIT,$0-13
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ MOVL size+8(FP), BX
+ LEAL ret+12(FP), AX
+ JMP runtime·memeqbody(SB)
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT,$0-9
+ MOVL a+0(FP), SI
+ MOVL b+4(FP), DI
+ CMPL SI, DI
+ JEQ eq
+ MOVL 4(DX), BX // compiler stores size at offset 4 in the closure
+ LEAL ret+8(FP), AX
+ JMP runtime·memeqbody(SB)
+eq:
+ MOVB $1, ret+8(FP)
+ RET
+
+// eqstring tests whether two strings are equal.
+// The compiler guarantees that strings passed
+// to eqstring have equal length.
+// See runtime_test.go:eqstring_generic for
+// equivalent Go code.
+TEXT runtime·eqstring(SB),NOSPLIT,$0-17
+ MOVL s1str+0(FP), SI
+ MOVL s2str+8(FP), DI
+ CMPL SI, DI
+ JEQ same
+ MOVL s1len+4(FP), BX
+ LEAL v+16(FP), AX
+ JMP runtime·memeqbody(SB)
+same:
+ MOVB $1, v+16(FP)
+ RET
+
+TEXT bytes·Equal(SB),NOSPLIT,$0-25
+ MOVL a_len+4(FP), BX
+ MOVL b_len+16(FP), CX
+ CMPL BX, CX
+ JNE eqret
+ MOVL a+0(FP), SI
+ MOVL b+12(FP), DI
+ LEAL ret+24(FP), AX
+ JMP runtime·memeqbody(SB)
+eqret:
+ MOVB $0, ret+24(FP)
+ RET
+
+// a in SI
+// b in DI
+// count in BX
+// address of result byte in AX
+TEXT runtime·memeqbody(SB),NOSPLIT,$0-0
+ CMPL BX, $4
+ JB small
+
+ // 64 bytes at a time using xmm registers
+hugeloop:
+ CMPL BX, $64
+ JB bigloop
+ TESTL $0x4000000, runtime·cpuid_edx(SB) // check for sse2
+ JE bigloop
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ MOVOU 16(SI), X2
+ MOVOU 16(DI), X3
+ MOVOU 32(SI), X4
+ MOVOU 32(DI), X5
+ MOVOU 48(SI), X6
+ MOVOU 48(DI), X7
+ PCMPEQB X1, X0
+ PCMPEQB X3, X2
+ PCMPEQB X5, X4
+ PCMPEQB X7, X6
+ PAND X2, X0
+ PAND X6, X4
+ PAND X4, X0
+ PMOVMSKB X0, DX
+ ADDL $64, SI
+ ADDL $64, DI
+ SUBL $64, BX
+ CMPL DX, $0xffff
+ JEQ hugeloop
+ MOVB $0, (AX)
+ RET
+
+ // 4 bytes at a time using 32-bit register
+bigloop:
+ CMPL BX, $4
+ JBE leftover
+ MOVL (SI), CX
+ MOVL (DI), DX
+ ADDL $4, SI
+ ADDL $4, DI
+ SUBL $4, BX
+ CMPL CX, DX
+ JEQ bigloop
+ MOVB $0, (AX)
+ RET
+
+ // remaining 0-4 bytes
+leftover:
+ MOVL -4(SI)(BX*1), CX
+ MOVL -4(DI)(BX*1), DX
+ CMPL CX, DX
+ SETEQ (AX)
+ RET
+
+small:
+ CMPL BX, $0
+ JEQ equal
+
+ LEAL 0(BX*8), CX
+ NEGL CX
+
+ MOVL SI, DX
+ CMPB DX, $0xfc
+ JA si_high
+
+ // load at SI won't cross a page boundary.
+ MOVL (SI), SI
+ JMP si_finish
+si_high:
+ // address ends in 111111xx. Load up to bytes we want, move to correct position.
+ MOVL -4(SI)(BX*1), SI
+ SHRL CX, SI
+si_finish:
+
+ // same for DI.
+ MOVL DI, DX
+ CMPB DX, $0xfc
+ JA di_high
+ MOVL (DI), DI
+ JMP di_finish
+di_high:
+ MOVL -4(DI)(BX*1), DI
+ SHRL CX, DI
+di_finish:
+
+ SUBL SI, DI
+ SHLL CX, DI
+equal:
+ SETEQ (AX)
+ RET
+
+TEXT runtime·cmpstring(SB),NOSPLIT,$0-20
+ MOVL s1_base+0(FP), SI
+ MOVL s1_len+4(FP), BX
+ MOVL s2_base+8(FP), DI
+ MOVL s2_len+12(FP), DX
+ LEAL ret+16(FP), AX
+ JMP runtime·cmpbody(SB)
+
+TEXT bytes·Compare(SB),NOSPLIT,$0-28
+ MOVL s1+0(FP), SI
+ MOVL s1+4(FP), BX
+ MOVL s2+12(FP), DI
+ MOVL s2+16(FP), DX
+ LEAL ret+24(FP), AX
+ JMP runtime·cmpbody(SB)
+
+TEXT bytes·IndexByte(SB),NOSPLIT,$0-20
+ MOVL s+0(FP), SI
+ MOVL s_len+4(FP), CX
+ MOVB c+12(FP), AL
+ MOVL SI, DI
+ CLD; REPN; SCASB
+ JZ 3(PC)
+ MOVL $-1, ret+16(FP)
+ RET
+ SUBL SI, DI
+ SUBL $1, DI
+ MOVL DI, ret+16(FP)
+ RET
+
+TEXT strings·IndexByte(SB),NOSPLIT,$0-16
+ MOVL s+0(FP), SI
+ MOVL s_len+4(FP), CX
+ MOVB c+8(FP), AL
+ MOVL SI, DI
+ CLD; REPN; SCASB
+ JZ 3(PC)
+ MOVL $-1, ret+12(FP)
+ RET
+ SUBL SI, DI
+ SUBL $1, DI
+ MOVL DI, ret+12(FP)
+ RET
+
+// input:
+// SI = a
+// DI = b
+// BX = alen
+// DX = blen
+// AX = address of return word (set to 1/0/-1)
+TEXT runtime·cmpbody(SB),NOSPLIT,$0-0
+ MOVL DX, BP
+ SUBL BX, DX // DX = blen-alen
+ JLE 2(PC)
+ MOVL BX, BP // BP = min(alen, blen)
+ CMPL SI, DI
+ JEQ allsame
+ CMPL BP, $4
+ JB small
+ TESTL $0x4000000, runtime·cpuid_edx(SB) // check for sse2
+ JE mediumloop
+largeloop:
+ CMPL BP, $16
+ JB mediumloop
+ MOVOU (SI), X0
+ MOVOU (DI), X1
+ PCMPEQB X0, X1
+ PMOVMSKB X1, BX
+ XORL $0xffff, BX // convert EQ to NE
+ JNE diff16 // branch if at least one byte is not equal
+ ADDL $16, SI
+ ADDL $16, DI
+ SUBL $16, BP
+ JMP largeloop
+
+diff16:
+ BSFL BX, BX // index of first byte that differs
+ XORL DX, DX
+ MOVB (SI)(BX*1), CX
+ CMPB CX, (DI)(BX*1)
+ SETHI DX
+ LEAL -1(DX*2), DX // convert 1/0 to +1/-1
+ MOVL DX, (AX)
+ RET
+
+mediumloop:
+ CMPL BP, $4
+ JBE _0through4
+ MOVL (SI), BX
+ MOVL (DI), CX
+ CMPL BX, CX
+ JNE diff4
+ ADDL $4, SI
+ ADDL $4, DI
+ SUBL $4, BP
+ JMP mediumloop
+
+_0through4:
+ MOVL -4(SI)(BP*1), BX
+ MOVL -4(DI)(BP*1), CX
+ CMPL BX, CX
+ JEQ allsame
+
+diff4:
+ BSWAPL BX // reverse order of bytes
+ BSWAPL CX
+ XORL BX, CX // find bit differences
+ BSRL CX, CX // index of highest bit difference
+ SHRL CX, BX // move a's bit to bottom
+ ANDL $1, BX // mask bit
+ LEAL -1(BX*2), BX // 1/0 => +1/-1
+ MOVL BX, (AX)
+ RET
+
+ // 0-3 bytes in common
+small:
+ LEAL (BP*8), CX
+ NEGL CX
+ JEQ allsame
+
+ // load si
+ CMPB SI, $0xfc
+ JA si_high
+ MOVL (SI), SI
+ JMP si_finish
+si_high:
+ MOVL -4(SI)(BP*1), SI
+ SHRL CX, SI
+si_finish:
+ SHLL CX, SI
+
+ // same for di
+ CMPB DI, $0xfc
+ JA di_high
+ MOVL (DI), DI
+ JMP di_finish
+di_high:
+ MOVL -4(DI)(BP*1), DI
+ SHRL CX, DI
+di_finish:
+ SHLL CX, DI
+
+ BSWAPL SI // reverse order of bytes
+ BSWAPL DI
+ XORL SI, DI // find bit differences
+ JEQ allsame
+ BSRL DI, CX // index of highest bit difference
+ SHRL CX, SI // move a's bit to bottom
+ ANDL $1, SI // mask bit
+ LEAL -1(SI*2), BX // 1/0 => +1/-1
+ MOVL BX, (AX)
+ RET
+
+ // all the bytes in common are the same, so we just need
+ // to compare the lengths.
+allsame:
+ XORL BX, BX
+ XORL CX, CX
+ TESTL DX, DX
+ SETLT BX // 1 if alen > blen
+ SETEQ CX // 1 if alen == blen
+ LEAL -1(CX)(BX*2), BX // 1,0,-1 result
+ MOVL BX, (AX)
+ RET
+
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+ get_tls(CX)
+ MOVL g(CX), AX
+ MOVL g_m(AX), AX
+ MOVL m_fastrand(AX), DX
+ ADDL DX, DX
+ MOVL DX, BX
+ XORL $0x88888eef, DX
+ JPL 2(PC)
+ MOVL BX, DX
+ MOVL DX, m_fastrand(AX)
+ MOVL DX, ret+0(FP)
+ RET
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVL $0, AX
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT,$0
+ get_tls(CX)
+ MOVL g(CX), AX
+ MOVL g_m(AX), AX
+ MOVL m_curg(AX), AX
+ MOVL (g_stack+stack_hi)(AX), AX
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT,$0-0
+ BYTE $0x90 // NOP
+ CALL runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ BYTE $0x90 // NOP
+
+// Prefetching doesn't seem to help.
+TEXT runtime·prefetcht0(SB),NOSPLIT,$0-4
+ RET
+
+TEXT runtime·prefetcht1(SB),NOSPLIT,$0-4
+ RET
+
+TEXT runtime·prefetcht2(SB),NOSPLIT,$0-4
+ RET
+
+TEXT runtime·prefetchnta(SB),NOSPLIT,$0-4
+ RET
+
+// Add a module's moduledata to the linked list of moduledata objects. This
+// is called from .init_array by a function generated in the linker and so
+// follows the platform ABI wrt register preservation -- it only touches AX,
+// CX (implicitly) and DX, but it does not follow the ABI wrt arguments:
+// instead the pointer to the moduledata is passed in AX.
+TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0
+ MOVL runtime·lastmoduledatap(SB), DX
+ MOVL AX, moduledata_next(DX)
+ MOVL AX, runtime·lastmoduledatap(SB)
+ RET
diff -pruN 1.6.3-1/.pc/applied-patches 1.6.3-1ubuntu1/.pc/applied-patches
--- 1.6.3-1/.pc/applied-patches 2016-07-21 13:36:08.274620873 +0000
+++ 1.6.3-1ubuntu1/.pc/applied-patches 2016-07-21 13:36:09.422652161 +0000
@@ -0,0 +1,4 @@
+0001-s390x-port.patch
+0002-no-pie-when-race.patch
+0003-cmd-compile-do-not-generate-tail-calls-when-dynamic-.patch
+0004-cmd-internal-obj-runtime-fixes-for-defer-in-386-shar.patch
diff -pruN 1.6.3-1/src/cmd/asm/internal/arch/arch.go 1.6.3-1ubuntu1/src/cmd/asm/internal/arch/arch.go
--- 1.6.3-1/src/cmd/asm/internal/arch/arch.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/arch/arch.go 2016-07-21 13:36:09.000000000 +0000
@@ -10,6 +10,7 @@ import (
"cmd/internal/obj/arm64"
"cmd/internal/obj/mips"
"cmd/internal/obj/ppc64"
+ "cmd/internal/obj/s390x"
"cmd/internal/obj/x86"
"fmt"
"strings"
@@ -82,6 +83,10 @@ func Set(GOARCH string) *Arch {
a := archPPC64()
a.LinkArch = &ppc64.Linkppc64le
return a
+ case "s390x":
+ a := archS390x()
+ a.LinkArch = &s390x.Links390x
+ return a
}
return nil
}
@@ -426,3 +431,56 @@ func archMips64() *Arch {
IsJump: jumpMIPS64,
}
}
+
+func archS390x() *Arch {
+ register := make(map[string]int16)
+ // Create maps for easy lookup of instruction names etc.
+ // Note that there is no list of names as there is for x86.
+ for i := s390x.REG_R0; i <= s390x.REG_R15; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := s390x.REG_F0; i <= s390x.REG_F15; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := s390x.REG_V0; i <= s390x.REG_V31; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ for i := s390x.REG_AR0; i <= s390x.REG_AR15; i++ {
+ register[obj.Rconv(i)] = int16(i)
+ }
+ register["LR"] = s390x.REG_LR
+ // Pseudo-registers.
+ register["SB"] = RSB
+ register["FP"] = RFP
+ register["PC"] = RPC
+ // Avoid unintentionally clobbering g using R13.
+ delete(register, "R13")
+ register["g"] = s390x.REG_R13
+ registerPrefix := map[string]bool{
+ "AR": true,
+ "F": true,
+ "R": true,
+ }
+
+ instructions := make(map[string]int)
+ for i, s := range obj.Anames {
+ instructions[s] = i
+ }
+ for i, s := range s390x.Anames {
+ if i >= obj.A_ARCHSPECIFIC {
+ instructions[s] = i + obj.ABaseS390X
+ }
+ }
+ // Annoying aliases.
+ instructions["BR"] = s390x.ABR
+ instructions["BL"] = s390x.ABL
+
+ return &Arch{
+ LinkArch: &s390x.Links390x,
+ Instructions: instructions,
+ Register: register,
+ RegisterPrefix: registerPrefix,
+ RegisterNumber: s390xRegisterNumber,
+ IsJump: jumpS390x,
+ }
+}
diff -pruN 1.6.3-1/src/cmd/asm/internal/arch/s390x.go 1.6.3-1ubuntu1/src/cmd/asm/internal/arch/s390x.go
--- 1.6.3-1/src/cmd/asm/internal/arch/s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/arch/s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,136 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file encapsulates some of the odd characteristics of the
+// s390x instruction set, to minimize its interaction
+// with the core of the assembler.
+
+package arch
+
+import "cmd/internal/obj/s390x"
+
+func jumpS390x(word string) bool {
+ switch word {
+ case "BC",
+ "BCL",
+ "BEQ",
+ "BGE",
+ "BGT",
+ "BL",
+ "BLE",
+ "BLT",
+ "BNE",
+ "BR",
+ "BVC",
+ "BVS",
+ "CMPBEQ",
+ "CMPBGE",
+ "CMPBGT",
+ "CMPBLE",
+ "CMPBLT",
+ "CMPBNE",
+ "CMPUBEQ",
+ "CMPUBGE",
+ "CMPUBGT",
+ "CMPUBLE",
+ "CMPUBLT",
+ "CMPUBNE",
+ "CALL",
+ "JMP":
+ return true
+ }
+ return false
+}
+
+// IsS390xRLD reports whether the op (as defined by an s390x.A* constant) is
+// one of the RLD-like instructions that require special handling.
+// The FMADD-like instructions behave similarly.
+func IsS390xRLD(op int) bool {
+ switch op {
+ case s390x.AFMADD,
+ s390x.AFMADDS,
+ s390x.AFMSUB,
+ s390x.AFMSUBS,
+ s390x.AFNMADD,
+ s390x.AFNMADDS,
+ s390x.AFNMSUB,
+ s390x.AFNMSUBS:
+ return true
+ }
+ return false
+}
+
+// IsS390xCMP reports whether the op (as defined by an s390x.A* constant) is
+// one of the CMP instructions that require special handling.
+func IsS390xCMP(op int) bool {
+ switch op {
+ case s390x.ACMP, s390x.ACMPU, s390x.ACMPW, s390x.ACMPWU:
+ return true
+ }
+ return false
+}
+
+// IsS390xNEG reports whether the op (as defined by an s390x.A* constant) is
+// one of the NEG-like instructions that require special handling.
+func IsS390xNEG(op int) bool {
+ switch op {
+ case s390x.AADDME,
+ s390x.AADDZE,
+ s390x.ANEG,
+ s390x.ASUBME,
+ s390x.ASUBZE:
+ return true
+ }
+ return false
+}
+
+// IsS390xWithLength reports whether the op (as defined by an s390x.A* constant)
+// refers to an instruction which takes a length as its first argument.
+func IsS390xWithLength(op int) bool {
+ switch op {
+ case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
+ return true
+ case s390x.AVLL, s390x.AVSTL:
+ return true
+ }
+ return false
+}
+
+// IsS390xWithIndex reports whether the op (as defined by an s390x.A* constant)
+// refers to an instruction which takes an index as its first argument.
+func IsS390xWithIndex(op int) bool {
+ switch op {
+ case s390x.AVSCEG, s390x.AVSCEF, s390x.AVGEG, s390x.AVGEF:
+ return true
+ case s390x.AVGMG, s390x.AVGMF, s390x.AVGMH, s390x.AVGMB:
+ return true
+ case s390x.AVLEIG, s390x.AVLEIF, s390x.AVLEIH, s390x.AVLEIB:
+ return true
+ case s390x.AVPDI:
+ return true
+ }
+ return false
+}
+
+func s390xRegisterNumber(name string, n int16) (int16, bool) {
+ switch name {
+ case "AR":
+ if 0 <= n && n <= 15 {
+ return s390x.REG_AR0 + n, true
+ }
+ case "F":
+ if 0 <= n && n <= 15 {
+ return s390x.REG_F0 + n, true
+ }
+ case "R":
+ if 0 <= n && n <= 15 {
+ return s390x.REG_R0 + n, true
+ }
+ case "V":
+ if 0 <= n && n <= 31 {
+ return s390x.REG_V0 + n, true
+ }
+ }
+ return 0, false
+}
diff -pruN 1.6.3-1/src/cmd/asm/internal/asm/asm.go 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/asm.go
--- 1.6.3-1/src/cmd/asm/internal/asm/asm.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/asm.go 2016-07-21 13:36:09.000000000 +0000
@@ -381,6 +381,20 @@ func (p *Parser) asmJump(op int, cond st
prog.Reg = p.getRegister(prog, op, &a[1])
break
}
+ if p.arch.Thechar == 'z' {
+ // 3-operand jumps.
+ target = &a[2]
+ prog.From = a[0]
+ if a[1].Reg != 0 {
+ // compare two regs; jump.
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ } else {
+ // compare reg with imm; jump.
+ prog.From3 = newAddr(a[1])
+ }
+ break
+ }
+
fallthrough
default:
p.errorf("wrong number of arguments to %s instruction", obj.Aconv(op))
@@ -593,6 +607,15 @@ func (p *Parser) asmInstruction(op int,
p.errorf("invalid addressing modes for %s instruction", obj.Aconv(op))
return
}
+ case 'z':
+ if arch.IsS390xWithLength(op) || arch.IsS390xWithIndex(op) {
+ prog.From = a[1]
+ prog.From3 = newAddr(a[0])
+ } else {
+ prog.Reg = p.getRegister(prog, op, &a[1])
+ prog.From = a[0]
+ }
+ prog.To = a[2]
default:
p.errorf("TODO: implement three-operand instructions for this architecture")
return
@@ -628,6 +651,13 @@ func (p *Parser) asmInstruction(op int,
prog.To = a[3]
break
}
+ if p.arch.Thechar == 'z' {
+ prog.From = a[1]
+ prog.Reg = p.getRegister(prog, op, &a[2])
+ prog.From3 = newAddr(a[0])
+ prog.To = a[3]
+ break
+ }
p.errorf("can't handle %s instruction with 4 operands", obj.Aconv(op))
return
case 5:
diff -pruN 1.6.3-1/src/cmd/asm/internal/asm/endtoend_test.go 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/endtoend_test.go
--- 1.6.3-1/src/cmd/asm/internal/asm/endtoend_test.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/endtoend_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -389,3 +389,7 @@ func TestMIPS64EndToEnd(t *testing.T) {
func TestPPC64EndToEnd(t *testing.T) {
testEndToEnd(t, "ppc64", "ppc64")
}
+
+func TestS390XEndToEnd(t *testing.T) {
+ testEndToEnd(t, "s390x", "s390x")
+}
diff -pruN 1.6.3-1/src/cmd/asm/internal/asm/operand_test.go 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/operand_test.go
--- 1.6.3-1/src/cmd/asm/internal/asm/operand_test.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/operand_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -70,6 +70,11 @@ func TestMIPS64OperandParser(t *testing.
testOperandParser(t, parser, mips64OperandTests)
}
+func TestS390XOperandParser(t *testing.T) {
+ parser := newParser("s390x")
+ testOperandParser(t, parser, s390xOperandTests)
+}
+
type operandTest struct {
input, output string
}
@@ -518,6 +523,104 @@ var mips64OperandTests = []operandTest{
{"a(FP)", "a(FP)"},
{"g", "g"},
{"ret+8(FP)", "ret+8(FP)"},
+ {"runtime·abort(SB)", "runtime.abort(SB)"},
+ {"·AddUint32(SB)", "\"\".AddUint32(SB)"},
+ {"·trunc(SB)", "\"\".trunc(SB)"},
+ {"[):[o-FP", ""}, // Issue 12469 - asm hung parsing the o-FP range on non ARM platforms.
+}
+
+var s390xOperandTests = []operandTest{
+ {"$((1<<63)-1)", "$9223372036854775807"},
+ {"$(-64*1024)", "$-65536"},
+ {"$(1024 * 8)", "$8192"},
+ {"$-1", "$-1"},
+ {"$-24(R4)", "$-24(R4)"},
+ {"$0", "$0"},
+ {"$0(R1)", "$(R1)"},
+ {"$0.5", "$(0.5)"},
+ {"$0x7000", "$28672"},
+ {"$0x88888eef", "$2290650863"},
+ {"$1", "$1"},
+ {"$_main<>(SB)", "$_main<>(SB)"},
+ {"$argframe(FP)", "$argframe(FP)"},
+ {"$~3", "$-4"},
+ {"(-288-3*8)(R1)", "-312(R1)"},
+ {"(16)(R7)", "16(R7)"},
+ {"(8)(g)", "8(g)"},
+ {"(R0)", "(R0)"},
+ {"(R3)", "(R3)"},
+ {"(R4)", "(R4)"},
+ {"(R5)", "(R5)"},
+ {"-1(R4)", "-1(R4)"},
+ {"-1(R5)", "-1(R5)"},
+ {"6(PC)", "6(PC)"},
+ {"R0", "R0"},
+ {"R1", "R1"},
+ {"R2", "R2"},
+ {"R3", "R3"},
+ {"R4", "R4"},
+ {"R5", "R5"},
+ {"R6", "R6"},
+ {"R7", "R7"},
+ {"R8", "R8"},
+ {"R9", "R9"},
+ {"R10", "R10"},
+ {"R11", "R11"},
+ {"R12", "R12"},
+ // {"R13", "R13"}, R13 is g
+ {"R14", "R14"},
+ {"R15", "R15"},
+ {"F0", "F0"},
+ {"F1", "F1"},
+ {"F2", "F2"},
+ {"F3", "F3"},
+ {"F4", "F4"},
+ {"F5", "F5"},
+ {"F6", "F6"},
+ {"F7", "F7"},
+ {"F8", "F8"},
+ {"F9", "F9"},
+ {"F10", "F10"},
+ {"F11", "F11"},
+ {"F12", "F12"},
+ {"F13", "F13"},
+ {"F14", "F14"},
+ {"F15", "F15"},
+ {"V0", "V0"},
+ {"V1", "V1"},
+ {"V2", "V2"},
+ {"V3", "V3"},
+ {"V4", "V4"},
+ {"V5", "V5"},
+ {"V6", "V6"},
+ {"V7", "V7"},
+ {"V8", "V8"},
+ {"V9", "V9"},
+ {"V10", "V10"},
+ {"V11", "V11"},
+ {"V12", "V12"},
+ {"V13", "V13"},
+ {"V14", "V14"},
+ {"V15", "V15"},
+ {"V16", "V16"},
+ {"V17", "V17"},
+ {"V18", "V18"},
+ {"V19", "V19"},
+ {"V20", "V20"},
+ {"V21", "V21"},
+ {"V22", "V22"},
+ {"V23", "V23"},
+ {"V24", "V24"},
+ {"V25", "V25"},
+ {"V26", "V26"},
+ {"V27", "V27"},
+ {"V28", "V28"},
+ {"V29", "V29"},
+ {"V30", "V30"},
+ {"V31", "V31"},
+ {"a(FP)", "a(FP)"},
+ {"g", "g"},
+ {"ret+8(FP)", "ret+8(FP)"},
{"runtime·abort(SB)", "runtime.abort(SB)"},
{"·AddUint32(SB)", "\"\".AddUint32(SB)"},
{"·trunc(SB)", "\"\".trunc(SB)"},
diff -pruN 1.6.3-1/src/cmd/asm/internal/asm/testdata/s390x.s 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/testdata/s390x.s
--- 1.6.3-1/src/cmd/asm/internal/asm/testdata/s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/asm/internal/asm/testdata/s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,215 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+TEXT main·foo(SB),7,$16-0 // TEXT main.foo(SB), 7, $16-0
+ MOVD R1, R2 // b9040021
+ MOVW R3, R4 // b9140043
+ MOVH R5, R6 // b9070065
+ MOVB R7, R8 // b9060087
+ MOVWZ R1, R2 // b9160021
+ MOVHZ R2, R3 // b9850032
+ MOVBZ R4, R5 // b9840054
+ MOVDBR R1, R2 // b90f0021
+ MOVWBR R3, R4 // b91f0043
+
+ MOVD (R15), R1 // e310f0000004
+ MOVW (R15), R2 // e320f0000014
+ MOVH (R15), R3 // e330f0000015
+ MOVB (R15), R4 // e340f0000077
+ MOVWZ (R15), R5 // e350f0000016
+ MOVHZ (R15), R6 // e360f0000091
+ MOVBZ (R15), R7 // e370f0000090
+ MOVDBR (R15), R8 // e380f000000f
+ MOVWBR (R15), R9 // e390f000001e
+
+ MOVD R1, n-8(SP) // e310f0100024
+ MOVW R2, n-8(SP) // e320f0100050
+ MOVH R3, n-8(SP) // e330f0100070
+ MOVB R4, n-8(SP) // e340f0100072
+ MOVWZ R5, n-8(SP) // e350f0100050
+ MOVHZ R6, n-8(SP) // e360f0100070
+ MOVBZ R7, n-8(SP) // e370f0100072
+ MOVDBR R8, n-8(SP) // e380f010002f
+ MOVWBR R9, n-8(SP) // e390f010003e
+
+ MOVD $-8589934592, R1 // c01efffffffe
+ MOVW $-131072, R2 // c021fffe0000
+ MOVH $-512, R3 // a739fe00
+ MOVB $-1, R4 // a749ffff
+
+ MOVD $-2147483648, n-8(SP) // c0b180000000e3b0f0100024
+ MOVW $-131072, n-8(SP) // c0b1fffe0000e3b0f0100050
+ MOVH $-512, n-8(SP) // e544f010fe00
+ MOVB $-1, n-8(SP) // 92fff010
+
+ ADD R1, R2 // b9e81022
+ ADD R1, R2, R3 // b9e81032
+ ADD $8192, R1 // c21800002000
+ ADD $8192, R1, R2 // ec21200000d9
+ ADDC R1, R2 // b9ea1022
+ ADDC $1, R1, R2 // b9040021c22a00000001
+ ADDC R1, R2, R3 // b9ea1032
+ SUB R3, R4 // b9090043
+ SUB R3, R4, R5 // b9e93054
+ SUB $8192, R3 // c238ffffe000
+ SUB $8192, R3, R4 // ec43e00000d9
+ SUBC R1, R2 // b90b0021
+ SUBC $1, R1, R2 // b9040021c22affffffff
+ SUBC R2, R3, R4 // b9eb2043
+ MULLW R6, R7 // b91c0076
+ MULLW R6, R7, R8 // b9040087b91c0086
+ MULLW $8192, R6 // c26000002000
+ MULLW $8192, R6, R7 // b9040076c27000002000
+ DIVD R1, R2 // b90400b2b90d00a1b904002b
+ DIVD R1, R2, R3 // b90400b2b90d00a1b904003b
+ DIVW R4, R5 // b90400b5b91d00a4b904005b
+ DIVW R4, R5, R6 // b90400b5b91d00a4b904006b
+ DIVDU R7, R8 // b90400a0b90400b8b98700a7b904008b
+ DIVDU R7, R8, R9 // b90400a0b90400b8b98700a7b904009b
+ DIVWU R1, R2 // b90400a0b90400b2b99700a1b904002b
+ DIVWU R1, R2, R3 // b90400a0b90400b2b99700a1b904003b
+
+ XC $8, (R15), n-8(SP) // XC (R15), $8, n-8(SP) // d707f010f000
+ NC $8, (R15), n-8(SP) // NC (R15), $8, n-8(SP) // d407f010f000
+ OC $8, (R15), n-8(SP) // OC (R15), $8, n-8(SP) // d607f010f000
+ MVC $8, (R15), n-8(SP) // MVC (R15), $8, n-8(SP) // d207f010f000
+ CLC $8, (R15), n-8(SP) // CLC (R15), $8, n-8(SP) // d507f000f010
+ XC $256, -8(R15), -8(R15) // XC -8(R15), $256, -8(R15) // b90400afc2a8fffffff8d7ffa000a000
+ MVC $256, 8192(R1), 8192(R2) // MVC 8192(R1), $256, 8192(R2) // b90400a2c2a800002000b90400b1c2b800002000d2ffa000b000
+
+ CMP R1, R2 // b9200012
+ CMP R3, $-2147483648 // c23c80000000
+ CMPU R4, R5 // b9210045
+ CMPU R6, $4294967295 // c26effffffff
+ CMPW R7, R8 // 1978
+ CMPW R9, $-2147483648 // c29d80000000
+ CMPWU R1, R2 // 1512
+ CMPWU R3, $4294967295 // c23fffffffff
+
+ BNE 0(PC) // a7740000
+ BEQ 0(PC) // a7840000
+ BLT 0(PC) // a7440000
+ BLE 0(PC) // a7c40000
+ BGT 0(PC) // a7240000
+ BGE 0(PC) // a7a40000
+
+ CMPBNE R1, R2, 0(PC) // ec1200007064
+ CMPBEQ R3, R4, 0(PC) // ec3400008064
+ CMPBLT R5, R6, 0(PC) // ec5600004064
+ CMPBLE R7, R8, 0(PC) // ec780000c064
+ CMPBGT R9, R1, 0(PC) // ec9100002064
+ CMPBGE R2, R3, 0(PC) // ec230000a064
+
+ CMPBNE R1, $-127, 0(PC) // ec170000817c
+ CMPBEQ R3, $0, 0(PC) // ec380000007c
+ CMPBLT R5, $128, 0(PC) // ec540000807c
+ CMPBLE R7, $127, 0(PC) // ec7c00007f7c
+ CMPBGT R9, $0, 0(PC) // ec920000007c
+ CMPBGE R2, $128, 0(PC) // ec2a0000807c
+
+ CMPUBNE R1, R2, 0(PC) // ec1200007065
+ CMPUBEQ R3, R4, 0(PC) // ec3400008065
+ CMPUBLT R5, R6, 0(PC) // ec5600004065
+ CMPUBLE R7, R8, 0(PC) // ec780000c065
+ CMPUBGT R9, R1, 0(PC) // ec9100002065
+ CMPUBGE R2, R3, 0(PC) // ec230000a065
+
+ CMPUBNE R1, $256, 0(PC) // ec170000007d
+ CMPUBEQ R3, $0, 0(PC) // ec380000007d
+ CMPUBLT R5, $256, 0(PC) // ec540000007d
+ CMPUBLE R7, $0, 0(PC) // ec7c0000007d
+ CMPUBGT R9, $256, 0(PC) // ec920000007d
+ CMPUBGE R2, $0, 0(PC) // ec2a0000007d
+
+ CEFBRA R0, F15 // b39400f0
+ CDFBRA R1, F14 // b39500e1
+ CEGBRA R2, F13 // b3a400d2
+ CDGBRA R3, F12 // b3a500c3
+
+ CELFBR R0, F15 // b39000f0
+ CDLFBR R1, F14 // b39100e1
+ CELGBR R2, F13 // b3a000d2
+ CDLGBR R3, F12 // b3a100c3
+
+ CFEBRA F15, R1 // b398501f
+ CFDBRA F14, R2 // b399502e
+ CGEBRA F13, R3 // b3a8503d
+ CGDBRA F12, R4 // b3a9504c
+
+ CLFEBR F15, R1 // b39c501f
+ CLFDBR F14, R2 // b39d502e
+ CLGEBR F13, R3 // b3ac503d
+ CLGDBR F12, R4 // b3ad504c
+
+ FMOVS $0, F11 // b37400b0
+ FMOVD $0, F12 // b37500c0
+ FMOVS (R1)(R2*1), F0 // ed0210000064
+ FMOVS n-8(SP), F15 // edf0f0100064
+ FMOVD -9999999(R8)(R9*1), F8 // c0a1ff67698141aa9000ed8a80000065
+ FMOVD F4, F5 // 2854
+ FADDS F0, F15 // b30a00f0
+ FADD F1, F14 // b31a00e1
+ FSUBS F2, F13 // b30b00d2
+ FSUB F3, F12 // b31b00c3
+ FMULS F4, F11 // b31700b4
+ FMUL F5, F10 // b31c00a5
+ FDIVS F6, F9 // b30d0096
+ FDIV F7, F8 // b31d0087
+ FABS F1, F2 // b3100021
+ FSQRTS F3, F4 // b3140043
+ FSQRT F5, F15 // b31500f5
+
+ VL (R15), V1 // e710f0000006
+ VST V1, (R15) // e710f000000e
+ VL (R15), V31 // e7f0f0000806
+ VST V31, (R15) // e7f0f000080e
+ VESLB $5, V14 // e7ee00050030
+ VESRAG $0, V15, V16 // e70f0000383a
+ VLM (R15), V8, V23 // e787f0000436
+ VSTM V8, V23, (R15) // e787f000043e
+ VONE V1 // e710ffff0044
+ VZERO V16 // e70000000844
+ VGBM $52428, V31 // e7f0cccc0844
+ VREPIB $255, V4 // e74000ff0045
+ VREPG $1, V4, V16 // e7040001384d
+ VREPB $4, V31, V1 // e71f0004044d
+ VFTCIDB $4095, V1, V2 // e721fff0304a
+ WFTCIDB $3276, V15, V16 // e70fccc8384a
+ VPOPCT V8, V19 // e73800000850
+ VFEEZBS V1, V2, V31 // e7f120300880
+ WFCHDBS V22, V23, V4 // e746701836eb
+ VMNH V1, V2, V30 // e7e1200018fe
+ VO V2, V1, V0 // e7021000006a
+ VERLLVF V2, V30, V27 // e7be20002c73
+ VSCBIB V0, V23, V24 // e78700000cf5
+ VNOT V16, V1 // e7101000046b
+ VCLZF V16, V17 // e71000002c53
+ VLVGP R3, R4, V8 // e78340000062
+
+ // many vector instructions have their inputs reordered
+ // typically this is to put the length or index input into From3
+ VGEG $1, 8(R15)(V30*1), V31 // VGEG 8(R15)(V30*1), $1, V31 // e7fef0081c12
+ VSCEG $1, V31, 16(R15)(V30*1) // VSCEG V31, $1, 16(R15)(V30*1) // e7fef0101c1a
+ VGEF $0, 2048(R15)(V1*1), V2 // VGEF 2048(R15)(V1*1), $0, V2 // e721f8000013
+ VSCEF $0, V2, 4095(R15)(V1*1) // VSCEF V2, $0, 4095(R15)(V1*1) // e721ffff001b
+ VLL R0, (R15), V1 // VLL (R15), R0, V1 // e710f0000037
+ VSTL R0, V16, (R15) // VSTL V16, R0, (R15) // e700f000083f
+ VGMH $8, $16, V12 // VGMH $16, $8, V12 // e7c008101046
+ VLEIF $2, $-43, V16 // VLEIF $-43, $2, V16 // e700ffd52843
+ VSLDB $3, V1, V16, V18 // VSLDB V1, V16, $3, V18 // e72100030a77
+ VERIMB $2, V31, V1, V2 // VERIMB V31, V1, $2, V2 // e72f10020472
+ VSEL V1, V2, V3, V4 // VSEL V2, V3, V1, V4 // e7412000308d
+ VGFMAH V21, V31, V24, V0 // VGFMAH V31, V24, V21, V0 // e705f10087bc
+ WFMSDB V2, V25, V24, V31 // WFMSDB V25, V24, V2, V31 // e7f298038b8e
+ VPERM V31, V0, V2, V3 // VPERM V0, V2, V31, V3 // e73f0000248c
+ VPDI $1, V2, V31, V1 // VPDI V2, V31, $1, V1 // e712f0001284
+
+ RET
+
+TEXT main·init(SB),7,$0 // TEXT main.init(SB), 7, $0
+ RET
+
+TEXT main·main(SB),7,$0 // TEXT main.main(SB), 7, $0
+ BL main·foo(SB) // CALL main.foo(SB)
+ RET
diff -pruN 1.6.3-1/src/cmd/cgo/main.go 1.6.3-1ubuntu1/src/cmd/cgo/main.go
--- 1.6.3-1/src/cmd/cgo/main.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/cgo/main.go 2016-07-21 13:36:09.000000000 +0000
@@ -156,7 +156,7 @@ var intSizeMap = map[string]int64{
"ppc64": 8,
"ppc64le": 8,
"s390": 4,
- "s390x": 4,
+ "s390x": 8,
}
var cPrefix string
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/cgen.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/cgen.go
--- 1.6.3-1/src/cmd/compile/internal/gc/cgen.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/cgen.go 2016-07-21 13:36:09.000000000 +0000
@@ -247,7 +247,7 @@ func cgen_wb(n, res *Node, wb bool) {
return
}
- if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8') && n.Addable {
+ if (Ctxt.Arch.Thechar == '6' || Ctxt.Arch.Thechar == '8' || Ctxt.Arch.Thechar == 'z') && n.Addable {
Thearch.Gmove(n, res)
return
}
@@ -1832,7 +1832,7 @@ func bgenx(n, res *Node, wantTrue bool,
// but they don't support direct generation of a bool value yet.
// We can fix that as we go.
switch Ctxt.Arch.Thechar {
- case '0', '5', '7', '9':
+ case '0', '5', '7', '9', 'z':
Fatalf("genval 0g, 5g, 7g, 9g ONAMES not fully implemented")
}
Cgen(n, res)
@@ -1842,7 +1842,7 @@ func bgenx(n, res *Node, wantTrue bool,
return
}
- if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' {
+ if n.Addable && Ctxt.Arch.Thechar != '0' && Ctxt.Arch.Thechar != '5' && Ctxt.Arch.Thechar != '7' && Ctxt.Arch.Thechar != '9' && Ctxt.Arch.Thechar != 'z' {
// no need for a temporary
bgenNonZero(n, nil, wantTrue, likely, to)
return
@@ -2640,7 +2640,7 @@ func cgen_div(op Op, nl *Node, nr *Node,
// in peep and optoas in order to enable this.
// TODO(rsc): ppc64 needs to support the relevant instructions
// in peep and optoas in order to enable this.
- if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' {
+ if nr.Op != OLITERAL || Ctxt.Arch.Thechar == '0' || Ctxt.Arch.Thechar == '7' || Ctxt.Arch.Thechar == '9' || Ctxt.Arch.Thechar == 'z' {
goto longdiv
}
w = int(nl.Type.Width * 8)
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/gsubr.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/gsubr.go
--- 1.6.3-1/src/cmd/compile/internal/gc/gsubr.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/gsubr.go 2016-07-21 13:36:09.000000000 +0000
@@ -57,7 +57,7 @@ func Ismem(n *Node) bool {
return true
case OADDR:
- return Thearch.Thechar == '6' || Thearch.Thechar == '9' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
+ return Thearch.Thechar == '6' || Thearch.Thechar == '9' || Ctxt.Arch.Thechar == 'z' // because 6g uses PC-relative addressing; TODO(rsc): not sure why 9g too
}
return false
@@ -83,7 +83,7 @@ func Gbranch(as int, t *Type, likely int
p := Prog(as)
p.To.Type = obj.TYPE_BRANCH
p.To.Val = nil
- if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' {
+ if as != obj.AJMP && likely != 0 && Thearch.Thechar != '9' && Thearch.Thechar != '7' && Thearch.Thechar != '0' && Ctxt.Arch.Thechar != 'z' {
p.From.Type = obj.TYPE_CONST
if likely > 0 {
p.From.Offset = 1
@@ -449,7 +449,7 @@ func Naddr(a *obj.Addr, n *Node) {
case OADDR:
Naddr(a, n.Left)
a.Etype = uint8(Tptr)
- if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' { // TODO(rsc): Do this even for arm, ppc64.
+ if Thearch.Thechar != '0' && Thearch.Thechar != '5' && Thearch.Thechar != '7' && Thearch.Thechar != '9' && Thearch.Thechar != 'z' { // TODO(rsc): Do this even for arm, ppc64.
a.Width = int64(Widthptr)
}
if a.Type != obj.TYPE_MEM {
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/lex.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/lex.go
--- 1.6.3-1/src/cmd/compile/internal/gc/lex.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/lex.go 2016-07-21 13:36:09.000000000 +0000
@@ -216,14 +216,14 @@ func Main() {
var flag_shared int
var flag_dynlink bool
switch Thearch.Thechar {
- case '5', '6', '7', '8', '9':
+ case '5', '6', '7', '8', '9', 'z':
obj.Flagcount("shared", "generate code that can be linked into a shared library", &flag_shared)
}
if Thearch.Thechar == '6' {
obj.Flagcount("largemodel", "generate code that assumes a large memory model", &flag_largemodel)
}
switch Thearch.Thechar {
- case '5', '6', '7', '8', '9':
+ case '5', '6', '7', '8', '9', 'z':
flag.BoolVar(&flag_dynlink, "dynlink", false, "support references to Go symbols defined in other shared libraries")
}
obj.Flagstr("cpuprofile", "write cpu profile to `file`", &cpuprofile)
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/pgen.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/pgen.go
--- 1.6.3-1/src/cmd/compile/internal/gc/pgen.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/pgen.go 2016-07-21 13:36:09.000000000 +0000
@@ -293,7 +293,7 @@ func allocauto(ptxt *obj.Prog) {
if haspointers(n.Type) {
stkptrsize = Stksize
}
- if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z' {
Stksize = Rnd(Stksize, int64(Widthptr))
}
if Stksize >= 1<<31 {
@@ -330,7 +330,7 @@ func Cgen_checknil(n *Node) {
Fatalf("bad checknil")
}
- if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
+ if ((Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z') && n.Op != OREGISTER) || !n.Addable || n.Op == OLITERAL {
var reg Node
Regalloc(®, Types[Tptr], n)
Cgen(n, ®)
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/reg.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/reg.go
--- 1.6.3-1/src/cmd/compile/internal/gc/reg.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/reg.go 2016-07-21 13:36:09.000000000 +0000
@@ -249,7 +249,7 @@ func addmove(r *Flow, bn int, rn int, f
p1.As = int16(Thearch.Optoas(OAS, Types[uint8(v.etype)]))
// TODO(rsc): Remove special case here.
- if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9') && v.etype == TBOOL {
+ if (Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z') && v.etype == TBOOL {
p1.As = int16(Thearch.Optoas(OAS, Types[TUINT8]))
}
p1.From.Type = obj.TYPE_REG
@@ -302,7 +302,7 @@ func mkvar(f *Flow, a *obj.Addr) Bits {
// TODO(rsc): Remove special case here.
case obj.TYPE_ADDR:
var bit Bits
- if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' {
+ if Thearch.Thechar == '0' || Thearch.Thechar == '5' || Thearch.Thechar == '7' || Thearch.Thechar == '9' || Thearch.Thechar == 'z' {
goto memcase
}
a.Type = obj.TYPE_MEM
@@ -1114,7 +1114,7 @@ func regopt(firstp *obj.Prog) {
// Currently we never generate three register forms.
// If we do, this will need to change.
- if p.From3Type() != obj.TYPE_NONE {
+ if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
Fatalf("regopt not implemented for from3")
}
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/subr.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/subr.go
--- 1.6.3-1/src/cmd/compile/internal/gc/subr.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/subr.go 2016-07-21 13:36:09.000000000 +0000
@@ -2357,7 +2357,7 @@ func genwrapper(rcvr *Type, method *Type
dot := adddot(Nod(OXDOT, this.Left, newname(method.Sym)))
// generate call
- if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) {
+ if !instrumenting && Isptr[rcvr.Etype] && Isptr[methodrcvr.Etype] && method.Embedded != 0 && !isifacemethod(method.Type) && !(Thearch.Thechar == '9' && Ctxt.Flag_dynlink) {
// generate tail call: adjust pointer receiver and jump to embedded method.
dot = dot.Left // skip final .M
if !Isptr[dotlist[0].field.Type.Etype] {
diff -pruN 1.6.3-1/src/cmd/compile/internal/gc/walk.go 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/walk.go
--- 1.6.3-1/src/cmd/compile/internal/gc/walk.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/gc/walk.go 2016-07-21 13:36:09.000000000 +0000
@@ -617,7 +617,7 @@ opswitch:
if n.Left.Op == ONAME && n.Left.Sym.Name == "Sqrt" && n.Left.Sym.Pkg.Path == "math" {
switch Thearch.Thechar {
- case '5', '6', '7':
+ case '5', '6', '7', 'z':
n.Op = OSQRT
n.Left = n.List.N
n.List = nil
@@ -3307,6 +3307,11 @@ func walkrotate(np **Node) {
// Constants adding to width?
w := int(l.Type.Width * 8)
+ if Thearch.Thechar == 'z' && w != 32 && w != 64 {
+ // only supports 32-bit and 64-bit rotates
+ return
+ }
+
if Smallintconst(l.Right) && Smallintconst(r.Right) {
sl := int(Mpgetfix(l.Right.Val().U.(*Mpint)))
if sl >= 0 {
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/cgen.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/cgen.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/cgen.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/cgen.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,178 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+type direction int
+
+const (
+ _FORWARDS direction = iota
+ _BACKWARDS
+)
+
+// blockcopy copies w bytes from &n to &res
+func blockcopy(n, res *gc.Node, osrc, odst, w int64) {
+ var dst gc.Node
+ var src gc.Node
+ if n.Ullman >= res.Ullman {
+ gc.Agenr(n, &dst, res) // temporarily use dst
+ gc.Regalloc(&src, gc.Types[gc.Tptr], nil)
+ gins(s390x.AMOVD, &dst, &src)
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ gc.Agen(res, &dst)
+ } else {
+ if res.Op == gc.ONAME {
+ gc.Gvardef(res)
+ }
+ gc.Agenr(res, &dst, res)
+ gc.Agenr(n, &src, nil)
+ }
+ defer gc.Regfree(&src)
+ defer gc.Regfree(&dst)
+
+ var tmp gc.Node
+ gc.Regalloc(&tmp, gc.Types[gc.Tptr], nil)
+ defer gc.Regfree(&tmp)
+
+ offset := int64(0)
+ dir := _FORWARDS
+ if osrc < odst && odst < osrc+w {
+ // Reverse. Can't use MVC, fall back onto basic moves.
+ dir = _BACKWARDS
+ const copiesPerIter = 2
+ if w >= 8*copiesPerIter {
+ cnt := w - (w % (8 * copiesPerIter))
+ ginscon(s390x.AADD, w, &src)
+ ginscon(s390x.AADD, w, &dst)
+
+ var end gc.Node
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+ p := gins(s390x.ASUB, nil, &end)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = cnt
+ p.Reg = src.Reg
+
+ var label *obj.Prog
+ for i := 0; i < copiesPerIter; i++ {
+ offset := int64(-8 * (i + 1))
+ p := gins(s390x.AMOVD, &src, &tmp)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = offset
+ if i == 0 {
+ label = p
+ }
+ p = gins(s390x.AMOVD, &tmp, &dst)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = offset
+ }
+
+ ginscon(s390x.ASUB, 8*copiesPerIter, &src)
+ ginscon(s390x.ASUB, 8*copiesPerIter, &dst)
+ gins(s390x.ACMP, &src, &end)
+ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), label)
+ gc.Regfree(&end)
+
+ w -= cnt
+ } else {
+ offset = w
+ }
+ }
+
+ if dir == _FORWARDS && w > 1024 {
+ // Loop over MVCs
+ cnt := w - (w % 256)
+
+ var end gc.Node
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+ add := gins(s390x.AADD, nil, &end)
+ add.From.Type = obj.TYPE_CONST
+ add.From.Offset = cnt
+ add.Reg = src.Reg
+
+ mvc := gins(s390x.AMVC, &src, &dst)
+ mvc.From.Type = obj.TYPE_MEM
+ mvc.From.Offset = 0
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Offset = 0
+ mvc.From3 = new(obj.Addr)
+ mvc.From3.Type = obj.TYPE_CONST
+ mvc.From3.Offset = 256
+
+ ginscon(s390x.AADD, 256, &src)
+ ginscon(s390x.AADD, 256, &dst)
+ gins(s390x.ACMP, &src, &end)
+ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), mvc)
+ gc.Regfree(&end)
+
+ w -= cnt
+ }
+
+ for w > 0 {
+ cnt := w
+ // If in reverse we can only do 8, 4, 2 or 1 bytes at a time.
+ if dir == _BACKWARDS {
+ switch {
+ case cnt >= 8:
+ cnt = 8
+ case cnt >= 4:
+ cnt = 4
+ case cnt >= 2:
+ cnt = 2
+ }
+ } else if cnt > 256 {
+ cnt = 256
+ }
+
+ switch cnt {
+ case 8, 4, 2, 1:
+ op := s390x.AMOVB
+ switch cnt {
+ case 8:
+ op = s390x.AMOVD
+ case 4:
+ op = s390x.AMOVW
+ case 2:
+ op = s390x.AMOVH
+ }
+ load := gins(op, &src, &tmp)
+ load.From.Type = obj.TYPE_MEM
+ load.From.Offset = offset
+
+ store := gins(op, &tmp, &dst)
+ store.To.Type = obj.TYPE_MEM
+ store.To.Offset = offset
+
+ if dir == _BACKWARDS {
+ load.From.Offset -= cnt
+ store.To.Offset -= cnt
+ }
+
+ default:
+ p := gins(s390x.AMVC, &src, &dst)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = offset
+ p.From3 = new(obj.Addr)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = cnt
+ }
+
+ switch dir {
+ case _FORWARDS:
+ offset += cnt
+ case _BACKWARDS:
+ offset -= cnt
+ }
+ w -= cnt
+ }
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/galign.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/galign.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/galign.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/galign.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,95 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+var thechar int = 'z'
+
+var thestring string = "s390x"
+
+var thelinkarch *obj.LinkArch
+
+func linkarchinit() {
+ thestring = obj.Getgoarch()
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = &s390x.Links390x
+}
+
+var MAXWIDTH int64 = 1 << 50
+
+/*
+ * go declares several platform-specific type aliases:
+ * int, uint, and uintptr
+ */
+var typedefs = []gc.Typedef{
+ gc.Typedef{"int", gc.TINT, gc.TINT64},
+ gc.Typedef{"uint", gc.TUINT, gc.TUINT64},
+ gc.Typedef{"uintptr", gc.TUINTPTR, gc.TUINT64},
+}
+
+func betypeinit() {
+ gc.Widthptr = 8
+ gc.Widthint = 8
+ gc.Widthreg = 8
+}
+
+func Main() {
+ gc.Thearch.Thechar = thechar
+ gc.Thearch.Thestring = thestring
+ gc.Thearch.Thelinkarch = thelinkarch
+ gc.Thearch.Typedefs = typedefs
+ gc.Thearch.REGSP = s390x.REGSP
+ gc.Thearch.REGCTXT = s390x.REGCTXT
+ gc.Thearch.REGCALLX = s390x.REG_R3
+ gc.Thearch.REGCALLX2 = s390x.REG_R4
+ gc.Thearch.REGRETURN = s390x.REG_R3
+ gc.Thearch.REGMIN = s390x.REG_R0
+ gc.Thearch.REGMAX = s390x.REG_R15
+ gc.Thearch.FREGMIN = s390x.REG_F0
+ gc.Thearch.FREGMAX = s390x.REG_F15
+ gc.Thearch.MAXWIDTH = MAXWIDTH
+ gc.Thearch.ReservedRegs = resvd
+
+ gc.Thearch.Betypeinit = betypeinit
+ gc.Thearch.Cgen_hmul = cgen_hmul
+ gc.Thearch.Cgen_shift = cgen_shift
+ gc.Thearch.Clearfat = clearfat
+ gc.Thearch.Defframe = defframe
+ gc.Thearch.Dodiv = dodiv
+ gc.Thearch.Excise = excise
+ gc.Thearch.Expandchecks = expandchecks
+ gc.Thearch.Getg = getg
+ gc.Thearch.Gins = gins
+ gc.Thearch.Ginscmp = ginscmp
+ gc.Thearch.Ginscon = ginscon
+ gc.Thearch.Ginsnop = ginsnop
+ gc.Thearch.Gmove = gmove
+ gc.Thearch.Linkarchinit = linkarchinit
+ gc.Thearch.Peep = peep
+ gc.Thearch.Proginfo = proginfo
+ gc.Thearch.Regtyp = regtyp
+ gc.Thearch.Sameaddr = sameaddr
+ gc.Thearch.Smallindir = smallindir
+ gc.Thearch.Stackaddr = stackaddr
+ gc.Thearch.Blockcopy = blockcopy
+ gc.Thearch.Sudoaddable = sudoaddable
+ gc.Thearch.Sudoclean = sudoclean
+ gc.Thearch.Excludedregs = excludedregs
+ gc.Thearch.RtoB = RtoB
+ gc.Thearch.FtoB = RtoB
+ gc.Thearch.BtoR = BtoR
+ gc.Thearch.BtoF = BtoF
+ gc.Thearch.Optoas = optoas
+ gc.Thearch.Doregbits = doregbits
+ gc.Thearch.Regnames = regnames
+
+ gc.Main()
+ gc.Exit(0)
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/ggen.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/ggen.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/ggen.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/ggen.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,578 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "fmt"
+)
+
+// clearLoopCutOff is the (somewhat arbitrary) value above which it is better
+// to have a loop of clear instructions (e.g. XCs) rather than just generating
+// multiple instructions (i.e. loop unrolling).
+// Must be between 256 and 4096.
+const clearLoopCutoff = 1024
+
+func defframe(ptxt *obj.Prog) {
+ var n *gc.Node
+
+ // fill in argument size, stack size
+ ptxt.To.Type = obj.TYPE_TEXTSIZE
+
+ ptxt.To.Val = int32(gc.Rnd(gc.Curfn.Type.Argwid, int64(gc.Widthptr)))
+ frame := uint32(gc.Rnd(gc.Stksize+gc.Maxarg, int64(gc.Widthreg)))
+ ptxt.To.Offset = int64(frame)
+
+ // insert code to zero ambiguously live variables
+ // so that the garbage collector only sees initialized values
+ // when it looks for pointers.
+ p := ptxt
+
+ hi := int64(0)
+ lo := hi
+
+ // iterate through declarations - they are sorted in decreasing xoffset order.
+ for l := gc.Curfn.Func.Dcl; l != nil; l = l.Next {
+ n = l.N
+ if !n.Name.Needzero {
+ continue
+ }
+ if n.Class != gc.PAUTO {
+ gc.Fatalf("needzero class %d", n.Class)
+ }
+ if n.Type.Width%int64(gc.Widthptr) != 0 || n.Xoffset%int64(gc.Widthptr) != 0 || n.Type.Width == 0 {
+ gc.Fatalf("var %v has size %d offset %d", gc.Nconv(n, obj.FmtLong), int(n.Type.Width), int(n.Xoffset))
+ }
+
+ if lo != hi && n.Xoffset+n.Type.Width >= lo-int64(2*gc.Widthreg) {
+ // merge with range we already have
+ lo = n.Xoffset
+
+ continue
+ }
+
+ // zero old range
+ p = zerorange(p, int64(frame), lo, hi)
+
+ // set new range
+ hi = n.Xoffset + n.Type.Width
+
+ lo = n.Xoffset
+ }
+
+ // zero final range
+ zerorange(p, int64(frame), lo, hi)
+}
+
+// zerorange clears the stack in the given range.
+func zerorange(p *obj.Prog, frame int64, lo int64, hi int64) *obj.Prog {
+ cnt := hi - lo
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ frame += gc.Ctxt.FixedFrameSize()
+ offset := frame + lo
+ reg := s390x.REGSP
+
+ // If the offset cannot fit in a 12-bit unsigned displacement then we
+ // need to create a copy of the stack pointer that we can adjust.
+ // We also need to do this if we are going to loop.
+ if offset < 0 || offset > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
+ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset, obj.TYPE_REG, s390x.REGRT1, 0)
+ p.Reg = int16(s390x.REGSP)
+ reg = s390x.REGRT1
+ offset = 0
+ }
+
+ // Generate a loop of large clears.
+ if cnt > clearLoopCutoff {
+ n := cnt - (cnt % 256)
+ end := s390x.REGRT2
+ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, offset+n, obj.TYPE_REG, end, 0)
+ p.Reg = int16(reg)
+ p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
+ p.From3 = new(obj.Addr)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = 256
+ pl := p
+ p = appendpp(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = appendpp(p, s390x.ACMP, obj.TYPE_REG, reg, 0, obj.TYPE_REG, end, 0)
+ p = appendpp(p, s390x.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ gc.Patch(p, pl)
+
+ cnt -= n
+ }
+
+ // Generate remaining clear instructions without a loop.
+ for cnt > 0 {
+ n := cnt
+
+ // Can clear at most 256 bytes per instruction.
+ if n > 256 {
+ n = 256
+ }
+
+ switch n {
+ // Handle very small clears with move instructions.
+ case 8, 4, 2, 1:
+ ins := s390x.AMOVB
+ switch n {
+ case 8:
+ ins = s390x.AMOVD
+ case 4:
+ ins = s390x.AMOVW
+ case 2:
+ ins = s390x.AMOVH
+ }
+ p = appendpp(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, offset)
+
+ // Handle clears that would require multiple move instructions with XC.
+ default:
+ p = appendpp(p, s390x.AXC, obj.TYPE_MEM, reg, offset, obj.TYPE_MEM, reg, offset)
+ p.From3 = new(obj.Addr)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = n
+ }
+
+ cnt -= n
+ offset += n
+ }
+
+ return p
+}
+
+func appendpp(p *obj.Prog, as int, ftype int, freg int, foffset int64, ttype int, treg int, toffset int64) *obj.Prog {
+ q := gc.Ctxt.NewProg()
+ gc.Clearp(q)
+ q.As = int16(as)
+ q.Lineno = p.Lineno
+ q.From.Type = int16(ftype)
+ q.From.Reg = int16(freg)
+ q.From.Offset = foffset
+ q.To.Type = int16(ttype)
+ q.To.Reg = int16(treg)
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func ginsnop() {
+ var reg gc.Node
+ gc.Nodreg(&reg, gc.Types[gc.TINT], s390x.REG_R0)
+ gins(s390x.AOR, &reg, &reg)
+}
+
+var panicdiv *gc.Node
+
+/*
+ * generate division.
+ * generates one of:
+ * res = nl / nr
+ * res = nl % nr
+ * according to op.
+ */
+func dodiv(op gc.Op, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ // Have to be careful about handling
+ // most negative int divided by -1 correctly.
+ // The hardware will generate undefined result.
+ // Also need to explicitly trap on division on zero,
+ // the hardware will silently generate undefined result.
+ // DIVW will leave unpredicable result in higher 32-bit,
+ // so always use DIVD/DIVDU.
+ t := nl.Type
+
+ t0 := t
+ check := 0
+ if gc.Issigned[t.Etype] {
+ check = 1
+ if gc.Isconst(nl, gc.CTINT) && nl.Int() != -(1<<uint64(t.Width*8-1)) {
+ check = 0
+ } else if gc.Isconst(nr, gc.CTINT) && nr.Int() != -1 {
+ check = 0
+ }
+ }
+
+ if t.Width < 8 {
+ if gc.Issigned[t.Etype] {
+ t = gc.Types[gc.TINT64]
+ } else {
+ t = gc.Types[gc.TUINT64]
+ }
+ check = 0
+ }
+
+ a := optoas(gc.ODIV, t)
+
+ var tl gc.Node
+ gc.Regalloc(&tl, t0, nil)
+ var tr gc.Node
+ gc.Regalloc(&tr, t0, nil)
+ if nl.Ullman >= nr.Ullman {
+ gc.Cgen(nl, &tl)
+ gc.Cgen(nr, &tr)
+ } else {
+ gc.Cgen(nr, &tr)
+ gc.Cgen(nl, &tl)
+ }
+
+ if t != t0 {
+ // Convert
+ tl2 := tl
+
+ tr2 := tr
+ tl.Type = t
+ tr.Type = t
+ gmove(&tl2, &tl)
+ gmove(&tr2, &tr)
+ }
+
+ // Handle divide-by-zero panic.
+ p1 := gins(optoas(gc.OCMP, t), &tr, nil)
+
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = s390x.REGZERO
+ p1 = gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if panicdiv == nil {
+ panicdiv = gc.Sysfunc("panicdivide")
+ }
+ gc.Ginscall(panicdiv, -1)
+ gc.Patch(p1, gc.Pc)
+
+ var p2 *obj.Prog
+ if check != 0 {
+ var nm1 gc.Node
+ gc.Nodconst(&nm1, t, -1)
+ gins(optoas(gc.OCMP, t), &tr, &nm1)
+ p1 := gc.Gbranch(optoas(gc.ONE, t), nil, +1)
+ if op == gc.ODIV {
+ // a / (-1) is -a.
+ gins(optoas(gc.OMINUS, t), nil, &tl)
+
+ gmove(&tl, res)
+ } else {
+ // a % (-1) is 0.
+ var nz gc.Node
+ gc.Nodconst(&nz, t, 0)
+
+ gmove(&nz, res)
+ }
+
+ p2 = gc.Gbranch(obj.AJMP, nil, 0)
+ gc.Patch(p1, gc.Pc)
+ }
+
+ p1 = gins(a, &tr, &tl)
+ if op == gc.ODIV {
+ gc.Regfree(&tr)
+ gmove(&tl, res)
+ } else {
+ // A%B = A-(A/B*B)
+ var tm gc.Node
+ gc.Regalloc(&tm, t, nil)
+
+ // patch div to use the 3 register form
+ // TODO(minux): add gins3?
+ p1.Reg = p1.To.Reg
+
+ p1.To.Reg = tm.Reg
+ gins(optoas(gc.OMUL, t), &tr, &tm)
+ gc.Regfree(&tr)
+ gins(optoas(gc.OSUB, t), &tm, &tl)
+ gc.Regfree(&tm)
+ gmove(&tl, res)
+ }
+
+ gc.Regfree(&tl)
+ if check != 0 {
+ gc.Patch(p2, gc.Pc)
+ }
+}
+
+/*
+ * generate high multiply:
+ * res = (nl*nr) >> width
+ */
+func cgen_hmul(nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ // largest ullman on left.
+ if nl.Ullman < nr.Ullman {
+ nl, nr = nr, nl
+ }
+
+ t := nl.Type
+ w := int(t.Width) * 8
+ var n1 gc.Node
+ gc.Cgenr(nl, &n1, res)
+ var n2 gc.Node
+ gc.Cgenr(nr, &n2, nil)
+ switch gc.Simtype[t.Etype] {
+ case gc.TINT8,
+ gc.TINT16,
+ gc.TINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p := gins(s390x.ASRAD, nil, &n1)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TUINT8,
+ gc.TUINT16,
+ gc.TUINT32:
+ gins(optoas(gc.OMUL, t), &n2, &n1)
+ p := gins(s390x.ASRD, nil, &n1)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(w)
+
+ case gc.TINT64,
+ gc.TUINT64:
+ gins(s390x.AMULHDU, &n2, &n1)
+
+ default:
+ gc.Fatalf("cgen_hmul %v", t)
+ }
+
+ gc.Cgen(&n1, res)
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
+}
+
+/*
+ * generate shift according to op, one of:
+ * res = nl << nr
+ * res = nl >> nr
+ */
+func cgen_shift(op gc.Op, bounded bool, nl *gc.Node, nr *gc.Node, res *gc.Node) {
+ a := optoas(op, nl.Type)
+
+ if nr.Op == gc.OLITERAL {
+ var n1 gc.Node
+ gc.Regalloc(&n1, nl.Type, res)
+ gc.Cgen(nl, &n1)
+ sc := uint64(nr.Int())
+ if sc >= uint64(nl.Type.Width*8) {
+ // large shift gets 2 shifts by width-1
+ var n3 gc.Node
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+
+ gins(a, &n3, &n1)
+ gins(a, &n3, &n1)
+ } else {
+ gins(a, nr, &n1)
+ }
+ gmove(&n1, res)
+ gc.Regfree(&n1)
+ return
+ }
+
+ if nl.Ullman >= gc.UINF {
+ var n4 gc.Node
+ gc.Tempname(&n4, nl.Type)
+ gc.Cgen(nl, &n4)
+ nl = &n4
+ }
+
+ if nr.Ullman >= gc.UINF {
+ var n5 gc.Node
+ gc.Tempname(&n5, nr.Type)
+ gc.Cgen(nr, &n5)
+ nr = &n5
+ }
+
+ // Allow either uint32 or uint64 as shift type,
+ // to avoid unnecessary conversion from uint32 to uint64
+ // just to do the comparison.
+ tcount := gc.Types[gc.Simtype[nr.Type.Etype]]
+
+ if tcount.Etype < gc.TUINT32 {
+ tcount = gc.Types[gc.TUINT32]
+ }
+
+ var n1 gc.Node
+ gc.Regalloc(&n1, nr.Type, nil) // to hold the shift type in CX
+ var n3 gc.Node
+ gc.Regalloc(&n3, tcount, &n1) // to clear high bits of CX
+
+ var n2 gc.Node
+ gc.Regalloc(&n2, nl.Type, res)
+
+ if nl.Ullman >= nr.Ullman {
+ gc.Cgen(nl, &n2)
+ gc.Cgen(nr, &n1)
+ gmove(&n1, &n3)
+ } else {
+ gc.Cgen(nr, &n1)
+ gmove(&n1, &n3)
+ gc.Cgen(nl, &n2)
+ }
+
+ gc.Regfree(&n3)
+
+ // test and fix up large shifts
+ if !bounded {
+ gc.Nodconst(&n3, tcount, nl.Type.Width*8)
+ gins(optoas(gc.OCMP, tcount), &n1, &n3)
+ p1 := gc.Gbranch(optoas(gc.OLT, tcount), nil, 1)
+ if op == gc.ORSH && gc.Issigned[nl.Type.Etype] {
+ gc.Nodconst(&n3, gc.Types[gc.TUINT32], nl.Type.Width*8-1)
+ gins(a, &n3, &n2)
+ } else {
+ gc.Nodconst(&n3, nl.Type, 0)
+ gmove(&n3, &n2)
+ }
+
+ gc.Patch(p1, gc.Pc)
+ }
+
+ gins(a, &n1, &n2)
+
+ gmove(&n2, res)
+
+ gc.Regfree(&n1)
+ gc.Regfree(&n2)
+}
+
+// clearfat clears (i.e. replaces with zeros) the value pointed to by nl.
+func clearfat(nl *gc.Node) {
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("clearfat %v (%v, size: %d)\n", nl, nl.Type, nl.Type.Width)
+ }
+
+ // Avoid taking the address for simple enough types.
+ if gc.Componentgen(nil, nl) {
+ return
+ }
+
+ var dst gc.Node
+ gc.Regalloc(&dst, gc.Types[gc.Tptr], nil)
+ gc.Agen(nl, &dst)
+
+ var boff int64
+ w := nl.Type.Width
+ if w > clearLoopCutoff {
+ // Generate a loop clearing 256 bytes per iteration using XCs.
+ var end gc.Node
+ gc.Regalloc(&end, gc.Types[gc.Tptr], nil)
+ p := gins(s390x.AMOVD, &dst, &end)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = w - (w % 256)
+
+ p = gins(s390x.AXC, &dst, &dst)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 0
+ p.From3 = new(obj.Addr)
+ p.From3.Offset = 256
+ p.From3.Type = obj.TYPE_CONST
+ pl := p
+
+ ginscon(s390x.AADD, 256, &dst)
+ gins(s390x.ACMP, &dst, &end)
+ gc.Patch(gc.Gbranch(s390x.ABNE, nil, 0), pl)
+ gc.Regfree(&end)
+ w = w % 256
+ }
+
+ // Generate instructions to clear the remaining memory.
+ for w > 0 {
+ n := w
+
+ // Can clear at most 256 bytes per instruction.
+ if n > 256 {
+ n = 256
+ }
+
+ switch n {
+ // Handle very small clears using moves.
+ case 8, 4, 2, 1:
+ ins := s390x.AMOVB
+ switch n {
+ case 8:
+ ins = s390x.AMOVD
+ case 4:
+ ins = s390x.AMOVW
+ case 2:
+ ins = s390x.AMOVH
+ }
+ p := gins(ins, nil, &dst)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = boff
+
+ // Handle clears that would require multiple moves with a XC.
+ default:
+ p := gins(s390x.AXC, &dst, &dst)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = boff
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = boff
+ p.From3 = new(obj.Addr)
+ p.From3.Offset = n
+ p.From3.Type = obj.TYPE_CONST
+ }
+
+ boff += n
+ w -= n
+ }
+
+ gc.Regfree(&dst)
+}
+
+// Called after regopt and peep have run.
+// Expand CHECKNIL pseudo-op into actual nil pointer check.
+func expandchecks(firstp *obj.Prog) {
+ for p := firstp; p != nil; p = p.Link {
+ if gc.Debug_checknil != 0 && gc.Ctxt.Debugvlog != 0 {
+ fmt.Printf("expandchecks: %v\n", p)
+ }
+ if p.As != obj.ACHECKNIL {
+ continue
+ }
+ if gc.Debug_checknil != 0 && p.Lineno > 1 { // p->lineno==1 in generated wrappers
+ gc.Warnl(int(p.Lineno), "generated nil check")
+ }
+ if p.From.Type != obj.TYPE_REG {
+ gc.Fatalf("invalid nil check %v\n", p)
+ }
+
+ // check is
+ // CMPBNE arg, $0, 2(PC) [likely]
+ // MOVD R0, 0(R0)
+ p1 := gc.Ctxt.NewProg()
+
+ gc.Clearp(p1)
+ p1.Link = p.Link
+ p.Link = p1
+ p1.Lineno = p.Lineno
+ p1.Pc = 9999
+ p.As = s390x.ACMPBNE
+ p.From3 = new(obj.Addr)
+ p.From3.Type = obj.TYPE_CONST
+ p.From3.Offset = 0
+
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Val = p1.Link
+
+ // crash by write to memory address 0.
+ p1.As = s390x.AMOVD
+
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = s390x.REGZERO
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = s390x.REGZERO
+ p1.To.Offset = 0
+ }
+}
+
+// res = runtime.getg()
+func getg(res *gc.Node) {
+ var n1 gc.Node
+ gc.Nodreg(&n1, res.Type, s390x.REGG)
+ gmove(&n1, res)
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/gsubr.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/gsubr.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/gsubr.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/gsubr.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1136 @@
+// Derived from Inferno utils/6c/txt.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/compile/internal/big"
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "fmt"
+)
+
+var resvd = []int{
+ s390x.REGZERO, // R0
+ s390x.REGTMP, // R10
+ s390x.REGTMP2, // R11
+ s390x.REGCTXT, // R12
+ s390x.REGG, // R13
+ s390x.REG_LR, // R14
+ s390x.REGSP, // R15
+}
+
+// generate
+// as $c, n
+func ginscon(as int, c int64, n2 *gc.Node) {
+ var n1 gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ if as != s390x.AMOVD && (c < -s390x.BIG || c > s390x.BIG) || n2.Op != gc.OREGISTER || as == s390x.AMULLD {
+ // cannot have more than 16-bit of immediate in ADD, etc.
+ // instead, MOV into register first.
+ var ntmp gc.Node
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ rawgins(s390x.AMOVD, &n1, &ntmp)
+ rawgins(as, &ntmp, n2)
+ gc.Regfree(&ntmp)
+ return
+ }
+
+ rawgins(as, &n1, n2)
+}
+
+// generate
+// as n, $c (CMP/CMPU)
+func ginscon2(as int, n2 *gc.Node, c int64) {
+ var n1 gc.Node
+
+ gc.Nodconst(&n1, gc.Types[gc.TINT64], c)
+
+ switch as {
+ default:
+ gc.Fatalf("ginscon2")
+
+ case s390x.ACMP:
+ if -s390x.BIG <= c && c <= s390x.BIG {
+ rawgins(as, n2, &n1)
+ return
+ }
+
+ case s390x.ACMPU:
+ if 0 <= c && c <= 2*s390x.BIG {
+ rawgins(as, n2, &n1)
+ return
+ }
+ }
+
+ // MOV n1 into register first
+ var ntmp gc.Node
+ gc.Regalloc(&ntmp, gc.Types[gc.TINT64], nil)
+
+ rawgins(s390x.AMOVD, &n1, &ntmp)
+ rawgins(as, n2, &ntmp)
+ gc.Regfree(&ntmp)
+}
+
+func ginscmp(op gc.Op, t *gc.Type, n1, n2 *gc.Node, likely int) *obj.Prog {
+ if gc.Isint[t.Etype] && n1.Op == gc.OLITERAL && n2.Op != gc.OLITERAL {
+ // Reverse comparison to place constant last.
+ op = gc.Brrev(op)
+ n1, n2 = n2, n1
+ }
+
+ var r1, r2, g1, g2 gc.Node
+ gc.Regalloc(&r1, t, n1)
+ gc.Regalloc(&g1, n1.Type, &r1)
+ gc.Cgen(n1, &g1)
+ gmove(&g1, &r1)
+ if gc.Isint[t.Etype] && gc.Isconst(n2, gc.CTINT) {
+ ginscon2(optoas(gc.OCMP, t), &r1, n2.Int())
+ } else {
+ gc.Regalloc(&r2, t, n2)
+ gc.Regalloc(&g2, n1.Type, &r2)
+ gc.Cgen(n2, &g2)
+ gmove(&g2, &r2)
+ rawgins(optoas(gc.OCMP, t), &r1, &r2)
+ gc.Regfree(&g2)
+ gc.Regfree(&r2)
+ }
+ gc.Regfree(&g1)
+ gc.Regfree(&r1)
+ return gc.Gbranch(optoas(op, t), nil, likely)
+}
+
+// set up nodes representing 2^63
+var (
+ bigi gc.Node
+ bigf gc.Node
+ bignodes_did bool
+)
+
+func bignodes() {
+ if bignodes_did {
+ return
+ }
+ bignodes_did = true
+
+ var i big.Int
+ i.SetInt64(1)
+ i.Lsh(&i, 63)
+
+ gc.Nodconst(&bigi, gc.Types[gc.TUINT64], 0)
+ bigi.SetBigInt(&i)
+
+ bigi.Convconst(&bigf, gc.Types[gc.TFLOAT64])
+}
+
+// gmvc tries to move f to t using a mvc instruction.
+// If successful it returns true, otherwise it returns false.
+func gmvc(f, t *gc.Node) bool {
+ ft := int(gc.Simsimtype(f.Type))
+ tt := int(gc.Simsimtype(t.Type))
+
+ if ft != tt {
+ return false
+ }
+
+ if f.Op != gc.OINDREG || t.Op != gc.OINDREG {
+ return false
+ }
+
+ if f.Xoffset < 0 || f.Xoffset >= 4096-8 {
+ return false
+ }
+
+ if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
+ return false
+ }
+
+ var len int64
+ switch ft {
+ case gc.TUINT8, gc.TINT8, gc.TBOOL:
+ len = 1
+ case gc.TUINT16, gc.TINT16:
+ len = 2
+ case gc.TUINT32, gc.TINT32, gc.TFLOAT32:
+ len = 4
+ case gc.TUINT64, gc.TINT64, gc.TFLOAT64, gc.TPTR64:
+ len = 8
+ case gc.TUNSAFEPTR:
+ len = int64(gc.Widthptr)
+ default:
+ return false
+ }
+
+ p := gc.Prog(s390x.AMVC)
+ gc.Naddr(&p.From, f)
+ gc.Naddr(&p.To, t)
+ p.From3 = new(obj.Addr)
+ p.From3.Offset = len
+ p.From3.Type = obj.TYPE_CONST
+ return true
+}
+
+// generate move:
+// t = f
+// hard part is conversions.
+func gmove(f *gc.Node, t *gc.Node) {
+ if gc.Debug['M'] != 0 {
+ fmt.Printf("gmove %v -> %v\n", gc.Nconv(f, obj.FmtLong), gc.Nconv(t, obj.FmtLong))
+ }
+
+ ft := int(gc.Simsimtype(f.Type))
+ tt := int(gc.Simsimtype(t.Type))
+ cvt := t.Type
+
+ if gc.Iscomplex[ft] || gc.Iscomplex[tt] {
+ gc.Complexmove(f, t)
+ return
+ }
+
+ // cannot have two memory operands
+ var a int
+ if gc.Ismem(f) && gc.Ismem(t) {
+ if gmvc(f, t) {
+ return
+ }
+ goto hard
+ }
+
+ // convert constant to desired type
+ if f.Op == gc.OLITERAL {
+ var con gc.Node
+ f.Convconst(&con, t.Type)
+ f = &con
+ ft = tt // so big switch will choose a simple mov
+
+ // some constants can't move directly to memory.
+ if gc.Ismem(t) {
+ // float constants come from memory.
+ if gc.Isfloat[tt] {
+ goto hard
+ }
+
+ // all immediates are 16-bit sign-extended
+ // unless moving into a register.
+ if gc.Isint[tt] {
+ if i := con.Int(); int64(int16(i)) != i {
+ goto hard
+ }
+ }
+
+ // immediate moves to memory have a 12-bit unsigned displacement
+ if t.Xoffset < 0 || t.Xoffset >= 4096-8 {
+ goto hard
+ }
+ }
+ }
+
+ // a float-to-int or int-to-float conversion requires the source operand in a register
+ if gc.Ismem(f) && ((gc.Isfloat[ft] && gc.Isint[tt]) || (gc.Isint[ft] && gc.Isfloat[tt])) {
+ cvt = f.Type
+ goto hard
+ }
+
+ // a float32-to-float64 or float64-to-float32 conversion requires the source operand in a register
+ if gc.Ismem(f) && gc.Isfloat[ft] && gc.Isfloat[tt] && (ft != tt) {
+ cvt = f.Type
+ goto hard
+ }
+
+ // value -> value copy, only one memory operand.
+ // figure out the instruction to use.
+ // break out of switch for one-instruction gins.
+ // goto rdst for "destination must be register".
+ // goto hard for "convert to cvt type first".
+ // otherwise handle and return.
+ switch uint32(ft)<<16 | uint32(tt) {
+ default:
+ gc.Fatalf("gmove %v -> %v", gc.Tconv(f.Type, obj.FmtLong), gc.Tconv(t.Type, obj.FmtLong))
+
+ // integer copy and truncate
+ case gc.TINT8<<16 | gc.TINT8,
+ gc.TUINT8<<16 | gc.TINT8,
+ gc.TINT16<<16 | gc.TINT8,
+ gc.TUINT16<<16 | gc.TINT8,
+ gc.TINT32<<16 | gc.TINT8,
+ gc.TUINT32<<16 | gc.TINT8,
+ gc.TINT64<<16 | gc.TINT8,
+ gc.TUINT64<<16 | gc.TINT8:
+ a = s390x.AMOVB
+
+ case gc.TINT8<<16 | gc.TUINT8,
+ gc.TUINT8<<16 | gc.TUINT8,
+ gc.TINT16<<16 | gc.TUINT8,
+ gc.TUINT16<<16 | gc.TUINT8,
+ gc.TINT32<<16 | gc.TUINT8,
+ gc.TUINT32<<16 | gc.TUINT8,
+ gc.TINT64<<16 | gc.TUINT8,
+ gc.TUINT64<<16 | gc.TUINT8:
+ a = s390x.AMOVBZ
+
+ case gc.TINT16<<16 | gc.TINT16,
+ gc.TUINT16<<16 | gc.TINT16,
+ gc.TINT32<<16 | gc.TINT16,
+ gc.TUINT32<<16 | gc.TINT16,
+ gc.TINT64<<16 | gc.TINT16,
+ gc.TUINT64<<16 | gc.TINT16:
+ a = s390x.AMOVH
+
+ case gc.TINT16<<16 | gc.TUINT16,
+ gc.TUINT16<<16 | gc.TUINT16,
+ gc.TINT32<<16 | gc.TUINT16,
+ gc.TUINT32<<16 | gc.TUINT16,
+ gc.TINT64<<16 | gc.TUINT16,
+ gc.TUINT64<<16 | gc.TUINT16:
+ a = s390x.AMOVHZ
+
+ case gc.TINT32<<16 | gc.TINT32,
+ gc.TUINT32<<16 | gc.TINT32,
+ gc.TINT64<<16 | gc.TINT32,
+ gc.TUINT64<<16 | gc.TINT32:
+ a = s390x.AMOVW
+
+ case gc.TINT32<<16 | gc.TUINT32,
+ gc.TUINT32<<16 | gc.TUINT32,
+ gc.TINT64<<16 | gc.TUINT32,
+ gc.TUINT64<<16 | gc.TUINT32:
+ a = s390x.AMOVWZ
+
+ case gc.TINT64<<16 | gc.TINT64,
+ gc.TINT64<<16 | gc.TUINT64,
+ gc.TUINT64<<16 | gc.TINT64,
+ gc.TUINT64<<16 | gc.TUINT64:
+ a = s390x.AMOVD
+
+ // sign extend int8
+ case gc.TINT8<<16 | gc.TINT16,
+ gc.TINT8<<16 | gc.TUINT16,
+ gc.TINT8<<16 | gc.TINT32,
+ gc.TINT8<<16 | gc.TUINT32,
+ gc.TINT8<<16 | gc.TINT64,
+ gc.TINT8<<16 | gc.TUINT64:
+ a = s390x.AMOVB
+ goto rdst
+
+ // sign extend uint8
+ case gc.TUINT8<<16 | gc.TINT16,
+ gc.TUINT8<<16 | gc.TUINT16,
+ gc.TUINT8<<16 | gc.TINT32,
+ gc.TUINT8<<16 | gc.TUINT32,
+ gc.TUINT8<<16 | gc.TINT64,
+ gc.TUINT8<<16 | gc.TUINT64:
+ a = s390x.AMOVBZ
+ goto rdst
+
+ // sign extend int16
+ case gc.TINT16<<16 | gc.TINT32,
+ gc.TINT16<<16 | gc.TUINT32,
+ gc.TINT16<<16 | gc.TINT64,
+ gc.TINT16<<16 | gc.TUINT64:
+ a = s390x.AMOVH
+ goto rdst
+
+ // zero extend uint16
+ case gc.TUINT16<<16 | gc.TINT32,
+ gc.TUINT16<<16 | gc.TUINT32,
+ gc.TUINT16<<16 | gc.TINT64,
+ gc.TUINT16<<16 | gc.TUINT64:
+ a = s390x.AMOVHZ
+ goto rdst
+
+ // sign extend int32
+ case gc.TINT32<<16 | gc.TINT64,
+ gc.TINT32<<16 | gc.TUINT64:
+ a = s390x.AMOVW
+ goto rdst
+
+ // zero extend uint32
+ case gc.TUINT32<<16 | gc.TINT64,
+ gc.TUINT32<<16 | gc.TUINT64:
+ a = s390x.AMOVWZ
+ goto rdst
+
+ // float to integer
+ case gc.TFLOAT32<<16 | gc.TUINT8,
+ gc.TFLOAT32<<16 | gc.TUINT16:
+ cvt = gc.Types[gc.TUINT32]
+ goto hard
+
+ case gc.TFLOAT32<<16 | gc.TUINT32:
+ a = s390x.ACLFEBR
+ goto rdst
+
+ case gc.TFLOAT32<<16 | gc.TUINT64:
+ a = s390x.ACLGEBR
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TUINT8,
+ gc.TFLOAT64<<16 | gc.TUINT16:
+ cvt = gc.Types[gc.TUINT32]
+ goto hard
+
+ case gc.TFLOAT64<<16 | gc.TUINT32:
+ a = s390x.ACLFDBR
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TUINT64:
+ a = s390x.ACLGDBR
+ goto rdst
+
+ case gc.TFLOAT32<<16 | gc.TINT8,
+ gc.TFLOAT32<<16 | gc.TINT16:
+ cvt = gc.Types[gc.TINT32]
+ goto hard
+
+ case gc.TFLOAT32<<16 | gc.TINT32:
+ a = s390x.ACFEBRA
+ goto rdst
+
+ case gc.TFLOAT32<<16 | gc.TINT64:
+ a = s390x.ACGEBRA
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT8,
+ gc.TFLOAT64<<16 | gc.TINT16:
+ cvt = gc.Types[gc.TINT32]
+ goto hard
+
+ case gc.TFLOAT64<<16 | gc.TINT32:
+ a = s390x.ACFDBRA
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TINT64:
+ a = s390x.ACGDBRA
+ goto rdst
+
+ // integer to float
+ case gc.TUINT8<<16 | gc.TFLOAT32,
+ gc.TUINT16<<16 | gc.TFLOAT32:
+ cvt = gc.Types[gc.TUINT32]
+ goto hard
+
+ case gc.TUINT32<<16 | gc.TFLOAT32:
+ a = s390x.ACELFBR
+ goto rdst
+
+ case gc.TUINT64<<16 | gc.TFLOAT32:
+ a = s390x.ACELGBR
+ goto rdst
+
+ case gc.TUINT8<<16 | gc.TFLOAT64,
+ gc.TUINT16<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TUINT32]
+ goto hard
+
+ case gc.TUINT32<<16 | gc.TFLOAT64:
+ a = s390x.ACDLFBR
+ goto rdst
+
+ case gc.TUINT64<<16 | gc.TFLOAT64:
+ a = s390x.ACDLGBR
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TFLOAT32,
+ gc.TINT16<<16 | gc.TFLOAT32:
+ cvt = gc.Types[gc.TINT32]
+ goto hard
+
+ case gc.TINT32<<16 | gc.TFLOAT32:
+ a = s390x.ACEFBRA
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT32:
+ a = s390x.ACEGBRA
+ goto rdst
+
+ case gc.TINT8<<16 | gc.TFLOAT64,
+ gc.TINT16<<16 | gc.TFLOAT64:
+ cvt = gc.Types[gc.TINT32]
+ goto hard
+
+ case gc.TINT32<<16 | gc.TFLOAT64:
+ a = s390x.ACDFBRA
+ goto rdst
+
+ case gc.TINT64<<16 | gc.TFLOAT64:
+ a = s390x.ACDGBRA
+ goto rdst
+
+ // float to float
+ case gc.TFLOAT32<<16 | gc.TFLOAT32:
+ a = s390x.AFMOVS
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT64:
+ a = s390x.AFMOVD
+
+ case gc.TFLOAT32<<16 | gc.TFLOAT64:
+ a = s390x.ALDEBR
+ goto rdst
+
+ case gc.TFLOAT64<<16 | gc.TFLOAT32:
+ a = s390x.ALEDBR
+ goto rdst
+ }
+
+ gins(a, f, t)
+ return
+
+ // requires register destination
+rdst:
+ if t != nil && t.Op == gc.OREGISTER {
+ gins(a, f, t)
+ return
+ } else {
+ var r1 gc.Node
+ gc.Regalloc(&r1, t.Type, t)
+
+ gins(a, f, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+ }
+
+ // requires register intermediate
+hard:
+ var r1 gc.Node
+ gc.Regalloc(&r1, cvt, t)
+
+ gmove(f, &r1)
+ gmove(&r1, t)
+ gc.Regfree(&r1)
+ return
+}
+
+func intLiteral(n *gc.Node) (x int64, ok bool) {
+ switch {
+ case n == nil:
+ return
+ case gc.Isconst(n, gc.CTINT):
+ return n.Int(), true
+ case gc.Isconst(n, gc.CTBOOL):
+ return int64(obj.Bool2int(n.Bool())), true
+ }
+ return
+}
+
+// gins is called by the front end.
+// It synthesizes some multiple-instruction sequences
+// so the front end can stay simpler.
+func gins(as int, f, t *gc.Node) *obj.Prog {
+ if t != nil {
+ if as >= obj.A_ARCHSPECIFIC {
+ if x, ok := intLiteral(f); ok {
+ ginscon(as, x, t)
+ return nil // caller must not use
+ }
+ }
+ if as == s390x.ACMP || as == s390x.ACMPU {
+ if x, ok := intLiteral(t); ok {
+ ginscon2(as, f, x)
+ return nil // caller must not use
+ }
+ }
+ }
+ return rawgins(as, f, t)
+}
+
+// generate one instruction:
+// as f, t
+func rawgins(as int, f *gc.Node, t *gc.Node) *obj.Prog {
+ // self move check
+ // TODO(mundaym): use sized math and extend to MOVB, MOVWZ etc.
+ switch as {
+ case s390x.AMOVD, s390x.AFMOVS, s390x.AFMOVD:
+ if f != nil && t != nil &&
+ f.Op == gc.OREGISTER && t.Op == gc.OREGISTER &&
+ f.Reg == t.Reg {
+ return nil
+ }
+ }
+
+ p := gc.Prog(as)
+ gc.Naddr(&p.From, f)
+ gc.Naddr(&p.To, t)
+
+ switch as {
+ // Bad things the front end has done to us. Crash to find call stack.
+ case s390x.AMULLD:
+ if p.From.Type == obj.TYPE_CONST {
+ gc.Debug['h'] = 1
+ gc.Fatalf("bad inst: %v", p)
+ }
+ case s390x.ACMP, s390x.ACMPU:
+ if p.From.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_MEM {
+ gc.Debug['h'] = 1
+ gc.Fatalf("bad inst: %v", p)
+ }
+ }
+
+ if gc.Debug['g'] != 0 {
+ fmt.Printf("%v\n", p)
+ }
+
+ w := int32(0)
+ switch as {
+ case s390x.AMOVB, s390x.AMOVBZ:
+ w = 1
+
+ case s390x.AMOVH, s390x.AMOVHZ:
+ w = 2
+
+ case s390x.AMOVW, s390x.AMOVWZ:
+ w = 4
+
+ case s390x.AMOVD:
+ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_ADDR {
+ break
+ }
+ w = 8
+ }
+
+ if w != 0 && ((f != nil && p.From.Width < int64(w)) || (t != nil && p.To.Type != obj.TYPE_REG && p.To.Width > int64(w))) {
+ gc.Dump("f", f)
+ gc.Dump("t", t)
+ gc.Fatalf("bad width: %v (%d, %d)\n", p, p.From.Width, p.To.Width)
+ }
+
+ return p
+}
+
+// optoas returns the Axxx equivalent of Oxxx for type t
+func optoas(op gc.Op, t *gc.Type) int {
+ if t == nil {
+ gc.Fatalf("optoas: t is nil")
+ }
+
+ // avoid constant conversions in switches below
+ const (
+ OMINUS_ = uint32(gc.OMINUS) << 16
+ OLSH_ = uint32(gc.OLSH) << 16
+ ORSH_ = uint32(gc.ORSH) << 16
+ OADD_ = uint32(gc.OADD) << 16
+ OSUB_ = uint32(gc.OSUB) << 16
+ OMUL_ = uint32(gc.OMUL) << 16
+ ODIV_ = uint32(gc.ODIV) << 16
+ OOR_ = uint32(gc.OOR) << 16
+ OAND_ = uint32(gc.OAND) << 16
+ OXOR_ = uint32(gc.OXOR) << 16
+ OEQ_ = uint32(gc.OEQ) << 16
+ ONE_ = uint32(gc.ONE) << 16
+ OLT_ = uint32(gc.OLT) << 16
+ OLE_ = uint32(gc.OLE) << 16
+ OGE_ = uint32(gc.OGE) << 16
+ OGT_ = uint32(gc.OGT) << 16
+ OCMP_ = uint32(gc.OCMP) << 16
+ OAS_ = uint32(gc.OAS) << 16
+ OHMUL_ = uint32(gc.OHMUL) << 16
+ OSQRT_ = uint32(gc.OSQRT) << 16
+ OLROT_ = uint32(gc.OLROT) << 16
+ )
+
+ a := int(obj.AXXX)
+ switch uint32(op)<<16 | uint32(gc.Simtype[t.Etype]) {
+ default:
+ gc.Fatalf("optoas: no entry for op=%v type=%v", gc.Oconv(int(op), 0), t)
+
+ case OEQ_ | gc.TBOOL,
+ OEQ_ | gc.TINT8,
+ OEQ_ | gc.TUINT8,
+ OEQ_ | gc.TINT16,
+ OEQ_ | gc.TUINT16,
+ OEQ_ | gc.TINT32,
+ OEQ_ | gc.TUINT32,
+ OEQ_ | gc.TINT64,
+ OEQ_ | gc.TUINT64,
+ OEQ_ | gc.TPTR32,
+ OEQ_ | gc.TPTR64,
+ OEQ_ | gc.TFLOAT32,
+ OEQ_ | gc.TFLOAT64:
+ a = s390x.ABEQ
+
+ case ONE_ | gc.TBOOL,
+ ONE_ | gc.TINT8,
+ ONE_ | gc.TUINT8,
+ ONE_ | gc.TINT16,
+ ONE_ | gc.TUINT16,
+ ONE_ | gc.TINT32,
+ ONE_ | gc.TUINT32,
+ ONE_ | gc.TINT64,
+ ONE_ | gc.TUINT64,
+ ONE_ | gc.TPTR32,
+ ONE_ | gc.TPTR64,
+ ONE_ | gc.TFLOAT32,
+ ONE_ | gc.TFLOAT64:
+ a = s390x.ABNE
+
+ case OLT_ | gc.TINT8, // ACMP
+ OLT_ | gc.TINT16,
+ OLT_ | gc.TINT32,
+ OLT_ | gc.TINT64,
+ OLT_ | gc.TUINT8,
+ // ACMPU
+ OLT_ | gc.TUINT16,
+ OLT_ | gc.TUINT32,
+ OLT_ | gc.TUINT64,
+ OLT_ | gc.TFLOAT32,
+ // AFCMPU
+ OLT_ | gc.TFLOAT64:
+ a = s390x.ABLT
+
+ case OLE_ | gc.TINT8, // ACMP
+ OLE_ | gc.TINT16,
+ OLE_ | gc.TINT32,
+ OLE_ | gc.TINT64,
+ OLE_ | gc.TUINT8,
+ // ACMPU
+ OLE_ | gc.TUINT16,
+ OLE_ | gc.TUINT32,
+ OLE_ | gc.TUINT64,
+ OLE_ | gc.TFLOAT32,
+ OLE_ | gc.TFLOAT64:
+ a = s390x.ABLE
+
+ case OGT_ | gc.TINT8,
+ OGT_ | gc.TINT16,
+ OGT_ | gc.TINT32,
+ OGT_ | gc.TINT64,
+ OGT_ | gc.TUINT8,
+ OGT_ | gc.TUINT16,
+ OGT_ | gc.TUINT32,
+ OGT_ | gc.TUINT64,
+ OGT_ | gc.TFLOAT32,
+ OGT_ | gc.TFLOAT64:
+ a = s390x.ABGT
+
+ case OGE_ | gc.TINT8,
+ OGE_ | gc.TINT16,
+ OGE_ | gc.TINT32,
+ OGE_ | gc.TINT64,
+ OGE_ | gc.TUINT8,
+ OGE_ | gc.TUINT16,
+ OGE_ | gc.TUINT32,
+ OGE_ | gc.TUINT64,
+ OGE_ | gc.TFLOAT32,
+ OGE_ | gc.TFLOAT64:
+ a = s390x.ABGE
+
+ case OCMP_ | gc.TBOOL,
+ OCMP_ | gc.TINT8,
+ OCMP_ | gc.TINT16,
+ OCMP_ | gc.TINT32,
+ OCMP_ | gc.TPTR32,
+ OCMP_ | gc.TINT64:
+ a = s390x.ACMP
+
+ case OCMP_ | gc.TUINT8,
+ OCMP_ | gc.TUINT16,
+ OCMP_ | gc.TUINT32,
+ OCMP_ | gc.TUINT64,
+ OCMP_ | gc.TPTR64:
+ a = s390x.ACMPU
+
+ case OCMP_ | gc.TFLOAT32:
+ a = s390x.ACEBR
+
+ case OCMP_ | gc.TFLOAT64:
+ a = s390x.AFCMPU
+
+ case OAS_ | gc.TBOOL,
+ OAS_ | gc.TINT8:
+ a = s390x.AMOVB
+
+ case OAS_ | gc.TUINT8:
+ a = s390x.AMOVBZ
+
+ case OAS_ | gc.TINT16:
+ a = s390x.AMOVH
+
+ case OAS_ | gc.TUINT16:
+ a = s390x.AMOVHZ
+
+ case OAS_ | gc.TINT32:
+ a = s390x.AMOVW
+
+ case OAS_ | gc.TUINT32,
+ OAS_ | gc.TPTR32:
+ a = s390x.AMOVWZ
+
+ case OAS_ | gc.TINT64,
+ OAS_ | gc.TUINT64,
+ OAS_ | gc.TPTR64:
+ a = s390x.AMOVD
+
+ case OAS_ | gc.TFLOAT32:
+ a = s390x.AFMOVS
+
+ case OAS_ | gc.TFLOAT64:
+ a = s390x.AFMOVD
+
+ case OADD_ | gc.TINT8,
+ OADD_ | gc.TUINT8,
+ OADD_ | gc.TINT16,
+ OADD_ | gc.TUINT16,
+ OADD_ | gc.TINT32,
+ OADD_ | gc.TUINT32,
+ OADD_ | gc.TPTR32,
+ OADD_ | gc.TINT64,
+ OADD_ | gc.TUINT64,
+ OADD_ | gc.TPTR64:
+ a = s390x.AADD
+
+ case OADD_ | gc.TFLOAT32:
+ a = s390x.AFADDS
+
+ case OADD_ | gc.TFLOAT64:
+ a = s390x.AFADD
+
+ case OSUB_ | gc.TINT8,
+ OSUB_ | gc.TUINT8,
+ OSUB_ | gc.TINT16,
+ OSUB_ | gc.TUINT16,
+ OSUB_ | gc.TINT32,
+ OSUB_ | gc.TUINT32,
+ OSUB_ | gc.TPTR32,
+ OSUB_ | gc.TINT64,
+ OSUB_ | gc.TUINT64,
+ OSUB_ | gc.TPTR64:
+ a = s390x.ASUB
+
+ case OSUB_ | gc.TFLOAT32:
+ a = s390x.AFSUBS
+
+ case OSUB_ | gc.TFLOAT64:
+ a = s390x.AFSUB
+
+ case OMINUS_ | gc.TINT8,
+ OMINUS_ | gc.TUINT8,
+ OMINUS_ | gc.TINT16,
+ OMINUS_ | gc.TUINT16,
+ OMINUS_ | gc.TINT32,
+ OMINUS_ | gc.TUINT32,
+ OMINUS_ | gc.TPTR32,
+ OMINUS_ | gc.TINT64,
+ OMINUS_ | gc.TUINT64,
+ OMINUS_ | gc.TPTR64:
+ a = s390x.ANEG
+
+ case OAND_ | gc.TINT8,
+ OAND_ | gc.TUINT8,
+ OAND_ | gc.TINT16,
+ OAND_ | gc.TUINT16,
+ OAND_ | gc.TINT32,
+ OAND_ | gc.TUINT32,
+ OAND_ | gc.TPTR32,
+ OAND_ | gc.TINT64,
+ OAND_ | gc.TUINT64,
+ OAND_ | gc.TPTR64:
+ a = s390x.AAND
+
+ case OOR_ | gc.TINT8,
+ OOR_ | gc.TUINT8,
+ OOR_ | gc.TINT16,
+ OOR_ | gc.TUINT16,
+ OOR_ | gc.TINT32,
+ OOR_ | gc.TUINT32,
+ OOR_ | gc.TPTR32,
+ OOR_ | gc.TINT64,
+ OOR_ | gc.TUINT64,
+ OOR_ | gc.TPTR64:
+ a = s390x.AOR
+
+ case OXOR_ | gc.TINT8,
+ OXOR_ | gc.TUINT8,
+ OXOR_ | gc.TINT16,
+ OXOR_ | gc.TUINT16,
+ OXOR_ | gc.TINT32,
+ OXOR_ | gc.TUINT32,
+ OXOR_ | gc.TPTR32,
+ OXOR_ | gc.TINT64,
+ OXOR_ | gc.TUINT64,
+ OXOR_ | gc.TPTR64:
+ a = s390x.AXOR
+
+ case OLSH_ | gc.TINT8,
+ OLSH_ | gc.TUINT8,
+ OLSH_ | gc.TINT16,
+ OLSH_ | gc.TUINT16,
+ OLSH_ | gc.TINT32,
+ OLSH_ | gc.TUINT32,
+ OLSH_ | gc.TPTR32,
+ OLSH_ | gc.TINT64,
+ OLSH_ | gc.TUINT64,
+ OLSH_ | gc.TPTR64:
+ a = s390x.ASLD
+
+ case ORSH_ | gc.TUINT8,
+ ORSH_ | gc.TUINT16,
+ ORSH_ | gc.TUINT32,
+ ORSH_ | gc.TPTR32,
+ ORSH_ | gc.TUINT64,
+ ORSH_ | gc.TPTR64:
+ a = s390x.ASRD
+
+ case ORSH_ | gc.TINT8,
+ ORSH_ | gc.TINT16,
+ ORSH_ | gc.TINT32,
+ ORSH_ | gc.TINT64:
+ a = s390x.ASRAD
+
+ case OHMUL_ | gc.TINT64,
+ OHMUL_ | gc.TUINT64,
+ OHMUL_ | gc.TPTR64:
+ a = s390x.AMULHDU
+
+ case OMUL_ | gc.TINT8,
+ OMUL_ | gc.TINT16,
+ OMUL_ | gc.TINT32,
+ OMUL_ | gc.TINT64:
+ a = s390x.AMULLD
+
+ case OMUL_ | gc.TUINT8,
+ OMUL_ | gc.TUINT16,
+ OMUL_ | gc.TUINT32,
+ OMUL_ | gc.TPTR32,
+		// don't use word multiply, the high 32 bits are undefined.
+ OMUL_ | gc.TUINT64,
+ OMUL_ | gc.TPTR64:
+ // for 64-bit multiplies, signedness doesn't matter.
+ a = s390x.AMULLD
+
+ case OMUL_ | gc.TFLOAT32:
+ a = s390x.AFMULS
+
+ case OMUL_ | gc.TFLOAT64:
+ a = s390x.AFMUL
+
+ case ODIV_ | gc.TINT8,
+ ODIV_ | gc.TINT16,
+ ODIV_ | gc.TINT32,
+ ODIV_ | gc.TINT64:
+ a = s390x.ADIVD
+
+ case ODIV_ | gc.TUINT8,
+ ODIV_ | gc.TUINT16,
+ ODIV_ | gc.TUINT32,
+ ODIV_ | gc.TPTR32,
+ ODIV_ | gc.TUINT64,
+ ODIV_ | gc.TPTR64:
+ a = s390x.ADIVDU
+
+ case ODIV_ | gc.TFLOAT32:
+ a = s390x.AFDIVS
+
+ case ODIV_ | gc.TFLOAT64:
+ a = s390x.AFDIV
+
+ case OSQRT_ | gc.TFLOAT64:
+ a = s390x.AFSQRT
+
+ case OLROT_ | gc.TUINT32,
+ OLROT_ | gc.TPTR32,
+ OLROT_ | gc.TINT32:
+ a = s390x.ARLL
+
+ case OLROT_ | gc.TUINT64,
+ OLROT_ | gc.TPTR64,
+ OLROT_ | gc.TINT64:
+ a = s390x.ARLLG
+ }
+
+ return a
+}
+
+const (
+ ODynam = 1 << 0
+ OAddable = 1 << 1
+)
+
+var clean [20]gc.Node
+
+var cleani int = 0
+
+func sudoclean() {
+ if clean[cleani-1].Op != gc.OEMPTY {
+ gc.Regfree(&clean[cleani-1])
+ }
+ if clean[cleani-2].Op != gc.OEMPTY {
+ gc.Regfree(&clean[cleani-2])
+ }
+ cleani -= 2
+}
+
+/*
+ * generate code to compute address of n,
+ * a reference to a (perhaps nested) field inside
+ * an array or struct.
+ * return 0 on failure, 1 on success.
+ * on success, leaves usable address in a.
+ *
+ * caller is responsible for calling sudoclean
+ * after successful sudoaddable,
+ * to release the register used for a.
+ */
+func sudoaddable(as int, n *gc.Node, a *obj.Addr) bool {
+ if n.Type == nil {
+ return false
+ }
+
+ *a = obj.Addr{}
+
+ switch n.Op {
+ case gc.OLITERAL:
+ if !gc.Isconst(n, gc.CTINT) {
+ return false
+ }
+ v := n.Int()
+ switch as {
+ default:
+ return false
+
+ // operations that can cope with a 32-bit immediate
+ // TODO(mundaym): logical operations can work on high bits
+ case s390x.AADD,
+ s390x.AADDC,
+ s390x.ASUB,
+ s390x.AMULLW,
+ s390x.AAND,
+ s390x.AOR,
+ s390x.AXOR,
+ s390x.ASLD,
+ s390x.ASLW,
+ s390x.ASRAW,
+ s390x.ASRAD,
+ s390x.ASRW,
+ s390x.ASRD,
+ s390x.AMOVB,
+ s390x.AMOVBZ,
+ s390x.AMOVH,
+ s390x.AMOVHZ,
+ s390x.AMOVW,
+ s390x.AMOVWZ,
+ s390x.AMOVD:
+ if int64(int32(v)) != v {
+ return false
+ }
+
+ // for comparisons avoid immediates unless they can
+ // fit into a int8/uint8
+ // this favours combined compare and branch instructions
+ case s390x.ACMP:
+ if int64(int8(v)) != v {
+ return false
+ }
+ case s390x.ACMPU:
+ if int64(uint8(v)) != v {
+ return false
+ }
+ }
+
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ gc.Naddr(a, n)
+ return true
+
+ case gc.ODOT,
+ gc.ODOTPTR:
+ cleani += 2
+ reg := &clean[cleani-1]
+ reg1 := &clean[cleani-2]
+ reg.Op = gc.OEMPTY
+ reg1.Op = gc.OEMPTY
+ var nn *gc.Node
+ var oary [10]int64
+ o := gc.Dotoffset(n, oary[:], &nn)
+ if nn == nil {
+ sudoclean()
+ return false
+ }
+
+ if nn.Addable && o == 1 && oary[0] >= 0 {
+ // directly addressable set of DOTs
+ n1 := *nn
+
+ n1.Type = n.Type
+ n1.Xoffset += oary[0]
+ // check that the offset fits into a 12-bit displacement
+ if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
+ sudoclean()
+ return false
+ }
+ gc.Naddr(a, &n1)
+ return true
+ }
+
+ gc.Regalloc(reg, gc.Types[gc.Tptr], nil)
+ n1 := *reg
+ n1.Op = gc.OINDREG
+ if oary[0] >= 0 {
+ gc.Agen(nn, reg)
+ n1.Xoffset = oary[0]
+ } else {
+ gc.Cgen(nn, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[0] + 1)
+ }
+
+ for i := 1; i < o; i++ {
+ if oary[i] >= 0 {
+ gc.Fatalf("can't happen")
+ }
+ gins(s390x.AMOVD, &n1, reg)
+ gc.Cgen_checknil(reg)
+ n1.Xoffset = -(oary[i] + 1)
+ }
+
+ a.Type = obj.TYPE_NONE
+ a.Index = obj.TYPE_NONE
+ // check that the offset fits into a 12-bit displacement
+ if n1.Xoffset < 0 || n1.Xoffset >= (1<<12)-8 {
+ tmp := n1
+ tmp.Op = gc.OREGISTER
+ tmp.Type = gc.Types[gc.Tptr]
+ tmp.Xoffset = 0
+ gc.Cgen_checknil(&tmp)
+ ginscon(s390x.AADD, n1.Xoffset, &tmp)
+ n1.Xoffset = 0
+ }
+ gc.Naddr(a, &n1)
+ return true
+ }
+
+ return false
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/peep.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/peep.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/peep.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/peep.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1827 @@
+// Derived from Inferno utils/6c/peep.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/peep.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "fmt"
+)
+
+var gactive uint32
+
+func peep(firstp *obj.Prog) {
+ g := gc.Flowstart(firstp, nil)
+ if g == nil {
+ return
+ }
+ gactive = 0
+
+ // promote zero moves to MOVD so that they are more likely to
+ // be optimized in later passes
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+ if isMove(p) && p.As != s390x.AMOVD && regzer(&p.From) != 0 && isGPR(&p.To) {
+ p.As = s390x.AMOVD
+ }
+ }
+
+ // constant propagation
+ // find MOV $con,R followed by
+ // another MOV $con,R without
+ // setting R in the interim
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+ switch p.As {
+ case s390x.AMOVD,
+ s390x.AMOVW, s390x.AMOVWZ,
+ s390x.AMOVH, s390x.AMOVHZ,
+ s390x.AMOVB, s390x.AMOVBZ,
+ s390x.AFMOVS, s390x.AFMOVD:
+ if regtyp(&p.To) {
+ if p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_FCONST {
+ conprop(r)
+ }
+ }
+ }
+ }
+
+ for {
+ changed := false
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+
+ // TODO(austin) Handle smaller moves. arm and amd64
+ // distinguish between moves that moves that *must*
+ // sign/zero extend and moves that don't care so they
+ // can eliminate moves that don't care without
+ // breaking moves that do care. This might let us
+ // simplify or remove the next peep loop, too.
+ if p.As == s390x.AMOVD || p.As == s390x.AFMOVD || p.As == s390x.AFMOVS {
+ if regtyp(&p.To) {
+ // Convert uses to $0 to uses of R0 and
+ // propagate R0
+ if p.As == s390x.AMOVD && regzer(&p.From) != 0 {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s390x.REGZERO
+ }
+
+ // Try to eliminate reg->reg moves
+ if isGPR(&p.From) || isFPR(&p.From) {
+ if copyprop(r) || (subprop(r) && copyprop(r)) {
+ excise(r)
+ changed = true
+ }
+ }
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("pass7 copyprop", g.Start, 0)
+ }
+
+ /*
+	 * For any kind of MOV in (AMOVW, AMOVWZ, AMOVH, AMOVHZ, AMOVB, AMOVBZ)
+ * MOV Ra, Rb; ...; MOV Rb, Rc; -> MOV Ra, Rc;
+ */
+
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+
+ switch p.As {
+ case s390x.AMOVW, s390x.AMOVWZ,
+ s390x.AMOVH, s390x.AMOVHZ,
+ s390x.AMOVB, s390x.AMOVBZ:
+
+ if regzer(&p.From) == 1 && regtyp(&p.To) {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s390x.REGZERO
+ }
+
+ if ((regtyp(&p.From) || regzer(&p.From) == 1 ||
+ p.From.Type == obj.TYPE_CONST || p.From.Type == obj.TYPE_SCONST) &&
+ regtyp(&p.To)) != true {
+ continue
+ }
+
+ default:
+ continue
+ }
+
+ r0 := r
+ p0 := r0.Prog
+ s0 := &p0.From
+ v0 := &p0.To
+ r1 := gc.Uniqs(r0)
+
+ // v0used: 0 means must not be used;
+ // 1 means didn't find, but can't decide;
+ // 2 means found a use, must be used;
+		// v0used is used as a tag to decide if r0 can be eliminated.
+ var v0used int = 1
+
+ for ; ; r1 = gc.Uniqs(r1) {
+ var p1 *obj.Prog
+
+ if r1 == nil || r1 == r0 {
+ break
+ }
+ if gc.Uniqp(r1) == nil {
+ break
+ }
+ breakloop := false
+ p1 = r1.Prog
+
+ if p1.As == p0.As && copyas(&p0.To, &p1.From) &&
+ (regtyp(&p0.From) || p0.From.Reg == s390x.REGZERO || regtyp(&p1.To) ||
+ (p0.From.Type != obj.TYPE_CONST && p0.From.Type != obj.TYPE_FCONST && p0.From.Type != obj.TYPE_SCONST && p1.To.Type == obj.TYPE_MEM)) {
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("mov prop\n")
+ fmt.Printf("%v\n", p0)
+ fmt.Printf("%v\n", p1)
+ }
+ p1.From = p0.From
+ } else {
+ t := copyu(p1, v0, nil)
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("try v0 mov prop t=%d\n", t)
+ fmt.Printf("%v\n", p0)
+ fmt.Printf("%v\n", p1)
+ }
+ switch t {
+ case 0: // miss
+ case 1: // use
+ v0used = 2
+ case 2, // rar
+ 4: // use and set
+ v0used = 2
+ breakloop = true
+ case 3: // set
+ if v0used != 2 {
+ v0used = 0
+ }
+ breakloop = true
+ default:
+ }
+
+ if regtyp(s0) {
+ t = copyu(p1, s0, nil)
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("try s0 mov prop t=%d\n", t)
+ fmt.Printf("%v\n", p0)
+ fmt.Printf("%v\n", p1)
+ }
+ switch t {
+ case 0, // miss
+ 1: // use
+ case 2, // rar
+ 4: // use and set
+ breakloop = true
+ case 3: // set
+ breakloop = true
+ default:
+ }
+ }
+ }
+ if breakloop {
+ break
+ }
+ }
+ if v0used == 0 {
+ excise(r0)
+ }
+ }
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("pass 7 MOV copy propagation", g.Start, 0)
+ }
+
+ /*
+ * look for MOVB x,R; MOVB R,R (for small MOVs not handled above)
+ */
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+ switch p.As {
+ default:
+ continue
+
+ case s390x.AMOVH,
+ s390x.AMOVHZ,
+ s390x.AMOVB,
+ s390x.AMOVBZ,
+ s390x.AMOVW,
+ s390x.AMOVWZ:
+ if p.To.Type != obj.TYPE_REG {
+ continue
+ }
+ }
+
+ r1 := r.Link
+ if r1 == nil {
+ continue
+ }
+ // If this is a branch target then the cast might be needed
+ if gc.Uniqp(r1) == nil {
+ continue
+ }
+ p1 := r1.Prog
+ if p1.As != p.As {
+ continue
+ }
+ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+ continue
+ }
+ if p1.To.Type != obj.TYPE_REG || p1.To.Reg != p.To.Reg {
+ continue
+ }
+ excise(r1)
+ }
+
+ // Remove redundant moves/casts
+ fuseMoveChains(g.Start)
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("fuse move chains", g.Start, 0)
+ }
+
+ // Fuse memory zeroing instructions into XC instructions
+ fuseClear(g.Start)
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("fuse clears", g.Start, 0)
+ }
+
+ // load pipelining
+ // push any load from memory as early as possible
+ // to give it time to complete before use.
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+ switch p.As {
+ case s390x.AMOVB,
+ s390x.AMOVW,
+ s390x.AMOVD:
+
+ if regtyp(&p.To) && !regconsttyp(&p.From) {
+ pushback(r)
+ }
+ }
+ }
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("pass8 push load as early as possible", g.Start, 0)
+ }
+
+ /*
+ * look for OP a, b, c; MOV c, d; -> OP a, b, d;
+ */
+
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+
+ switch p.As {
+ case s390x.AADD,
+ s390x.AADDC,
+ s390x.AADDME,
+ s390x.AADDE,
+ s390x.AADDZE,
+ s390x.AAND,
+ s390x.AANDN,
+ s390x.ADIVW,
+ s390x.ADIVWU,
+ s390x.ADIVD,
+ s390x.ADIVDU,
+ s390x.AMULLW,
+ s390x.AMULHDU,
+ s390x.AMULLD,
+ s390x.ANAND,
+ s390x.ANOR,
+ s390x.AOR,
+ s390x.AORN,
+ s390x.ASLW,
+ s390x.ASRAW,
+ s390x.ASRW,
+ s390x.ASLD,
+ s390x.ASRAD,
+ s390x.ASRD,
+ s390x.ARLL,
+ s390x.ARLLG,
+ s390x.ASUB,
+ s390x.ASUBC,
+ s390x.ASUBME,
+ s390x.ASUBE,
+ s390x.ASUBZE,
+ s390x.AXOR:
+ if p.To.Type != obj.TYPE_REG {
+ continue
+ }
+ if p.Reg == 0 { // Only for 3 ops instruction
+ continue
+ }
+ default:
+ continue
+ }
+
+ r1 := r.Link
+ for ; r1 != nil; r1 = r1.Link {
+ if r1.Prog.As != obj.ANOP {
+ break
+ }
+ }
+
+ if r1 == nil {
+ continue
+ }
+
+ p1 := r1.Prog
+ switch p1.As {
+ case s390x.AMOVD:
+ if p1.To.Type != obj.TYPE_REG {
+ continue
+ }
+
+ default:
+ continue
+ }
+ if p1.From.Type != obj.TYPE_REG || p1.From.Reg != p.To.Reg {
+ continue
+ }
+
+ if trymergeopmv(r1) {
+ p.To = p1.To
+ excise(r1)
+ }
+ }
+
+ if gc.Debug['v'] != 0 {
+ gc.Dumpit("Merge operation and move", g.Start, 0)
+ }
+
+ /*
+ * look for CMP x, y; Branch -> Compare and branch
+ */
+ for r := g.Start; r != nil; r = r.Link {
+ p := r.Prog
+ r1 := gc.Uniqs(r)
+ if r1 == nil {
+ continue
+ }
+ p1 := r1.Prog
+
+ var ins int16
+ switch p.As {
+ case s390x.ACMP:
+ switch p1.As {
+ case s390x.ABCL, s390x.ABC:
+ continue
+ case s390x.ABEQ:
+ ins = s390x.ACMPBEQ
+ case s390x.ABGE:
+ ins = s390x.ACMPBGE
+ case s390x.ABGT:
+ ins = s390x.ACMPBGT
+ case s390x.ABLE:
+ ins = s390x.ACMPBLE
+ case s390x.ABLT:
+ ins = s390x.ACMPBLT
+ case s390x.ABNE:
+ ins = s390x.ACMPBNE
+ default:
+ continue
+ }
+
+ case s390x.ACMPU:
+ switch p1.As {
+ case s390x.ABCL, s390x.ABC:
+ continue
+ case s390x.ABEQ:
+ ins = s390x.ACMPUBEQ
+ case s390x.ABGE:
+ ins = s390x.ACMPUBGE
+ case s390x.ABGT:
+ ins = s390x.ACMPUBGT
+ case s390x.ABLE:
+ ins = s390x.ACMPUBLE
+ case s390x.ABLT:
+ ins = s390x.ACMPUBLT
+ case s390x.ABNE:
+ ins = s390x.ACMPUBNE
+ default:
+ continue
+ }
+
+ case s390x.ACMPW, s390x.ACMPWU:
+ continue
+
+ default:
+ continue
+ }
+
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("cnb %v; %v -> ", p, p1)
+ }
+
+ if p1.To.Sym != nil {
+ continue
+ }
+
+ if p.To.Type == obj.TYPE_REG {
+ p1.As = ins
+ p1.From = p.From
+ p1.Reg = p.To.Reg
+ p1.From3 = nil
+ } else if p.To.Type == obj.TYPE_CONST {
+ switch p.As {
+ case s390x.ACMP, s390x.ACMPW:
+ if (p.To.Offset < -(1 << 7)) || (p.To.Offset >= ((1 << 7) - 1)) {
+ continue
+ }
+ case s390x.ACMPU, s390x.ACMPWU:
+ if p.To.Offset >= (1 << 8) {
+ continue
+ }
+ default:
+ }
+ p1.As = ins
+ p1.From = p.From
+ p1.Reg = 0
+ p1.From3 = new(obj.Addr)
+ *(p1.From3) = p.To
+ } else {
+ continue
+ }
+
+ if gc.Debug['D'] != 0 {
+ fmt.Printf("%v\n", p1)
+ }
+ excise(r)
+ }
+
+ if gc.Debug['v'] != 0 {
+ gc.Dumpit("compare and branch", g.Start, 0)
+ }
+
+ // Fuse LOAD/STORE instructions into LOAD/STORE MULTIPLE instructions
+ fuseMultiple(g.Start)
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ gc.Dumpit("pass 7 fuse load/store instructions", g.Start, 0)
+ }
+
+ gc.Flowend(g)
+}
+
+func conprop(r0 *gc.Flow) {
+ p0 := r0.Prog
+ v0 := &p0.To
+ r := r0
+ for {
+ r = gc.Uniqs(r)
+ if r == nil || r == r0 {
+ return
+ }
+ if gc.Uniqp(r) == nil {
+ return
+ }
+
+ p := r.Prog
+ t := copyu(p, v0, nil)
+ switch t {
+ case 0, // miss
+ 1: // use
+ continue
+ case 3: // set
+ if p.As == p0.As && p.From.Type == p0.From.Type && p.From.Reg == p0.From.Reg && p.From.Node == p0.From.Node &&
+ p.From.Offset == p0.From.Offset && p.From.Scale == p0.From.Scale && p.From.Index == p0.From.Index {
+ if p.From.Val == p0.From.Val {
+ excise(r)
+ continue
+ }
+ }
+ }
+ break
+ }
+}
+
+// is 'a' a register or constant?
+func regconsttyp(a *obj.Addr) bool {
+ if regtyp(a) {
+ return true
+ }
+ switch a.Type {
+ case obj.TYPE_CONST,
+ obj.TYPE_FCONST,
+ obj.TYPE_SCONST,
+ obj.TYPE_ADDR: // TODO(rsc): Not all TYPE_ADDRs are constants.
+ return true
+ }
+
+ return false
+}
+
+func pushback(r0 *gc.Flow) {
+ var r *gc.Flow
+
+ var b *gc.Flow
+ p0 := r0.Prog
+ for r = gc.Uniqp(r0); r != nil && gc.Uniqs(r) != nil; r = gc.Uniqp(r) {
+ p := r.Prog
+ if p.As != obj.ANOP {
+ if !regconsttyp(&p.From) || !regtyp(&p.To) {
+ break
+ }
+ if copyu(p, &p0.To, nil) != 0 || copyu(p0, &p.To, nil) != 0 {
+ break
+ }
+ }
+
+ if p.As == obj.ACALL {
+ break
+ }
+ b = r
+ }
+
+ if b == nil {
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("no pushback: %v\n", r0.Prog)
+ if r != nil {
+ fmt.Printf("\t%v [%v]\n", r.Prog, gc.Uniqs(r) != nil)
+ }
+ }
+
+ return
+ }
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("pushback\n")
+ for r := b; ; r = r.Link {
+ fmt.Printf("\t%v\n", r.Prog)
+ if r == r0 {
+ break
+ }
+ }
+ }
+
+ t := obj.Prog(*r0.Prog)
+ for r = gc.Uniqp(r0); ; r = gc.Uniqp(r) {
+ p0 = r.Link.Prog
+ p := r.Prog
+ p0.As = p.As
+ p0.Lineno = p.Lineno
+ p0.From = p.From
+ p0.To = p.To
+ p0.From3 = p.From3
+ p0.Reg = p.Reg
+ p0.RegTo2 = p.RegTo2
+ if r == b {
+ break
+ }
+ }
+
+ p0 = r.Prog
+ p0.As = t.As
+ p0.Lineno = t.Lineno
+ p0.From = t.From
+ p0.To = t.To
+ p0.From3 = t.From3
+ p0.Reg = t.Reg
+ p0.RegTo2 = t.RegTo2
+
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("\tafter\n")
+ for r := (*gc.Flow)(b); ; r = r.Link {
+ fmt.Printf("\t%v\n", r.Prog)
+ if r == r0 {
+ break
+ }
+ }
+ }
+}
+
+func excise(r *gc.Flow) {
+ p := r.Prog
+ if gc.Debug['P'] != 0 && gc.Debug['v'] != 0 {
+ fmt.Printf("%v ===delete===\n", p)
+ }
+ obj.Nopout(p)
+ gc.Ostats.Ndelmov++
+}
+
+/*
+ * regzer returns 1 if a's value is 0 (a is R0 or $0)
+ */
+func regzer(a *obj.Addr) int {
+ if a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_ADDR {
+ if a.Sym == nil && a.Reg == 0 {
+ if a.Offset == 0 {
+ return 1
+ }
+ }
+ }
+ if a.Type == obj.TYPE_REG {
+ if a.Reg == s390x.REGZERO {
+ return 1
+ }
+ }
+ return 0
+}
+
+func regtyp(a *obj.Addr) bool {
+ // TODO(rsc): Floating point register exclusions?
+ return a.Type == obj.TYPE_REG && s390x.REG_R0 <= a.Reg && a.Reg <= s390x.REG_F15 && a.Reg != s390x.REGZERO
+}
+
+// isGPR returns true if a refers to a general purpose register (GPR).
+// R0/REGZERO is treated as a GPR.
+func isGPR(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG &&
+ s390x.REG_R0 <= a.Reg &&
+ a.Reg <= s390x.REG_R15
+}
+
+func isFPR(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_REG &&
+ s390x.REG_F0 <= a.Reg &&
+ a.Reg <= s390x.REG_F15
+}
+
+func isConst(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_CONST || a.Type == obj.TYPE_FCONST
+}
+
+// isIndirectMem returns true if a refers to a memory location addressable by a
+// register and an offset, such as:
+// x+8(R1)
+// and
+// 0(R10)
+// It returns false if the address contains an index register such as:
+// 16(R1)(R2*1)
+func isIndirectMem(a *obj.Addr) bool {
+ return a.Type == obj.TYPE_MEM &&
+ a.Index == 0 &&
+ (a.Name == obj.NAME_NONE || a.Name == obj.NAME_AUTO || a.Name == obj.NAME_PARAM)
+}
+
+/*
+ * the idea is to substitute
+ * one register for another
+ * from one MOV to another
+ * MOV a, R1
+ * ADD b, R1 / no use of R2
+ * MOV R1, R2
+ * would be converted to
+ * MOV a, R2
+ * ADD b, R2
+ * MOV R2, R1
+ * hopefully, then the former or latter MOV
+ * will be eliminated by copy propagation.
+ *
+ * r0 (the argument, not the register) is the MOV at the end of the
+ * above sequences. This returns 1 if it modified any instructions.
+ */
+func subprop(r0 *gc.Flow) bool {
+ p := r0.Prog
+ v1 := &p.From
+ if !regtyp(v1) {
+ return false
+ }
+ v2 := &p.To
+ if !regtyp(v2) {
+ return false
+ }
+ for r := gc.Uniqp(r0); r != nil; r = gc.Uniqp(r) {
+ if gc.Uniqs(r) == nil {
+ break
+ }
+ p = r.Prog
+ if p.As == obj.AVARDEF || p.As == obj.AVARKILL {
+ continue
+ }
+ if p.Info.Flags&gc.Call != 0 {
+ return false
+ }
+
+ if p.Info.Flags&(gc.RightRead|gc.RightWrite) == gc.RightWrite {
+ if p.To.Type == v1.Type {
+ if p.To.Reg == v1.Reg {
+ copysub(&p.To, v1, v2)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("gotit: %v->%v\n%v", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r.Prog)
+ if p.From.Type == v2.Type {
+ fmt.Printf(" excise")
+ }
+ fmt.Printf("\n")
+ }
+
+ for r = gc.Uniqs(r); r != r0; r = gc.Uniqs(r) {
+ p = r.Prog
+ copysub(&p.From, v1, v2)
+ copysub1(p, v1, v2)
+ copysub(&p.To, v1, v2)
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v\n", r.Prog)
+ }
+ }
+
+ v1.Reg, v2.Reg = v2.Reg, v1.Reg
+ if gc.Debug['P'] != 0 {
+ fmt.Printf("%v last\n", r.Prog)
+ }
+ return true
+ }
+ }
+ }
+
+ if copyau(&p.From, v2) || copyau1(p, v2) || copyau(&p.To, v2) {
+ break
+ }
+ }
+
+ return false
+}
+
+/*
+ * The idea is to remove redundant copies.
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * use v2 return fail (v1->v2 move must remain)
+ * -----------------
+ * v1->v2 F=0
+ * (use v2 s/v2/v1/)*
+ * set v1 F=1
+ * set v2 return success (caller can remove v1->v2 move)
+ */
+func copyprop(r0 *gc.Flow) bool {
+	p := r0.Prog
+	v1 := &p.From
+	v2 := &p.To
+	// A move from a register to itself is trivially redundant.
+	if copyas(v1, v2) {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("eliminating self-move: %v\n", r0.Prog)
+		}
+		return true
+	}
+
+	// Bump the pass id so copy1 can mark flow nodes it has visited.
+	gactive++
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("trying to eliminate %v->%v move from:\n%v\n", gc.Ctxt.Dconv(v1), gc.Ctxt.Dconv(v2), r0.Prog)
+	}
+	return copy1(v1, v2, r0.S1, 0)
+}
+
+// copy1 replaces uses of v2 with v1 starting at r and returns true if
+// all uses were rewritten.
+//
+// The flag f becomes non-zero once v1 may no longer hold the copied
+// value (at a flow merge, or after an instruction that touches v1);
+// from then on any remaining use of v2 forces failure.
+func copy1(v1 *obj.Addr, v2 *obj.Addr, r *gc.Flow, f int) bool {
+	// Already visited during this copyprop pass (gactive is the pass id).
+	if uint32(r.Active) == gactive {
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("act set; return true\n")
+		}
+		return true
+	}
+
+	r.Active = int32(gactive)
+	if gc.Debug['P'] != 0 {
+		fmt.Printf("copy1 replace %v with %v f=%d\n", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), f)
+	}
+	var t int
+	var p *obj.Prog
+	for ; r != nil; r = r.S1 {
+		p = r.Prog
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("%v", p)
+		}
+		if f == 0 && gc.Uniqp(r) == nil {
+			// Multiple predecessors; conservatively
+			// assume v1 was set on other path
+			f = 1
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; merge; f=%d", f)
+			}
+		}
+
+		// Classify how p touches v2 (see copyu for return codes).
+		t = copyu(p, v2, nil)
+		switch t {
+		case 2: /* rar, can't split */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v rar; return 0\n", gc.Ctxt.Dconv(v2))
+			}
+			return false
+
+		case 3: /* set */
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; %v set; return 1\n", gc.Ctxt.Dconv(v2))
+			}
+			return true
+
+		case 1, /* used, substitute */
+			4: /* use and set */
+			if f != 0 {
+				if gc.Debug['P'] == 0 {
+					return false
+				}
+				if t == 4 {
+					fmt.Printf("; %v used+set and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				} else {
+					fmt.Printf("; %v used and f=%d; return 0\n", gc.Ctxt.Dconv(v2), f)
+				}
+				return false
+			}
+
+			// Perform the actual substitution (s != nil mode).
+			if copyu(p, v2, v1) != 0 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; sub fail; return 0\n")
+				}
+				return false
+			}
+
+			if gc.Debug['P'] != 0 {
+				fmt.Printf("; sub %v->%v\n => %v", gc.Ctxt.Dconv(v2), gc.Ctxt.Dconv(v1), p)
+			}
+			if t == 4 {
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v used+set; return 1\n", gc.Ctxt.Dconv(v2))
+				}
+				return true
+			}
+		}
+
+		// If p sets or clobbers v1, later uses of v2 can no longer be
+		// rewritten to read v1.
+		if f == 0 {
+			t = copyu(p, v1, nil)
+			if f == 0 && (t == 2 || t == 3 || t == 4) {
+				f = 1
+				if gc.Debug['P'] != 0 {
+					fmt.Printf("; %v set and !f; f=%d", gc.Ctxt.Dconv(v1), f)
+				}
+			}
+		}
+
+		if gc.Debug['P'] != 0 {
+			fmt.Printf("\n")
+		}
+		// Recurse down the second successor; the loop follows S1.
+		if r.S2 != nil {
+			if !copy1(v1, v2, r.S2, f) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// If s==nil, copyu returns the set/use of v in p; otherwise, it
+// modifies p to replace reads of v with reads of s and returns 0 for
+// success or non-zero for failure.
+//
+// If s==nil, copy returns one of the following values:
+// 1 if v only used
+// 2 if v is set and used in one address (read-alter-rewrite;
+// can't substitute)
+// 3 if v is only set
+// 4 if v is set in one address and used in another (so addresses
+// can be rewritten independently)
+// 0 otherwise (not touched)
+func copyu(p *obj.Prog, v *obj.Addr, s *obj.Addr) int {
+	if p.From3Type() != obj.TYPE_NONE && p.From3Type() != obj.TYPE_CONST {
+		// Currently we never generate a From3 with anything other than a constant in it.
+		fmt.Printf("copyu: From3 (%v) not implemented\n", gc.Ctxt.Dconv(p.From3))
+	}
+
+	switch p.As {
+	default:
+		// Unknown instruction: claim read-alter-rewrite, the most
+		// conservative answer, so no substitution is attempted.
+		fmt.Printf("copyu: can't find %v\n", obj.Aconv(int(p.As)))
+		return 2
+
+	case /* read p->from, write p->to */
+		s390x.AMOVH,
+		s390x.AMOVHZ,
+		s390x.AMOVB,
+		s390x.AMOVBZ,
+		s390x.AMOVW,
+		s390x.AMOVWZ,
+		s390x.AMOVD,
+		s390x.ANEG,
+		s390x.AADDME,
+		s390x.AADDZE,
+		s390x.ASUBME,
+		s390x.ASUBZE,
+		s390x.AFMOVS,
+		s390x.AFMOVD,
+		s390x.ALEDBR,
+		s390x.AFNEG,
+		s390x.ALDEBR,
+		s390x.ACLFEBR,
+		s390x.ACLGEBR,
+		s390x.ACLFDBR,
+		s390x.ACLGDBR,
+		s390x.ACFEBRA,
+		s390x.ACGEBRA,
+		s390x.ACFDBRA,
+		s390x.ACGDBRA,
+		s390x.ACELFBR,
+		s390x.ACELGBR,
+		s390x.ACDLFBR,
+		s390x.ACDLGBR,
+		s390x.ACEFBRA,
+		s390x.ACEGBRA,
+		s390x.ACDFBRA,
+		s390x.ACDGBRA,
+		s390x.AFSQRT:
+
+		if s != nil {
+			copysub(&p.From, v, s)
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				copysub(&p.To, v, s)
+			}
+			return 0
+		}
+
+		if copyas(&p.To, v) {
+			// Fix up implicit from
+			if p.From.Type == obj.TYPE_NONE {
+				p.From = p.To
+			}
+			if copyau(&p.From, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			// p->to only indirectly uses v
+			return 1
+		}
+
+		return 0
+
+	// read p->from, read p->reg, write p->to
+	case s390x.AADD,
+		s390x.AADDC,
+		s390x.AADDE,
+		s390x.ASUB,
+		s390x.ASLW,
+		s390x.ASRW,
+		s390x.ASRAW,
+		s390x.ASLD,
+		s390x.ASRD,
+		s390x.ASRAD,
+		s390x.ARLL,
+		s390x.ARLLG,
+		s390x.AOR,
+		s390x.AORN,
+		s390x.AAND,
+		s390x.AANDN,
+		s390x.ANAND,
+		s390x.ANOR,
+		s390x.AXOR,
+		s390x.AMULLW,
+		s390x.AMULLD,
+		s390x.ADIVW,
+		s390x.ADIVD,
+		s390x.ADIVWU,
+		s390x.ADIVDU,
+		s390x.AFADDS,
+		s390x.AFADD,
+		s390x.AFSUBS,
+		s390x.AFSUB,
+		s390x.AFMULS,
+		s390x.AFMUL,
+		s390x.AFDIVS,
+		s390x.AFDIV:
+		if s != nil {
+			copysub(&p.From, v, s)
+			copysub1(p, v, s)
+
+			// Update only indirect uses of v in p->to
+			if !copyas(&p.To, v) {
+				copysub(&p.To, v, s)
+			}
+		}
+		// NOTE(review): unlike the case above there is no early
+		// "return 0" when s != nil, so a successful substitution can
+		// still fall through and return 3/4 below (treated as failure
+		// by copy1 when p writes v directly) — confirm this is the
+		// intended, conservative behavior.
+
+		if copyas(&p.To, v) {
+			if p.Reg == 0 {
+				// Fix up implicit reg (e.g., ADD
+				// R3,R4 -> ADD R3,R4,R4) so we can
+				// update reg and to separately.
+				p.Reg = p.To.Reg
+			}
+
+			if copyau(&p.From, v) {
+				return 4
+			}
+			if copyau1(p, v) {
+				return 4
+			}
+			return 3
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	// conditional branches: no register operands touched here.
+	case s390x.ABEQ,
+		s390x.ABGT,
+		s390x.ABGE,
+		s390x.ABLT,
+		s390x.ABLE,
+		s390x.ABNE,
+		s390x.ABVC,
+		s390x.ABVS:
+		return 0
+
+	case obj.ACHECKNIL, /* read p->from */
+		s390x.ACMP, /* read p->from, read p->to */
+		s390x.ACMPU,
+		s390x.ACMPW,
+		s390x.ACMPWU,
+		s390x.AFCMPO,
+		s390x.AFCMPU,
+		s390x.ACEBR,
+		s390x.AMVC,
+		s390x.ACLC,
+		s390x.AXC,
+		s390x.AOC,
+		s390x.ANC:
+		if s != nil {
+			copysub(&p.From, v, s)
+			copysub(&p.To, v, s)
+			return 0
+		}
+
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	// compare-and-branch: reads p->from and p->reg.
+	case s390x.ACMPBNE, s390x.ACMPBEQ,
+		s390x.ACMPBLT, s390x.ACMPBLE,
+		s390x.ACMPBGT, s390x.ACMPBGE,
+		s390x.ACMPUBNE, s390x.ACMPUBEQ,
+		s390x.ACMPUBLT, s390x.ACMPUBLE,
+		s390x.ACMPUBGT, s390x.ACMPUBGE:
+		if s != nil {
+			copysub(&p.From, v, s)
+			copysub1(p, v, s)
+			return 0
+		}
+		if copyau(&p.From, v) {
+			return 1
+		}
+		if copyau1(p, v) {
+			return 1
+		}
+		return 0
+
+	case s390x.ACLEAR:
+		if s != nil {
+			copysub(&p.To, v, s)
+			return 0
+		}
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	// go never generates a branch to a GPR
+	// read p->to
+	case s390x.ABR:
+		if s != nil {
+			copysub(&p.To, v, s)
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 1
+		}
+		return 0
+
+	case obj.ARET, obj.AUNDEF:
+		if s != nil {
+			return 0
+		}
+
+		// All registers die at this point, so claim
+		// everything is set (and not used).
+		return 3
+
+	case s390x.ABL:
+		// Calls clobber the argument register and any register used
+		// for the call target: read-alter-rewrite.
+		if v.Type == obj.TYPE_REG {
+			if s390x.REGARG != -1 && v.Reg == s390x.REGARG {
+				return 2
+			}
+
+			if p.From.Type == obj.TYPE_REG && p.From.Reg == v.Reg {
+				return 2
+			}
+		}
+
+		if s != nil {
+			copysub(&p.To, v, s)
+			return 0
+		}
+
+		if copyau(&p.To, v) {
+			return 4
+		}
+		return 3
+
+	case obj.ATEXT:
+		// Function entry defines the argument register.
+		if v.Type == obj.TYPE_REG {
+			if v.Reg == s390x.REGARG {
+				return 3
+			}
+		}
+		return 0
+
+	case obj.APCDATA,
+		obj.AFUNCDATA,
+		obj.AVARDEF,
+		obj.AVARKILL,
+		obj.AVARLIVE,
+		obj.AUSEFIELD,
+		obj.ANOP:
+		return 0
+	}
+}
+
+// copyas reports whether a and v address the same register.
+//
+// When a is a source operand this means the instruction reads the
+// register in v; when a is a destination operand it means the
+// instruction writes the register in v.
+func copyas(a *obj.Addr, v *obj.Addr) bool {
+	return regtyp(v) && a.Type == v.Type && a.Reg == v.Reg
+}
+
+// copyau reports whether a uses the register in v, either directly
+// (a and v address the same register) or indirectly (v is the base
+// register of the memory or address operand a).
+//
+// When a is a source operand this means the instruction reads the
+// register in v; when a is a destination operand the instruction
+// either reads or writes it (if !copyas(a, v), it only reads it).
+func copyau(a *obj.Addr, v *obj.Addr) bool {
+	if copyas(a, v) {
+		return true
+	}
+	if v.Type != obj.TYPE_REG {
+		return false
+	}
+	indirect := a.Type == obj.TYPE_MEM || (a.Type == obj.TYPE_ADDR && a.Reg != 0)
+	return indirect && a.Reg == v.Reg
+}
+
+// copyau1 reports whether p.Reg references the same register as v;
+// v must be a direct register reference.
+func copyau1(p *obj.Prog, v *obj.Addr) bool {
+	if !regtyp(v) || v.Reg == 0 {
+		return false
+	}
+	return p.Reg == v.Reg
+}
+
+// copysub rewrites a to use the register in s wherever a currently
+// uses the register in v (directly or as a base register).
+func copysub(a *obj.Addr, v *obj.Addr, s *obj.Addr) {
+	if !copyau(a, v) {
+		return
+	}
+	a.Reg = s.Reg
+}
+
+// copysub1 rewrites p.Reg to the register in s if it currently
+// references the register in v.
+func copysub1(p *obj.Prog, v *obj.Addr, s *obj.Addr) {
+	if !copyau1(p, v) {
+		return
+	}
+	p.Reg = s.Reg
+}
+
+// sameaddr reports whether a and v refer to the same storage
+// location: the same register, or the same stack slot (automatic or
+// parameter) at the same offset.
+func sameaddr(a *obj.Addr, v *obj.Addr) bool {
+	if a.Type != v.Type {
+		return false
+	}
+	if regtyp(v) && a.Reg == v.Reg {
+		return true
+	}
+	// Fix: the original compared v.Type against obj.NAME_AUTO and
+	// obj.NAME_PARAM, but those are name-class constants that live in
+	// a different namespace from the obj.TYPE_* values stored in
+	// Addr.Type, so the stack-slot comparison was testing the wrong
+	// field.  The symbol class is carried in Addr.Name.
+	if v.Name == obj.NAME_AUTO || v.Name == obj.NAME_PARAM {
+		if v.Offset == a.Offset {
+			return true
+		}
+	}
+	return false
+}
+
+// smallindir reports whether a is an indirect reference through the
+// register in reg with a displacement that fits in an unsigned
+// 12-bit field (0 <= offset < 4096).
+func smallindir(a *obj.Addr, reg *obj.Addr) bool {
+	if reg.Type != obj.TYPE_REG || a.Type != obj.TYPE_MEM {
+		return false
+	}
+	return a.Reg == reg.Reg && 0 <= a.Offset && a.Offset < 4096
+}
+
+// stackaddr reports whether a is the stack pointer register.
+func stackaddr(a *obj.Addr) bool {
+	if a.Type != obj.TYPE_REG {
+		return false
+	}
+	return a.Reg == s390x.REGSP
+}
+
+// trymergeopmv scans forward from the move at r and reports whether
+// the move's source register (p.From.Reg) is overwritten before any
+// later instruction reads it.
+// NOTE(review): description inferred from the scan below — confirm
+// against the caller before relying on it.
+func trymergeopmv(r *gc.Flow) bool {
+	p := r.Prog
+	reg := p.From.Reg
+	r2 := gc.Uniqs(r)
+
+	for ; r2 != nil; r2 = gc.Uniqs(r2) {
+		p2 := r2.Prog
+		switch p2.As {
+		case obj.ANOP:
+			continue
+
+		// control flow: give up rather than follow branches.
+		case s390x.AEXRL,
+			s390x.ASYSCALL,
+			s390x.ABR,
+			s390x.ABC,
+			s390x.ABEQ,
+			s390x.ABGE,
+			s390x.ABGT,
+			s390x.ABLE,
+			s390x.ABLT,
+			s390x.ABNE,
+			s390x.ACMPBEQ,
+			s390x.ACMPBGE,
+			s390x.ACMPBGT,
+			s390x.ACMPBLE,
+			s390x.ACMPBLT,
+			s390x.ACMPBNE:
+			return false
+
+		case s390x.ACMP,
+			s390x.ACMPU,
+			s390x.ACMPW,
+			s390x.ACMPWU:
+			if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
+				return false
+			}
+			if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
+				//different from other instructions, To.Reg is a source register in CMP
+				return false
+			}
+			continue
+
+		case s390x.AMOVD,
+			s390x.AMOVW, s390x.AMOVWZ,
+			s390x.AMOVH, s390x.AMOVHZ,
+			s390x.AMOVB, s390x.AMOVBZ:
+			if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.From.Type == obj.TYPE_ADDR && p2.From.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.To.Type == obj.TYPE_ADDR && p2.To.Reg == reg {
+				//For store operations
+				//also use; can't change
+				return false
+			}
+			if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
+				// move overwrites reg before any use: success.
+				return true
+			}
+			continue
+
+		case s390x.AMVC, s390x.ACLC, s390x.AXC, s390x.AOC, s390x.ANC:
+			// storage-to-storage operations use base registers only.
+			if p2.From.Type == obj.TYPE_MEM && p2.From.Reg == reg {
+				return false
+			}
+			if p2.To.Type == obj.TYPE_MEM && p2.To.Reg == reg {
+				return false
+			}
+			continue
+
+		default:
+			if p2.From.Type == obj.TYPE_REG && p2.From.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.From.Type == obj.TYPE_ADDR && p2.From.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.Reg != 0 && p2.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.From3 != nil && p2.From3.Type == obj.TYPE_REG && p2.From3.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.From3 != nil && p2.From3.Type == obj.TYPE_ADDR && p2.From3.Reg == reg {
+				//use; can't change
+				return false
+			}
+			if p2.To.Type == obj.TYPE_ADDR && p2.To.Reg == reg {
+				//For store operations
+				//also use; can't change
+				return false
+			}
+			if p2.To.Type == obj.TYPE_REG && p2.To.Reg == reg {
+				if p2.Reg == 0 {
+					//p2.To is also used as source in 2 operands instruction
+					return false
+				} else {
+					//def; can change
+					return true
+				}
+			}
+			continue
+		}
+	}
+	return false
+}
+
+// isMove reports whether p is one of the MOV*/FMOV* instructions.
+func isMove(p *obj.Prog) bool {
+	as := p.As
+	return as == s390x.AMOVD || as == s390x.AMOVW || as == s390x.AMOVWZ ||
+		as == s390x.AMOVH || as == s390x.AMOVHZ ||
+		as == s390x.AMOVB || as == s390x.AMOVBZ ||
+		as == s390x.AFMOVD || as == s390x.AFMOVS
+}
+
+// isLoad reports whether p moves a value from memory into a general
+// or floating point register.
+func isLoad(p *obj.Prog) bool {
+	return isMove(p) &&
+		p.From.Type == obj.TYPE_MEM &&
+		(isGPR(&p.To) || isFPR(&p.To))
+}
+
+// isStore reports whether p moves a register (general or floating
+// point) or a constant into memory.
+func isStore(p *obj.Prog) bool {
+	return isMove(p) &&
+		p.To.Type == obj.TYPE_MEM &&
+		(isGPR(&p.From) || isFPR(&p.From) || isConst(&p.From))
+}
+
+// fuseMoveChains looks to see if destination register is used
+// again and if not merges the moves.
+//
+// Look for this pattern (sequence of moves):
+// MOVB $17, R1
+// MOVBZ R1, R1
+// Replace with:
+// MOVBZ $17, R1
+func fuseMoveChains(r *gc.Flow) {
+	for ; r != nil; r = r.Link {
+		p := r.Prog
+		if !isMove(p) || !isGPR(&p.To) {
+			continue
+		}
+
+		// r is a move with a destination register
+		var move *gc.Flow
+		// Scan forward (through nops) for the first move that reads
+		// p's destination register.
+		for rr := gc.Uniqs(r); rr != nil; rr = gc.Uniqs(rr) {
+			if rr == r {
+				// loop
+				break
+			}
+			if gc.Uniqp(rr) == nil {
+				// branch target: leave alone
+				break
+			}
+			pp := rr.Prog
+			if isMove(pp) && isGPR(&pp.From) && pp.From.Reg == p.To.Reg {
+				if pp.To.Type == obj.TYPE_MEM {
+					// Can't fold a load or address into a store, and
+					// store-immediate only takes a 16-bit constant.
+					if p.From.Type == obj.TYPE_MEM ||
+						p.From.Type == obj.TYPE_ADDR {
+						break
+					}
+					if p.From.Type == obj.TYPE_CONST &&
+						int64(int16(p.From.Offset)) != p.From.Offset {
+						break
+					}
+				}
+				move = rr
+				break
+			}
+			if pp.As == obj.ANOP {
+				continue
+			}
+			break
+		}
+
+		// we have a move that reads from our destination reg, check if any future
+		// instructions also read from the reg
+		if move != nil && move.Prog.From.Reg != move.Prog.To.Reg {
+			// Breadth-first search of the successor graph: the fusion
+			// is only safe if every path redefines p.To before any use.
+			safe := true
+			visited := make(map[*gc.Flow]bool)
+			children := make([]*gc.Flow, 0)
+			if move.S1 != nil {
+				children = append(children, move.S1)
+			}
+			if move.S2 != nil {
+				children = append(children, move.S2)
+			}
+			for len(children) > 0 {
+				rr := children[0]
+				if visited[rr] {
+					children = children[1:]
+					continue
+				} else {
+					visited[rr] = true
+				}
+				pp := rr.Prog
+				t := copyu(pp, &p.To, nil)
+				if t == 0 { // not found
+					if rr.S1 != nil {
+						children = append(children, rr.S1)
+					}
+					if rr.S2 != nil {
+						children = append(children, rr.S2)
+					}
+					children = children[1:]
+					continue
+				}
+				if t == 3 { // set
+					children = children[1:]
+					continue
+				}
+				// t is 1, 2 or 4: use
+				safe = false
+				break
+			}
+			if !safe {
+				move = nil
+			}
+		}
+
+		if move == nil {
+			continue
+		}
+
+		pp := move.Prog
+		execute := false
+
+		// at this point we have something like:
+		// MOV* anything, reg1
+		// MOV* reg1, reg2/mem
+		// now check if this is a cast that cannot be forward propagated
+		if p.As == pp.As || regzer(&p.From) == 1 {
+			// if the operations match or our source is zero then we
+			// can always propagate
+			execute = true
+		}
+		if !execute && isConst(&p.From) {
+			// Pre-truncate the constant to the width of the first
+			// move so the second move's opcode can absorb it.
+			v := p.From.Offset
+			switch p.As {
+			case s390x.AMOVWZ:
+				v = int64(uint32(v))
+			case s390x.AMOVHZ:
+				v = int64(uint16(v))
+			case s390x.AMOVBZ:
+				v = int64(uint8(v))
+			case s390x.AMOVW:
+				v = int64(int32(v))
+			case s390x.AMOVH:
+				v = int64(int16(v))
+			case s390x.AMOVB:
+				v = int64(int8(v))
+			}
+			p.From.Offset = v
+			execute = true
+		}
+		if !execute && isGPR(&p.From) {
+			// Zero-extension chains: the fallthroughs accept a second
+			// zero-extending move that is no wider than the first.
+			switch p.As {
+			case s390x.AMOVD:
+				fallthrough
+			case s390x.AMOVWZ:
+				if pp.As == s390x.AMOVWZ {
+					execute = true
+					break
+				}
+				fallthrough
+			case s390x.AMOVHZ:
+				if pp.As == s390x.AMOVHZ {
+					execute = true
+					break
+				}
+				fallthrough
+			case s390x.AMOVBZ:
+				if pp.As == s390x.AMOVBZ {
+					execute = true
+					break
+				}
+			}
+		}
+		if !execute {
+			// Same-width extension pairs: the second move alone
+			// produces the same result.
+			if (p.As == s390x.AMOVB || p.As == s390x.AMOVBZ) && (pp.As == s390x.AMOVB || pp.As == s390x.AMOVBZ) {
+				execute = true
+			}
+			if (p.As == s390x.AMOVH || p.As == s390x.AMOVHZ) && (pp.As == s390x.AMOVH || pp.As == s390x.AMOVHZ) {
+				execute = true
+			}
+			if (p.As == s390x.AMOVW || p.As == s390x.AMOVWZ) && (pp.As == s390x.AMOVW || pp.As == s390x.AMOVWZ) {
+				execute = true
+			}
+		}
+
+		if execute {
+			pp.From = p.From
+			excise(r)
+		}
+	}
+	return
+}
+
+// fuseClear merges memory clear operations.
+//
+// Looks for this pattern (sequence of clears):
+// MOVD R0, n(R15)
+// MOVD R0, n+8(R15)
+// MOVD R0, n+16(R15)
+// Replaces with:
+// CLEAR $24, n(R15)
+func fuseClear(r *gc.Flow) {
+	// clear is the pending CLEAR instruction being grown; align is the
+	// store size that seeded it (8 keeps doubleword alignment).
+	var align int64
+	var clear *obj.Prog
+	for ; r != nil; r = r.Link {
+		// If there is a branch into the instruction stream then
+		// we can't fuse into previous instructions.
+		if gc.Uniqp(r) == nil {
+			clear = nil
+		}
+
+		p := r.Prog
+		if p.As == obj.ANOP {
+			continue
+		}
+		if p.As == s390x.AXC {
+			// XC with identical source and destination zeroes the
+			// destination; canonicalize it into a CLEAR.
+			if p.From.Reg == p.To.Reg && p.From.Offset == p.To.Offset {
+				// TODO(mundaym): merge clears?
+				p.As = s390x.ACLEAR
+				p.From.Offset = p.From3.Offset
+				p.From3 = nil
+				p.From.Type = obj.TYPE_CONST
+				p.From.Reg = 0
+				clear = p
+			} else {
+				clear = nil
+			}
+			continue
+		}
+
+		// Is our source a constant zero?
+		if regzer(&p.From) == 0 {
+			clear = nil
+			continue
+		}
+
+		// Are we moving to memory?
+		if p.To.Type != obj.TYPE_MEM ||
+			p.To.Index != 0 ||
+			p.To.Offset >= 4096 ||
+			!(p.To.Name == obj.NAME_NONE || p.To.Name == obj.NAME_AUTO || p.To.Name == obj.NAME_PARAM) {
+			clear = nil
+			continue
+		}
+
+		size := int64(0)
+		switch p.As {
+		default:
+			clear = nil
+			continue
+		case s390x.AMOVB, s390x.AMOVBZ:
+			size = 1
+		case s390x.AMOVH, s390x.AMOVHZ:
+			size = 2
+		case s390x.AMOVW, s390x.AMOVWZ:
+			size = 4
+		case s390x.AMOVD:
+			size = 8
+		}
+
+		// doubleword aligned clears should be kept doubleword
+		// aligned
+		if (size == 8 && align != 8) || (size != 8 && align == 8) {
+			clear = nil
+		}
+
+		if clear != nil &&
+			clear.To.Reg == p.To.Reg &&
+			clear.To.Name == p.To.Name &&
+			clear.To.Node == p.To.Node &&
+			clear.To.Sym == p.To.Sym {
+
+			// [min, max) is the byte range the pending CLEAR covers.
+			min := clear.To.Offset
+			max := clear.To.Offset + clear.From.Offset
+
+			// previous clear is already clearing this region
+			if min <= p.To.Offset && max >= p.To.Offset+size {
+				excise(r)
+				continue
+			}
+
+			// merge forwards
+			if max == p.To.Offset {
+				clear.From.Offset += size
+				excise(r)
+				continue
+			}
+
+			// merge backwards
+			if min-size == p.To.Offset {
+				clear.From.Offset += size
+				clear.To.Offset -= size
+				excise(r)
+				continue
+			}
+		}
+
+		// transform into clear
+		p.From.Type = obj.TYPE_CONST
+		p.From.Offset = size
+		p.From.Reg = 0
+		p.As = s390x.ACLEAR
+		clear = p
+		align = size
+	}
+}
+
+// fuseMultiple merges memory loads and stores into load multiple and
+// store multiple operations.
+//
+// Looks for this pattern (sequence of loads or stores):
+// MOVD R1, 0(R15)
+// MOVD R2, 8(R15)
+// MOVD R3, 16(R15)
+// Replaces with:
+// STMG R1, R3, 0(R15)
+func fuseMultiple(r *gc.Flow) {
+	// fused is the load/store (or already-fused STMG/STMY/LMG) that
+	// the current instruction may be appended to.
+	var fused *obj.Prog
+	for ; r != nil; r = r.Link {
+		// If there is a branch into the instruction stream then
+		// we can't fuse into previous instructions.
+		if gc.Uniqp(r) == nil {
+			fused = nil
+		}
+
+		p := r.Prog
+
+		isStore := isGPR(&p.From) && isIndirectMem(&p.To)
+		isLoad := isGPR(&p.To) && isIndirectMem(&p.From)
+
+		// are we a candidate?
+		size := int64(0)
+		switch p.As {
+		default:
+			fused = nil
+			continue
+		case obj.ANOP:
+			// skip over nops
+			continue
+		case s390x.AMOVW, s390x.AMOVWZ:
+			size = 4
+			// TODO(mundaym): 32-bit load multiple is currently not supported
+			// as it requires sign/zero extension.
+			if !isStore {
+				fused = nil
+				continue
+			}
+		case s390x.AMOVD:
+			size = 8
+			if !isLoad && !isStore {
+				fused = nil
+				continue
+			}
+		}
+
+		// If we merge two loads/stores with different source/destination Nodes
+		// then we will lose a reference the second Node which means that the
+		// compiler might mark the Node as unused and free its slot on the stack.
+		// TODO(mundaym): allow this by adding a dummy reference to the Node.
+		if fused == nil ||
+			fused.From.Node != p.From.Node ||
+			fused.From.Type != p.From.Type ||
+			fused.To.Node != p.To.Node ||
+			fused.To.Type != p.To.Type {
+			fused = p
+			continue
+		}
+
+		// check two addresses
+		ca := func(a, b *obj.Addr, offset int64) bool {
+			return a.Reg == b.Reg && a.Offset+offset == b.Offset &&
+				a.Sym == b.Sym && a.Name == b.Name
+		}
+
+		switch fused.As {
+		default:
+			fused = p
+		case s390x.AMOVW, s390x.AMOVWZ:
+			// Fuse a pair of adjacent 32-bit stores into STMY.
+			if size == 4 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 4) {
+				fused.As = s390x.ASTMY
+				fused.Reg = p.From.Reg
+				excise(r)
+			} else {
+				fused = p
+			}
+		case s390x.AMOVD:
+			// Fuse a pair of adjacent 64-bit stores (STMG) or loads (LMG).
+			if size == 8 && fused.From.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, 8) {
+				fused.As = s390x.ASTMG
+				fused.Reg = p.From.Reg
+				excise(r)
+			} else if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, 8) {
+				fused.As = s390x.ALMG
+				fused.Reg = fused.To.Reg
+				fused.To.Reg = p.To.Reg
+				excise(r)
+			} else {
+				fused = p
+			}
+		case s390x.ASTMG, s390x.ASTMY:
+			// Extend an existing store multiple by one register.
+			if (fused.As == s390x.ASTMY && size != 4) ||
+				(fused.As == s390x.ASTMG && size != 8) {
+				fused = p
+				continue
+			}
+			offset := size * int64(fused.Reg-fused.From.Reg+1)
+			if fused.Reg+1 == p.From.Reg && ca(&fused.To, &p.To, offset) {
+				fused.Reg = p.From.Reg
+				excise(r)
+			} else {
+				fused = p
+			}
+		case s390x.ALMG:
+			// Extend an existing load multiple by one register.
+			offset := 8 * int64(fused.To.Reg-fused.Reg+1)
+			if size == 8 && fused.To.Reg+1 == p.To.Reg && ca(&fused.From, &p.From, offset) {
+				fused.To.Reg = p.To.Reg
+				excise(r)
+			} else {
+				fused = p
+			}
+		}
+	}
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/prog.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/prog.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/prog.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/prog.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,185 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/gc"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// Shorthand flag combinations for operands that are both read and
+// written by an instruction.
+const (
+	LeftRdwr  uint32 = gc.LeftRead | gc.LeftWrite
+	RightRdwr uint32 = gc.RightRead | gc.RightWrite
+)
+
+// This table gives the basic information about instruction
+// generated by the compiler and processed in the optimizer.
+// See opt.h for bit definitions.
+//
+// Instructions not generated need not be listed.
+// As an exception to that rule, we typically write down all the
+// size variants of an operation even if we just use a subset.
+//
+// The table is formatted for 8-space tabs.
+var progtable = [s390x.ALAST]obj.ProgInfo{
+	obj.ATYPE:     {Flags: gc.Pseudo | gc.Skip},
+	obj.ATEXT:     {Flags: gc.Pseudo},
+	obj.AFUNCDATA: {Flags: gc.Pseudo},
+	obj.APCDATA:   {Flags: gc.Pseudo},
+	obj.AUNDEF:    {Flags: gc.Break},
+	obj.AUSEFIELD: {Flags: gc.OK},
+	obj.ACHECKNIL: {Flags: gc.LeftRead},
+	obj.AVARDEF:   {Flags: gc.Pseudo | gc.RightWrite},
+	obj.AVARKILL:  {Flags: gc.Pseudo | gc.RightWrite},
+	obj.AVARLIVE:  {Flags: gc.Pseudo | gc.LeftRead},
+
+	// NOP is an internal no-op that also stands
+	// for USED and SET annotations.
+	obj.ANOP: {Flags: gc.LeftRead | gc.RightWrite},
+
+	// Integer
+	s390x.AADD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASUB:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ANEG:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AAND:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AOR:     {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AXOR:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMULLD:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMULLW:  {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AMULHDU: {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite}, // NOTE(review): SizeL on a 64-bit high multiply looks odd - confirm
+	s390x.ADIVD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ADIVDU:  {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASLD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASRD:    {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ASRAD:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ARLL:    {Flags: gc.SizeL | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ARLLG:   {Flags: gc.SizeQ | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.ACMP:    {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+	s390x.ACMPU:   {Flags: gc.SizeQ | gc.LeftRead | gc.RightRead},
+
+	// Floating point.
+	s390x.AFADD:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFADDS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFSUB:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFSUBS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFMUL:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFMULS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFDIV:  {Flags: gc.SizeD | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFDIVS: {Flags: gc.SizeF | gc.LeftRead | gc.RegRead | gc.RightWrite},
+	s390x.AFCMPU: {Flags: gc.SizeD | gc.LeftRead | gc.RightRead},
+	s390x.ACEBR:  {Flags: gc.SizeF | gc.LeftRead | gc.RightRead},
+	s390x.ALEDBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ALDEBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.AFSQRT: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite},
+
+	// Conversions
+	s390x.ACEFBRA: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACDFBRA: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACEGBRA: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACDGBRA: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACFEBRA: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACFDBRA: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACGEBRA: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACGDBRA: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACELFBR: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACDLFBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACELGBR: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACDLGBR: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACLFEBR: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACLFDBR: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACLGEBR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+	s390x.ACLGDBR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Conv},
+
+	// Moves
+	s390x.AMOVB:  {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVBZ: {Flags: gc.SizeB | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVH:  {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVHZ: {Flags: gc.SizeW | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVW:  {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVWZ: {Flags: gc.SizeL | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AMOVD:  {Flags: gc.SizeQ | gc.LeftRead | gc.RightWrite | gc.Move},
+	s390x.AFMOVS: {Flags: gc.SizeF | gc.LeftRead | gc.RightWrite | gc.Move | gc.Conv},
+	s390x.AFMOVD: {Flags: gc.SizeD | gc.LeftRead | gc.RightWrite | gc.Move},
+
+	// Storage operations
+	s390x.AMVC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
+	s390x.ACLC: {Flags: gc.LeftRead | gc.LeftAddr | gc.RightRead | gc.RightAddr},
+	s390x.AXC:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
+	s390x.AOC:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
+	s390x.ANC:  {Flags: gc.LeftRead | gc.LeftAddr | gc.RightWrite | gc.RightAddr},
+
+	// Jumps
+	s390x.ABR:      {Flags: gc.Jump | gc.Break},
+	s390x.ABL:      {Flags: gc.Call},
+	s390x.ABEQ:     {Flags: gc.Cjmp},
+	s390x.ABNE:     {Flags: gc.Cjmp},
+	s390x.ABGE:     {Flags: gc.Cjmp},
+	s390x.ABLT:     {Flags: gc.Cjmp},
+	s390x.ABGT:     {Flags: gc.Cjmp},
+	s390x.ABLE:     {Flags: gc.Cjmp},
+	s390x.ACMPBEQ:  {Flags: gc.Cjmp},
+	s390x.ACMPBNE:  {Flags: gc.Cjmp},
+	s390x.ACMPBGE:  {Flags: gc.Cjmp},
+	s390x.ACMPBLT:  {Flags: gc.Cjmp},
+	s390x.ACMPBGT:  {Flags: gc.Cjmp},
+	s390x.ACMPBLE:  {Flags: gc.Cjmp},
+	s390x.ACMPUBEQ: {Flags: gc.Cjmp},
+	s390x.ACMPUBNE: {Flags: gc.Cjmp},
+	s390x.ACMPUBGE: {Flags: gc.Cjmp},
+	s390x.ACMPUBLT: {Flags: gc.Cjmp},
+	s390x.ACMPUBGT: {Flags: gc.Cjmp},
+	s390x.ACMPUBLE: {Flags: gc.Cjmp},
+
+	// Macros
+	s390x.ACLEAR: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
+
+	// Load/store multiple
+	s390x.ASTMG: {Flags: gc.SizeQ | gc.LeftRead | gc.RightAddr | gc.RightWrite},
+	s390x.ASTMY: {Flags: gc.SizeL | gc.LeftRead | gc.RightAddr | gc.RightWrite},
+	s390x.ALMG:  {Flags: gc.SizeQ | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
+	s390x.ALMY:  {Flags: gc.SizeL | gc.LeftAddr | gc.LeftRead | gc.RightWrite},
+
+	// Function return.
+	obj.ARET: {Flags: gc.Break},
+}
+
+// proginfo fills in p.Info for the optimizer: it copies the static
+// entry from progtable and then refines the flags and register
+// bitmaps based on p's actual operands.
+func proginfo(p *obj.Prog) {
+	info := &p.Info
+	*info = progtable[p.As]
+	if info.Flags == 0 {
+		gc.Fatalf("proginfo: unknown instruction %v", p)
+	}
+
+	// Two-operand form: with no explicit second source register the
+	// RegRead really refers to the destination operand.
+	if (info.Flags&gc.RegRead != 0) && p.Reg == 0 {
+		info.Flags &^= gc.RegRead
+		info.Flags |= gc.RightRead /*CanRegRead |*/
+	}
+
+	// Base registers of memory/address operands are index-style uses.
+	if (p.From.Type == obj.TYPE_MEM || p.From.Type == obj.TYPE_ADDR) && p.From.Reg != 0 {
+		info.Regindex |= RtoB(int(p.From.Reg))
+	}
+
+	if (p.To.Type == obj.TYPE_MEM || p.To.Type == obj.TYPE_ADDR) && p.To.Reg != 0 {
+		info.Regindex |= RtoB(int(p.To.Reg))
+	}
+
+	// Taking the address of a symbol reads the address, not the value.
+	if p.From.Type == obj.TYPE_ADDR && p.From.Sym != nil && (info.Flags&gc.LeftRead != 0) {
+		info.Flags &^= gc.LeftRead
+		info.Flags |= gc.LeftAddr
+	}
+
+	switch p.As {
+	// load multiple sets a range of registers
+	case s390x.ALMG, s390x.ALMY:
+		for r := p.Reg; r <= p.To.Reg; r++ {
+			info.Regset |= RtoB(int(r))
+		}
+	// store multiple reads a range of registers
+	case s390x.ASTMG, s390x.ASTMY:
+		for r := p.From.Reg; r <= p.Reg; r++ {
+			info.Reguse |= RtoB(int(r))
+		}
+	}
+}
diff -pruN 1.6.3-1/src/cmd/compile/internal/s390x/reg.go 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/reg.go
--- 1.6.3-1/src/cmd/compile/internal/s390x/reg.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/internal/s390x/reg.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,130 @@
+// Derived from Inferno utils/6c/reg.c
+// http://code.google.com/p/inferno-os/source/browse/utils/6c/reg.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import "cmd/internal/obj/s390x"
+import "cmd/compile/internal/gc"
+
+const (
+ NREGVAR = 32 /* 16 general + 16 floating */
+)
+
+var regname = []string{
+ ".R0",
+ ".R1",
+ ".R2",
+ ".R3",
+ ".R4",
+ ".R5",
+ ".R6",
+ ".R7",
+ ".R8",
+ ".R9",
+ ".R10",
+ ".R11",
+ ".R12",
+ ".R13",
+ ".R14",
+ ".R15",
+ ".F0",
+ ".F1",
+ ".F2",
+ ".F3",
+ ".F4",
+ ".F5",
+ ".F6",
+ ".F7",
+ ".F8",
+ ".F9",
+ ".F10",
+ ".F11",
+ ".F12",
+ ".F13",
+ ".F14",
+ ".F15",
+}
+
+func regnames(n *int) []string {
+ *n = NREGVAR
+ return regname
+}
+
+func excludedregs() uint64 {
+ // Exclude registers with fixed functions
+ return RtoB(s390x.REG_R0) |
+ RtoB(s390x.REGSP) |
+ RtoB(s390x.REGG) |
+ RtoB(s390x.REGTMP) |
+ RtoB(s390x.REGTMP2) |
+ RtoB(s390x.REG_LR)
+}
+
+func doregbits(r int) uint64 {
+ return 0
+}
+
+/*
+ * track register variables including external registers:
+ * bit reg
+ * 0 R0
+ * ... ...
+ * 15 R15
+ * 16+0 F0
+ * 16+1 F1
+ * ... ...
+ * 16+15 F15
+ */
+func RtoB(r int) uint64 {
+ if r >= s390x.REG_R0 && r <= s390x.REG_R15 {
+ return 1 << uint(r-s390x.REG_R0)
+ }
+ if r >= s390x.REG_F0 && r <= s390x.REG_F15 {
+ return 1 << uint(16+r-s390x.REG_F0)
+ }
+ return 0
+}
+
+func BtoR(b uint64) int {
+ b &= 0xffff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + s390x.REG_R0
+}
+
+func BtoF(b uint64) int {
+ b >>= 16
+ b &= 0xffff
+ if b == 0 {
+ return 0
+ }
+ return gc.Bitno(b) + s390x.REG_F0
+}
diff -pruN 1.6.3-1/src/cmd/compile/main.go 1.6.3-1ubuntu1/src/cmd/compile/main.go
--- 1.6.3-1/src/cmd/compile/main.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/compile/main.go 2016-07-21 13:36:09.000000000 +0000
@@ -10,6 +10,7 @@ import (
"cmd/compile/internal/arm64"
"cmd/compile/internal/mips64"
"cmd/compile/internal/ppc64"
+ "cmd/compile/internal/s390x"
"cmd/compile/internal/x86"
"cmd/internal/obj"
"fmt"
@@ -38,5 +39,7 @@ func main() {
mips64.Main()
case "ppc64", "ppc64le":
ppc64.Main()
+ case "s390x":
+ s390x.Main()
}
}
diff -pruN 1.6.3-1/src/cmd/dist/build.go 1.6.3-1ubuntu1/src/cmd/dist/build.go
--- 1.6.3-1/src/cmd/dist/build.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/dist/build.go 2016-07-21 13:36:09.000000000 +0000
@@ -58,6 +58,7 @@ var okgoarch = []string{
"mips64le",
"ppc64",
"ppc64le",
+ "s390x",
}
// The known operating systems.
diff -pruN 1.6.3-1/src/cmd/dist/buildtool.go 1.6.3-1ubuntu1/src/cmd/dist/buildtool.go
--- 1.6.3-1/src/cmd/dist/buildtool.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/dist/buildtool.go 2016-07-21 13:36:09.000000000 +0000
@@ -37,6 +37,7 @@ var bootstrapDirs = []string{
"compile/internal/mips64",
"compile/internal/ppc64",
"compile/internal/x86",
+ "compile/internal/s390x",
"internal/gcprog",
"internal/obj",
"internal/obj/arm",
@@ -44,6 +45,7 @@ var bootstrapDirs = []string{
"internal/obj/mips",
"internal/obj/ppc64",
"internal/obj/x86",
+ "internal/obj/s390x",
"link",
"link/internal/amd64",
"link/internal/arm",
@@ -52,6 +54,7 @@ var bootstrapDirs = []string{
"link/internal/mips64",
"link/internal/ppc64",
"link/internal/x86",
+ "link/internal/s390x",
}
func bootstrapBuildTools() {
diff -pruN 1.6.3-1/src/cmd/dist/test.go 1.6.3-1ubuntu1/src/cmd/dist/test.go
--- 1.6.3-1/src/cmd/dist/test.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/dist/test.go 2016-07-21 13:36:09.000000000 +0000
@@ -667,7 +667,7 @@ func (t *tester) supportedBuildmode(mode
return false
case "shared":
switch pair {
- case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le":
+ case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-ppc64le", "linux-s390x":
return true
}
return false
@@ -711,7 +711,7 @@ func (t *tester) cgoTest(dt *distTest) e
case "android-arm",
"dragonfly-386", "dragonfly-amd64",
"freebsd-386", "freebsd-amd64", "freebsd-arm",
- "linux-386", "linux-amd64", "linux-arm",
+ "linux-386", "linux-amd64", "linux-arm", "linux-s390x",
"netbsd-386", "netbsd-amd64":
cmd := t.addCmd(dt, "misc/cgo/test", "go", "test", "-ldflags", "-linkmode=external")
diff -pruN 1.6.3-1/src/cmd/dist/util.go 1.6.3-1ubuntu1/src/cmd/dist/util.go
--- 1.6.3-1/src/cmd/dist/util.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/dist/util.go 2016-07-21 13:36:09.000000000 +0000
@@ -452,6 +452,8 @@ func main() {
} else {
gohostarch = "mips64le"
}
+ case strings.Contains(out, "s390x"):
+ gohostarch = "s390x"
case gohostos == "darwin":
if strings.Contains(run("", CheckExit, "uname", "-v"), "RELEASE_ARM_") {
gohostarch = "arm"
diff -pruN 1.6.3-1/src/cmd/go/build.go 1.6.3-1ubuntu1/src/cmd/go/build.go
--- 1.6.3-1/src/cmd/go/build.go 2016-07-18 16:24:06.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/go/build.go 2016-07-21 13:36:09.000000000 +0000
@@ -377,7 +377,7 @@ func buildModeInit() {
fatalf("-buildmode=pie not supported by gccgo")
} else {
switch platform {
- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le",
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x",
"android/amd64", "android/arm", "android/arm64", "android/386":
codegenArg = "-shared"
default:
@@ -391,7 +391,7 @@ func buildModeInit() {
codegenArg = "-fPIC"
} else {
switch platform {
- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
default:
fatalf("-buildmode=shared not supported on %s\n", platform)
}
@@ -409,8 +409,7 @@ func buildModeInit() {
codegenArg = "-fPIC"
} else {
switch platform {
- case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le":
- buildAsmflags = append(buildAsmflags, "-D=GOBUILDMODE_shared=1")
+ case "linux/386", "linux/amd64", "linux/arm", "linux/arm64", "linux/ppc64le", "linux/s390x":
default:
fatalf("-linkshared not supported on %s\n", platform)
}
@@ -2315,7 +2314,15 @@ func (gcToolchain) asm(b *builder, p *Pa
// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
inc := filepath.Join(goroot, "pkg", "include")
sfile = mkAbs(p.Dir, sfile)
- args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags, sfile}
+ args := []interface{}{buildToolExec, tool("asm"), "-o", ofile, "-trimpath", b.work, "-I", obj, "-I", inc, "-D", "GOOS_" + goos, "-D", "GOARCH_" + goarch, buildAsmflags}
+ if p.ImportPath == "runtime" && goarch == "386" {
+ for _, arg := range buildAsmflags {
+ if arg == "-dynlink" {
+ args = append(args, "-D=GOBUILDMODE_shared=1")
+ }
+ }
+ }
+ args = append(args, sfile)
if err := b.run(p.Dir, p.ImportPath, nil, args...); err != nil {
return err
}
@@ -2976,6 +2983,8 @@ func (b *builder) gccArchArgs() []string
return []string{"-m64"}
case "arm":
return []string{"-marm"} // not thumb
+ case "s390x":
+ return []string{"-m64", "-march=z196"}
}
return nil
}
diff -pruN 1.6.3-1/src/cmd/internal/obj/link.go 1.6.3-1ubuntu1/src/cmd/internal/obj/link.go
--- 1.6.3-1/src/cmd/internal/obj/link.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/link.go 2016-07-21 13:36:09.000000000 +0000
@@ -521,6 +521,9 @@ const (
// R_ADDRPOWER_DS but inserts the offset from the TOC to the address of the the
// relocated symbol rather than the symbol's address.
R_ADDRPOWER_TOCREL_DS
+
+ // R_PCRELDBL is for S390x (z) 2-byte aligned addresses (e.g. R_390_PLT32DBL)
+ R_PCRELDBL
)
type Auto struct {
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/anames.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/anames.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/anames.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/anames.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,650 @@
+// Generated by stringer -i a.out.go -o anames.go -p s390x
+// Do not edit.
+
+package s390x
+
+import "cmd/internal/obj"
+
+var Anames = []string{
+ obj.A_ARCHSPECIFIC: "ADD",
+ "ADDC",
+ "ADDME",
+ "ADDE",
+ "ADDZE",
+ "DIVW",
+ "DIVWU",
+ "DIVD",
+ "DIVDU",
+ "MULLW",
+ "MULLD",
+ "MULHDU",
+ "SUB",
+ "SUBC",
+ "SUBME",
+ "SUBV",
+ "SUBE",
+ "SUBZE",
+ "NEG",
+ "MOVWBR",
+ "MOVB",
+ "MOVBZ",
+ "MOVH",
+ "MOVHBR",
+ "MOVHZ",
+ "MOVW",
+ "MOVWZ",
+ "MOVD",
+ "MOVDBR",
+ "AND",
+ "ANDN",
+ "NAND",
+ "NOR",
+ "OR",
+ "ORN",
+ "XOR",
+ "SLW",
+ "SLD",
+ "SRW",
+ "SRAW",
+ "SRD",
+ "SRAD",
+ "RLL",
+ "RLLG",
+ "FABS",
+ "FADD",
+ "FADDS",
+ "FCMPO",
+ "FCMPU",
+ "CEBR",
+ "FDIV",
+ "FDIVS",
+ "FMADD",
+ "FMADDS",
+ "FMOVD",
+ "FMOVS",
+ "FMSUB",
+ "FMSUBS",
+ "FMUL",
+ "FMULS",
+ "FNABS",
+ "FNEG",
+ "FNMADD",
+ "FNMADDS",
+ "FNMSUB",
+ "FNMSUBS",
+ "LEDBR",
+ "LDEBR",
+ "FSUB",
+ "FSUBS",
+ "FSQRT",
+ "FSQRTS",
+ "CEFBRA",
+ "CDFBRA",
+ "CEGBRA",
+ "CDGBRA",
+ "CFEBRA",
+ "CFDBRA",
+ "CGEBRA",
+ "CGDBRA",
+ "CELFBR",
+ "CDLFBR",
+ "CELGBR",
+ "CDLGBR",
+ "CLFEBR",
+ "CLFDBR",
+ "CLGEBR",
+ "CLGDBR",
+ "CMP",
+ "CMPU",
+ "CMPW",
+ "CMPWU",
+ "CS",
+ "CSG",
+ "SYNC",
+ "BC",
+ "BCL",
+ "BEQ",
+ "BGE",
+ "BGT",
+ "BLE",
+ "BLT",
+ "BNE",
+ "BVC",
+ "BVS",
+ "SYSCALL",
+ "CMPBEQ",
+ "CMPBGE",
+ "CMPBGT",
+ "CMPBLE",
+ "CMPBLT",
+ "CMPBNE",
+ "CMPUBEQ",
+ "CMPUBGE",
+ "CMPUBGT",
+ "CMPUBLE",
+ "CMPUBLT",
+ "CMPUBNE",
+ "MVC",
+ "CLC",
+ "XC",
+ "OC",
+ "NC",
+ "EXRL",
+ "LARL",
+ "LA",
+ "LAY",
+ "LMY",
+ "LMG",
+ "STMY",
+ "STMG",
+ "STCK",
+ "STCKC",
+ "STCKE",
+ "STCKF",
+ "CLEAR",
+ "VA",
+ "VAB",
+ "VAH",
+ "VAF",
+ "VAG",
+ "VAQ",
+ "VACC",
+ "VACCB",
+ "VACCH",
+ "VACCF",
+ "VACCG",
+ "VACCQ",
+ "VAC",
+ "VACQ",
+ "VACCC",
+ "VACCCQ",
+ "VN",
+ "VNC",
+ "VAVG",
+ "VAVGB",
+ "VAVGH",
+ "VAVGF",
+ "VAVGG",
+ "VAVGL",
+ "VAVGLB",
+ "VAVGLH",
+ "VAVGLF",
+ "VAVGLG",
+ "VCKSM",
+ "VCEQ",
+ "VCEQB",
+ "VCEQH",
+ "VCEQF",
+ "VCEQG",
+ "VCEQBS",
+ "VCEQHS",
+ "VCEQFS",
+ "VCEQGS",
+ "VCH",
+ "VCHB",
+ "VCHH",
+ "VCHF",
+ "VCHG",
+ "VCHBS",
+ "VCHHS",
+ "VCHFS",
+ "VCHGS",
+ "VCHL",
+ "VCHLB",
+ "VCHLH",
+ "VCHLF",
+ "VCHLG",
+ "VCHLBS",
+ "VCHLHS",
+ "VCHLFS",
+ "VCHLGS",
+ "VCLZ",
+ "VCLZB",
+ "VCLZH",
+ "VCLZF",
+ "VCLZG",
+ "VCTZ",
+ "VCTZB",
+ "VCTZH",
+ "VCTZF",
+ "VCTZG",
+ "VEC",
+ "VECB",
+ "VECH",
+ "VECF",
+ "VECG",
+ "VECL",
+ "VECLB",
+ "VECLH",
+ "VECLF",
+ "VECLG",
+ "VERIM",
+ "VERIMB",
+ "VERIMH",
+ "VERIMF",
+ "VERIMG",
+ "VERLL",
+ "VERLLB",
+ "VERLLH",
+ "VERLLF",
+ "VERLLG",
+ "VERLLV",
+ "VERLLVB",
+ "VERLLVH",
+ "VERLLVF",
+ "VERLLVG",
+ "VESLV",
+ "VESLVB",
+ "VESLVH",
+ "VESLVF",
+ "VESLVG",
+ "VESL",
+ "VESLB",
+ "VESLH",
+ "VESLF",
+ "VESLG",
+ "VESRA",
+ "VESRAB",
+ "VESRAH",
+ "VESRAF",
+ "VESRAG",
+ "VESRAV",
+ "VESRAVB",
+ "VESRAVH",
+ "VESRAVF",
+ "VESRAVG",
+ "VESRL",
+ "VESRLB",
+ "VESRLH",
+ "VESRLF",
+ "VESRLG",
+ "VESRLV",
+ "VESRLVB",
+ "VESRLVH",
+ "VESRLVF",
+ "VESRLVG",
+ "VX",
+ "VFAE",
+ "VFAEB",
+ "VFAEH",
+ "VFAEF",
+ "VFAEBS",
+ "VFAEHS",
+ "VFAEFS",
+ "VFAEZB",
+ "VFAEZH",
+ "VFAEZF",
+ "VFAEZBS",
+ "VFAEZHS",
+ "VFAEZFS",
+ "VFEE",
+ "VFEEB",
+ "VFEEH",
+ "VFEEF",
+ "VFEEBS",
+ "VFEEHS",
+ "VFEEFS",
+ "VFEEZB",
+ "VFEEZH",
+ "VFEEZF",
+ "VFEEZBS",
+ "VFEEZHS",
+ "VFEEZFS",
+ "VFENE",
+ "VFENEB",
+ "VFENEH",
+ "VFENEF",
+ "VFENEBS",
+ "VFENEHS",
+ "VFENEFS",
+ "VFENEZB",
+ "VFENEZH",
+ "VFENEZF",
+ "VFENEZBS",
+ "VFENEZHS",
+ "VFENEZFS",
+ "VFA",
+ "VFADB",
+ "WFADB",
+ "WFK",
+ "WFKDB",
+ "VFCE",
+ "VFCEDB",
+ "VFCEDBS",
+ "WFCEDB",
+ "WFCEDBS",
+ "VFCH",
+ "VFCHDB",
+ "VFCHDBS",
+ "WFCHDB",
+ "WFCHDBS",
+ "VFCHE",
+ "VFCHEDB",
+ "VFCHEDBS",
+ "WFCHEDB",
+ "WFCHEDBS",
+ "WFC",
+ "WFCDB",
+ "VCDG",
+ "VCDGB",
+ "WCDGB",
+ "VCDLG",
+ "VCDLGB",
+ "WCDLGB",
+ "VCGD",
+ "VCGDB",
+ "WCGDB",
+ "VCLGD",
+ "VCLGDB",
+ "WCLGDB",
+ "VFD",
+ "VFDDB",
+ "WFDDB",
+ "VLDE",
+ "VLDEB",
+ "WLDEB",
+ "VLED",
+ "VLEDB",
+ "WLEDB",
+ "VFM",
+ "VFMDB",
+ "WFMDB",
+ "VFMA",
+ "VFMADB",
+ "WFMADB",
+ "VFMS",
+ "VFMSDB",
+ "WFMSDB",
+ "VFPSO",
+ "VFPSODB",
+ "WFPSODB",
+ "VFLCDB",
+ "WFLCDB",
+ "VFLNDB",
+ "WFLNDB",
+ "VFLPDB",
+ "WFLPDB",
+ "VFSQ",
+ "VFSQDB",
+ "WFSQDB",
+ "VFS",
+ "VFSDB",
+ "WFSDB",
+ "VFTCI",
+ "VFTCIDB",
+ "WFTCIDB",
+ "VGFM",
+ "VGFMB",
+ "VGFMH",
+ "VGFMF",
+ "VGFMG",
+ "VGFMA",
+ "VGFMAB",
+ "VGFMAH",
+ "VGFMAF",
+ "VGFMAG",
+ "VGEF",
+ "VGEG",
+ "VGBM",
+ "VZERO",
+ "VONE",
+ "VGM",
+ "VGMB",
+ "VGMH",
+ "VGMF",
+ "VGMG",
+ "VISTR",
+ "VISTRB",
+ "VISTRH",
+ "VISTRF",
+ "VISTRBS",
+ "VISTRHS",
+ "VISTRFS",
+ "VL",
+ "VLR",
+ "VLREP",
+ "VLREPB",
+ "VLREPH",
+ "VLREPF",
+ "VLREPG",
+ "VLC",
+ "VLCB",
+ "VLCH",
+ "VLCF",
+ "VLCG",
+ "VLEH",
+ "VLEF",
+ "VLEG",
+ "VLEB",
+ "VLEIH",
+ "VLEIF",
+ "VLEIG",
+ "VLEIB",
+ "VFI",
+ "VFIDB",
+ "WFIDB",
+ "VLGV",
+ "VLGVB",
+ "VLGVH",
+ "VLGVF",
+ "VLGVG",
+ "VLLEZ",
+ "VLLEZB",
+ "VLLEZH",
+ "VLLEZF",
+ "VLLEZG",
+ "VLM",
+ "VLP",
+ "VLPB",
+ "VLPH",
+ "VLPF",
+ "VLPG",
+ "VLBB",
+ "VLVG",
+ "VLVGB",
+ "VLVGH",
+ "VLVGF",
+ "VLVGG",
+ "VLVGP",
+ "VLL",
+ "VMX",
+ "VMXB",
+ "VMXH",
+ "VMXF",
+ "VMXG",
+ "VMXL",
+ "VMXLB",
+ "VMXLH",
+ "VMXLF",
+ "VMXLG",
+ "VMRH",
+ "VMRHB",
+ "VMRHH",
+ "VMRHF",
+ "VMRHG",
+ "VMRL",
+ "VMRLB",
+ "VMRLH",
+ "VMRLF",
+ "VMRLG",
+ "VMN",
+ "VMNB",
+ "VMNH",
+ "VMNF",
+ "VMNG",
+ "VMNL",
+ "VMNLB",
+ "VMNLH",
+ "VMNLF",
+ "VMNLG",
+ "VMAE",
+ "VMAEB",
+ "VMAEH",
+ "VMAEF",
+ "VMAH",
+ "VMAHB",
+ "VMAHH",
+ "VMAHF",
+ "VMALE",
+ "VMALEB",
+ "VMALEH",
+ "VMALEF",
+ "VMALH",
+ "VMALHB",
+ "VMALHH",
+ "VMALHF",
+ "VMALO",
+ "VMALOB",
+ "VMALOH",
+ "VMALOF",
+ "VMAL",
+ "VMALB",
+ "VMALHW",
+ "VMALF",
+ "VMAO",
+ "VMAOB",
+ "VMAOH",
+ "VMAOF",
+ "VME",
+ "VMEB",
+ "VMEH",
+ "VMEF",
+ "VMH",
+ "VMHB",
+ "VMHH",
+ "VMHF",
+ "VMLE",
+ "VMLEB",
+ "VMLEH",
+ "VMLEF",
+ "VMLH",
+ "VMLHB",
+ "VMLHH",
+ "VMLHF",
+ "VMLO",
+ "VMLOB",
+ "VMLOH",
+ "VMLOF",
+ "VML",
+ "VMLB",
+ "VMLHW",
+ "VMLF",
+ "VMO",
+ "VMOB",
+ "VMOH",
+ "VMOF",
+ "VNO",
+ "VNOT",
+ "VO",
+ "VPK",
+ "VPKH",
+ "VPKF",
+ "VPKG",
+ "VPKLS",
+ "VPKLSH",
+ "VPKLSF",
+ "VPKLSG",
+ "VPKLSHS",
+ "VPKLSFS",
+ "VPKLSGS",
+ "VPKS",
+ "VPKSH",
+ "VPKSF",
+ "VPKSG",
+ "VPKSHS",
+ "VPKSFS",
+ "VPKSGS",
+ "VPERM",
+ "VPDI",
+ "VPOPCT",
+ "VREP",
+ "VREPB",
+ "VREPH",
+ "VREPF",
+ "VREPG",
+ "VREPI",
+ "VREPIB",
+ "VREPIH",
+ "VREPIF",
+ "VREPIG",
+ "VSCEF",
+ "VSCEG",
+ "VSEL",
+ "VSL",
+ "VSLB",
+ "VSLDB",
+ "VSRA",
+ "VSRAB",
+ "VSRL",
+ "VSRLB",
+ "VSEG",
+ "VSEGB",
+ "VSEGH",
+ "VSEGF",
+ "VST",
+ "VSTEH",
+ "VSTEF",
+ "VSTEG",
+ "VSTEB",
+ "VSTM",
+ "VSTL",
+ "VSTRC",
+ "VSTRCB",
+ "VSTRCH",
+ "VSTRCF",
+ "VSTRCBS",
+ "VSTRCHS",
+ "VSTRCFS",
+ "VSTRCZB",
+ "VSTRCZH",
+ "VSTRCZF",
+ "VSTRCZBS",
+ "VSTRCZHS",
+ "VSTRCZFS",
+ "VS",
+ "VSB",
+ "VSH",
+ "VSF",
+ "VSG",
+ "VSQ",
+ "VSCBI",
+ "VSCBIB",
+ "VSCBIH",
+ "VSCBIF",
+ "VSCBIG",
+ "VSCBIQ",
+ "VSBCBI",
+ "VSBCBIQ",
+ "VSBI",
+ "VSBIQ",
+ "VSUMG",
+ "VSUMGH",
+ "VSUMGF",
+ "VSUMQ",
+ "VSUMQF",
+ "VSUMQG",
+ "VSUM",
+ "VSUMB",
+ "VSUMH",
+ "VTM",
+ "VUPH",
+ "VUPHB",
+ "VUPHH",
+ "VUPHF",
+ "VUPLH",
+ "VUPLHB",
+ "VUPLHH",
+ "VUPLHF",
+ "VUPLL",
+ "VUPLLB",
+ "VUPLLH",
+ "VUPLLF",
+ "VUPL",
+ "VUPLB",
+ "VUPLHW",
+ "VUPLF",
+ "BYTE",
+ "WORD",
+ "DWORD",
+ "LAST",
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/anamesz.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/anamesz.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/anamesz.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/anamesz.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,35 @@
+package s390x
+
+var cnamesz = []string{
+ "NONE",
+ "REG",
+ "FREG",
+ "VREG",
+ "AREG",
+ "ZCON",
+ "SCON",
+ "UCON",
+ "ADDCON",
+ "ANDCON",
+ "LCON",
+ "DCON",
+ "SACON",
+ "LACON",
+ "DACON",
+ "SBRA",
+ "LBRA",
+ "SAUTO",
+ "LAUTO",
+ "ZOREG",
+ "SOREG",
+ "LOREG",
+ "TLS_LE",
+ "TLS_IE",
+ "GOK",
+ "ADDR",
+ "SYMADDR",
+ "GOTADDR",
+ "TEXTSIZE",
+ "ANY",
+ "NCLASS",
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/a.out.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/a.out.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/a.out.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/a.out.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,888 @@
+// Based on cmd/internal/obj/ppc64/a.out.go.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import "cmd/internal/obj"
+
+//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p s390x
+
+const (
+ NSNAME = 8
+ NSYM = 50
+ NREG = 16 // number of general purpose registers
+ NFREG = 16 // number of floating point registers
+)
+
+const (
+ REG_R0 = obj.RBaseS390X + iota
+ REG_R1
+ REG_R2
+ REG_R3
+ REG_R4
+ REG_R5
+ REG_R6
+ REG_R7
+ REG_R8
+ REG_R9
+ REG_R10
+ REG_R11
+ REG_R12
+ REG_R13
+ REG_R14
+ REG_R15
+
+ REG_F0
+ REG_F1
+ REG_F2
+ REG_F3
+ REG_F4
+ REG_F5
+ REG_F6
+ REG_F7
+ REG_F8
+ REG_F9
+ REG_F10
+ REG_F11
+ REG_F12
+ REG_F13
+ REG_F14
+ REG_F15
+
+ // V0-V15 are aliases for F0-F15
+ // We keep them in a separate space to make printing etc. easier
+ // If the code generator ever emits vector instructions it will
+ // need to take into account the aliasing.
+ REG_V0
+ REG_V1
+ REG_V2
+ REG_V3
+ REG_V4
+ REG_V5
+ REG_V6
+ REG_V7
+ REG_V8
+ REG_V9
+ REG_V10
+ REG_V11
+ REG_V12
+ REG_V13
+ REG_V14
+ REG_V15
+ REG_V16
+ REG_V17
+ REG_V18
+ REG_V19
+ REG_V20
+ REG_V21
+ REG_V22
+ REG_V23
+ REG_V24
+ REG_V25
+ REG_V26
+ REG_V27
+ REG_V28
+ REG_V29
+ REG_V30
+ REG_V31
+
+ REG_AR0
+ REG_AR1
+ REG_AR2
+ REG_AR3
+ REG_AR4
+ REG_AR5
+ REG_AR6
+ REG_AR7
+ REG_AR8
+ REG_AR9
+ REG_AR10
+ REG_AR11
+ REG_AR12
+ REG_AR13
+ REG_AR14
+ REG_AR15
+
+ REG_RESERVED // end of allocated registers
+
+ REGZERO = REG_R0 // set to zero
+ REGARG = -1 // -1 disables passing the first argument in register
+ REGRT1 = REG_R3 // used during zeroing of the stack - not reserved
+ REGRT2 = REG_R4 // used during zeroing of the stack - not reserved
+ REGTMP = REG_R10 // scratch register used in the assembler and linker
+ REGTMP2 = REG_R11 // scratch register used in the assembler and linker
+ REGCTXT = REG_R12 // context for closures
+ REGG = REG_R13 // G
+ REG_LR = REG_R14 // link register
+ REGSP = REG_R15 // stack pointer
+)
+
+const (
+ BIG = 32768 - 8
+ DISP12 = 4096
+ DISP16 = 65536
+ DISP20 = 1048576
+)
+
+const (
+ // mark flags
+ LABEL = 1 << 0
+ LEAF = 1 << 1
+ FLOAT = 1 << 2
+ BRANCH = 1 << 3
+ LOAD = 1 << 4
+ FCMP = 1 << 5
+ SYNC = 1 << 6
+ LIST = 1 << 7
+ FOLL = 1 << 8
+ NOSCHED = 1 << 9
+)
+
+const ( // comments from func aclass in asmz.go
+ C_NONE = iota
+ C_REG // general-purpose register (64-bit)
+ C_FREG // floating-point register (64-bit)
+ C_VREG // vector register (128-bit)
+ C_AREG // access register (32-bit)
+ C_ZCON // constant == 0
+ C_SCON // 0 <= constant <= 0x7fff (positive int16)
+ C_UCON // constant & 0xffff == 0 (int16 or uint16)
+ C_ADDCON // 0 > constant >= -0x8000 (negative int16)
+ C_ANDCON // constant <= 0xffff
+ C_LCON // constant (int32 or uint32)
+ C_DCON // constant (int64 or uint64)
+ C_SACON // computed address, 16-bit displacement, possibly SP-relative
+ C_LACON // computed address, 32-bit displacement, possibly SP-relative
+	C_DACON   // computed address, 64-bit displacement?
+ C_SBRA // short branch
+ C_LBRA // long branch
+ C_SAUTO // short auto
+ C_LAUTO // long auto
+ C_ZOREG // heap address, register-based, displacement == 0
+ C_SOREG // heap address, register-based, int16 displacement
+ C_LOREG // heap address, register-based, int32 displacement
+ C_TLS_LE // TLS - local exec model (for executables)
+ C_TLS_IE // TLS - initial exec model (for shared libraries loaded at program startup)
+ C_GOK // general address
+ C_ADDR // relocation for extern or static symbols (loads and stores)
+ C_SYMADDR // relocation for extern or static symbols (address taking)
+ C_GOTADDR // GOT slot for a symbol in -dynlink mode
+ C_TEXTSIZE // text size
+ C_ANY
+ C_NCLASS // must be the last
+)
+
+const (
+ // integer arithmetic
+ AADD = obj.ABaseS390X + obj.A_ARCHSPECIFIC + iota
+ AADDC
+ AADDME
+ AADDE
+ AADDZE
+ ADIVW
+ ADIVWU
+ ADIVD
+ ADIVDU
+ AMULLW
+ AMULLD
+ AMULHDU
+ ASUB
+ ASUBC
+ ASUBME
+ ASUBV
+ ASUBE
+ ASUBZE
+ ANEG
+
+ // integer moves
+ AMOVWBR
+ AMOVB
+ AMOVBZ
+ AMOVH
+ AMOVHBR
+ AMOVHZ
+ AMOVW
+ AMOVWZ
+ AMOVD
+ AMOVDBR
+
+ // integer bitwise
+ AAND
+ AANDN
+ ANAND
+ ANOR
+ AOR
+ AORN
+ AXOR
+ ASLW
+ ASLD
+ ASRW
+ ASRAW
+ ASRD
+ ASRAD
+ ARLL
+ ARLLG
+
+ // floating point
+ AFABS
+ AFADD
+ AFADDS
+ AFCMPO
+ AFCMPU
+ ACEBR
+ AFDIV
+ AFDIVS
+ AFMADD
+ AFMADDS
+ AFMOVD
+ AFMOVS
+ AFMSUB
+ AFMSUBS
+ AFMUL
+ AFMULS
+ AFNABS
+ AFNEG
+ AFNMADD
+ AFNMADDS
+ AFNMSUB
+ AFNMSUBS
+ ALEDBR
+ ALDEBR
+ AFSUB
+ AFSUBS
+ AFSQRT
+ AFSQRTS
+
+ // convert from int32/int64 to float/float64
+ ACEFBRA
+ ACDFBRA
+ ACEGBRA
+ ACDGBRA
+
+ // convert from float/float64 to int32/int64
+ ACFEBRA
+ ACFDBRA
+ ACGEBRA
+ ACGDBRA
+
+ // convert from uint32/uint64 to float/float64
+ ACELFBR
+ ACDLFBR
+ ACELGBR
+ ACDLGBR
+
+ // convert from float/float64 to uint32/uint64
+ ACLFEBR
+ ACLFDBR
+ ACLGEBR
+ ACLGDBR
+
+ // compare
+ ACMP
+ ACMPU
+ ACMPW
+ ACMPWU
+
+ // compare and swap
+ ACS
+ ACSG
+
+ // serialize
+ ASYNC
+
+ // branch
+ ABC
+ ABCL
+ ABEQ
+ ABGE
+ ABGT
+ ABLE
+ ABLT
+ ABNE
+ ABVC
+ ABVS
+ ASYSCALL
+
+ // compare and branch
+ ACMPBEQ
+ ACMPBGE
+ ACMPBGT
+ ACMPBLE
+ ACMPBLT
+ ACMPBNE
+ ACMPUBEQ
+ ACMPUBGE
+ ACMPUBGT
+ ACMPUBLE
+ ACMPUBLT
+ ACMPUBNE
+
+ // storage-and-storage
+ AMVC
+ ACLC
+ AXC
+ AOC
+ ANC
+
+ // load
+ AEXRL
+ ALARL
+ ALA
+ ALAY
+
+ // load/store multiple
+ ALMY
+ ALMG
+ ASTMY
+ ASTMG
+
+ // store clock
+ ASTCK
+ ASTCKC
+ ASTCKE
+ ASTCKF
+
+ // macros
+ ACLEAR
+
+ // vector
+ AVA
+ AVAB
+ AVAH
+ AVAF
+ AVAG
+ AVAQ
+ AVACC
+ AVACCB
+ AVACCH
+ AVACCF
+ AVACCG
+ AVACCQ
+ AVAC
+ AVACQ
+ AVACCC
+ AVACCCQ
+ AVN
+ AVNC
+ AVAVG
+ AVAVGB
+ AVAVGH
+ AVAVGF
+ AVAVGG
+ AVAVGL
+ AVAVGLB
+ AVAVGLH
+ AVAVGLF
+ AVAVGLG
+ AVCKSM
+ AVCEQ
+ AVCEQB
+ AVCEQH
+ AVCEQF
+ AVCEQG
+ AVCEQBS
+ AVCEQHS
+ AVCEQFS
+ AVCEQGS
+ AVCH
+ AVCHB
+ AVCHH
+ AVCHF
+ AVCHG
+ AVCHBS
+ AVCHHS
+ AVCHFS
+ AVCHGS
+ AVCHL
+ AVCHLB
+ AVCHLH
+ AVCHLF
+ AVCHLG
+ AVCHLBS
+ AVCHLHS
+ AVCHLFS
+ AVCHLGS
+ AVCLZ
+ AVCLZB
+ AVCLZH
+ AVCLZF
+ AVCLZG
+ AVCTZ
+ AVCTZB
+ AVCTZH
+ AVCTZF
+ AVCTZG
+ AVEC
+ AVECB
+ AVECH
+ AVECF
+ AVECG
+ AVECL
+ AVECLB
+ AVECLH
+ AVECLF
+ AVECLG
+ AVERIM
+ AVERIMB
+ AVERIMH
+ AVERIMF
+ AVERIMG
+ AVERLL
+ AVERLLB
+ AVERLLH
+ AVERLLF
+ AVERLLG
+ AVERLLV
+ AVERLLVB
+ AVERLLVH
+ AVERLLVF
+ AVERLLVG
+ AVESLV
+ AVESLVB
+ AVESLVH
+ AVESLVF
+ AVESLVG
+ AVESL
+ AVESLB
+ AVESLH
+ AVESLF
+ AVESLG
+ AVESRA
+ AVESRAB
+ AVESRAH
+ AVESRAF
+ AVESRAG
+ AVESRAV
+ AVESRAVB
+ AVESRAVH
+ AVESRAVF
+ AVESRAVG
+ AVESRL
+ AVESRLB
+ AVESRLH
+ AVESRLF
+ AVESRLG
+ AVESRLV
+ AVESRLVB
+ AVESRLVH
+ AVESRLVF
+ AVESRLVG
+ AVX
+ AVFAE
+ AVFAEB
+ AVFAEH
+ AVFAEF
+ AVFAEBS
+ AVFAEHS
+ AVFAEFS
+ AVFAEZB
+ AVFAEZH
+ AVFAEZF
+ AVFAEZBS
+ AVFAEZHS
+ AVFAEZFS
+ AVFEE
+ AVFEEB
+ AVFEEH
+ AVFEEF
+ AVFEEBS
+ AVFEEHS
+ AVFEEFS
+ AVFEEZB
+ AVFEEZH
+ AVFEEZF
+ AVFEEZBS
+ AVFEEZHS
+ AVFEEZFS
+ AVFENE
+ AVFENEB
+ AVFENEH
+ AVFENEF
+ AVFENEBS
+ AVFENEHS
+ AVFENEFS
+ AVFENEZB
+ AVFENEZH
+ AVFENEZF
+ AVFENEZBS
+ AVFENEZHS
+ AVFENEZFS
+ AVFA
+ AVFADB
+ AWFADB
+ AWFK
+ AWFKDB
+ AVFCE
+ AVFCEDB
+ AVFCEDBS
+ AWFCEDB
+ AWFCEDBS
+ AVFCH
+ AVFCHDB
+ AVFCHDBS
+ AWFCHDB
+ AWFCHDBS
+ AVFCHE
+ AVFCHEDB
+ AVFCHEDBS
+ AWFCHEDB
+ AWFCHEDBS
+ AWFC
+ AWFCDB
+ AVCDG
+ AVCDGB
+ AWCDGB
+ AVCDLG
+ AVCDLGB
+ AWCDLGB
+ AVCGD
+ AVCGDB
+ AWCGDB
+ AVCLGD
+ AVCLGDB
+ AWCLGDB
+ AVFD
+ AVFDDB
+ AWFDDB
+ AVLDE
+ AVLDEB
+ AWLDEB
+ AVLED
+ AVLEDB
+ AWLEDB
+ AVFM
+ AVFMDB
+ AWFMDB
+ AVFMA
+ AVFMADB
+ AWFMADB
+ AVFMS
+ AVFMSDB
+ AWFMSDB
+ AVFPSO
+ AVFPSODB
+ AWFPSODB
+ AVFLCDB
+ AWFLCDB
+ AVFLNDB
+ AWFLNDB
+ AVFLPDB
+ AWFLPDB
+ AVFSQ
+ AVFSQDB
+ AWFSQDB
+ AVFS
+ AVFSDB
+ AWFSDB
+ AVFTCI
+ AVFTCIDB
+ AWFTCIDB
+ AVGFM
+ AVGFMB
+ AVGFMH
+ AVGFMF
+ AVGFMG
+ AVGFMA
+ AVGFMAB
+ AVGFMAH
+ AVGFMAF
+ AVGFMAG
+ AVGEF
+ AVGEG
+ AVGBM
+ AVZERO
+ AVONE
+ AVGM
+ AVGMB
+ AVGMH
+ AVGMF
+ AVGMG
+ AVISTR
+ AVISTRB
+ AVISTRH
+ AVISTRF
+ AVISTRBS
+ AVISTRHS
+ AVISTRFS
+ AVL
+ AVLR
+ AVLREP
+ AVLREPB
+ AVLREPH
+ AVLREPF
+ AVLREPG
+ AVLC
+ AVLCB
+ AVLCH
+ AVLCF
+ AVLCG
+ AVLEH
+ AVLEF
+ AVLEG
+ AVLEB
+ AVLEIH
+ AVLEIF
+ AVLEIG
+ AVLEIB
+ AVFI
+ AVFIDB
+ AWFIDB
+ AVLGV
+ AVLGVB
+ AVLGVH
+ AVLGVF
+ AVLGVG
+ AVLLEZ
+ AVLLEZB
+ AVLLEZH
+ AVLLEZF
+ AVLLEZG
+ AVLM
+ AVLP
+ AVLPB
+ AVLPH
+ AVLPF
+ AVLPG
+ AVLBB
+ AVLVG
+ AVLVGB
+ AVLVGH
+ AVLVGF
+ AVLVGG
+ AVLVGP
+ AVLL
+ AVMX
+ AVMXB
+ AVMXH
+ AVMXF
+ AVMXG
+ AVMXL
+ AVMXLB
+ AVMXLH
+ AVMXLF
+ AVMXLG
+ AVMRH
+ AVMRHB
+ AVMRHH
+ AVMRHF
+ AVMRHG
+ AVMRL
+ AVMRLB
+ AVMRLH
+ AVMRLF
+ AVMRLG
+ AVMN
+ AVMNB
+ AVMNH
+ AVMNF
+ AVMNG
+ AVMNL
+ AVMNLB
+ AVMNLH
+ AVMNLF
+ AVMNLG
+ AVMAE
+ AVMAEB
+ AVMAEH
+ AVMAEF
+ AVMAH
+ AVMAHB
+ AVMAHH
+ AVMAHF
+ AVMALE
+ AVMALEB
+ AVMALEH
+ AVMALEF
+ AVMALH
+ AVMALHB
+ AVMALHH
+ AVMALHF
+ AVMALO
+ AVMALOB
+ AVMALOH
+ AVMALOF
+ AVMAL
+ AVMALB
+ AVMALHW
+ AVMALF
+ AVMAO
+ AVMAOB
+ AVMAOH
+ AVMAOF
+ AVME
+ AVMEB
+ AVMEH
+ AVMEF
+ AVMH
+ AVMHB
+ AVMHH
+ AVMHF
+ AVMLE
+ AVMLEB
+ AVMLEH
+ AVMLEF
+ AVMLH
+ AVMLHB
+ AVMLHH
+ AVMLHF
+ AVMLO
+ AVMLOB
+ AVMLOH
+ AVMLOF
+ AVML
+ AVMLB
+ AVMLHW
+ AVMLF
+ AVMO
+ AVMOB
+ AVMOH
+ AVMOF
+ AVNO
+ AVNOT
+ AVO
+ AVPK
+ AVPKH
+ AVPKF
+ AVPKG
+ AVPKLS
+ AVPKLSH
+ AVPKLSF
+ AVPKLSG
+ AVPKLSHS
+ AVPKLSFS
+ AVPKLSGS
+ AVPKS
+ AVPKSH
+ AVPKSF
+ AVPKSG
+ AVPKSHS
+ AVPKSFS
+ AVPKSGS
+ AVPERM
+ AVPDI
+ AVPOPCT
+ AVREP
+ AVREPB
+ AVREPH
+ AVREPF
+ AVREPG
+ AVREPI
+ AVREPIB
+ AVREPIH
+ AVREPIF
+ AVREPIG
+ AVSCEF
+ AVSCEG
+ AVSEL
+ AVSL
+ AVSLB
+ AVSLDB
+ AVSRA
+ AVSRAB
+ AVSRL
+ AVSRLB
+ AVSEG
+ AVSEGB
+ AVSEGH
+ AVSEGF
+ AVST
+ AVSTEH
+ AVSTEF
+ AVSTEG
+ AVSTEB
+ AVSTM
+ AVSTL
+ AVSTRC
+ AVSTRCB
+ AVSTRCH
+ AVSTRCF
+ AVSTRCBS
+ AVSTRCHS
+ AVSTRCFS
+ AVSTRCZB
+ AVSTRCZH
+ AVSTRCZF
+ AVSTRCZBS
+ AVSTRCZHS
+ AVSTRCZFS
+ AVS
+ AVSB
+ AVSH
+ AVSF
+ AVSG
+ AVSQ
+ AVSCBI
+ AVSCBIB
+ AVSCBIH
+ AVSCBIF
+ AVSCBIG
+ AVSCBIQ
+ AVSBCBI
+ AVSBCBIQ
+ AVSBI
+ AVSBIQ
+ AVSUMG
+ AVSUMGH
+ AVSUMGF
+ AVSUMQ
+ AVSUMQF
+ AVSUMQG
+ AVSUM
+ AVSUMB
+ AVSUMH
+ AVTM
+ AVUPH
+ AVUPHB
+ AVUPHH
+ AVUPHF
+ AVUPLH
+ AVUPLHB
+ AVUPLHH
+ AVUPLHF
+ AVUPLL
+ AVUPLLB
+ AVUPLLH
+ AVUPLLF
+ AVUPL
+ AVUPLB
+ AVUPLHW
+ AVUPLF
+
+ // binary
+ ABYTE
+ AWORD
+ ADWORD
+
+ // end marker
+ ALAST
+
+ // aliases
+ ABR = obj.AJMP
+ ABL = obj.ACALL
+)
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/asmz.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/asmz.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/asmz.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/asmz.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,4774 @@
+// Based on cmd/internal/obj/ppc64/asm9.go.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/internal/obj"
+ "log"
+ "math"
+ "sort"
+)
+
+// instruction layout.
+const (
+ FuncAlign = 16
+)
+
+type Optab struct {
+ as int16 // opcode
+ a1 uint8 // From
+ a2 uint8 // Reg
+ a3 uint8 // From3
+ a4 uint8 // To
+ type_ int8
+ param int16 // REGSP for auto variables
+}
+
+var optab = []Optab{
+ // instruction, From, Reg, From3, To, type, param
+ Optab{obj.ATEXT, C_ADDR, C_NONE, C_NONE, C_TEXTSIZE, 0, 0},
+ Optab{obj.ATEXT, C_ADDR, C_NONE, C_LCON, C_TEXTSIZE, 0, 0},
+
+ // move register
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_FREG, 1, 0},
+ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_REG, 1, 0},
+
+ // load constant
+ Optab{AMOVD, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
+ Optab{AMOVW, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
+ Optab{AMOVWZ, C_LACON, C_NONE, C_NONE, C_REG, 26, REGSP},
+ Optab{AMOVD, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
+ Optab{AMOVW, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
+ Optab{AMOVWZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
+ Optab{AMOVB, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
+ Optab{AMOVBZ, C_DCON, C_NONE, C_NONE, C_REG, 3, 0},
+
+ // store constant
+ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_ADDR, 73, 0},
+ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LAUTO, 72, REGSP},
+ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_LOREG, 72, 0},
+ Optab{AMOVD, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
+ Optab{AMOVW, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
+ Optab{AMOVWZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
+ Optab{AMOVB, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
+ Optab{AMOVBZ, C_LCON, C_NONE, C_NONE, C_LOREG, 72, 0},
+
+ // store
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVDBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVHBR, C_REG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AMOVD, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
+ Optab{AMOVBZ, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
+ Optab{AMOVB, C_REG, C_NONE, C_NONE, C_ADDR, 74, 0},
+
+ // load
+ Optab{AMOVD, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVW, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVWZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVBZ, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVB, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVDBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVHBR, C_LAUTO, C_NONE, C_NONE, C_REG, 36, REGSP},
+ Optab{AMOVD, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVW, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVWZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVBZ, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVB, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVDBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVHBR, C_LOREG, C_NONE, C_NONE, C_REG, 36, 0},
+ Optab{AMOVD, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
+ Optab{AMOVW, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
+ Optab{AMOVWZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
+ Optab{AMOVBZ, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
+ Optab{AMOVB, C_ADDR, C_NONE, C_NONE, C_REG, 75, 0},
+
+ // integer arithmetic
+ Optab{AADD, C_REG, C_REG, C_NONE, C_REG, 2, 0},
+ Optab{AADD, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
+ Optab{AADD, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
+ Optab{AADD, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
+ Optab{AADDC, C_REG, C_REG, C_NONE, C_REG, 2, 0},
+ Optab{AADDC, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
+ Optab{AADDC, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
+ Optab{AADDC, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
+ Optab{AMULLW, C_REG, C_REG, C_NONE, C_REG, 2, 0},
+ Optab{AMULLW, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
+ Optab{AMULLW, C_LCON, C_REG, C_NONE, C_REG, 22, 0},
+ Optab{AMULLW, C_LCON, C_NONE, C_NONE, C_REG, 22, 0},
+ Optab{ASUBC, C_REG, C_REG, C_NONE, C_REG, 10, 0},
+ Optab{ASUBC, C_REG, C_NONE, C_NONE, C_REG, 10, 0},
+ Optab{ADIVW, C_REG, C_REG, C_NONE, C_REG, 2, 0},
+ Optab{ADIVW, C_REG, C_NONE, C_NONE, C_REG, 2, 0},
+ Optab{ASUB, C_REG, C_REG, C_NONE, C_REG, 10, 0},
+ Optab{ASUB, C_REG, C_NONE, C_NONE, C_REG, 10, 0},
+ Optab{AADDME, C_REG, C_NONE, C_NONE, C_REG, 47, 0},
+ Optab{ANEG, C_REG, C_NONE, C_NONE, C_REG, 47, 0},
+ Optab{ANEG, C_NONE, C_NONE, C_NONE, C_REG, 47, 0},
+
+ // integer logical
+ Optab{AAND, C_REG, C_REG, C_NONE, C_REG, 6, 0},
+ Optab{AAND, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
+ Optab{AAND, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
+ Optab{AAND, C_LCON, C_REG, C_NONE, C_REG, 23, 0},
+ Optab{AOR, C_REG, C_REG, C_NONE, C_REG, 6, 0},
+ Optab{AOR, C_REG, C_NONE, C_NONE, C_REG, 6, 0},
+ Optab{AOR, C_LCON, C_NONE, C_NONE, C_REG, 23, 0},
+ Optab{AOR, C_LCON, C_REG, C_NONE, C_REG, 23, 0},
+ Optab{ASLD, C_REG, C_NONE, C_NONE, C_REG, 7, 0},
+ Optab{ASLD, C_REG, C_REG, C_NONE, C_REG, 7, 0},
+ Optab{ASLD, C_SCON, C_REG, C_NONE, C_REG, 7, 0},
+ Optab{ASLD, C_SCON, C_NONE, C_NONE, C_REG, 7, 0},
+
+ // compare and swap
+ Optab{ACSG, C_REG, C_REG, C_NONE, C_SOREG, 79, 0},
+
+ // floating point
+ Optab{AFADD, C_FREG, C_NONE, C_NONE, C_FREG, 2, 0},
+ Optab{AFADD, C_FREG, C_FREG, C_NONE, C_FREG, 2, 0},
+ Optab{AFABS, C_FREG, C_NONE, C_NONE, C_FREG, 33, 0},
+ Optab{AFABS, C_NONE, C_NONE, C_NONE, C_FREG, 33, 0},
+ Optab{AFMADD, C_FREG, C_FREG, C_FREG, C_FREG, 34, 0},
+ Optab{AFMUL, C_FREG, C_NONE, C_NONE, C_FREG, 32, 0},
+ Optab{AFMUL, C_FREG, C_FREG, C_NONE, C_FREG, 32, 0},
+ Optab{AFMOVD, C_LAUTO, C_NONE, C_NONE, C_FREG, 36, REGSP},
+ Optab{AFMOVD, C_LOREG, C_NONE, C_NONE, C_FREG, 36, 0},
+ Optab{AFMOVD, C_ADDR, C_NONE, C_NONE, C_FREG, 75, 0},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LAUTO, 35, REGSP},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_LOREG, 35, 0},
+ Optab{AFMOVD, C_FREG, C_NONE, C_NONE, C_ADDR, 74, 0},
+ Optab{AFMOVD, C_ZCON, C_NONE, C_NONE, C_FREG, 67, 0},
+ Optab{ACEFBRA, C_REG, C_NONE, C_NONE, C_FREG, 82, 0},
+ Optab{ACFEBRA, C_FREG, C_NONE, C_NONE, C_REG, 83, 0},
+
+ // load symbol address (plus offset)
+ Optab{AMOVD, C_SYMADDR, C_NONE, C_NONE, C_REG, 19, 0},
+ Optab{AMOVD, C_GOTADDR, C_NONE, C_NONE, C_REG, 93, 0},
+ Optab{AMOVD, C_TLS_LE, C_NONE, C_NONE, C_REG, 94, 0},
+ Optab{AMOVD, C_TLS_IE, C_NONE, C_NONE, C_REG, 95, 0},
+
+ // system call
+ Optab{ASYSCALL, C_NONE, C_NONE, C_NONE, C_NONE, 5, 0},
+ Optab{ASYSCALL, C_SCON, C_NONE, C_NONE, C_NONE, 77, 0},
+
+ // branch
+ Optab{ABEQ, C_NONE, C_NONE, C_NONE, C_SBRA, 16, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_LBRA, 11, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_SBRA, 16, 0},
+ Optab{ABC, C_SCON, C_REG, C_NONE, C_LBRA, 17, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_REG, 18, 0},
+ Optab{ABR, C_REG, C_NONE, C_NONE, C_REG, 18, 0},
+ Optab{ABR, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
+ Optab{ABC, C_NONE, C_NONE, C_NONE, C_ZOREG, 15, 0},
+ Optab{ACMPBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
+ Optab{ACMPBEQ, C_REG, C_NONE, C_ADDCON, C_SBRA, 90, 0},
+ Optab{ACMPBEQ, C_REG, C_NONE, C_SCON, C_SBRA, 90, 0},
+ Optab{ACMPUBEQ, C_REG, C_REG, C_NONE, C_SBRA, 89, 0},
+ Optab{ACMPUBEQ, C_REG, C_NONE, C_ANDCON, C_SBRA, 90, 0},
+
+ // compare
+ Optab{ACMP, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
+ Optab{ACMP, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
+ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_REG, 70, 0},
+ Optab{ACMPU, C_REG, C_NONE, C_NONE, C_LCON, 71, 0},
+ Optab{AFCMPO, C_FREG, C_NONE, C_NONE, C_FREG, 70, 0},
+ Optab{AFCMPO, C_FREG, C_REG, C_NONE, C_FREG, 70, 0},
+
+ // 32-bit access registers
+ Optab{AMOVW, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
+ Optab{AMOVWZ, C_AREG, C_NONE, C_NONE, C_REG, 68, 0},
+ Optab{AMOVW, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
+ Optab{AMOVWZ, C_REG, C_NONE, C_NONE, C_AREG, 69, 0},
+
+ // macros
+ Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LOREG, 96, 0},
+ Optab{ACLEAR, C_LCON, C_NONE, C_NONE, C_LAUTO, 96, REGSP},
+
+ // load/store multiple
+ Optab{ASTMG, C_REG, C_REG, C_NONE, C_LOREG, 97, 0},
+ Optab{ASTMG, C_REG, C_REG, C_NONE, C_LAUTO, 97, REGSP},
+ Optab{ALMG, C_LOREG, C_REG, C_NONE, C_REG, 98, 0},
+ Optab{ALMG, C_LAUTO, C_REG, C_NONE, C_REG, 98, REGSP},
+
+ // bytes
+ Optab{ABYTE, C_SCON, C_NONE, C_NONE, C_NONE, 40, 0},
+ Optab{AWORD, C_LCON, C_NONE, C_NONE, C_NONE, 40, 0},
+ Optab{ADWORD, C_LCON, C_NONE, C_NONE, C_NONE, 31, 0},
+ Optab{ADWORD, C_DCON, C_NONE, C_NONE, C_NONE, 31, 0},
+
+ // fast synchronization
+ Optab{ASYNC, C_NONE, C_NONE, C_NONE, C_NONE, 81, 0},
+
+ // store clock
+ Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SAUTO, 88, REGSP},
+ Optab{ASTCK, C_NONE, C_NONE, C_NONE, C_SOREG, 88, 0},
+
+ // storage and storage
+ Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LOREG, 84, 0},
+ Optab{AMVC, C_LOREG, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
+ Optab{AMVC, C_LAUTO, C_NONE, C_SCON, C_LAUTO, 84, REGSP},
+
+ // address
+ Optab{ALARL, C_LCON, C_NONE, C_NONE, C_REG, 85, 0},
+ Optab{ALARL, C_SYMADDR, C_NONE, C_NONE, C_REG, 85, 0},
+ Optab{ALA, C_SOREG, C_NONE, C_NONE, C_REG, 86, 0},
+ Optab{ALA, C_SAUTO, C_NONE, C_NONE, C_REG, 86, REGSP},
+ Optab{AEXRL, C_SYMADDR, C_NONE, C_NONE, C_REG, 87, 0},
+
+ // misc
+ Optab{obj.AUNDEF, C_NONE, C_NONE, C_NONE, C_NONE, 78, 0},
+ Optab{obj.APCDATA, C_LCON, C_NONE, C_NONE, C_LCON, 0, 0},
+ Optab{obj.AFUNCDATA, C_SCON, C_NONE, C_NONE, C_ADDR, 0, 0},
+ Optab{obj.ANOP, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0},
+ Optab{obj.ANOP, C_SAUTO, C_NONE, C_NONE, C_NONE, 0, 0},
+
+ // vector instructions
+
+ // VRX store
+ Optab{AVST, C_VREG, C_NONE, C_NONE, C_SOREG, 100, 0},
+ Optab{AVST, C_VREG, C_NONE, C_NONE, C_SAUTO, 100, REGSP},
+ Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SOREG, 100, 0},
+ Optab{AVSTEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 100, REGSP},
+
+ // VRX load
+ Optab{AVL, C_SOREG, C_NONE, C_NONE, C_VREG, 101, 0},
+ Optab{AVL, C_SAUTO, C_NONE, C_NONE, C_VREG, 101, REGSP},
+ Optab{AVLEG, C_SOREG, C_NONE, C_SCON, C_VREG, 101, 0},
+ Optab{AVLEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 101, REGSP},
+
+ // VRV scatter
+ Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SOREG, 102, 0},
+ Optab{AVSCEG, C_VREG, C_NONE, C_SCON, C_SAUTO, 102, REGSP},
+
+ // VRV gather
+ Optab{AVGEG, C_SOREG, C_NONE, C_SCON, C_VREG, 103, 0},
+ Optab{AVGEG, C_SAUTO, C_NONE, C_SCON, C_VREG, 103, REGSP},
+
+ // VRS element shift/rotate and load gr to/from vr element
+ Optab{AVESLG, C_SCON, C_VREG, C_NONE, C_VREG, 104, 0},
+ Optab{AVESLG, C_REG, C_VREG, C_NONE, C_VREG, 104, 0},
+ Optab{AVESLG, C_SCON, C_NONE, C_NONE, C_VREG, 104, 0},
+ Optab{AVESLG, C_REG, C_NONE, C_NONE, C_VREG, 104, 0},
+ Optab{AVLGVG, C_SCON, C_VREG, C_NONE, C_REG, 104, 0},
+ Optab{AVLGVG, C_REG, C_VREG, C_NONE, C_REG, 104, 0},
+ Optab{AVLVGG, C_SCON, C_REG, C_NONE, C_VREG, 104, 0},
+ Optab{AVLVGG, C_REG, C_REG, C_NONE, C_VREG, 104, 0},
+
+ // VRS store multiple
+ Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SOREG, 105, 0},
+ Optab{AVSTM, C_VREG, C_VREG, C_NONE, C_SAUTO, 105, REGSP},
+
+ // VRS load multiple
+ Optab{AVLM, C_SOREG, C_VREG, C_NONE, C_VREG, 106, 0},
+ Optab{AVLM, C_SAUTO, C_VREG, C_NONE, C_VREG, 106, REGSP},
+
+ // VRS store with length
+ Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SOREG, 107, 0},
+ Optab{AVSTL, C_VREG, C_NONE, C_REG, C_SAUTO, 107, REGSP},
+
+ // VRS load with length
+ Optab{AVLL, C_SOREG, C_NONE, C_REG, C_VREG, 108, 0},
+ Optab{AVLL, C_SAUTO, C_NONE, C_REG, C_VREG, 108, REGSP},
+
+ // VRI-a
+ Optab{AVGBM, C_ANDCON, C_NONE, C_NONE, C_VREG, 109, 0},
+ Optab{AVZERO, C_NONE, C_NONE, C_NONE, C_VREG, 109, 0},
+ Optab{AVREPIG, C_ADDCON, C_NONE, C_NONE, C_VREG, 109, 0},
+ Optab{AVREPIG, C_SCON, C_NONE, C_NONE, C_VREG, 109, 0},
+ Optab{AVLEIG, C_ADDCON, C_NONE, C_SCON, C_VREG, 109, 0},
+ Optab{AVLEIG, C_SCON, C_NONE, C_SCON, C_VREG, 109, 0},
+
+ // VRI-b generate mask
+ Optab{AVGMG, C_SCON, C_NONE, C_SCON, C_VREG, 110, 0},
+
+ // VRI-c replicate
+ Optab{AVREPG, C_UCON, C_VREG, C_NONE, C_VREG, 111, 0},
+
+ // VRI-d element rotate and insert under mask and
+ // shift left double by byte
+ Optab{AVERIMG, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
+ Optab{AVSLDB, C_VREG, C_VREG, C_SCON, C_VREG, 112, 0},
+
+ // VRI-d fp test data class immediate
+ Optab{AVFTCIDB, C_SCON, C_VREG, C_NONE, C_VREG, 113, 0},
+
+ // VRR-a load reg
+ Optab{AVLR, C_VREG, C_NONE, C_NONE, C_VREG, 114, 0},
+
+ // VRR-a compare
+ Optab{AVECG, C_VREG, C_NONE, C_NONE, C_VREG, 115, 0},
+
+ // VRR-b
+ Optab{AVCEQG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
+ Optab{AVFAEF, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
+ Optab{AVPKSG, C_VREG, C_VREG, C_NONE, C_VREG, 117, 0},
+
+ // VRR-c
+ Optab{AVAQ, C_VREG, C_VREG, C_NONE, C_VREG, 118, 0},
+ Optab{AVAQ, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
+ Optab{AVNOT, C_VREG, C_NONE, C_NONE, C_VREG, 118, 0},
+ Optab{AVPDI, C_VREG, C_VREG, C_SCON, C_VREG, 123, 0},
+
+ // VRR-c shifts
+ Optab{AVERLLVG, C_VREG, C_VREG, C_NONE, C_VREG, 119, 0},
+ Optab{AVERLLVG, C_VREG, C_NONE, C_NONE, C_VREG, 119, 0},
+
+ // VRR-d
+ // 2 3 1 4
+ Optab{AVACQ, C_VREG, C_VREG, C_VREG, C_VREG, 120, 0},
+
+ // VRR-e
+ Optab{AVSEL, C_VREG, C_VREG, C_VREG, C_VREG, 121, 0},
+
+ // VRR-f
+ Optab{AVLVGP, C_REG, C_REG, C_NONE, C_VREG, 122, 0},
+
+ Optab{obj.AXXX, C_NONE, C_NONE, C_NONE, C_NONE, 0, 0},
+}
+
+type Oprang struct {
+ start []Optab
+ stop []Optab
+}
+
+var oprange [ALAST & obj.AMask]Oprang
+
+var xcmp [C_NCLASS][C_NCLASS]uint8
+
+func spanz(ctxt *obj.Link, cursym *obj.LSym) {
+ p := cursym.Text
+ if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+ return
+ }
+ ctxt.Cursym = cursym
+ ctxt.Autosize = int32(p.To.Offset)
+
+ if oprange[AANDN&obj.AMask].start == nil {
+ buildop(ctxt)
+ }
+
+ buffer := make([]byte, 0)
+ changed := true
+ loop := 0
+ for changed {
+ if loop > 10 {
+ ctxt.Diag("stuck in spanz loop")
+ break
+ }
+ changed = false
+ buffer = buffer[:0]
+ ctxt.Cursym.R = make([]obj.Reloc, 0)
+ for p := cursym.Text; p != nil; p = p.Link {
+ pc := int64(len(buffer))
+ if pc != p.Pc {
+ changed = true
+ }
+ p.Pc = pc
+ ctxt.Pc = p.Pc
+ ctxt.Curp = p
+ asmout(ctxt, &buffer)
+ if pc == int64(len(buffer)) {
+ switch p.As {
+ case obj.ANOP, obj.AFUNCDATA, obj.APCDATA, obj.ATEXT:
+ // ok
+ default:
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ }
+ }
+ loop++
+ }
+
+ cursym.Size = int64(len(buffer))
+ if cursym.Size%FuncAlign != 0 {
+ cursym.Size += FuncAlign - (cursym.Size % FuncAlign)
+ }
+ obj.Symgrow(ctxt, cursym, cursym.Size)
+ copy(cursym.P, buffer)
+}
+
+func isint32(v int64) bool {
+ return int64(int32(v)) == v
+}
+
+func isuint32(v uint64) bool {
+ return uint64(uint32(v)) == v
+}
+
+func aclass(ctxt *obj.Link, a *obj.Addr) int {
+ switch a.Type {
+ case obj.TYPE_NONE:
+ return C_NONE
+
+ case obj.TYPE_REG:
+ if REG_R0 <= a.Reg && a.Reg <= REG_R15 {
+ return C_REG
+ }
+ if REG_F0 <= a.Reg && a.Reg <= REG_F15 {
+ return C_FREG
+ }
+ if REG_AR0 <= a.Reg && a.Reg <= REG_AR15 {
+ return C_AREG
+ }
+ if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
+ return C_VREG
+ }
+ return C_GOK
+
+ case obj.TYPE_MEM:
+ switch a.Name {
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ if a.Sym == nil {
+ // must have a symbol
+ break
+ }
+ ctxt.Instoffset = a.Offset
+ if a.Sym.Type == obj.STLSBSS {
+ if ctxt.Flag_shared != 0 {
+ return C_TLS_IE // initial exec model
+ }
+ return C_TLS_LE // local exec model
+ }
+ return C_ADDR
+
+ case obj.NAME_GOTREF:
+ return C_GOTADDR
+
+ case obj.NAME_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case obj.NAME_PARAM:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SAUTO
+ }
+ return C_LAUTO
+
+ case obj.NAME_NONE:
+ ctxt.Instoffset = a.Offset
+ if ctxt.Instoffset == 0 {
+ return C_ZOREG
+ }
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SOREG
+ }
+ return C_LOREG
+ }
+
+ return C_GOK
+
+ case obj.TYPE_TEXTSIZE:
+ return C_TEXTSIZE
+
+ case obj.TYPE_FCONST:
+ if f64, ok := a.Val.(float64); ok && math.Float64bits(f64) == 0 {
+ return C_ZCON
+ }
+ ctxt.Diag("cannot handle the floating point constant %v", a.Val)
+
+ case obj.TYPE_CONST,
+ obj.TYPE_ADDR:
+ switch a.Name {
+ case obj.TYPE_NONE:
+ ctxt.Instoffset = a.Offset
+ if a.Reg != 0 {
+ if -BIG <= ctxt.Instoffset && ctxt.Instoffset <= BIG {
+ return C_SACON
+ }
+ if isint32(ctxt.Instoffset) {
+ return C_LACON
+ }
+ return C_DACON
+ }
+ goto consize
+
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ s := a.Sym
+ if s == nil {
+ break
+ }
+ ctxt.Instoffset = s.Value + a.Offset
+ if s.Type == obj.SCONST {
+ goto consize
+ }
+
+ return C_SYMADDR
+
+ case obj.NAME_AUTO:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ case obj.NAME_PARAM:
+ ctxt.Instoffset = int64(ctxt.Autosize) + a.Offset + ctxt.FixedFrameSize()
+ if ctxt.Instoffset >= -BIG && ctxt.Instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+ }
+
+ return C_GOK
+
+ consize:
+ if ctxt.Instoffset == 0 {
+ return C_ZCON
+ }
+ if ctxt.Instoffset >= 0 {
+ if ctxt.Instoffset <= 0x7fff {
+ return C_SCON
+ }
+ if ctxt.Instoffset <= 0xffff {
+ return C_ANDCON
+ }
+ if ctxt.Instoffset&0xffff == 0 && isuint32(uint64(ctxt.Instoffset)) { /* && (instoffset & (1<<31)) == 0) */
+ return C_UCON
+ }
+ if isint32(ctxt.Instoffset) || isuint32(uint64(ctxt.Instoffset)) {
+ return C_LCON
+ }
+ return C_DCON
+ }
+
+ if ctxt.Instoffset >= -0x8000 {
+ return C_ADDCON
+ }
+ if ctxt.Instoffset&0xffff == 0 && isint32(ctxt.Instoffset) {
+ return C_UCON
+ }
+ if isint32(ctxt.Instoffset) {
+ return C_LCON
+ }
+ return C_DCON
+
+ case obj.TYPE_BRANCH:
+ return C_SBRA
+ }
+
+ return C_GOK
+}
+
+func oplook(ctxt *obj.Link, p *obj.Prog) *Optab {
+ a1 := int(p.Optab)
+ if a1 != 0 {
+ return &optab[a1-1:][0]
+ }
+ a1 = int(p.From.Class)
+ if a1 == 0 {
+ a1 = aclass(ctxt, &p.From) + 1
+ p.From.Class = int8(a1)
+ }
+
+ a1--
+ a3 := C_NONE + 1
+ if p.From3 != nil {
+ a3 = int(p.From3.Class)
+ if a3 == 0 {
+ a3 = aclass(ctxt, p.From3) + 1
+ p.From3.Class = int8(a3)
+ }
+ }
+
+ a3--
+ a4 := int(p.To.Class)
+ if a4 == 0 {
+ a4 = aclass(ctxt, &p.To) + 1
+ p.To.Class = int8(a4)
+ }
+
+ a4--
+ a2 := C_NONE
+ if p.Reg != 0 {
+ if REG_R0 <= p.Reg && p.Reg <= REG_R15 {
+ a2 = C_REG
+ } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
+ a2 = C_VREG
+ } else if REG_F0 <= p.Reg && p.Reg <= REG_F15 {
+ a2 = C_FREG
+ } else if REG_AR0 <= p.Reg && p.Reg <= REG_AR15 {
+ a2 = C_AREG
+ }
+ }
+
+ r0 := p.As & obj.AMask
+
+ o := oprange[r0].start
+ if o == nil {
+ o = oprange[r0].stop /* just generate an error */
+ }
+
+ e := oprange[r0].stop
+ c1 := xcmp[a1][:]
+ c3 := xcmp[a3][:]
+ c4 := xcmp[a4][:]
+ for ; -cap(o) < -cap(e); o = o[1:] {
+ if int(o[0].a2) == a2 {
+ if c1[o[0].a1] != 0 {
+ if c3[o[0].a3] != 0 {
+ if c4[o[0].a4] != 0 {
+ p.Optab = uint16((-cap(o) + cap(optab)) + 1)
+ return &o[0]
+ }
+ }
+ }
+ }
+ }
+
+ // cannot find a case; abort
+ ctxt.Diag("illegal combination %v %v %v %v %v\n", obj.Aconv(int(p.As)), DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4))
+ ctxt.Diag("prog: %v\n", p)
+ return nil
+}
+
+func cmp(a int, b int) bool {
+ if a == b {
+ return true
+ }
+ switch a {
+ case C_DCON:
+ if b == C_LCON {
+ return true
+ }
+ fallthrough
+ case C_LCON:
+ if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
+ return true
+ }
+
+ case C_ADDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_ANDCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_UCON:
+ if b == C_ZCON || b == C_SCON {
+ return true
+ }
+
+ case C_SCON:
+ if b == C_ZCON {
+ return true
+ }
+
+ case C_LACON:
+ if b == C_SACON {
+ return true
+ }
+
+ case C_LBRA:
+ if b == C_SBRA {
+ return true
+ }
+
+ case C_LAUTO:
+ if b == C_SAUTO {
+ return true
+ }
+
+ case C_LOREG:
+ if b == C_ZOREG || b == C_SOREG {
+ return true
+ }
+
+ case C_SOREG:
+ if b == C_ZOREG {
+ return true
+ }
+
+ case C_ANY:
+ return true
+ }
+
+ return false
+}
+
+type ocmp []Optab
+
+func (x ocmp) Len() int {
+ return len(x)
+}
+
+func (x ocmp) Swap(i, j int) {
+ x[i], x[j] = x[j], x[i]
+}
+
+func (x ocmp) Less(i, j int) bool {
+ p1 := &x[i]
+ p2 := &x[j]
+ n := int(p1.as) - int(p2.as)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a1) - int(p2.a1)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a2) - int(p2.a2)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a3) - int(p2.a3)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a4) - int(p2.a4)
+ if n != 0 {
+ return n < 0
+ }
+ return false
+}
+func opset(a, b0 int16) {
+ oprange[a&obj.AMask] = oprange[b0]
+}
+
+func buildop(ctxt *obj.Link) {
+ var n int
+
+ for i := 0; i < C_NCLASS; i++ {
+ for n = 0; n < C_NCLASS; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = 1
+ }
+ }
+ }
+ for n = 0; optab[n].as != obj.AXXX; n++ {
+ }
+ sort.Sort(ocmp(optab[:n]))
+ for i := 0; i < n; i++ {
+ r := optab[i].as
+ r0 := r & obj.AMask
+ oprange[r0].start = optab[i:]
+ for optab[i].as == r {
+ i++
+ }
+ oprange[r0].stop = optab[i:]
+ i--
+
+ // opset() aliases optab ranges for similar instructions, to reduce the number of optabs in the array.
+ // oprange[] is used by oplook() to find the Optab entry that applies to a given Prog.
+ switch r {
+ default:
+ ctxt.Diag("unknown op in build: %v", obj.Aconv(int(r)))
+ log.Fatalf("bad code")
+
+ case ADIVW: /* op Rb[,Ra],Rd */
+ opset(AADDE, r0)
+ opset(AMULLD, r0)
+ opset(AMULHDU, r0)
+ opset(ADIVD, r0)
+ opset(ADIVDU, r0)
+ opset(ADIVWU, r0)
+
+ case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
+ opset(AMOVH, r0)
+ opset(AMOVHZ, r0)
+
+ case ALA:
+ opset(ALAY, r0)
+
+ case ALARL:
+
+ case AMVC:
+ opset(ACLC, r0)
+ opset(AXC, r0)
+ opset(AOC, r0)
+ opset(ANC, r0)
+
+ case AEXRL:
+
+ case ASTCK:
+ opset(ASTCKC, r0)
+ opset(ASTCKE, r0)
+ opset(ASTCKF, r0)
+
+ case ACLEAR:
+
+ case ASTMG:
+ opset(ASTMY, r0)
+
+ case ALMG:
+ opset(ALMY, r0)
+
+ case AAND: /* logical op Rb,Rs,Ra; no literal */
+ opset(AANDN, r0)
+ opset(ANAND, r0)
+ opset(ANOR, r0)
+ opset(AORN, r0)
+
+ case AADDME: /* op Ra, Rd */
+ opset(AADDZE, r0)
+ opset(ASUBME, r0)
+ opset(ASUBZE, r0)
+
+ case AADDC:
+
+ case ABEQ:
+ opset(ABGE, r0)
+ opset(ABGT, r0)
+ opset(ABLE, r0)
+ opset(ABLT, r0)
+ opset(ABNE, r0)
+ opset(ABVC, r0)
+ opset(ABVS, r0)
+
+ case ABR:
+ opset(ABL, r0)
+
+ case ABC:
+ opset(ABCL, r0)
+
+ case AFABS: /* fop [s,]d */
+ opset(AFNABS, r0)
+ opset(AFNEG, r0)
+ opset(ALEDBR, r0)
+ opset(ALDEBR, r0)
+ opset(AFSQRT, r0)
+ opset(AFSQRTS, r0)
+
+ case AFADD:
+ opset(AFADDS, r0)
+ opset(AFDIV, r0)
+ opset(AFDIVS, r0)
+ opset(AFSUB, r0)
+ opset(AFSUBS, r0)
+
+ case AFMADD:
+ opset(AFMADDS, r0)
+ opset(AFMSUB, r0)
+ opset(AFMSUBS, r0)
+ opset(AFNMADD, r0)
+ opset(AFNMADDS, r0)
+ opset(AFNMSUB, r0)
+ opset(AFNMSUBS, r0)
+
+ case AFMUL:
+ opset(AFMULS, r0)
+
+ case AFCMPO:
+ opset(AFCMPU, r0)
+ opset(ACEBR, r0)
+
+ case ANEG: /* op [Ra,] Rd */
+
+ case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,Ra; oris/xoris $uimm,Rs,Ra */
+ opset(AXOR, r0)
+
+ case ASLD:
+ opset(ASRD, r0)
+ opset(ASLW, r0)
+ opset(ASRW, r0)
+ opset(ASRAD, r0)
+ opset(ASRAW, r0)
+ opset(ARLL, r0)
+ opset(ARLLG, r0)
+
+ case ACSG:
+ opset(ACS, r0)
+
+ case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
+ opset(ASUBC, r0)
+ opset(ASUBE, r0)
+
+ case ASYNC:
+
+ case AFMOVD:
+ opset(AFMOVS, r0)
+
+ case ASYSCALL: /* just the op; flow of control */
+
+ case AMOVDBR:
+ opset(AMOVWBR, r0)
+
+ case AMOVHBR: // no reg-reg moves
+
+ case ACMP:
+ opset(ACMPW, r0)
+
+ case ACMPU:
+ opset(ACMPWU, r0)
+
+ case ACEFBRA:
+ opset(ACDFBRA, r0)
+ opset(ACEGBRA, r0)
+ opset(ACDGBRA, r0)
+ opset(ACELFBR, r0)
+ opset(ACDLFBR, r0)
+ opset(ACELGBR, r0)
+ opset(ACDLGBR, r0)
+
+ case ACFEBRA:
+ opset(ACFDBRA, r0)
+ opset(ACGEBRA, r0)
+ opset(ACGDBRA, r0)
+ opset(ACLFEBR, r0)
+ opset(ACLFDBR, r0)
+ opset(ACLGEBR, r0)
+ opset(ACLGDBR, r0)
+
+ case ACMPBEQ:
+ opset(ACMPBGE, r0)
+ opset(ACMPBGT, r0)
+ opset(ACMPBLE, r0)
+ opset(ACMPBLT, r0)
+ opset(ACMPBNE, r0)
+
+ case ACMPUBEQ:
+ opset(ACMPUBGE, r0)
+ opset(ACMPUBGT, r0)
+ opset(ACMPUBLE, r0)
+ opset(ACMPUBLT, r0)
+ opset(ACMPUBNE, r0)
+
+ case AVL:
+ opset(AVLLEZB, r0)
+ opset(AVLLEZH, r0)
+ opset(AVLLEZF, r0)
+ opset(AVLLEZG, r0)
+ opset(AVLREPB, r0)
+ opset(AVLREPH, r0)
+ opset(AVLREPF, r0)
+ opset(AVLREPG, r0)
+
+ case AVST:
+
+ case AVLEG:
+ opset(AVLBB, r0)
+ opset(AVLEB, r0)
+ opset(AVLEH, r0)
+ opset(AVLEF, r0)
+ opset(AVLEG, r0)
+ opset(AVLREP, r0)
+
+ case AVSTEG:
+ opset(AVSTEB, r0)
+ opset(AVSTEH, r0)
+ opset(AVSTEF, r0)
+
+ case AVSCEG:
+ opset(AVSCEF, r0)
+
+ case AVGEG:
+ opset(AVGEF, r0)
+
+ case AVESLG:
+ opset(AVESLB, r0)
+ opset(AVESLH, r0)
+ opset(AVESLF, r0)
+ opset(AVERLLB, r0)
+ opset(AVERLLH, r0)
+ opset(AVERLLF, r0)
+ opset(AVERLLG, r0)
+ opset(AVESRAB, r0)
+ opset(AVESRAH, r0)
+ opset(AVESRAF, r0)
+ opset(AVESRAG, r0)
+ opset(AVESRLB, r0)
+ opset(AVESRLH, r0)
+ opset(AVESRLF, r0)
+ opset(AVESRLG, r0)
+
+ case AVLGVG:
+ opset(AVLGVB, r0)
+ opset(AVLGVH, r0)
+ opset(AVLGVF, r0)
+
+ case AVLVGG:
+ opset(AVLVGB, r0)
+ opset(AVLVGH, r0)
+ opset(AVLVGF, r0)
+
+ case AVLL:
+
+ case AVSTL:
+
+ case AVLM:
+
+ case AVSTM:
+
+ case AVGBM:
+
+ case AVZERO:
+ opset(AVONE, r0)
+
+ case AVREPIG:
+ opset(AVREPIB, r0)
+ opset(AVREPIH, r0)
+ opset(AVREPIF, r0)
+
+ case AVLEIG:
+ opset(AVLEIB, r0)
+ opset(AVLEIH, r0)
+ opset(AVLEIF, r0)
+
+ case AVGMG:
+ opset(AVGMB, r0)
+ opset(AVGMH, r0)
+ opset(AVGMF, r0)
+
+ case AVREPG:
+ opset(AVREPB, r0)
+ opset(AVREPH, r0)
+ opset(AVREPF, r0)
+
+ case AVERIMG:
+ opset(AVERIMB, r0)
+ opset(AVERIMH, r0)
+ opset(AVERIMF, r0)
+
+ case AVSLDB:
+
+ case AVFTCIDB:
+ opset(AWFTCIDB, r0)
+
+ case AVLR:
+ opset(AVUPHB, r0)
+ opset(AVUPHH, r0)
+ opset(AVUPHF, r0)
+ opset(AVUPLHB, r0)
+ opset(AVUPLHH, r0)
+ opset(AVUPLHF, r0)
+ opset(AVUPLB, r0)
+ opset(AVUPLHW, r0)
+ opset(AVUPLF, r0)
+ opset(AVUPLLB, r0)
+ opset(AVUPLLH, r0)
+ opset(AVUPLLF, r0)
+ opset(AVCLZB, r0)
+ opset(AVCLZH, r0)
+ opset(AVCLZF, r0)
+ opset(AVCLZG, r0)
+ opset(AVCTZB, r0)
+ opset(AVCTZH, r0)
+ opset(AVCTZF, r0)
+ opset(AVCTZG, r0)
+ opset(AVLDEB, r0)
+ opset(AWLDEB, r0)
+ opset(AVFLCDB, r0)
+ opset(AWFLCDB, r0)
+ opset(AVFLNDB, r0)
+ opset(AWFLNDB, r0)
+ opset(AVFLPDB, r0)
+ opset(AWFLPDB, r0)
+ opset(AVFSQDB, r0)
+ opset(AWFSQDB, r0)
+ opset(AVISTRB, r0)
+ opset(AVISTRH, r0)
+ opset(AVISTRF, r0)
+ opset(AVISTRBS, r0)
+ opset(AVISTRHS, r0)
+ opset(AVISTRFS, r0)
+ opset(AVLCB, r0)
+ opset(AVLCH, r0)
+ opset(AVLCF, r0)
+ opset(AVLCG, r0)
+ opset(AVLPB, r0)
+ opset(AVLPH, r0)
+ opset(AVLPF, r0)
+ opset(AVLPG, r0)
+ opset(AVPOPCT, r0)
+ opset(AVSEGB, r0)
+ opset(AVSEGH, r0)
+ opset(AVSEGF, r0)
+
+ case AVECG:
+ opset(AVECB, r0)
+ opset(AVECH, r0)
+ opset(AVECF, r0)
+ opset(AVECLB, r0)
+ opset(AVECLH, r0)
+ opset(AVECLF, r0)
+ opset(AVECLG, r0)
+ opset(AWFCDB, r0)
+ opset(AWFKDB, r0)
+
+ case AVCEQG:
+ opset(AVCEQB, r0)
+ opset(AVCEQH, r0)
+ opset(AVCEQF, r0)
+ opset(AVCEQBS, r0)
+ opset(AVCEQHS, r0)
+ opset(AVCEQFS, r0)
+ opset(AVCEQGS, r0)
+ opset(AVCHB, r0)
+ opset(AVCHH, r0)
+ opset(AVCHF, r0)
+ opset(AVCHG, r0)
+ opset(AVCHBS, r0)
+ opset(AVCHHS, r0)
+ opset(AVCHFS, r0)
+ opset(AVCHGS, r0)
+ opset(AVCHLB, r0)
+ opset(AVCHLH, r0)
+ opset(AVCHLF, r0)
+ opset(AVCHLG, r0)
+ opset(AVCHLBS, r0)
+ opset(AVCHLHS, r0)
+ opset(AVCHLFS, r0)
+ opset(AVCHLGS, r0)
+
+ case AVFAEF:
+ opset(AVFAEB, r0)
+ opset(AVFAEH, r0)
+ opset(AVFAEBS, r0)
+ opset(AVFAEHS, r0)
+ opset(AVFAEFS, r0)
+ opset(AVFAEZB, r0)
+ opset(AVFAEZH, r0)
+ opset(AVFAEZF, r0)
+ opset(AVFAEZBS, r0)
+ opset(AVFAEZHS, r0)
+ opset(AVFAEZFS, r0)
+ opset(AVFEEB, r0)
+ opset(AVFEEH, r0)
+ opset(AVFEEF, r0)
+ opset(AVFEEBS, r0)
+ opset(AVFEEHS, r0)
+ opset(AVFEEFS, r0)
+ opset(AVFEEZB, r0)
+ opset(AVFEEZH, r0)
+ opset(AVFEEZF, r0)
+ opset(AVFEEZBS, r0)
+ opset(AVFEEZHS, r0)
+ opset(AVFEEZFS, r0)
+ opset(AVFENEB, r0)
+ opset(AVFENEH, r0)
+ opset(AVFENEF, r0)
+ opset(AVFENEBS, r0)
+ opset(AVFENEHS, r0)
+ opset(AVFENEFS, r0)
+ opset(AVFENEZB, r0)
+ opset(AVFENEZH, r0)
+ opset(AVFENEZF, r0)
+ opset(AVFENEZBS, r0)
+ opset(AVFENEZHS, r0)
+ opset(AVFENEZFS, r0)
+
+ case AVPKSG:
+ opset(AVPKSH, r0)
+ opset(AVPKSF, r0)
+ opset(AVPKSHS, r0)
+ opset(AVPKSFS, r0)
+ opset(AVPKSGS, r0)
+ opset(AVPKLSH, r0)
+ opset(AVPKLSF, r0)
+ opset(AVPKLSG, r0)
+ opset(AVPKLSHS, r0)
+ opset(AVPKLSFS, r0)
+ opset(AVPKLSGS, r0)
+
+ case AVAQ:
+ opset(AVAB, r0)
+ opset(AVAH, r0)
+ opset(AVAF, r0)
+ opset(AVAG, r0)
+ opset(AVACCB, r0)
+ opset(AVACCH, r0)
+ opset(AVACCF, r0)
+ opset(AVACCG, r0)
+ opset(AVACCQ, r0)
+ opset(AVN, r0)
+ opset(AVNC, r0)
+ opset(AVAVGB, r0)
+ opset(AVAVGH, r0)
+ opset(AVAVGF, r0)
+ opset(AVAVGG, r0)
+ opset(AVAVGLB, r0)
+ opset(AVAVGLH, r0)
+ opset(AVAVGLF, r0)
+ opset(AVAVGLG, r0)
+ opset(AVCKSM, r0)
+ opset(AVX, r0)
+ opset(AVFADB, r0)
+ opset(AWFADB, r0)
+ opset(AVFCEDB, r0)
+ opset(AVFCEDBS, r0)
+ opset(AWFCEDB, r0)
+ opset(AWFCEDBS, r0)
+ opset(AVFCHDB, r0)
+ opset(AVFCHDBS, r0)
+ opset(AWFCHDB, r0)
+ opset(AWFCHDBS, r0)
+ opset(AVFCHEDB, r0)
+ opset(AVFCHEDBS, r0)
+ opset(AWFCHEDB, r0)
+ opset(AWFCHEDBS, r0)
+ opset(AVFMDB, r0)
+ opset(AWFMDB, r0)
+ opset(AVGFMB, r0)
+ opset(AVGFMH, r0)
+ opset(AVGFMF, r0)
+ opset(AVGFMG, r0)
+ opset(AVMXB, r0)
+ opset(AVMXH, r0)
+ opset(AVMXF, r0)
+ opset(AVMXG, r0)
+ opset(AVMXLB, r0)
+ opset(AVMXLH, r0)
+ opset(AVMXLF, r0)
+ opset(AVMXLG, r0)
+ opset(AVMNB, r0)
+ opset(AVMNH, r0)
+ opset(AVMNF, r0)
+ opset(AVMNG, r0)
+ opset(AVMNLB, r0)
+ opset(AVMNLH, r0)
+ opset(AVMNLF, r0)
+ opset(AVMNLG, r0)
+ opset(AVMRHB, r0)
+ opset(AVMRHH, r0)
+ opset(AVMRHF, r0)
+ opset(AVMRHG, r0)
+ opset(AVMRLB, r0)
+ opset(AVMRLH, r0)
+ opset(AVMRLF, r0)
+ opset(AVMRLG, r0)
+ opset(AVMEB, r0)
+ opset(AVMEH, r0)
+ opset(AVMEF, r0)
+ opset(AVMLEB, r0)
+ opset(AVMLEH, r0)
+ opset(AVMLEF, r0)
+ opset(AVMOB, r0)
+ opset(AVMOH, r0)
+ opset(AVMOF, r0)
+ opset(AVMLOB, r0)
+ opset(AVMLOH, r0)
+ opset(AVMLOF, r0)
+ opset(AVMHB, r0)
+ opset(AVMHH, r0)
+ opset(AVMHF, r0)
+ opset(AVMLHB, r0)
+ opset(AVMLHH, r0)
+ opset(AVMLHF, r0)
+ opset(AVMLH, r0)
+ opset(AVMLHW, r0)
+ opset(AVMLF, r0)
+ opset(AVNO, r0)
+ opset(AVO, r0)
+ opset(AVPKH, r0)
+ opset(AVPKF, r0)
+ opset(AVPKG, r0)
+ opset(AVSUMGH, r0)
+ opset(AVSUMGF, r0)
+ opset(AVSUMQF, r0)
+ opset(AVSUMQG, r0)
+ opset(AVSUMB, r0)
+ opset(AVSUMH, r0)
+
+ case AVNOT:
+
+ case AVERLLVG:
+ opset(AVERLLVB, r0)
+ opset(AVERLLVH, r0)
+ opset(AVERLLVF, r0)
+ opset(AVESLVB, r0)
+ opset(AVESLVH, r0)
+ opset(AVESLVF, r0)
+ opset(AVESLVG, r0)
+ opset(AVESRAVB, r0)
+ opset(AVESRAVH, r0)
+ opset(AVESRAVF, r0)
+ opset(AVESRAVG, r0)
+ opset(AVESRLVB, r0)
+ opset(AVESRLVH, r0)
+ opset(AVESRLVF, r0)
+ opset(AVESRLVG, r0)
+ opset(AVFDDB, r0)
+ opset(AWFDDB, r0)
+ opset(AVFSDB, r0)
+ opset(AWFSDB, r0)
+ opset(AVSL, r0)
+ opset(AVSLB, r0)
+ opset(AVSRA, r0)
+ opset(AVSRAB, r0)
+ opset(AVSRL, r0)
+ opset(AVSRLB, r0)
+ opset(AVSF, r0)
+ opset(AVSG, r0)
+ opset(AVSQ, r0)
+ opset(AVSCBIB, r0)
+ opset(AVSCBIH, r0)
+ opset(AVSCBIF, r0)
+ opset(AVSCBIG, r0)
+ opset(AVSCBIQ, r0)
+
+ case AVACQ:
+ opset(AVACCCQ, r0)
+ opset(AVGFMAB, r0)
+ opset(AVGFMAH, r0)
+ opset(AVGFMAF, r0)
+ opset(AVGFMAG, r0)
+ opset(AVMALB, r0)
+ opset(AVMALHW, r0)
+ opset(AVMALF, r0)
+ opset(AVMAHB, r0)
+ opset(AVMAHH, r0)
+ opset(AVMAHF, r0)
+ opset(AVMALHB, r0)
+ opset(AVMALHH, r0)
+ opset(AVMALHF, r0)
+ opset(AVMAEB, r0)
+ opset(AVMAEH, r0)
+ opset(AVMAEF, r0)
+ opset(AVMALEB, r0)
+ opset(AVMALEH, r0)
+ opset(AVMALEF, r0)
+ opset(AVMAOB, r0)
+ opset(AVMAOH, r0)
+ opset(AVMAOF, r0)
+ opset(AVMALOB, r0)
+ opset(AVMALOH, r0)
+ opset(AVMALOF, r0)
+ opset(AVSTRCB, r0)
+ opset(AVSTRCH, r0)
+ opset(AVSTRCF, r0)
+ opset(AVSTRCBS, r0)
+ opset(AVSTRCHS, r0)
+ opset(AVSTRCFS, r0)
+ opset(AVSTRCZB, r0)
+ opset(AVSTRCZH, r0)
+ opset(AVSTRCZF, r0)
+ opset(AVSTRCZBS, r0)
+ opset(AVSTRCZHS, r0)
+ opset(AVSTRCZFS, r0)
+ opset(AVSBCBIQ, r0)
+ opset(AVSBIQ, r0)
+
+ case AVSEL:
+ opset(AVFMADB, r0)
+ opset(AWFMADB, r0)
+ opset(AVFMSDB, r0)
+ opset(AWFMSDB, r0)
+ opset(AVPERM, r0)
+
+ case AVLVGP:
+
+ case AVPDI:
+
+ case AADD,
+ AMOVW,
+ /* load/store/move word with sign extension; special 32-bit move; move 32-bit literals */
+ AMOVWZ, /* load/store/move word with zero extension; move 32-bit literals */
+ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
+ AMOVB, /* macro: move byte with sign extension */
+ AMULLW,
+ /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
+ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
+ ABYTE,
+ AWORD,
+ ADWORD,
+ obj.ANOP,
+ obj.ATEXT,
+ obj.AUNDEF,
+ obj.AFUNCDATA,
+ obj.APCDATA:
+ break
+ }
+ }
+}
+
+const (
+ op_A uint32 = 0x5A00 // FORMAT_RX1 ADD (32)
+ op_AD uint32 = 0x6A00 // FORMAT_RX1 ADD NORMALIZED (long HFP)
+ op_ADB uint32 = 0xED1A // FORMAT_RXE ADD (long BFP)
+ op_ADBR uint32 = 0xB31A // FORMAT_RRE ADD (long BFP)
+ op_ADR uint32 = 0x2A00 // FORMAT_RR ADD NORMALIZED (long HFP)
+ op_ADTR uint32 = 0xB3D2 // FORMAT_RRF1 ADD (long DFP)
+ op_ADTRA uint32 = 0xB3D2 // FORMAT_RRF1 ADD (long DFP)
+ op_AE uint32 = 0x7A00 // FORMAT_RX1 ADD NORMALIZED (short HFP)
+ op_AEB uint32 = 0xED0A // FORMAT_RXE ADD (short BFP)
+ op_AEBR uint32 = 0xB30A // FORMAT_RRE ADD (short BFP)
+ op_AER uint32 = 0x3A00 // FORMAT_RR ADD NORMALIZED (short HFP)
+ op_AFI uint32 = 0xC209 // FORMAT_RIL1 ADD IMMEDIATE (32)
+ op_AG uint32 = 0xE308 // FORMAT_RXY1 ADD (64)
+ op_AGF uint32 = 0xE318 // FORMAT_RXY1 ADD (64<-32)
+ op_AGFI uint32 = 0xC208 // FORMAT_RIL1 ADD IMMEDIATE (64<-32)
+ op_AGFR uint32 = 0xB918 // FORMAT_RRE ADD (64<-32)
+ op_AGHI uint32 = 0xA70B // FORMAT_RI1 ADD HALFWORD IMMEDIATE (64)
+ op_AGHIK uint32 = 0xECD9 // FORMAT_RIE4 ADD IMMEDIATE (64<-16)
+ op_AGR uint32 = 0xB908 // FORMAT_RRE ADD (64)
+ op_AGRK uint32 = 0xB9E8 // FORMAT_RRF1 ADD (64)
+ op_AGSI uint32 = 0xEB7A // FORMAT_SIY ADD IMMEDIATE (64<-8)
+ op_AH uint32 = 0x4A00 // FORMAT_RX1 ADD HALFWORD
+ op_AHHHR uint32 = 0xB9C8 // FORMAT_RRF1 ADD HIGH (32)
+ op_AHHLR uint32 = 0xB9D8 // FORMAT_RRF1 ADD HIGH (32)
+ op_AHI uint32 = 0xA70A // FORMAT_RI1 ADD HALFWORD IMMEDIATE (32)
+ op_AHIK uint32 = 0xECD8 // FORMAT_RIE4 ADD IMMEDIATE (32<-16)
+ op_AHY uint32 = 0xE37A // FORMAT_RXY1 ADD HALFWORD
+ op_AIH uint32 = 0xCC08 // FORMAT_RIL1 ADD IMMEDIATE HIGH (32)
+ op_AL uint32 = 0x5E00 // FORMAT_RX1 ADD LOGICAL (32)
+ op_ALC uint32 = 0xE398 // FORMAT_RXY1 ADD LOGICAL WITH CARRY (32)
+ op_ALCG uint32 = 0xE388 // FORMAT_RXY1 ADD LOGICAL WITH CARRY (64)
+ op_ALCGR uint32 = 0xB988 // FORMAT_RRE ADD LOGICAL WITH CARRY (64)
+ op_ALCR uint32 = 0xB998 // FORMAT_RRE ADD LOGICAL WITH CARRY (32)
+ op_ALFI uint32 = 0xC20B // FORMAT_RIL1 ADD LOGICAL IMMEDIATE (32)
+ op_ALG uint32 = 0xE30A // FORMAT_RXY1 ADD LOGICAL (64)
+ op_ALGF uint32 = 0xE31A // FORMAT_RXY1 ADD LOGICAL (64<-32)
+ op_ALGFI uint32 = 0xC20A // FORMAT_RIL1 ADD LOGICAL IMMEDIATE (64<-32)
+ op_ALGFR uint32 = 0xB91A // FORMAT_RRE ADD LOGICAL (64<-32)
+ op_ALGHSIK uint32 = 0xECDB // FORMAT_RIE4 ADD LOGICAL WITH SIGNED IMMEDIATE (64<-16)
+ op_ALGR uint32 = 0xB90A // FORMAT_RRE ADD LOGICAL (64)
+ op_ALGRK uint32 = 0xB9EA // FORMAT_RRF1 ADD LOGICAL (64)
+ op_ALGSI uint32 = 0xEB7E // FORMAT_SIY ADD LOGICAL WITH SIGNED IMMEDIATE (64<-8)
+ op_ALHHHR uint32 = 0xB9CA // FORMAT_RRF1 ADD LOGICAL HIGH (32)
+ op_ALHHLR uint32 = 0xB9DA // FORMAT_RRF1 ADD LOGICAL HIGH (32)
+ op_ALHSIK uint32 = 0xECDA // FORMAT_RIE4 ADD LOGICAL WITH SIGNED IMMEDIATE (32<-16)
+ op_ALR uint32 = 0x1E00 // FORMAT_RR ADD LOGICAL (32)
+ op_ALRK uint32 = 0xB9FA // FORMAT_RRF1 ADD LOGICAL (32)
+ op_ALSI uint32 = 0xEB6E // FORMAT_SIY ADD LOGICAL WITH SIGNED IMMEDIATE (32<-8)
+ op_ALSIH uint32 = 0xCC0A // FORMAT_RIL1 ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
+ op_ALSIHN uint32 = 0xCC0B // FORMAT_RIL1 ADD LOGICAL WITH SIGNED IMMEDIATE HIGH (32)
+ op_ALY uint32 = 0xE35E // FORMAT_RXY1 ADD LOGICAL (32)
+ op_AP uint32 = 0xFA00 // FORMAT_SS2 ADD DECIMAL
+ op_AR uint32 = 0x1A00 // FORMAT_RR ADD (32)
+ op_ARK uint32 = 0xB9F8 // FORMAT_RRF1 ADD (32)
+ op_ASI uint32 = 0xEB6A // FORMAT_SIY ADD IMMEDIATE (32<-8)
+ op_AU uint32 = 0x7E00 // FORMAT_RX1 ADD UNNORMALIZED (short HFP)
+ op_AUR uint32 = 0x3E00 // FORMAT_RR ADD UNNORMALIZED (short HFP)
+ op_AW uint32 = 0x6E00 // FORMAT_RX1 ADD UNNORMALIZED (long HFP)
+ op_AWR uint32 = 0x2E00 // FORMAT_RR ADD UNNORMALIZED (long HFP)
+ op_AXBR uint32 = 0xB34A // FORMAT_RRE ADD (extended BFP)
+ op_AXR uint32 = 0x3600 // FORMAT_RR ADD NORMALIZED (extended HFP)
+ op_AXTR uint32 = 0xB3DA // FORMAT_RRF1 ADD (extended DFP)
+ op_AXTRA uint32 = 0xB3DA // FORMAT_RRF1 ADD (extended DFP)
+ op_AY uint32 = 0xE35A // FORMAT_RXY1 ADD (32)
+ op_BAKR uint32 = 0xB240 // FORMAT_RRE BRANCH AND STACK
+ op_BAL uint32 = 0x4500 // FORMAT_RX1 BRANCH AND LINK
+ op_BALR uint32 = 0x0500 // FORMAT_RR BRANCH AND LINK
+ op_BAS uint32 = 0x4D00 // FORMAT_RX1 BRANCH AND SAVE
+ op_BASR uint32 = 0x0D00 // FORMAT_RR BRANCH AND SAVE
+ op_BASSM uint32 = 0x0C00 // FORMAT_RR BRANCH AND SAVE AND SET MODE
+ op_BC uint32 = 0x4700 // FORMAT_RX2 BRANCH ON CONDITION
+ op_BCR uint32 = 0x0700 // FORMAT_RR BRANCH ON CONDITION
+ op_BCT uint32 = 0x4600 // FORMAT_RX1 BRANCH ON COUNT (32)
+ op_BCTG uint32 = 0xE346 // FORMAT_RXY1 BRANCH ON COUNT (64)
+ op_BCTGR uint32 = 0xB946 // FORMAT_RRE BRANCH ON COUNT (64)
+ op_BCTR uint32 = 0x0600 // FORMAT_RR BRANCH ON COUNT (32)
+ op_BPP uint32 = 0xC700 // FORMAT_SMI BRANCH PREDICTION PRELOAD
+ op_BPRP uint32 = 0xC500 // FORMAT_MII BRANCH PREDICTION RELATIVE PRELOAD
+ op_BRAS uint32 = 0xA705 // FORMAT_RI2 BRANCH RELATIVE AND SAVE
+ op_BRASL uint32 = 0xC005 // FORMAT_RIL2 BRANCH RELATIVE AND SAVE LONG
+ op_BRC uint32 = 0xA704 // FORMAT_RI3 BRANCH RELATIVE ON CONDITION
+ op_BRCL uint32 = 0xC004 // FORMAT_RIL3 BRANCH RELATIVE ON CONDITION LONG
+ op_BRCT uint32 = 0xA706 // FORMAT_RI2 BRANCH RELATIVE ON COUNT (32)
+ op_BRCTG uint32 = 0xA707 // FORMAT_RI2 BRANCH RELATIVE ON COUNT (64)
+ op_BRCTH uint32 = 0xCC06 // FORMAT_RIL2 BRANCH RELATIVE ON COUNT HIGH (32)
+ op_BRXH uint32 = 0x8400 // FORMAT_RSI BRANCH RELATIVE ON INDEX HIGH (32)
+ op_BRXHG uint32 = 0xEC44 // FORMAT_RIE5 BRANCH RELATIVE ON INDEX HIGH (64)
+ op_BRXLE uint32 = 0x8500 // FORMAT_RSI BRANCH RELATIVE ON INDEX LOW OR EQ. (32)
+ op_BRXLG uint32 = 0xEC45 // FORMAT_RIE5 BRANCH RELATIVE ON INDEX LOW OR EQ. (64)
+ op_BSA uint32 = 0xB25A // FORMAT_RRE BRANCH AND SET AUTHORITY
+ op_BSG uint32 = 0xB258 // FORMAT_RRE BRANCH IN SUBSPACE GROUP
+ op_BSM uint32 = 0x0B00 // FORMAT_RR BRANCH AND SET MODE
+ op_BXH uint32 = 0x8600 // FORMAT_RS1 BRANCH ON INDEX HIGH (32)
+ op_BXHG uint32 = 0xEB44 // FORMAT_RSY1 BRANCH ON INDEX HIGH (64)
+ op_BXLE uint32 = 0x8700 // FORMAT_RS1 BRANCH ON INDEX LOW OR EQUAL (32)
+ op_BXLEG uint32 = 0xEB45 // FORMAT_RSY1 BRANCH ON INDEX LOW OR EQUAL (64)
+ op_C uint32 = 0x5900 // FORMAT_RX1 COMPARE (32)
+ op_CD uint32 = 0x6900 // FORMAT_RX1 COMPARE (long HFP)
+ op_CDB uint32 = 0xED19 // FORMAT_RXE COMPARE (long BFP)
+ op_CDBR uint32 = 0xB319 // FORMAT_RRE COMPARE (long BFP)
+ op_CDFBR uint32 = 0xB395 // FORMAT_RRE CONVERT FROM FIXED (32 to long BFP)
+ op_CDFBRA uint32 = 0xB395 // FORMAT_RRF5 CONVERT FROM FIXED (32 to long BFP)
+ op_CDFR uint32 = 0xB3B5 // FORMAT_RRE CONVERT FROM FIXED (32 to long HFP)
+ op_CDFTR uint32 = 0xB951 // FORMAT_RRE CONVERT FROM FIXED (32 to long DFP)
+ op_CDGBR uint32 = 0xB3A5 // FORMAT_RRE CONVERT FROM FIXED (64 to long BFP)
+ op_CDGBRA uint32 = 0xB3A5 // FORMAT_RRF5 CONVERT FROM FIXED (64 to long BFP)
+ op_CDGR uint32 = 0xB3C5 // FORMAT_RRE CONVERT FROM FIXED (64 to long HFP)
+ op_CDGTR uint32 = 0xB3F1 // FORMAT_RRE CONVERT FROM FIXED (64 to long DFP)
+ op_CDGTRA uint32 = 0xB3F1 // FORMAT_RRF5 CONVERT FROM FIXED (64 to long DFP)
+ op_CDLFBR uint32 = 0xB391 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to long BFP)
+ op_CDLFTR uint32 = 0xB953 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to long DFP)
+ op_CDLGBR uint32 = 0xB3A1 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to long BFP)
+ op_CDLGTR uint32 = 0xB952 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to long DFP)
+ op_CDR uint32 = 0x2900 // FORMAT_RR COMPARE (long HFP)
+ op_CDS uint32 = 0xBB00 // FORMAT_RS1 COMPARE DOUBLE AND SWAP (32)
+ op_CDSG uint32 = 0xEB3E // FORMAT_RSY1 COMPARE DOUBLE AND SWAP (64)
+ op_CDSTR uint32 = 0xB3F3 // FORMAT_RRE CONVERT FROM SIGNED PACKED (64 to long DFP)
+ op_CDSY uint32 = 0xEB31 // FORMAT_RSY1 COMPARE DOUBLE AND SWAP (32)
+ op_CDTR uint32 = 0xB3E4 // FORMAT_RRE COMPARE (long DFP)
+ op_CDUTR uint32 = 0xB3F2 // FORMAT_RRE CONVERT FROM UNSIGNED PACKED (64 to long DFP)
+ op_CDZT uint32 = 0xEDAA // FORMAT_RSL CONVERT FROM ZONED (to long DFP)
+ op_CE uint32 = 0x7900 // FORMAT_RX1 COMPARE (short HFP)
+ op_CEB uint32 = 0xED09 // FORMAT_RXE COMPARE (short BFP)
+ op_CEBR uint32 = 0xB309 // FORMAT_RRE COMPARE (short BFP)
+ op_CEDTR uint32 = 0xB3F4 // FORMAT_RRE COMPARE BIASED EXPONENT (long DFP)
+ op_CEFBR uint32 = 0xB394 // FORMAT_RRE CONVERT FROM FIXED (32 to short BFP)
+ op_CEFBRA uint32 = 0xB394 // FORMAT_RRF5 CONVERT FROM FIXED (32 to short BFP)
+ op_CEFR uint32 = 0xB3B4 // FORMAT_RRE CONVERT FROM FIXED (32 to short HFP)
+ op_CEGBR uint32 = 0xB3A4 // FORMAT_RRE CONVERT FROM FIXED (64 to short BFP)
+ op_CEGBRA uint32 = 0xB3A4 // FORMAT_RRF5 CONVERT FROM FIXED (64 to short BFP)
+ op_CEGR uint32 = 0xB3C4 // FORMAT_RRE CONVERT FROM FIXED (64 to short HFP)
+ op_CELFBR uint32 = 0xB390 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to short BFP)
+ op_CELGBR uint32 = 0xB3A0 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to short BFP)
+ op_CER uint32 = 0x3900 // FORMAT_RR COMPARE (short HFP)
+ op_CEXTR uint32 = 0xB3FC // FORMAT_RRE COMPARE BIASED EXPONENT (extended DFP)
+ op_CFC uint32 = 0xB21A // FORMAT_S COMPARE AND FORM CODEWORD
+ op_CFDBR uint32 = 0xB399 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 32)
+ op_CFDBRA uint32 = 0xB399 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 32)
+ op_CFDR uint32 = 0xB3B9 // FORMAT_RRF5 CONVERT TO FIXED (long HFP to 32)
+ op_CFDTR uint32 = 0xB941 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 32)
+ op_CFEBR uint32 = 0xB398 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 32)
+ op_CFEBRA uint32 = 0xB398 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 32)
+ op_CFER uint32 = 0xB3B8 // FORMAT_RRF5 CONVERT TO FIXED (short HFP to 32)
+ op_CFI uint32 = 0xC20D // FORMAT_RIL1 COMPARE IMMEDIATE (32)
+ op_CFXBR uint32 = 0xB39A // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 32)
+ op_CFXBRA uint32 = 0xB39A // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 32)
+ op_CFXR uint32 = 0xB3BA // FORMAT_RRF5 CONVERT TO FIXED (extended HFP to 32)
+ op_CFXTR uint32 = 0xB949 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 32)
+ op_CG uint32 = 0xE320 // FORMAT_RXY1 COMPARE (64)
+ op_CGDBR uint32 = 0xB3A9 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 64)
+ op_CGDBRA uint32 = 0xB3A9 // FORMAT_RRF5 CONVERT TO FIXED (long BFP to 64)
+ op_CGDR uint32 = 0xB3C9 // FORMAT_RRF5 CONVERT TO FIXED (long HFP to 64)
+ op_CGDTR uint32 = 0xB3E1 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 64)
+ op_CGDTRA uint32 = 0xB3E1 // FORMAT_RRF5 CONVERT TO FIXED (long DFP to 64)
+ op_CGEBR uint32 = 0xB3A8 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 64)
+ op_CGEBRA uint32 = 0xB3A8 // FORMAT_RRF5 CONVERT TO FIXED (short BFP to 64)
+ op_CGER uint32 = 0xB3C8 // FORMAT_RRF5 CONVERT TO FIXED (short HFP to 64)
+ op_CGF uint32 = 0xE330 // FORMAT_RXY1 COMPARE (64<-32)
+ op_CGFI uint32 = 0xC20C // FORMAT_RIL1 COMPARE IMMEDIATE (64<-32)
+ op_CGFR uint32 = 0xB930 // FORMAT_RRE COMPARE (64<-32)
+ op_CGFRL uint32 = 0xC60C // FORMAT_RIL2 COMPARE RELATIVE LONG (64<-32)
+ op_CGH uint32 = 0xE334 // FORMAT_RXY1 COMPARE HALFWORD (64<-16)
+ op_CGHI uint32 = 0xA70F // FORMAT_RI1 COMPARE HALFWORD IMMEDIATE (64<-16)
+ op_CGHRL uint32 = 0xC604 // FORMAT_RIL2 COMPARE HALFWORD RELATIVE LONG (64<-16)
+ op_CGHSI uint32 = 0xE558 // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (64<-16)
+ op_CGIB uint32 = 0xECFC // FORMAT_RIS COMPARE IMMEDIATE AND BRANCH (64<-8)
+ op_CGIJ uint32 = 0xEC7C // FORMAT_RIE3 COMPARE IMMEDIATE AND BRANCH RELATIVE (64<-8)
+ op_CGIT uint32 = 0xEC70 // FORMAT_RIE1 COMPARE IMMEDIATE AND TRAP (64<-16)
+ op_CGR uint32 = 0xB920 // FORMAT_RRE COMPARE (64)
+ op_CGRB uint32 = 0xECE4 // FORMAT_RRS COMPARE AND BRANCH (64)
+ op_CGRJ uint32 = 0xEC64 // FORMAT_RIE2 COMPARE AND BRANCH RELATIVE (64)
+ op_CGRL uint32 = 0xC608 // FORMAT_RIL2 COMPARE RELATIVE LONG (64)
+ op_CGRT uint32 = 0xB960 // FORMAT_RRF3 COMPARE AND TRAP (64)
+ op_CGXBR uint32 = 0xB3AA // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 64)
+ op_CGXBRA uint32 = 0xB3AA // FORMAT_RRF5 CONVERT TO FIXED (extended BFP to 64)
+ op_CGXR uint32 = 0xB3CA // FORMAT_RRF5 CONVERT TO FIXED (extended HFP to 64)
+ op_CGXTR uint32 = 0xB3E9 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 64)
+ op_CGXTRA uint32 = 0xB3E9 // FORMAT_RRF5 CONVERT TO FIXED (extended DFP to 64)
+ op_CH uint32 = 0x4900 // FORMAT_RX1 COMPARE HALFWORD (32<-16)
+ op_CHF uint32 = 0xE3CD // FORMAT_RXY1 COMPARE HIGH (32)
+ op_CHHR uint32 = 0xB9CD // FORMAT_RRE COMPARE HIGH (32)
+ op_CHHSI uint32 = 0xE554 // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (16)
+ op_CHI uint32 = 0xA70E // FORMAT_RI1 COMPARE HALFWORD IMMEDIATE (32<-16)
+ op_CHLR uint32 = 0xB9DD // FORMAT_RRE COMPARE HIGH (32)
+ op_CHRL uint32 = 0xC605 // FORMAT_RIL2 COMPARE HALFWORD RELATIVE LONG (32<-16)
+ op_CHSI uint32 = 0xE55C // FORMAT_SIL COMPARE HALFWORD IMMEDIATE (32<-16)
+ op_CHY uint32 = 0xE379 // FORMAT_RXY1 COMPARE HALFWORD (32<-16)
+ op_CIB uint32 = 0xECFE // FORMAT_RIS COMPARE IMMEDIATE AND BRANCH (32<-8)
+ op_CIH uint32 = 0xCC0D // FORMAT_RIL1 COMPARE IMMEDIATE HIGH (32)
+ op_CIJ uint32 = 0xEC7E // FORMAT_RIE3 COMPARE IMMEDIATE AND BRANCH RELATIVE (32<-8)
+ op_CIT uint32 = 0xEC72 // FORMAT_RIE1 COMPARE IMMEDIATE AND TRAP (32<-16)
+ op_CKSM uint32 = 0xB241 // FORMAT_RRE CHECKSUM
+ op_CL uint32 = 0x5500 // FORMAT_RX1 COMPARE LOGICAL (32)
+ op_CLC uint32 = 0xD500 // FORMAT_SS1 COMPARE LOGICAL (character)
+ op_CLCL uint32 = 0x0F00 // FORMAT_RR COMPARE LOGICAL LONG
+ op_CLCLE uint32 = 0xA900 // FORMAT_RS1 COMPARE LOGICAL LONG EXTENDED
+ op_CLCLU uint32 = 0xEB8F // FORMAT_RSY1 COMPARE LOGICAL LONG UNICODE
+ op_CLFDBR uint32 = 0xB39D // FORMAT_RRF5 CONVERT TO LOGICAL (long BFP to 32)
+ op_CLFDTR uint32 = 0xB943 // FORMAT_RRF5 CONVERT TO LOGICAL (long DFP to 32)
+ op_CLFEBR uint32 = 0xB39C // FORMAT_RRF5 CONVERT TO LOGICAL (short BFP to 32)
+ op_CLFHSI uint32 = 0xE55D // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (32<-16)
+ op_CLFI uint32 = 0xC20F // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE (32)
+ op_CLFIT uint32 = 0xEC73 // FORMAT_RIE1 COMPARE LOGICAL IMMEDIATE AND TRAP (32<-16)
+ op_CLFXBR uint32 = 0xB39E // FORMAT_RRF5 CONVERT TO LOGICAL (extended BFP to 32)
+ op_CLFXTR uint32 = 0xB94B // FORMAT_RRF5 CONVERT TO LOGICAL (extended DFP to 32)
+ op_CLG uint32 = 0xE321 // FORMAT_RXY1 COMPARE LOGICAL (64)
+ op_CLGDBR uint32 = 0xB3AD // FORMAT_RRF5 CONVERT TO LOGICAL (long BFP to 64)
+ op_CLGDTR uint32 = 0xB942 // FORMAT_RRF5 CONVERT TO LOGICAL (long DFP to 64)
+ op_CLGEBR uint32 = 0xB3AC // FORMAT_RRF5 CONVERT TO LOGICAL (short BFP to 64)
+ op_CLGF uint32 = 0xE331 // FORMAT_RXY1 COMPARE LOGICAL (64<-32)
+ op_CLGFI uint32 = 0xC20E // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE (64<-32)
+ op_CLGFR uint32 = 0xB931 // FORMAT_RRE COMPARE LOGICAL (64<-32)
+ op_CLGFRL uint32 = 0xC60E // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64<-32)
+ op_CLGHRL uint32 = 0xC606 // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64<-16)
+ op_CLGHSI uint32 = 0xE559 // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (64<-16)
+ op_CLGIB uint32 = 0xECFD // FORMAT_RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (64<-8)
+ op_CLGIJ uint32 = 0xEC7D // FORMAT_RIE3 COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (64<-8)
+ op_CLGIT uint32 = 0xEC71 // FORMAT_RIE1 COMPARE LOGICAL IMMEDIATE AND TRAP (64<-16)
+ op_CLGR uint32 = 0xB921 // FORMAT_RRE COMPARE LOGICAL (64)
+ op_CLGRB uint32 = 0xECE5 // FORMAT_RRS COMPARE LOGICAL AND BRANCH (64)
+ op_CLGRJ uint32 = 0xEC65 // FORMAT_RIE2 COMPARE LOGICAL AND BRANCH RELATIVE (64)
+ op_CLGRL uint32 = 0xC60A // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (64)
+ op_CLGRT uint32 = 0xB961 // FORMAT_RRF3 COMPARE LOGICAL AND TRAP (64)
+ op_CLGT uint32 = 0xEB2B // FORMAT_RSY2 COMPARE LOGICAL AND TRAP (64)
+ op_CLGXBR uint32 = 0xB3AE // FORMAT_RRF5 CONVERT TO LOGICAL (extended BFP to 64)
+ op_CLGXTR uint32 = 0xB94A // FORMAT_RRF5 CONVERT TO LOGICAL (extended DFP to 64)
+ op_CLHF uint32 = 0xE3CF // FORMAT_RXY1 COMPARE LOGICAL HIGH (32)
+ op_CLHHR uint32 = 0xB9CF // FORMAT_RRE COMPARE LOGICAL HIGH (32)
+ op_CLHHSI uint32 = 0xE555 // FORMAT_SIL COMPARE LOGICAL IMMEDIATE (16)
+ op_CLHLR uint32 = 0xB9DF // FORMAT_RRE COMPARE LOGICAL HIGH (32)
+ op_CLHRL uint32 = 0xC607 // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (32<-16)
+ op_CLI uint32 = 0x9500 // FORMAT_SI COMPARE LOGICAL (immediate)
+ op_CLIB uint32 = 0xECFF // FORMAT_RIS COMPARE LOGICAL IMMEDIATE AND BRANCH (32<-8)
+ op_CLIH uint32 = 0xCC0F // FORMAT_RIL1 COMPARE LOGICAL IMMEDIATE HIGH (32)
+ op_CLIJ uint32 = 0xEC7F // FORMAT_RIE3 COMPARE LOGICAL IMMEDIATE AND BRANCH RELATIVE (32<-8)
+ op_CLIY uint32 = 0xEB55 // FORMAT_SIY COMPARE LOGICAL (immediate)
+ op_CLM uint32 = 0xBD00 // FORMAT_RS2 COMPARE LOGICAL CHAR. UNDER MASK (low)
+ op_CLMH uint32 = 0xEB20 // FORMAT_RSY2 COMPARE LOGICAL CHAR. UNDER MASK (high)
+ op_CLMY uint32 = 0xEB21 // FORMAT_RSY2 COMPARE LOGICAL CHAR. UNDER MASK (low)
+ op_CLR uint32 = 0x1500 // FORMAT_RR COMPARE LOGICAL (32)
+ op_CLRB uint32 = 0xECF7 // FORMAT_RRS COMPARE LOGICAL AND BRANCH (32)
+ op_CLRJ uint32 = 0xEC77 // FORMAT_RIE2 COMPARE LOGICAL AND BRANCH RELATIVE (32)
+ op_CLRL uint32 = 0xC60F // FORMAT_RIL2 COMPARE LOGICAL RELATIVE LONG (32)
+ op_CLRT uint32 = 0xB973 // FORMAT_RRF3 COMPARE LOGICAL AND TRAP (32)
+ op_CLST uint32 = 0xB25D // FORMAT_RRE COMPARE LOGICAL STRING
+ op_CLT uint32 = 0xEB23 // FORMAT_RSY2 COMPARE LOGICAL AND TRAP (32)
+ op_CLY uint32 = 0xE355 // FORMAT_RXY1 COMPARE LOGICAL (32)
+ op_CMPSC uint32 = 0xB263 // FORMAT_RRE COMPRESSION CALL
+ op_CP uint32 = 0xF900 // FORMAT_SS2 COMPARE DECIMAL
+ op_CPSDR uint32 = 0xB372 // FORMAT_RRF2 COPY SIGN (long)
+ op_CPYA uint32 = 0xB24D // FORMAT_RRE COPY ACCESS
+ op_CR uint32 = 0x1900 // FORMAT_RR COMPARE (32)
+ op_CRB uint32 = 0xECF6 // FORMAT_RRS COMPARE AND BRANCH (32)
+ op_CRDTE uint32 = 0xB98F // FORMAT_RRF2 COMPARE AND REPLACE DAT TABLE ENTRY
+ op_CRJ uint32 = 0xEC76 // FORMAT_RIE2 COMPARE AND BRANCH RELATIVE (32)
+ op_CRL uint32 = 0xC60D // FORMAT_RIL2 COMPARE RELATIVE LONG (32)
+ op_CRT uint32 = 0xB972 // FORMAT_RRF3 COMPARE AND TRAP (32)
+ op_CS uint32 = 0xBA00 // FORMAT_RS1 COMPARE AND SWAP (32)
+ op_CSCH uint32 = 0xB230 // FORMAT_S CLEAR SUBCHANNEL
+ op_CSDTR uint32 = 0xB3E3 // FORMAT_RRF4 CONVERT TO SIGNED PACKED (long DFP to 64)
+ op_CSG uint32 = 0xEB30 // FORMAT_RSY1 COMPARE AND SWAP (64)
+ op_CSP uint32 = 0xB250 // FORMAT_RRE COMPARE AND SWAP AND PURGE
+ op_CSPG uint32 = 0xB98A // FORMAT_RRE COMPARE AND SWAP AND PURGE
+ op_CSST uint32 = 0xC802 // FORMAT_SSF COMPARE AND SWAP AND STORE
+ op_CSXTR uint32 = 0xB3EB // FORMAT_RRF4 CONVERT TO SIGNED PACKED (extended DFP to 128)
+ op_CSY uint32 = 0xEB14 // FORMAT_RSY1 COMPARE AND SWAP (32)
+ op_CU12 uint32 = 0xB2A7 // FORMAT_RRF3 CONVERT UTF-8 TO UTF-16
+ op_CU14 uint32 = 0xB9B0 // FORMAT_RRF3 CONVERT UTF-8 TO UTF-32
+ op_CU21 uint32 = 0xB2A6 // FORMAT_RRF3 CONVERT UTF-16 TO UTF-8
+ op_CU24 uint32 = 0xB9B1 // FORMAT_RRF3 CONVERT UTF-16 TO UTF-32
+ op_CU41 uint32 = 0xB9B2 // FORMAT_RRE CONVERT UTF-32 TO UTF-8
+ op_CU42 uint32 = 0xB9B3 // FORMAT_RRE CONVERT UTF-32 TO UTF-16
+ op_CUDTR uint32 = 0xB3E2 // FORMAT_RRE CONVERT TO UNSIGNED PACKED (long DFP to 64)
+ op_CUSE uint32 = 0xB257 // FORMAT_RRE COMPARE UNTIL SUBSTRING EQUAL
+ op_CUTFU uint32 = 0xB2A7 // FORMAT_RRF3 CONVERT UTF-8 TO UNICODE
+ op_CUUTF uint32 = 0xB2A6 // FORMAT_RRF3 CONVERT UNICODE TO UTF-8
+ op_CUXTR uint32 = 0xB3EA // FORMAT_RRE CONVERT TO UNSIGNED PACKED (extended DFP to 128)
+ op_CVB uint32 = 0x4F00 // FORMAT_RX1 CONVERT TO BINARY (32)
+ op_CVBG uint32 = 0xE30E // FORMAT_RXY1 CONVERT TO BINARY (64)
+ op_CVBY uint32 = 0xE306 // FORMAT_RXY1 CONVERT TO BINARY (32)
+ op_CVD uint32 = 0x4E00 // FORMAT_RX1 CONVERT TO DECIMAL (32)
+ op_CVDG uint32 = 0xE32E // FORMAT_RXY1 CONVERT TO DECIMAL (64)
+ op_CVDY uint32 = 0xE326 // FORMAT_RXY1 CONVERT TO DECIMAL (32)
+ op_CXBR uint32 = 0xB349 // FORMAT_RRE COMPARE (extended BFP)
+ op_CXFBR uint32 = 0xB396 // FORMAT_RRE CONVERT FROM FIXED (32 to extended BFP)
+ op_CXFBRA uint32 = 0xB396 // FORMAT_RRF5 CONVERT FROM FIXED (32 to extended BFP)
+ op_CXFR uint32 = 0xB3B6 // FORMAT_RRE CONVERT FROM FIXED (32 to extended HFP)
+ op_CXFTR uint32 = 0xB959 // FORMAT_RRE CONVERT FROM FIXED (32 to extended DFP)
+ op_CXGBR uint32 = 0xB3A6 // FORMAT_RRE CONVERT FROM FIXED (64 to extended BFP)
+ op_CXGBRA uint32 = 0xB3A6 // FORMAT_RRF5 CONVERT FROM FIXED (64 to extended BFP)
+ op_CXGR uint32 = 0xB3C6 // FORMAT_RRE CONVERT FROM FIXED (64 to extended HFP)
+ op_CXGTR uint32 = 0xB3F9 // FORMAT_RRE CONVERT FROM FIXED (64 to extended DFP)
+ op_CXGTRA uint32 = 0xB3F9 // FORMAT_RRF5 CONVERT FROM FIXED (64 to extended DFP)
+ op_CXLFBR uint32 = 0xB392 // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to extended BFP)
+ op_CXLFTR uint32 = 0xB95B // FORMAT_RRF5 CONVERT FROM LOGICAL (32 to extended DFP)
+ op_CXLGBR uint32 = 0xB3A2 // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to extended BFP)
+ op_CXLGTR uint32 = 0xB95A // FORMAT_RRF5 CONVERT FROM LOGICAL (64 to extended DFP)
+ op_CXR uint32 = 0xB369 // FORMAT_RRE COMPARE (extended HFP)
+ op_CXSTR uint32 = 0xB3FB // FORMAT_RRE CONVERT FROM SIGNED PACKED (128 to extended DFP)
+ op_CXTR uint32 = 0xB3EC // FORMAT_RRE COMPARE (extended DFP)
+ op_CXUTR uint32 = 0xB3FA // FORMAT_RRE CONVERT FROM UNSIGNED PACKED (128 to ext. DFP)
+ op_CXZT uint32 = 0xEDAB // FORMAT_RSL CONVERT FROM ZONED (to extended DFP)
+ op_CY uint32 = 0xE359 // FORMAT_RXY1 COMPARE (32)
+ op_CZDT uint32 = 0xEDA8 // FORMAT_RSL CONVERT TO ZONED (from long DFP)
+ op_CZXT uint32 = 0xEDA9 // FORMAT_RSL CONVERT TO ZONED (from extended DFP)
+ op_D uint32 = 0x5D00 // FORMAT_RX1 DIVIDE (32<-64)
+ op_DD uint32 = 0x6D00 // FORMAT_RX1 DIVIDE (long HFP)
+ op_DDB uint32 = 0xED1D // FORMAT_RXE DIVIDE (long BFP)
+ op_DDBR uint32 = 0xB31D // FORMAT_RRE DIVIDE (long BFP)
+ op_DDR uint32 = 0x2D00 // FORMAT_RR DIVIDE (long HFP)
+ op_DDTR uint32 = 0xB3D1 // FORMAT_RRF1 DIVIDE (long DFP)
+ op_DDTRA uint32 = 0xB3D1 // FORMAT_RRF1 DIVIDE (long DFP)
+ op_DE uint32 = 0x7D00 // FORMAT_RX1 DIVIDE (short HFP)
+ op_DEB uint32 = 0xED0D // FORMAT_RXE DIVIDE (short BFP)
+ op_DEBR uint32 = 0xB30D // FORMAT_RRE DIVIDE (short BFP)
+ op_DER uint32 = 0x3D00 // FORMAT_RR DIVIDE (short HFP)
+ op_DIDBR uint32 = 0xB35B // FORMAT_RRF2 DIVIDE TO INTEGER (long BFP)
+ op_DIEBR uint32 = 0xB353 // FORMAT_RRF2 DIVIDE TO INTEGER (short BFP)
+ op_DL uint32 = 0xE397 // FORMAT_RXY1 DIVIDE LOGICAL (32<-64)
+ op_DLG uint32 = 0xE387 // FORMAT_RXY1 DIVIDE LOGICAL (64<-128)
+ op_DLGR uint32 = 0xB987 // FORMAT_RRE DIVIDE LOGICAL (64<-128)
+ op_DLR uint32 = 0xB997 // FORMAT_RRE DIVIDE LOGICAL (32<-64)
+ op_DP uint32 = 0xFD00 // FORMAT_SS2 DIVIDE DECIMAL
+ op_DR uint32 = 0x1D00 // FORMAT_RR DIVIDE (32<-64)
+ op_DSG uint32 = 0xE30D // FORMAT_RXY1 DIVIDE SINGLE (64)
+ op_DSGF uint32 = 0xE31D // FORMAT_RXY1 DIVIDE SINGLE (64<-32)
+ op_DSGFR uint32 = 0xB91D // FORMAT_RRE DIVIDE SINGLE (64<-32)
+ op_DSGR uint32 = 0xB90D // FORMAT_RRE DIVIDE SINGLE (64)
+ op_DXBR uint32 = 0xB34D // FORMAT_RRE DIVIDE (extended BFP)
+ op_DXR uint32 = 0xB22D // FORMAT_RRE DIVIDE (extended HFP)
+ op_DXTR uint32 = 0xB3D9 // FORMAT_RRF1 DIVIDE (extended DFP)
+ op_DXTRA uint32 = 0xB3D9 // FORMAT_RRF1 DIVIDE (extended DFP)
+ op_EAR uint32 = 0xB24F // FORMAT_RRE EXTRACT ACCESS
+ op_ECAG uint32 = 0xEB4C // FORMAT_RSY1 EXTRACT CACHE ATTRIBUTE
+ op_ECTG uint32 = 0xC801 // FORMAT_SSF EXTRACT CPU TIME
+ op_ED uint32 = 0xDE00 // FORMAT_SS1 EDIT
+ op_EDMK uint32 = 0xDF00 // FORMAT_SS1 EDIT AND MARK
+ op_EEDTR uint32 = 0xB3E5 // FORMAT_RRE EXTRACT BIASED EXPONENT (long DFP to 64)
+ op_EEXTR uint32 = 0xB3ED // FORMAT_RRE EXTRACT BIASED EXPONENT (extended DFP to 64)
+ op_EFPC uint32 = 0xB38C // FORMAT_RRE EXTRACT FPC
+ op_EPAIR uint32 = 0xB99A // FORMAT_RRE EXTRACT PRIMARY ASN AND INSTANCE
+ op_EPAR uint32 = 0xB226 // FORMAT_RRE EXTRACT PRIMARY ASN
+ op_EPSW uint32 = 0xB98D // FORMAT_RRE EXTRACT PSW
+ op_EREG uint32 = 0xB249 // FORMAT_RRE EXTRACT STACKED REGISTERS (32)
+ op_EREGG uint32 = 0xB90E // FORMAT_RRE EXTRACT STACKED REGISTERS (64)
+ op_ESAIR uint32 = 0xB99B // FORMAT_RRE EXTRACT SECONDARY ASN AND INSTANCE
+ op_ESAR uint32 = 0xB227 // FORMAT_RRE EXTRACT SECONDARY ASN
+ op_ESDTR uint32 = 0xB3E7 // FORMAT_RRE EXTRACT SIGNIFICANCE (long DFP)
+ op_ESEA uint32 = 0xB99D // FORMAT_RRE EXTRACT AND SET EXTENDED AUTHORITY
+ op_ESTA uint32 = 0xB24A // FORMAT_RRE EXTRACT STACKED STATE
+ op_ESXTR uint32 = 0xB3EF // FORMAT_RRE EXTRACT SIGNIFICANCE (extended DFP)
+ op_ETND uint32 = 0xB2EC // FORMAT_RRE EXTRACT TRANSACTION NESTING DEPTH
+ op_EX uint32 = 0x4400 // FORMAT_RX1 EXECUTE
+ op_EXRL uint32 = 0xC600 // FORMAT_RIL2 EXECUTE RELATIVE LONG
+ op_FIDBR uint32 = 0xB35F // FORMAT_RRF5 LOAD FP INTEGER (long BFP)
+ op_FIDBRA uint32 = 0xB35F // FORMAT_RRF5 LOAD FP INTEGER (long BFP)
+ op_FIDR uint32 = 0xB37F // FORMAT_RRE LOAD FP INTEGER (long HFP)
+ op_FIDTR uint32 = 0xB3D7 // FORMAT_RRF5 LOAD FP INTEGER (long DFP)
+ op_FIEBR uint32 = 0xB357 // FORMAT_RRF5 LOAD FP INTEGER (short BFP)
+ op_FIEBRA uint32 = 0xB357 // FORMAT_RRF5 LOAD FP INTEGER (short BFP)
+ op_FIER uint32 = 0xB377 // FORMAT_RRE LOAD FP INTEGER (short HFP)
+ op_FIXBR uint32 = 0xB347 // FORMAT_RRF5 LOAD FP INTEGER (extended BFP)
+ op_FIXBRA uint32 = 0xB347 // FORMAT_RRF5 LOAD FP INTEGER (extended BFP)
+ op_FIXR uint32 = 0xB367 // FORMAT_RRE LOAD FP INTEGER (extended HFP)
+ op_FIXTR uint32 = 0xB3DF // FORMAT_RRF5 LOAD FP INTEGER (extended DFP)
+ op_FLOGR uint32 = 0xB983 // FORMAT_RRE FIND LEFTMOST ONE
+ op_HDR uint32 = 0x2400 // FORMAT_RR HALVE (long HFP)
+ op_HER uint32 = 0x3400 // FORMAT_RR HALVE (short HFP)
+ op_HSCH uint32 = 0xB231 // FORMAT_S HALT SUBCHANNEL
+ op_IAC uint32 = 0xB224 // FORMAT_RRE INSERT ADDRESS SPACE CONTROL
+ op_IC uint32 = 0x4300 // FORMAT_RX1 INSERT CHARACTER
+ op_ICM uint32 = 0xBF00 // FORMAT_RS2 INSERT CHARACTERS UNDER MASK (low)
+ op_ICMH uint32 = 0xEB80 // FORMAT_RSY2 INSERT CHARACTERS UNDER MASK (high)
+ op_ICMY uint32 = 0xEB81 // FORMAT_RSY2 INSERT CHARACTERS UNDER MASK (low)
+ op_ICY uint32 = 0xE373 // FORMAT_RXY1 INSERT CHARACTER
+ op_IDTE uint32 = 0xB98E // FORMAT_RRF2 INVALIDATE DAT TABLE ENTRY
+ op_IEDTR uint32 = 0xB3F6 // FORMAT_RRF2 INSERT BIASED EXPONENT (64 to long DFP)
+ op_IEXTR uint32 = 0xB3FE // FORMAT_RRF2 INSERT BIASED EXPONENT (64 to extended DFP)
+ op_IIHF uint32 = 0xC008 // FORMAT_RIL1 INSERT IMMEDIATE (high)
+ op_IIHH uint32 = 0xA500 // FORMAT_RI1 INSERT IMMEDIATE (high high)
+ op_IIHL uint32 = 0xA501 // FORMAT_RI1 INSERT IMMEDIATE (high low)
+ op_IILF uint32 = 0xC009 // FORMAT_RIL1 INSERT IMMEDIATE (low)
+ op_IILH uint32 = 0xA502 // FORMAT_RI1 INSERT IMMEDIATE (low high)
+ op_IILL uint32 = 0xA503 // FORMAT_RI1 INSERT IMMEDIATE (low low)
+ op_IPK uint32 = 0xB20B // FORMAT_S INSERT PSW KEY
+ op_IPM uint32 = 0xB222 // FORMAT_RRE INSERT PROGRAM MASK
+ op_IPTE uint32 = 0xB221 // FORMAT_RRF1 INVALIDATE PAGE TABLE ENTRY
+ op_ISKE uint32 = 0xB229 // FORMAT_RRE INSERT STORAGE KEY EXTENDED
+ op_IVSK uint32 = 0xB223 // FORMAT_RRE INSERT VIRTUAL STORAGE KEY
+ op_KDB uint32 = 0xED18 // FORMAT_RXE COMPARE AND SIGNAL (long BFP)
+ op_KDBR uint32 = 0xB318 // FORMAT_RRE COMPARE AND SIGNAL (long BFP)
+ op_KDTR uint32 = 0xB3E0 // FORMAT_RRE COMPARE AND SIGNAL (long DFP)
+ op_KEB uint32 = 0xED08 // FORMAT_RXE COMPARE AND SIGNAL (short BFP)
+ op_KEBR uint32 = 0xB308 // FORMAT_RRE COMPARE AND SIGNAL (short BFP)
+ op_KIMD uint32 = 0xB93E // FORMAT_RRE COMPUTE INTERMEDIATE MESSAGE DIGEST
+ op_KLMD uint32 = 0xB93F // FORMAT_RRE COMPUTE LAST MESSAGE DIGEST
+ op_KM uint32 = 0xB92E // FORMAT_RRE CIPHER MESSAGE
+ op_KMAC uint32 = 0xB91E // FORMAT_RRE COMPUTE MESSAGE AUTHENTICATION CODE
+ op_KMC uint32 = 0xB92F // FORMAT_RRE CIPHER MESSAGE WITH CHAINING
+ op_KMCTR uint32 = 0xB92D // FORMAT_RRF2 CIPHER MESSAGE WITH COUNTER
+ op_KMF uint32 = 0xB92A // FORMAT_RRE CIPHER MESSAGE WITH CFB
+ op_KMO uint32 = 0xB92B // FORMAT_RRE CIPHER MESSAGE WITH OFB
+ op_KXBR uint32 = 0xB348 // FORMAT_RRE COMPARE AND SIGNAL (extended BFP)
+ op_KXTR uint32 = 0xB3E8 // FORMAT_RRE COMPARE AND SIGNAL (extended DFP)
+ op_L uint32 = 0x5800 // FORMAT_RX1 LOAD (32)
+ op_LA uint32 = 0x4100 // FORMAT_RX1 LOAD ADDRESS
+ op_LAA uint32 = 0xEBF8 // FORMAT_RSY1 LOAD AND ADD (32)
+ op_LAAG uint32 = 0xEBE8 // FORMAT_RSY1 LOAD AND ADD (64)
+ op_LAAL uint32 = 0xEBFA // FORMAT_RSY1 LOAD AND ADD LOGICAL (32)
+ op_LAALG uint32 = 0xEBEA // FORMAT_RSY1 LOAD AND ADD LOGICAL (64)
+ op_LAE uint32 = 0x5100 // FORMAT_RX1 LOAD ADDRESS EXTENDED
+ op_LAEY uint32 = 0xE375 // FORMAT_RXY1 LOAD ADDRESS EXTENDED
+ op_LAM uint32 = 0x9A00 // FORMAT_RS1 LOAD ACCESS MULTIPLE
+ op_LAMY uint32 = 0xEB9A // FORMAT_RSY1 LOAD ACCESS MULTIPLE
+ op_LAN uint32 = 0xEBF4 // FORMAT_RSY1 LOAD AND AND (32)
+ op_LANG uint32 = 0xEBE4 // FORMAT_RSY1 LOAD AND AND (64)
+ op_LAO uint32 = 0xEBF6 // FORMAT_RSY1 LOAD AND OR (32)
+ op_LAOG uint32 = 0xEBE6 // FORMAT_RSY1 LOAD AND OR (64)
+ op_LARL uint32 = 0xC000 // FORMAT_RIL2 LOAD ADDRESS RELATIVE LONG
+ op_LASP uint32 = 0xE500 // FORMAT_SSE LOAD ADDRESS SPACE PARAMETERS
+ op_LAT uint32 = 0xE39F // FORMAT_RXY1 LOAD AND TRAP (32L<-32)
+ op_LAX uint32 = 0xEBF7 // FORMAT_RSY1 LOAD AND EXCLUSIVE OR (32)
+ op_LAXG uint32 = 0xEBE7 // FORMAT_RSY1 LOAD AND EXCLUSIVE OR (64)
+ op_LAY uint32 = 0xE371 // FORMAT_RXY1 LOAD ADDRESS
+ op_LB uint32 = 0xE376 // FORMAT_RXY1 LOAD BYTE (32)
+ op_LBH uint32 = 0xE3C0 // FORMAT_RXY1 LOAD BYTE HIGH (32<-8)
+ op_LBR uint32 = 0xB926 // FORMAT_RRE LOAD BYTE (32)
+ op_LCDBR uint32 = 0xB313 // FORMAT_RRE LOAD COMPLEMENT (long BFP)
+ op_LCDFR uint32 = 0xB373 // FORMAT_RRE LOAD COMPLEMENT (long)
+ op_LCDR uint32 = 0x2300 // FORMAT_RR LOAD COMPLEMENT (long HFP)
+ op_LCEBR uint32 = 0xB303 // FORMAT_RRE LOAD COMPLEMENT (short BFP)
+ op_LCER uint32 = 0x3300 // FORMAT_RR LOAD COMPLEMENT (short HFP)
+ op_LCGFR uint32 = 0xB913 // FORMAT_RRE LOAD COMPLEMENT (64<-32)
+ op_LCGR uint32 = 0xB903 // FORMAT_RRE LOAD COMPLEMENT (64)
+ op_LCR uint32 = 0x1300 // FORMAT_RR LOAD COMPLEMENT (32)
+ op_LCTL uint32 = 0xB700 // FORMAT_RS1 LOAD CONTROL (32)
+ op_LCTLG uint32 = 0xEB2F // FORMAT_RSY1 LOAD CONTROL (64)
+ op_LCXBR uint32 = 0xB343 // FORMAT_RRE LOAD COMPLEMENT (extended BFP)
+ op_LCXR uint32 = 0xB363 // FORMAT_RRE LOAD COMPLEMENT (extended HFP)
+ op_LD uint32 = 0x6800 // FORMAT_RX1 LOAD (long)
+ op_LDE uint32 = 0xED24 // FORMAT_RXE LOAD LENGTHENED (short to long HFP)
+ op_LDEB uint32 = 0xED04 // FORMAT_RXE LOAD LENGTHENED (short to long BFP)
+ op_LDEBR uint32 = 0xB304 // FORMAT_RRE LOAD LENGTHENED (short to long BFP)
+ op_LDER uint32 = 0xB324 // FORMAT_RRE LOAD LENGTHENED (short to long HFP)
+ op_LDETR uint32 = 0xB3D4 // FORMAT_RRF4 LOAD LENGTHENED (short to long DFP)
+ op_LDGR uint32 = 0xB3C1 // FORMAT_RRE LOAD FPR FROM GR (64 to long)
+ op_LDR uint32 = 0x2800 // FORMAT_RR LOAD (long)
+ op_LDXBR uint32 = 0xB345 // FORMAT_RRE LOAD ROUNDED (extended to long BFP)
+ op_LDXBRA uint32 = 0xB345 // FORMAT_RRF5 LOAD ROUNDED (extended to long BFP)
+ op_LDXR uint32 = 0x2500 // FORMAT_RR LOAD ROUNDED (extended to long HFP)
+ op_LDXTR uint32 = 0xB3DD // FORMAT_RRF5 LOAD ROUNDED (extended to long DFP)
+ op_LDY uint32 = 0xED65 // FORMAT_RXY1 LOAD (long)
+ op_LE uint32 = 0x7800 // FORMAT_RX1 LOAD (short)
+ op_LEDBR uint32 = 0xB344 // FORMAT_RRE LOAD ROUNDED (long to short BFP)
+ op_LEDBRA uint32 = 0xB344 // FORMAT_RRF5 LOAD ROUNDED (long to short BFP)
+ op_LEDR uint32 = 0x3500 // FORMAT_RR LOAD ROUNDED (long to short HFP)
+ op_LEDTR uint32 = 0xB3D5 // FORMAT_RRF5 LOAD ROUNDED (long to short DFP)
+ op_LER uint32 = 0x3800 // FORMAT_RR LOAD (short)
+ op_LEXBR uint32 = 0xB346 // FORMAT_RRE LOAD ROUNDED (extended to short BFP)
+ op_LEXBRA uint32 = 0xB346 // FORMAT_RRF5 LOAD ROUNDED (extended to short BFP)
+ op_LEXR uint32 = 0xB366 // FORMAT_RRE LOAD ROUNDED (extended to short HFP)
+ op_LEY uint32 = 0xED64 // FORMAT_RXY1 LOAD (short)
+ op_LFAS uint32 = 0xB2BD // FORMAT_S LOAD FPC AND SIGNAL
+ op_LFH uint32 = 0xE3CA // FORMAT_RXY1 LOAD HIGH (32)
+ op_LFHAT uint32 = 0xE3C8 // FORMAT_RXY1 LOAD HIGH AND TRAP (32H<-32)
+ op_LFPC uint32 = 0xB29D // FORMAT_S LOAD FPC
+ op_LG uint32 = 0xE304 // FORMAT_RXY1 LOAD (64)
+ op_LGAT uint32 = 0xE385 // FORMAT_RXY1 LOAD AND TRAP (64)
+ op_LGB uint32 = 0xE377 // FORMAT_RXY1 LOAD BYTE (64)
+ op_LGBR uint32 = 0xB906 // FORMAT_RRE LOAD BYTE (64)
+ op_LGDR uint32 = 0xB3CD // FORMAT_RRE LOAD GR FROM FPR (long to 64)
+ op_LGF uint32 = 0xE314 // FORMAT_RXY1 LOAD (64<-32)
+ op_LGFI uint32 = 0xC001 // FORMAT_RIL1 LOAD IMMEDIATE (64<-32)
+ op_LGFR uint32 = 0xB914 // FORMAT_RRE LOAD (64<-32)
+ op_LGFRL uint32 = 0xC40C // FORMAT_RIL2 LOAD RELATIVE LONG (64<-32)
+ op_LGH uint32 = 0xE315 // FORMAT_RXY1 LOAD HALFWORD (64)
+ op_LGHI uint32 = 0xA709 // FORMAT_RI1 LOAD HALFWORD IMMEDIATE (64)
+ op_LGHR uint32 = 0xB907 // FORMAT_RRE LOAD HALFWORD (64)
+ op_LGHRL uint32 = 0xC404 // FORMAT_RIL2 LOAD HALFWORD RELATIVE LONG (64<-16)
+ op_LGR uint32 = 0xB904 // FORMAT_RRE LOAD (64)
+ op_LGRL uint32 = 0xC408 // FORMAT_RIL2 LOAD RELATIVE LONG (64)
+ op_LH uint32 = 0x4800 // FORMAT_RX1 LOAD HALFWORD (32)
+ op_LHH uint32 = 0xE3C4 // FORMAT_RXY1 LOAD HALFWORD HIGH (32<-16)
+ op_LHI uint32 = 0xA708 // FORMAT_RI1 LOAD HALFWORD IMMEDIATE (32)
+ op_LHR uint32 = 0xB927 // FORMAT_RRE LOAD HALFWORD (32)
+ op_LHRL uint32 = 0xC405 // FORMAT_RIL2 LOAD HALFWORD RELATIVE LONG (32<-16)
+ op_LHY uint32 = 0xE378 // FORMAT_RXY1 LOAD HALFWORD (32)
+ op_LLC uint32 = 0xE394 // FORMAT_RXY1 LOAD LOGICAL CHARACTER (32)
+ op_LLCH uint32 = 0xE3C2 // FORMAT_RXY1 LOAD LOGICAL CHARACTER HIGH (32<-8)
+ op_LLCR uint32 = 0xB994 // FORMAT_RRE LOAD LOGICAL CHARACTER (32)
+ op_LLGC uint32 = 0xE390 // FORMAT_RXY1 LOAD LOGICAL CHARACTER (64)
+ op_LLGCR uint32 = 0xB984 // FORMAT_RRE LOAD LOGICAL CHARACTER (64)
+ op_LLGF uint32 = 0xE316 // FORMAT_RXY1 LOAD LOGICAL (64<-32)
+ op_LLGFAT uint32 = 0xE39D // FORMAT_RXY1 LOAD LOGICAL AND TRAP (64<-32)
+ op_LLGFR uint32 = 0xB916 // FORMAT_RRE LOAD LOGICAL (64<-32)
+ op_LLGFRL uint32 = 0xC40E // FORMAT_RIL2 LOAD LOGICAL RELATIVE LONG (64<-32)
+ op_LLGH uint32 = 0xE391 // FORMAT_RXY1 LOAD LOGICAL HALFWORD (64)
+ op_LLGHR uint32 = 0xB985 // FORMAT_RRE LOAD LOGICAL HALFWORD (64)
+ op_LLGHRL uint32 = 0xC406 // FORMAT_RIL2 LOAD LOGICAL HALFWORD RELATIVE LONG (64<-16)
+ op_LLGT uint32 = 0xE317 // FORMAT_RXY1 LOAD LOGICAL THIRTY ONE BITS
+ op_LLGTAT uint32 = 0xE39C // FORMAT_RXY1 LOAD LOGICAL THIRTY ONE BITS AND TRAP (64<-31)
+ op_LLGTR uint32 = 0xB917 // FORMAT_RRE LOAD LOGICAL THIRTY ONE BITS
+ op_LLH uint32 = 0xE395 // FORMAT_RXY1 LOAD LOGICAL HALFWORD (32)
+ op_LLHH uint32 = 0xE3C6 // FORMAT_RXY1 LOAD LOGICAL HALFWORD HIGH (32<-16)
+ op_LLHR uint32 = 0xB995 // FORMAT_RRE LOAD LOGICAL HALFWORD (32)
+ op_LLHRL uint32 = 0xC402 // FORMAT_RIL2 LOAD LOGICAL HALFWORD RELATIVE LONG (32<-16)
+ op_LLIHF uint32 = 0xC00E // FORMAT_RIL1 LOAD LOGICAL IMMEDIATE (high)
+ op_LLIHH uint32 = 0xA50C // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (high high)
+ op_LLIHL uint32 = 0xA50D // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (high low)
+ op_LLILF uint32 = 0xC00F // FORMAT_RIL1 LOAD LOGICAL IMMEDIATE (low)
+ op_LLILH uint32 = 0xA50E // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (low high)
+ op_LLILL uint32 = 0xA50F // FORMAT_RI1 LOAD LOGICAL IMMEDIATE (low low)
+ op_LM uint32 = 0x9800 // FORMAT_RS1 LOAD MULTIPLE (32)
+ op_LMD uint32 = 0xEF00 // FORMAT_SS5 LOAD MULTIPLE DISJOINT
+ op_LMG uint32 = 0xEB04 // FORMAT_RSY1 LOAD MULTIPLE (64)
+ op_LMH uint32 = 0xEB96 // FORMAT_RSY1 LOAD MULTIPLE HIGH
+ op_LMY uint32 = 0xEB98 // FORMAT_RSY1 LOAD MULTIPLE (32)
+ op_LNDBR uint32 = 0xB311 // FORMAT_RRE LOAD NEGATIVE (long BFP)
+ op_LNDFR uint32 = 0xB371 // FORMAT_RRE LOAD NEGATIVE (long)
+ op_LNDR uint32 = 0x2100 // FORMAT_RR LOAD NEGATIVE (long HFP)
+ op_LNEBR uint32 = 0xB301 // FORMAT_RRE LOAD NEGATIVE (short BFP)
+ op_LNER uint32 = 0x3100 // FORMAT_RR LOAD NEGATIVE (short HFP)
+ op_LNGFR uint32 = 0xB911 // FORMAT_RRE LOAD NEGATIVE (64<-32)
+ op_LNGR uint32 = 0xB901 // FORMAT_RRE LOAD NEGATIVE (64)
+ op_LNR uint32 = 0x1100 // FORMAT_RR LOAD NEGATIVE (32)
+ op_LNXBR uint32 = 0xB341 // FORMAT_RRE LOAD NEGATIVE (extended BFP)
+ op_LNXR uint32 = 0xB361 // FORMAT_RRE LOAD NEGATIVE (extended HFP)
+ op_LOC uint32 = 0xEBF2 // FORMAT_RSY2 LOAD ON CONDITION (32)
+ op_LOCG uint32 = 0xEBE2 // FORMAT_RSY2 LOAD ON CONDITION (64)
+ op_LOCGR uint32 = 0xB9E2 // FORMAT_RRF3 LOAD ON CONDITION (64)
+ op_LOCR uint32 = 0xB9F2 // FORMAT_RRF3 LOAD ON CONDITION (32)
+ op_LPD uint32 = 0xC804 // FORMAT_SSF LOAD PAIR DISJOINT (32)
+ op_LPDBR uint32 = 0xB310 // FORMAT_RRE LOAD POSITIVE (long BFP)
+ op_LPDFR uint32 = 0xB370 // FORMAT_RRE LOAD POSITIVE (long)
+ op_LPDG uint32 = 0xC805 // FORMAT_SSF LOAD PAIR DISJOINT (64)
+ op_LPDR uint32 = 0x2000 // FORMAT_RR LOAD POSITIVE (long HFP)
+ op_LPEBR uint32 = 0xB300 // FORMAT_RRE LOAD POSITIVE (short BFP)
+ op_LPER uint32 = 0x3000 // FORMAT_RR LOAD POSITIVE (short HFP)
+ op_LPGFR uint32 = 0xB910 // FORMAT_RRE LOAD POSITIVE (64<-32)
+ op_LPGR uint32 = 0xB900 // FORMAT_RRE LOAD POSITIVE (64)
+ op_LPQ uint32 = 0xE38F // FORMAT_RXY1 LOAD PAIR FROM QUADWORD
+ op_LPR uint32 = 0x1000 // FORMAT_RR LOAD POSITIVE (32)
+ op_LPSW uint32 = 0x8200 // FORMAT_S LOAD PSW
+ op_LPSWE uint32 = 0xB2B2 // FORMAT_S LOAD PSW EXTENDED
+ op_LPTEA uint32 = 0xB9AA // FORMAT_RRF2 LOAD PAGE TABLE ENTRY ADDRESS
+ op_LPXBR uint32 = 0xB340 // FORMAT_RRE LOAD POSITIVE (extended BFP)
+ op_LPXR uint32 = 0xB360 // FORMAT_RRE LOAD POSITIVE (extended HFP)
+ op_LR uint32 = 0x1800 // FORMAT_RR LOAD (32)
+ op_LRA uint32 = 0xB100 // FORMAT_RX1 LOAD REAL ADDRESS (32)
+ op_LRAG uint32 = 0xE303 // FORMAT_RXY1 LOAD REAL ADDRESS (64)
+ op_LRAY uint32 = 0xE313 // FORMAT_RXY1 LOAD REAL ADDRESS (32)
+ op_LRDR uint32 = 0x2500 // FORMAT_RR LOAD ROUNDED (extended to long HFP)
+ op_LRER uint32 = 0x3500 // FORMAT_RR LOAD ROUNDED (long to short HFP)
+ op_LRL uint32 = 0xC40D // FORMAT_RIL2 LOAD RELATIVE LONG (32)
+ op_LRV uint32 = 0xE31E // FORMAT_RXY1 LOAD REVERSED (32)
+ op_LRVG uint32 = 0xE30F // FORMAT_RXY1 LOAD REVERSED (64)
+ op_LRVGR uint32 = 0xB90F // FORMAT_RRE LOAD REVERSED (64)
+ op_LRVH uint32 = 0xE31F // FORMAT_RXY1 LOAD REVERSED (16)
+ op_LRVR uint32 = 0xB91F // FORMAT_RRE LOAD REVERSED (32)
+ op_LT uint32 = 0xE312 // FORMAT_RXY1 LOAD AND TEST (32)
+ op_LTDBR uint32 = 0xB312 // FORMAT_RRE LOAD AND TEST (long BFP)
+ op_LTDR uint32 = 0x2200 // FORMAT_RR LOAD AND TEST (long HFP)
+ op_LTDTR uint32 = 0xB3D6 // FORMAT_RRE LOAD AND TEST (long DFP)
+ op_LTEBR uint32 = 0xB302 // FORMAT_RRE LOAD AND TEST (short BFP)
+ op_LTER uint32 = 0x3200 // FORMAT_RR LOAD AND TEST (short HFP)
+ op_LTG uint32 = 0xE302 // FORMAT_RXY1 LOAD AND TEST (64)
+ op_LTGF uint32 = 0xE332 // FORMAT_RXY1 LOAD AND TEST (64<-32)
+ op_LTGFR uint32 = 0xB912 // FORMAT_RRE LOAD AND TEST (64<-32)
+ op_LTGR uint32 = 0xB902 // FORMAT_RRE LOAD AND TEST (64)
+ op_LTR uint32 = 0x1200 // FORMAT_RR LOAD AND TEST (32)
+ op_LTXBR uint32 = 0xB342 // FORMAT_RRE LOAD AND TEST (extended BFP)
+ op_LTXR uint32 = 0xB362 // FORMAT_RRE LOAD AND TEST (extended HFP)
+ op_LTXTR uint32 = 0xB3DE // FORMAT_RRE LOAD AND TEST (extended DFP)
+ op_LURA uint32 = 0xB24B // FORMAT_RRE LOAD USING REAL ADDRESS (32)
+ op_LURAG uint32 = 0xB905 // FORMAT_RRE LOAD USING REAL ADDRESS (64)
+ op_LXD uint32 = 0xED25 // FORMAT_RXE LOAD LENGTHENED (long to extended HFP)
+ op_LXDB uint32 = 0xED05 // FORMAT_RXE LOAD LENGTHENED (long to extended BFP)
+ op_LXDBR uint32 = 0xB305 // FORMAT_RRE LOAD LENGTHENED (long to extended BFP)
+ op_LXDR uint32 = 0xB325 // FORMAT_RRE LOAD LENGTHENED (long to extended HFP)
+ op_LXDTR uint32 = 0xB3DC // FORMAT_RRF4 LOAD LENGTHENED (long to extended DFP)
+ op_LXE uint32 = 0xED26 // FORMAT_RXE LOAD LENGTHENED (short to extended HFP)
+ op_LXEB uint32 = 0xED06 // FORMAT_RXE LOAD LENGTHENED (short to extended BFP)
+ op_LXEBR uint32 = 0xB306 // FORMAT_RRE LOAD LENGTHENED (short to extended BFP)
+ op_LXER uint32 = 0xB326 // FORMAT_RRE LOAD LENGTHENED (short to extended HFP)
+ op_LXR uint32 = 0xB365 // FORMAT_RRE LOAD (extended)
+ op_LY uint32 = 0xE358 // FORMAT_RXY1 LOAD (32)
+ op_LZDR uint32 = 0xB375 // FORMAT_RRE LOAD ZERO (long)
+ op_LZER uint32 = 0xB374 // FORMAT_RRE LOAD ZERO (short)
+ op_LZXR uint32 = 0xB376 // FORMAT_RRE LOAD ZERO (extended)
+ op_M uint32 = 0x5C00 // FORMAT_RX1 MULTIPLY (64<-32)
+ op_MAD uint32 = 0xED3E // FORMAT_RXF MULTIPLY AND ADD (long HFP)
+ op_MADB uint32 = 0xED1E // FORMAT_RXF MULTIPLY AND ADD (long BFP)
+ op_MADBR uint32 = 0xB31E // FORMAT_RRD MULTIPLY AND ADD (long BFP)
+ op_MADR uint32 = 0xB33E // FORMAT_RRD MULTIPLY AND ADD (long HFP)
+ op_MAE uint32 = 0xED2E // FORMAT_RXF MULTIPLY AND ADD (short HFP)
+ op_MAEB uint32 = 0xED0E // FORMAT_RXF MULTIPLY AND ADD (short BFP)
+ op_MAEBR uint32 = 0xB30E // FORMAT_RRD MULTIPLY AND ADD (short BFP)
+ op_MAER uint32 = 0xB32E // FORMAT_RRD MULTIPLY AND ADD (short HFP)
+ op_MAY uint32 = 0xED3A // FORMAT_RXF MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
+ op_MAYH uint32 = 0xED3C // FORMAT_RXF MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
+ op_MAYHR uint32 = 0xB33C // FORMAT_RRD MULTIPLY AND ADD UNNRM. (long to ext. high HFP)
+ op_MAYL uint32 = 0xED38 // FORMAT_RXF MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
+ op_MAYLR uint32 = 0xB338 // FORMAT_RRD MULTIPLY AND ADD UNNRM. (long to ext. low HFP)
+ op_MAYR uint32 = 0xB33A // FORMAT_RRD MULTIPLY & ADD UNNORMALIZED (long to ext. HFP)
+ op_MC uint32 = 0xAF00 // FORMAT_SI MONITOR CALL
+ op_MD uint32 = 0x6C00 // FORMAT_RX1 MULTIPLY (long HFP)
+ op_MDB uint32 = 0xED1C // FORMAT_RXE MULTIPLY (long BFP)
+ op_MDBR uint32 = 0xB31C // FORMAT_RRE MULTIPLY (long BFP)
+ op_MDE uint32 = 0x7C00 // FORMAT_RX1 MULTIPLY (short to long HFP)
+ op_MDEB uint32 = 0xED0C // FORMAT_RXE MULTIPLY (short to long BFP)
+ op_MDEBR uint32 = 0xB30C // FORMAT_RRE MULTIPLY (short to long BFP)
+ op_MDER uint32 = 0x3C00 // FORMAT_RR MULTIPLY (short to long HFP)
+ op_MDR uint32 = 0x2C00 // FORMAT_RR MULTIPLY (long HFP)
+ op_MDTR uint32 = 0xB3D0 // FORMAT_RRF1 MULTIPLY (long DFP)
+ op_MDTRA uint32 = 0xB3D0 // FORMAT_RRF1 MULTIPLY (long DFP)
+ op_ME uint32 = 0x7C00 // FORMAT_RX1 MULTIPLY (short to long HFP)
+ op_MEE uint32 = 0xED37 // FORMAT_RXE MULTIPLY (short HFP)
+ op_MEEB uint32 = 0xED17 // FORMAT_RXE MULTIPLY (short BFP)
+ op_MEEBR uint32 = 0xB317 // FORMAT_RRE MULTIPLY (short BFP)
+ op_MEER uint32 = 0xB337 // FORMAT_RRE MULTIPLY (short HFP)
+ op_MER uint32 = 0x3C00 // FORMAT_RR MULTIPLY (short to long HFP)
+ op_MFY uint32 = 0xE35C // FORMAT_RXY1 MULTIPLY (64<-32)
+ op_MGHI uint32 = 0xA70D // FORMAT_RI1 MULTIPLY HALFWORD IMMEDIATE (64)
+ op_MH uint32 = 0x4C00 // FORMAT_RX1 MULTIPLY HALFWORD (32)
+ op_MHI uint32 = 0xA70C // FORMAT_RI1 MULTIPLY HALFWORD IMMEDIATE (32)
+ op_MHY uint32 = 0xE37C // FORMAT_RXY1 MULTIPLY HALFWORD (32)
+ op_ML uint32 = 0xE396 // FORMAT_RXY1 MULTIPLY LOGICAL (64<-32)
+ op_MLG uint32 = 0xE386 // FORMAT_RXY1 MULTIPLY LOGICAL (128<-64)
+ op_MLGR uint32 = 0xB986 // FORMAT_RRE MULTIPLY LOGICAL (128<-64)
+ op_MLR uint32 = 0xB996 // FORMAT_RRE MULTIPLY LOGICAL (64<-32)
+ op_MP uint32 = 0xFC00 // FORMAT_SS2 MULTIPLY DECIMAL
+ op_MR uint32 = 0x1C00 // FORMAT_RR MULTIPLY (64<-32)
+ op_MS uint32 = 0x7100 // FORMAT_RX1 MULTIPLY SINGLE (32)
+ op_MSCH uint32 = 0xB232 // FORMAT_S MODIFY SUBCHANNEL
+ op_MSD uint32 = 0xED3F // FORMAT_RXF MULTIPLY AND SUBTRACT (long HFP)
+ op_MSDB uint32 = 0xED1F // FORMAT_RXF MULTIPLY AND SUBTRACT (long BFP)
+ op_MSDBR uint32 = 0xB31F // FORMAT_RRD MULTIPLY AND SUBTRACT (long BFP)
+ op_MSDR uint32 = 0xB33F // FORMAT_RRD MULTIPLY AND SUBTRACT (long HFP)
+ op_MSE uint32 = 0xED2F // FORMAT_RXF MULTIPLY AND SUBTRACT (short HFP)
+ op_MSEB uint32 = 0xED0F // FORMAT_RXF MULTIPLY AND SUBTRACT (short BFP)
+ op_MSEBR uint32 = 0xB30F // FORMAT_RRD MULTIPLY AND SUBTRACT (short BFP)
+ op_MSER uint32 = 0xB32F // FORMAT_RRD MULTIPLY AND SUBTRACT (short HFP)
+ op_MSFI uint32 = 0xC201 // FORMAT_RIL1 MULTIPLY SINGLE IMMEDIATE (32)
+ op_MSG uint32 = 0xE30C // FORMAT_RXY1 MULTIPLY SINGLE (64)
+ op_MSGF uint32 = 0xE31C // FORMAT_RXY1 MULTIPLY SINGLE (64<-32)
+ op_MSGFI uint32 = 0xC200 // FORMAT_RIL1 MULTIPLY SINGLE IMMEDIATE (64<-32)
+ op_MSGFR uint32 = 0xB91C // FORMAT_RRE MULTIPLY SINGLE (64<-32)
+ op_MSGR uint32 = 0xB90C // FORMAT_RRE MULTIPLY SINGLE (64)
+ op_MSR uint32 = 0xB252 // FORMAT_RRE MULTIPLY SINGLE (32)
+ op_MSTA uint32 = 0xB247 // FORMAT_RRE MODIFY STACKED STATE
+ op_MSY uint32 = 0xE351 // FORMAT_RXY1 MULTIPLY SINGLE (32)
+ op_MVC uint32 = 0xD200 // FORMAT_SS1 MOVE (character)
+ op_MVCDK uint32 = 0xE50F // FORMAT_SSE MOVE WITH DESTINATION KEY
+ op_MVCIN uint32 = 0xE800 // FORMAT_SS1 MOVE INVERSE
+ op_MVCK uint32 = 0xD900 // FORMAT_SS4 MOVE WITH KEY
+ op_MVCL uint32 = 0x0E00 // FORMAT_RR MOVE LONG
+ op_MVCLE uint32 = 0xA800 // FORMAT_RS1 MOVE LONG EXTENDED
+ op_MVCLU uint32 = 0xEB8E // FORMAT_RSY1 MOVE LONG UNICODE
+ op_MVCOS uint32 = 0xC800 // FORMAT_SSF MOVE WITH OPTIONAL SPECIFICATIONS
+ op_MVCP uint32 = 0xDA00 // FORMAT_SS4 MOVE TO PRIMARY
+ op_MVCS uint32 = 0xDB00 // FORMAT_SS4 MOVE TO SECONDARY
+ op_MVCSK uint32 = 0xE50E // FORMAT_SSE MOVE WITH SOURCE KEY
+ op_MVGHI uint32 = 0xE548 // FORMAT_SIL MOVE (64<-16)
+ op_MVHHI uint32 = 0xE544 // FORMAT_SIL MOVE (16<-16)
+ op_MVHI uint32 = 0xE54C // FORMAT_SIL MOVE (32<-16)
+ op_MVI uint32 = 0x9200 // FORMAT_SI MOVE (immediate)
+ op_MVIY uint32 = 0xEB52 // FORMAT_SIY MOVE (immediate)
+ op_MVN uint32 = 0xD100 // FORMAT_SS1 MOVE NUMERICS
+ op_MVO uint32 = 0xF100 // FORMAT_SS2 MOVE WITH OFFSET
+ op_MVPG uint32 = 0xB254 // FORMAT_RRE MOVE PAGE
+ op_MVST uint32 = 0xB255 // FORMAT_RRE MOVE STRING
+ op_MVZ uint32 = 0xD300 // FORMAT_SS1 MOVE ZONES
+ op_MXBR uint32 = 0xB34C // FORMAT_RRE MULTIPLY (extended BFP)
+ op_MXD uint32 = 0x6700 // FORMAT_RX1 MULTIPLY (long to extended HFP)
+ op_MXDB uint32 = 0xED07 // FORMAT_RXE MULTIPLY (long to extended BFP)
+ op_MXDBR uint32 = 0xB307 // FORMAT_RRE MULTIPLY (long to extended BFP)
+ op_MXDR uint32 = 0x2700 // FORMAT_RR MULTIPLY (long to extended HFP)
+ op_MXR uint32 = 0x2600 // FORMAT_RR MULTIPLY (extended HFP)
+ op_MXTR uint32 = 0xB3D8 // FORMAT_RRF1 MULTIPLY (extended DFP)
+ op_MXTRA uint32 = 0xB3D8 // FORMAT_RRF1 MULTIPLY (extended DFP)
+ op_MY uint32 = 0xED3B // FORMAT_RXF MULTIPLY UNNORMALIZED (long to ext. HFP)
+ op_MYH uint32 = 0xED3D // FORMAT_RXF MULTIPLY UNNORM. (long to ext. high HFP)
+ op_MYHR uint32 = 0xB33D // FORMAT_RRD MULTIPLY UNNORM. (long to ext. high HFP)
+ op_MYL uint32 = 0xED39 // FORMAT_RXF MULTIPLY UNNORM. (long to ext. low HFP)
+ op_MYLR uint32 = 0xB339 // FORMAT_RRD MULTIPLY UNNORM. (long to ext. low HFP)
+ op_MYR uint32 = 0xB33B // FORMAT_RRD MULTIPLY UNNORMALIZED (long to ext. HFP)
+ op_N uint32 = 0x5400 // FORMAT_RX1 AND (32)
+ op_NC uint32 = 0xD400 // FORMAT_SS1 AND (character)
+ op_NG uint32 = 0xE380 // FORMAT_RXY1 AND (64)
+ op_NGR uint32 = 0xB980 // FORMAT_RRE AND (64)
+ op_NGRK uint32 = 0xB9E4 // FORMAT_RRF1 AND (64)
+ op_NI uint32 = 0x9400 // FORMAT_SI AND (immediate)
+ op_NIAI uint32 = 0xB2FA // FORMAT_IE NEXT INSTRUCTION ACCESS INTENT
+ op_NIHF uint32 = 0xC00A // FORMAT_RIL1 AND IMMEDIATE (high)
+ op_NIHH uint32 = 0xA504 // FORMAT_RI1 AND IMMEDIATE (high high)
+ op_NIHL uint32 = 0xA505 // FORMAT_RI1 AND IMMEDIATE (high low)
+ op_NILF uint32 = 0xC00B // FORMAT_RIL1 AND IMMEDIATE (low)
+ op_NILH uint32 = 0xA506 // FORMAT_RI1 AND IMMEDIATE (low high)
+ op_NILL uint32 = 0xA507 // FORMAT_RI1 AND IMMEDIATE (low low)
+ op_NIY uint32 = 0xEB54 // FORMAT_SIY AND (immediate)
+ op_NR uint32 = 0x1400 // FORMAT_RR AND (32)
+ op_NRK uint32 = 0xB9F4 // FORMAT_RRF1 AND (32)
+ op_NTSTG uint32 = 0xE325 // FORMAT_RXY1 NONTRANSACTIONAL STORE
+ op_NY uint32 = 0xE354 // FORMAT_RXY1 AND (32)
+ op_O uint32 = 0x5600 // FORMAT_RX1 OR (32)
+ op_OC uint32 = 0xD600 // FORMAT_SS1 OR (character)
+ op_OG uint32 = 0xE381 // FORMAT_RXY1 OR (64)
+ op_OGR uint32 = 0xB981 // FORMAT_RRE OR (64)
+ op_OGRK uint32 = 0xB9E6 // FORMAT_RRF1 OR (64)
+ op_OI uint32 = 0x9600 // FORMAT_SI OR (immediate)
+ op_OIHF uint32 = 0xC00C // FORMAT_RIL1 OR IMMEDIATE (high)
+ op_OIHH uint32 = 0xA508 // FORMAT_RI1 OR IMMEDIATE (high high)
+ op_OIHL uint32 = 0xA509 // FORMAT_RI1 OR IMMEDIATE (high low)
+ op_OILF uint32 = 0xC00D // FORMAT_RIL1 OR IMMEDIATE (low)
+ op_OILH uint32 = 0xA50A // FORMAT_RI1 OR IMMEDIATE (low high)
+ op_OILL uint32 = 0xA50B // FORMAT_RI1 OR IMMEDIATE (low low)
+ op_OIY uint32 = 0xEB56 // FORMAT_SIY OR (immediate)
+ op_OR uint32 = 0x1600 // FORMAT_RR OR (32)
+ op_ORK uint32 = 0xB9F6 // FORMAT_RRF1 OR (32)
+ op_OY uint32 = 0xE356 // FORMAT_RXY1 OR (32)
+ op_PACK uint32 = 0xF200 // FORMAT_SS2 PACK
+ op_PALB uint32 = 0xB248 // FORMAT_RRE PURGE ALB
+ op_PC uint32 = 0xB218 // FORMAT_S PROGRAM CALL
+ op_PCC uint32 = 0xB92C // FORMAT_RRE PERFORM CRYPTOGRAPHIC COMPUTATION
+ op_PCKMO uint32 = 0xB928 // FORMAT_RRE PERFORM CRYPTOGRAPHIC KEY MGMT. OPERATIONS
+ op_PFD uint32 = 0xE336 // FORMAT_RXY2 PREFETCH DATA
+ op_PFDRL uint32 = 0xC602 // FORMAT_RIL3 PREFETCH DATA RELATIVE LONG
+ op_PFMF uint32 = 0xB9AF // FORMAT_RRE PERFORM FRAME MANAGEMENT FUNCTION
+ op_PFPO uint32 = 0x010A // FORMAT_E PERFORM FLOATING-POINT OPERATION
+ op_PGIN uint32 = 0xB22E // FORMAT_RRE PAGE IN
+ op_PGOUT uint32 = 0xB22F // FORMAT_RRE PAGE OUT
+ op_PKA uint32 = 0xE900 // FORMAT_SS6 PACK ASCII
+ op_PKU uint32 = 0xE100 // FORMAT_SS6 PACK UNICODE
+ op_PLO uint32 = 0xEE00 // FORMAT_SS5 PERFORM LOCKED OPERATION
+ op_POPCNT uint32 = 0xB9E1 // FORMAT_RRE POPULATION COUNT
+ op_PPA uint32 = 0xB2E8 // FORMAT_RRF3 PERFORM PROCESSOR ASSIST
+ op_PR uint32 = 0x0101 // FORMAT_E PROGRAM RETURN
+ op_PT uint32 = 0xB228 // FORMAT_RRE PROGRAM TRANSFER
+ op_PTF uint32 = 0xB9A2 // FORMAT_RRE PERFORM TOPOLOGY FUNCTION
+ op_PTFF uint32 = 0x0104 // FORMAT_E PERFORM TIMING FACILITY FUNCTION
+ op_PTI uint32 = 0xB99E // FORMAT_RRE PROGRAM TRANSFER WITH INSTANCE
+ op_PTLB uint32 = 0xB20D // FORMAT_S PURGE TLB
+ op_QADTR uint32 = 0xB3F5 // FORMAT_RRF2 QUANTIZE (long DFP)
+ op_QAXTR uint32 = 0xB3FD // FORMAT_RRF2 QUANTIZE (extended DFP)
+ op_RCHP uint32 = 0xB23B // FORMAT_S RESET CHANNEL PATH
+ op_RISBG uint32 = 0xEC55 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS
+ op_RISBGN uint32 = 0xEC59 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS
+ op_RISBHG uint32 = 0xEC5D // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS HIGH
+ op_RISBLG uint32 = 0xEC51 // FORMAT_RIE6 ROTATE THEN INSERT SELECTED BITS LOW
+ op_RLL uint32 = 0xEB1D // FORMAT_RSY1 ROTATE LEFT SINGLE LOGICAL (32)
+ op_RLLG uint32 = 0xEB1C // FORMAT_RSY1 ROTATE LEFT SINGLE LOGICAL (64)
+ op_RNSBG uint32 = 0xEC54 // FORMAT_RIE6 ROTATE THEN AND SELECTED BITS
+ op_ROSBG uint32 = 0xEC56 // FORMAT_RIE6 ROTATE THEN OR SELECTED BITS
+ op_RP uint32 = 0xB277 // FORMAT_S RESUME PROGRAM
+ op_RRBE uint32 = 0xB22A // FORMAT_RRE RESET REFERENCE BIT EXTENDED
+ op_RRBM uint32 = 0xB9AE // FORMAT_RRE RESET REFERENCE BITS MULTIPLE
+ op_RRDTR uint32 = 0xB3F7 // FORMAT_RRF2 REROUND (long DFP)
+ op_RRXTR uint32 = 0xB3FF // FORMAT_RRF2 REROUND (extended DFP)
+ op_RSCH uint32 = 0xB238 // FORMAT_S RESUME SUBCHANNEL
+ op_RXSBG uint32 = 0xEC57 // FORMAT_RIE6 ROTATE THEN EXCLUSIVE OR SELECTED BITS
+ op_S uint32 = 0x5B00 // FORMAT_RX1 SUBTRACT (32)
+ op_SAC uint32 = 0xB219 // FORMAT_S SET ADDRESS SPACE CONTROL
+ op_SACF uint32 = 0xB279 // FORMAT_S SET ADDRESS SPACE CONTROL FAST
+ op_SAL uint32 = 0xB237 // FORMAT_S SET ADDRESS LIMIT
+ op_SAM24 uint32 = 0x010C // FORMAT_E SET ADDRESSING MODE (24)
+ op_SAM31 uint32 = 0x010D // FORMAT_E SET ADDRESSING MODE (31)
+ op_SAM64 uint32 = 0x010E // FORMAT_E SET ADDRESSING MODE (64)
+ op_SAR uint32 = 0xB24E // FORMAT_RRE SET ACCESS
+ op_SCHM uint32 = 0xB23C // FORMAT_S SET CHANNEL MONITOR
+ op_SCK uint32 = 0xB204 // FORMAT_S SET CLOCK
+ op_SCKC uint32 = 0xB206 // FORMAT_S SET CLOCK COMPARATOR
+ op_SCKPF uint32 = 0x0107 // FORMAT_E SET CLOCK PROGRAMMABLE FIELD
+ op_SD uint32 = 0x6B00 // FORMAT_RX1 SUBTRACT NORMALIZED (long HFP)
+ op_SDB uint32 = 0xED1B // FORMAT_RXE SUBTRACT (long BFP)
+ op_SDBR uint32 = 0xB31B // FORMAT_RRE SUBTRACT (long BFP)
+ op_SDR uint32 = 0x2B00 // FORMAT_RR SUBTRACT NORMALIZED (long HFP)
+ op_SDTR uint32 = 0xB3D3 // FORMAT_RRF1 SUBTRACT (long DFP)
+ op_SDTRA uint32 = 0xB3D3 // FORMAT_RRF1 SUBTRACT (long DFP)
+ op_SE uint32 = 0x7B00 // FORMAT_RX1 SUBTRACT NORMALIZED (short HFP)
+ op_SEB uint32 = 0xED0B // FORMAT_RXE SUBTRACT (short BFP)
+ op_SEBR uint32 = 0xB30B // FORMAT_RRE SUBTRACT (short BFP)
+ op_SER uint32 = 0x3B00 // FORMAT_RR SUBTRACT NORMALIZED (short HFP)
+ op_SFASR uint32 = 0xB385 // FORMAT_RRE SET FPC AND SIGNAL
+ op_SFPC uint32 = 0xB384 // FORMAT_RRE SET FPC
+ op_SG uint32 = 0xE309 // FORMAT_RXY1 SUBTRACT (64)
+ op_SGF uint32 = 0xE319 // FORMAT_RXY1 SUBTRACT (64<-32)
+ op_SGFR uint32 = 0xB919 // FORMAT_RRE SUBTRACT (64<-32)
+ op_SGR uint32 = 0xB909 // FORMAT_RRE SUBTRACT (64)
+ op_SGRK uint32 = 0xB9E9 // FORMAT_RRF1 SUBTRACT (64)
+ op_SH uint32 = 0x4B00 // FORMAT_RX1 SUBTRACT HALFWORD
+ op_SHHHR uint32 = 0xB9C9 // FORMAT_RRF1 SUBTRACT HIGH (32)
+ op_SHHLR uint32 = 0xB9D9 // FORMAT_RRF1 SUBTRACT HIGH (32)
+ op_SHY uint32 = 0xE37B // FORMAT_RXY1 SUBTRACT HALFWORD
+ op_SIGP uint32 = 0xAE00 // FORMAT_RS1 SIGNAL PROCESSOR
+ op_SL uint32 = 0x5F00 // FORMAT_RX1 SUBTRACT LOGICAL (32)
+ op_SLA uint32 = 0x8B00 // FORMAT_RS1 SHIFT LEFT SINGLE (32)
+ op_SLAG uint32 = 0xEB0B // FORMAT_RSY1 SHIFT LEFT SINGLE (64)
+ op_SLAK uint32 = 0xEBDD // FORMAT_RSY1 SHIFT LEFT SINGLE (32)
+ op_SLB uint32 = 0xE399 // FORMAT_RXY1 SUBTRACT LOGICAL WITH BORROW (32)
+ op_SLBG uint32 = 0xE389 // FORMAT_RXY1 SUBTRACT LOGICAL WITH BORROW (64)
+ op_SLBGR uint32 = 0xB989 // FORMAT_RRE SUBTRACT LOGICAL WITH BORROW (64)
+ op_SLBR uint32 = 0xB999 // FORMAT_RRE SUBTRACT LOGICAL WITH BORROW (32)
+ op_SLDA uint32 = 0x8F00 // FORMAT_RS1 SHIFT LEFT DOUBLE
+ op_SLDL uint32 = 0x8D00 // FORMAT_RS1 SHIFT LEFT DOUBLE LOGICAL
+ op_SLDT uint32 = 0xED40 // FORMAT_RXF SHIFT SIGNIFICAND LEFT (long DFP)
+ op_SLFI uint32 = 0xC205 // FORMAT_RIL1 SUBTRACT LOGICAL IMMEDIATE (32)
+ op_SLG uint32 = 0xE30B // FORMAT_RXY1 SUBTRACT LOGICAL (64)
+ op_SLGF uint32 = 0xE31B // FORMAT_RXY1 SUBTRACT LOGICAL (64<-32)
+ op_SLGFI uint32 = 0xC204 // FORMAT_RIL1 SUBTRACT LOGICAL IMMEDIATE (64<-32)
+ op_SLGFR uint32 = 0xB91B // FORMAT_RRE SUBTRACT LOGICAL (64<-32)
+ op_SLGR uint32 = 0xB90B // FORMAT_RRE SUBTRACT LOGICAL (64)
+ op_SLGRK uint32 = 0xB9EB // FORMAT_RRF1 SUBTRACT LOGICAL (64)
+ op_SLHHHR uint32 = 0xB9CB // FORMAT_RRF1 SUBTRACT LOGICAL HIGH (32)
+ op_SLHHLR uint32 = 0xB9DB // FORMAT_RRF1 SUBTRACT LOGICAL HIGH (32)
+ op_SLL uint32 = 0x8900 // FORMAT_RS1 SHIFT LEFT SINGLE LOGICAL (32)
+ op_SLLG uint32 = 0xEB0D // FORMAT_RSY1 SHIFT LEFT SINGLE LOGICAL (64)
+ op_SLLK uint32 = 0xEBDF // FORMAT_RSY1 SHIFT LEFT SINGLE LOGICAL (32)
+ op_SLR uint32 = 0x1F00 // FORMAT_RR SUBTRACT LOGICAL (32)
+ op_SLRK uint32 = 0xB9FB // FORMAT_RRF1 SUBTRACT LOGICAL (32)
+ op_SLXT uint32 = 0xED48 // FORMAT_RXF SHIFT SIGNIFICAND LEFT (extended DFP)
+ op_SLY uint32 = 0xE35F // FORMAT_RXY1 SUBTRACT LOGICAL (32)
+ op_SP uint32 = 0xFB00 // FORMAT_SS2 SUBTRACT DECIMAL
+ op_SPKA uint32 = 0xB20A // FORMAT_S SET PSW KEY FROM ADDRESS
+ op_SPM uint32 = 0x0400 // FORMAT_RR SET PROGRAM MASK
+ op_SPT uint32 = 0xB208 // FORMAT_S SET CPU TIMER
+ op_SPX uint32 = 0xB210 // FORMAT_S SET PREFIX
+ op_SQD uint32 = 0xED35 // FORMAT_RXE SQUARE ROOT (long HFP)
+ op_SQDB uint32 = 0xED15 // FORMAT_RXE SQUARE ROOT (long BFP)
+ op_SQDBR uint32 = 0xB315 // FORMAT_RRE SQUARE ROOT (long BFP)
+ op_SQDR uint32 = 0xB244 // FORMAT_RRE SQUARE ROOT (long HFP)
+ op_SQE uint32 = 0xED34 // FORMAT_RXE SQUARE ROOT (short HFP)
+ op_SQEB uint32 = 0xED14 // FORMAT_RXE SQUARE ROOT (short BFP)
+ op_SQEBR uint32 = 0xB314 // FORMAT_RRE SQUARE ROOT (short BFP)
+ op_SQER uint32 = 0xB245 // FORMAT_RRE SQUARE ROOT (short HFP)
+ op_SQXBR uint32 = 0xB316 // FORMAT_RRE SQUARE ROOT (extended BFP)
+ op_SQXR uint32 = 0xB336 // FORMAT_RRE SQUARE ROOT (extended HFP)
+ op_SR uint32 = 0x1B00 // FORMAT_RR SUBTRACT (32)
+ op_SRA uint32 = 0x8A00 // FORMAT_RS1 SHIFT RIGHT SINGLE (32)
+ op_SRAG uint32 = 0xEB0A // FORMAT_RSY1 SHIFT RIGHT SINGLE (64)
+ op_SRAK uint32 = 0xEBDC // FORMAT_RSY1 SHIFT RIGHT SINGLE (32)
+ op_SRDA uint32 = 0x8E00 // FORMAT_RS1 SHIFT RIGHT DOUBLE
+ op_SRDL uint32 = 0x8C00 // FORMAT_RS1 SHIFT RIGHT DOUBLE LOGICAL
+ op_SRDT uint32 = 0xED41 // FORMAT_RXF SHIFT SIGNIFICAND RIGHT (long DFP)
+ op_SRK uint32 = 0xB9F9 // FORMAT_RRF1 SUBTRACT (32)
+ op_SRL uint32 = 0x8800 // FORMAT_RS1 SHIFT RIGHT SINGLE LOGICAL (32)
+ op_SRLG uint32 = 0xEB0C // FORMAT_RSY1 SHIFT RIGHT SINGLE LOGICAL (64)
+ op_SRLK uint32 = 0xEBDE // FORMAT_RSY1 SHIFT RIGHT SINGLE LOGICAL (32)
+ op_SRNM uint32 = 0xB299 // FORMAT_S SET BFP ROUNDING MODE (2 bit)
+ op_SRNMB uint32 = 0xB2B8 // FORMAT_S SET BFP ROUNDING MODE (3 bit)
+ op_SRNMT uint32 = 0xB2B9 // FORMAT_S SET DFP ROUNDING MODE
+ op_SRP uint32 = 0xF000 // FORMAT_SS3 SHIFT AND ROUND DECIMAL
+ op_SRST uint32 = 0xB25E // FORMAT_RRE SEARCH STRING
+ op_SRSTU uint32 = 0xB9BE // FORMAT_RRE SEARCH STRING UNICODE
+ op_SRXT uint32 = 0xED49 // FORMAT_RXF SHIFT SIGNIFICAND RIGHT (extended DFP)
+ op_SSAIR uint32 = 0xB99F // FORMAT_RRE SET SECONDARY ASN WITH INSTANCE
+ op_SSAR uint32 = 0xB225 // FORMAT_RRE SET SECONDARY ASN
+ op_SSCH uint32 = 0xB233 // FORMAT_S START SUBCHANNEL
+ op_SSKE uint32 = 0xB22B // FORMAT_RRF3 SET STORAGE KEY EXTENDED
+ op_SSM uint32 = 0x8000 // FORMAT_S SET SYSTEM MASK
+ op_ST uint32 = 0x5000 // FORMAT_RX1 STORE (32)
+ op_STAM uint32 = 0x9B00 // FORMAT_RS1 STORE ACCESS MULTIPLE
+ op_STAMY uint32 = 0xEB9B // FORMAT_RSY1 STORE ACCESS MULTIPLE
+ op_STAP uint32 = 0xB212 // FORMAT_S STORE CPU ADDRESS
+ op_STC uint32 = 0x4200 // FORMAT_RX1 STORE CHARACTER
+ op_STCH uint32 = 0xE3C3 // FORMAT_RXY1 STORE CHARACTER HIGH (8)
+ op_STCK uint32 = 0xB205 // FORMAT_S STORE CLOCK
+ op_STCKC uint32 = 0xB207 // FORMAT_S STORE CLOCK COMPARATOR
+ op_STCKE uint32 = 0xB278 // FORMAT_S STORE CLOCK EXTENDED
+ op_STCKF uint32 = 0xB27C // FORMAT_S STORE CLOCK FAST
+ op_STCM uint32 = 0xBE00 // FORMAT_RS2 STORE CHARACTERS UNDER MASK (low)
+ op_STCMH uint32 = 0xEB2C // FORMAT_RSY2 STORE CHARACTERS UNDER MASK (high)
+ op_STCMY uint32 = 0xEB2D // FORMAT_RSY2 STORE CHARACTERS UNDER MASK (low)
+ op_STCPS uint32 = 0xB23A // FORMAT_S STORE CHANNEL PATH STATUS
+ op_STCRW uint32 = 0xB239 // FORMAT_S STORE CHANNEL REPORT WORD
+ op_STCTG uint32 = 0xEB25 // FORMAT_RSY1 STORE CONTROL (64)
+ op_STCTL uint32 = 0xB600 // FORMAT_RS1 STORE CONTROL (32)
+ op_STCY uint32 = 0xE372 // FORMAT_RXY1 STORE CHARACTER
+ op_STD uint32 = 0x6000 // FORMAT_RX1 STORE (long)
+ op_STDY uint32 = 0xED67 // FORMAT_RXY1 STORE (long)
+ op_STE uint32 = 0x7000 // FORMAT_RX1 STORE (short)
+ op_STEY uint32 = 0xED66 // FORMAT_RXY1 STORE (short)
+ op_STFH uint32 = 0xE3CB // FORMAT_RXY1 STORE HIGH (32)
+ op_STFL uint32 = 0xB2B1 // FORMAT_S STORE FACILITY LIST
+ op_STFLE uint32 = 0xB2B0 // FORMAT_S STORE FACILITY LIST EXTENDED
+ op_STFPC uint32 = 0xB29C // FORMAT_S STORE FPC
+ op_STG uint32 = 0xE324 // FORMAT_RXY1 STORE (64)
+ op_STGRL uint32 = 0xC40B // FORMAT_RIL2 STORE RELATIVE LONG (64)
+ op_STH uint32 = 0x4000 // FORMAT_RX1 STORE HALFWORD
+ op_STHH uint32 = 0xE3C7 // FORMAT_RXY1 STORE HALFWORD HIGH (16)
+ op_STHRL uint32 = 0xC407 // FORMAT_RIL2 STORE HALFWORD RELATIVE LONG
+ op_STHY uint32 = 0xE370 // FORMAT_RXY1 STORE HALFWORD
+ op_STIDP uint32 = 0xB202 // FORMAT_S STORE CPU ID
+ op_STM uint32 = 0x9000 // FORMAT_RS1 STORE MULTIPLE (32)
+ op_STMG uint32 = 0xEB24 // FORMAT_RSY1 STORE MULTIPLE (64)
+ op_STMH uint32 = 0xEB26 // FORMAT_RSY1 STORE MULTIPLE HIGH
+ op_STMY uint32 = 0xEB90 // FORMAT_RSY1 STORE MULTIPLE (32)
+ op_STNSM uint32 = 0xAC00 // FORMAT_SI STORE THEN AND SYSTEM MASK
+ op_STOC uint32 = 0xEBF3 // FORMAT_RSY2 STORE ON CONDITION (32)
+ op_STOCG uint32 = 0xEBE3 // FORMAT_RSY2 STORE ON CONDITION (64)
+ op_STOSM uint32 = 0xAD00 // FORMAT_SI STORE THEN OR SYSTEM MASK
+ op_STPQ uint32 = 0xE38E // FORMAT_RXY1 STORE PAIR TO QUADWORD
+ op_STPT uint32 = 0xB209 // FORMAT_S STORE CPU TIMER
+ op_STPX uint32 = 0xB211 // FORMAT_S STORE PREFIX
+ op_STRAG uint32 = 0xE502 // FORMAT_SSE STORE REAL ADDRESS
+ op_STRL uint32 = 0xC40F // FORMAT_RIL2 STORE RELATIVE LONG (32)
+ op_STRV uint32 = 0xE33E // FORMAT_RXY1 STORE REVERSED (32)
+ op_STRVG uint32 = 0xE32F // FORMAT_RXY1 STORE REVERSED (64)
+ op_STRVH uint32 = 0xE33F // FORMAT_RXY1 STORE REVERSED (16)
+ op_STSCH uint32 = 0xB234 // FORMAT_S STORE SUBCHANNEL
+ op_STSI uint32 = 0xB27D // FORMAT_S STORE SYSTEM INFORMATION
+ op_STURA uint32 = 0xB246 // FORMAT_RRE STORE USING REAL ADDRESS (32)
+ op_STURG uint32 = 0xB925 // FORMAT_RRE STORE USING REAL ADDRESS (64)
+ op_STY uint32 = 0xE350 // FORMAT_RXY1 STORE (32)
+ op_SU uint32 = 0x7F00 // FORMAT_RX1 SUBTRACT UNNORMALIZED (short HFP)
+ op_SUR uint32 = 0x3F00 // FORMAT_RR SUBTRACT UNNORMALIZED (short HFP)
+ op_SVC uint32 = 0x0A00 // FORMAT_I SUPERVISOR CALL
+ op_SW uint32 = 0x6F00 // FORMAT_RX1 SUBTRACT UNNORMALIZED (long HFP)
+ op_SWR uint32 = 0x2F00 // FORMAT_RR SUBTRACT UNNORMALIZED (long HFP)
+ op_SXBR uint32 = 0xB34B // FORMAT_RRE SUBTRACT (extended BFP)
+ op_SXR uint32 = 0x3700 // FORMAT_RR SUBTRACT NORMALIZED (extended HFP)
+ op_SXTR uint32 = 0xB3DB // FORMAT_RRF1 SUBTRACT (extended DFP)
+ op_SXTRA uint32 = 0xB3DB // FORMAT_RRF1 SUBTRACT (extended DFP)
+ op_SY uint32 = 0xE35B // FORMAT_RXY1 SUBTRACT (32)
+ op_TABORT uint32 = 0xB2FC // FORMAT_S TRANSACTION ABORT
+ op_TAM uint32 = 0x010B // FORMAT_E TEST ADDRESSING MODE
+ op_TAR uint32 = 0xB24C // FORMAT_RRE TEST ACCESS
+ op_TB uint32 = 0xB22C // FORMAT_RRE TEST BLOCK
+ op_TBDR uint32 = 0xB351 // FORMAT_RRF5 CONVERT HFP TO BFP (long)
+ op_TBEDR uint32 = 0xB350 // FORMAT_RRF5 CONVERT HFP TO BFP (long to short)
+ op_TBEGIN uint32 = 0xE560 // FORMAT_SIL TRANSACTION BEGIN
+ op_TBEGINC uint32 = 0xE561 // FORMAT_SIL TRANSACTION BEGIN
+ op_TCDB uint32 = 0xED11 // FORMAT_RXE TEST DATA CLASS (long BFP)
+ op_TCEB uint32 = 0xED10 // FORMAT_RXE TEST DATA CLASS (short BFP)
+ op_TCXB uint32 = 0xED12 // FORMAT_RXE TEST DATA CLASS (extended BFP)
+ op_TDCDT uint32 = 0xED54 // FORMAT_RXE TEST DATA CLASS (long DFP)
+ op_TDCET uint32 = 0xED50 // FORMAT_RXE TEST DATA CLASS (short DFP)
+ op_TDCXT uint32 = 0xED58 // FORMAT_RXE TEST DATA CLASS (extended DFP)
+ op_TDGDT uint32 = 0xED55 // FORMAT_RXE TEST DATA GROUP (long DFP)
+ op_TDGET uint32 = 0xED51 // FORMAT_RXE TEST DATA GROUP (short DFP)
+ op_TDGXT uint32 = 0xED59 // FORMAT_RXE TEST DATA GROUP (extended DFP)
+ op_TEND uint32 = 0xB2F8 // FORMAT_S TRANSACTION END
+ op_THDER uint32 = 0xB358 // FORMAT_RRE CONVERT BFP TO HFP (short to long)
+ op_THDR uint32 = 0xB359 // FORMAT_RRE CONVERT BFP TO HFP (long)
+ op_TM uint32 = 0x9100 // FORMAT_SI TEST UNDER MASK
+ op_TMH uint32 = 0xA700 // FORMAT_RI1 TEST UNDER MASK HIGH
+ op_TMHH uint32 = 0xA702 // FORMAT_RI1 TEST UNDER MASK (high high)
+ op_TMHL uint32 = 0xA703 // FORMAT_RI1 TEST UNDER MASK (high low)
+ op_TML uint32 = 0xA701 // FORMAT_RI1 TEST UNDER MASK LOW
+ op_TMLH uint32 = 0xA700 // FORMAT_RI1 TEST UNDER MASK (low high)
+ op_TMLL uint32 = 0xA701 // FORMAT_RI1 TEST UNDER MASK (low low)
+ op_TMY uint32 = 0xEB51 // FORMAT_SIY TEST UNDER MASK
+ op_TP uint32 = 0xEBC0 // FORMAT_RSL TEST DECIMAL
+ op_TPI uint32 = 0xB236 // FORMAT_S TEST PENDING INTERRUPTION
+ op_TPROT uint32 = 0xE501 // FORMAT_SSE TEST PROTECTION
+ op_TR uint32 = 0xDC00 // FORMAT_SS1 TRANSLATE
+ op_TRACE uint32 = 0x9900 // FORMAT_RS1 TRACE (32)
+ op_TRACG uint32 = 0xEB0F // FORMAT_RSY1 TRACE (64)
+ op_TRAP2 uint32 = 0x01FF // FORMAT_E TRAP
+ op_TRAP4 uint32 = 0xB2FF // FORMAT_S TRAP
+ op_TRE uint32 = 0xB2A5 // FORMAT_RRE TRANSLATE EXTENDED
+ op_TROO uint32 = 0xB993 // FORMAT_RRF3 TRANSLATE ONE TO ONE
+ op_TROT uint32 = 0xB992 // FORMAT_RRF3 TRANSLATE ONE TO TWO
+ op_TRT uint32 = 0xDD00 // FORMAT_SS1 TRANSLATE AND TEST
+ op_TRTE uint32 = 0xB9BF // FORMAT_RRF3 TRANSLATE AND TEST EXTENDED
+ op_TRTO uint32 = 0xB991 // FORMAT_RRF3 TRANSLATE TWO TO ONE
+ op_TRTR uint32 = 0xD000 // FORMAT_SS1 TRANSLATE AND TEST REVERSE
+ op_TRTRE uint32 = 0xB9BD // FORMAT_RRF3 TRANSLATE AND TEST REVERSE EXTENDED
+ op_TRTT uint32 = 0xB990 // FORMAT_RRF3 TRANSLATE TWO TO TWO
+ op_TS uint32 = 0x9300 // FORMAT_S TEST AND SET
+ op_TSCH uint32 = 0xB235 // FORMAT_S TEST SUBCHANNEL
+ op_UNPK uint32 = 0xF300 // FORMAT_SS2 UNPACK
+ op_UNPKA uint32 = 0xEA00 // FORMAT_SS1 UNPACK ASCII
+ op_UNPKU uint32 = 0xE200 // FORMAT_SS1 UNPACK UNICODE
+ op_UPT uint32 = 0x0102 // FORMAT_E UPDATE TREE
+ op_X uint32 = 0x5700 // FORMAT_RX1 EXCLUSIVE OR (32)
+ op_XC uint32 = 0xD700 // FORMAT_SS1 EXCLUSIVE OR (character)
+ op_XG uint32 = 0xE382 // FORMAT_RXY1 EXCLUSIVE OR (64)
+ op_XGR uint32 = 0xB982 // FORMAT_RRE EXCLUSIVE OR (64)
+ op_XGRK uint32 = 0xB9E7 // FORMAT_RRF1 EXCLUSIVE OR (64)
+ op_XI uint32 = 0x9700 // FORMAT_SI EXCLUSIVE OR (immediate)
+ op_XIHF uint32 = 0xC006 // FORMAT_RIL1 EXCLUSIVE OR IMMEDIATE (high)
+ op_XILF uint32 = 0xC007 // FORMAT_RIL1 EXCLUSIVE OR IMMEDIATE (low)
+ op_XIY uint32 = 0xEB57 // FORMAT_SIY EXCLUSIVE OR (immediate)
+ op_XR uint32 = 0x1700 // FORMAT_RR EXCLUSIVE OR (32)
+ op_XRK uint32 = 0xB9F7 // FORMAT_RRF1 EXCLUSIVE OR (32)
+ op_XSCH uint32 = 0xB276 // FORMAT_S CANCEL SUBCHANNEL
+ op_XY uint32 = 0xE357 // FORMAT_RXY1 EXCLUSIVE OR (32)
+ op_ZAP uint32 = 0xF800 // FORMAT_SS2 ZERO AND ADD
+
+ // added in z13
+ op_CXPT uint32 = 0xEDAF // RSL-b CONVERT FROM PACKED (to extended DFP)
+ op_CDPT uint32 = 0xEDAE // RSL-b CONVERT FROM PACKED (to long DFP)
+ op_CPXT uint32 = 0xEDAD // RSL-b CONVERT TO PACKED (from extended DFP)
+ op_CPDT uint32 = 0xEDAC // RSL-b CONVERT TO PACKED (from long DFP)
+ op_LZRF uint32 = 0xE33B // RXY-a LOAD AND ZERO RIGHTMOST BYTE (32)
+ op_LZRG uint32 = 0xE32A // RXY-a LOAD AND ZERO RIGHTMOST BYTE (64)
+ op_LCCB uint32 = 0xE727 // RXE LOAD COUNT TO BLOCK BOUNDARY
+ op_LOCHHI uint32 = 0xEC4E // RIE-g LOAD HALFWORD HIGH IMMEDIATE ON CONDITION (32←16)
+ op_LOCHI uint32 = 0xEC42 // RIE-g LOAD HALFWORD IMMEDIATE ON CONDITION (32←16)
+ op_LOCGHI uint32 = 0xEC46 // RIE-g LOAD HALFWORD IMMEDIATE ON CONDITION (64←16)
+ op_LOCFH uint32 = 0xEBE0 // RSY-b LOAD HIGH ON CONDITION (32)
+ op_LOCFHR uint32 = 0xB9E0 // RRF-c LOAD HIGH ON CONDITION (32)
+ op_LLZRGF uint32 = 0xE33A // RXY-a LOAD LOGICAL AND ZERO RIGHTMOST BYTE (64←32)
+ op_STOCFH uint32 = 0xEBE1 // RSY-b STORE HIGH ON CONDITION
+ op_VA uint32 = 0xE7F3 // VRR-c VECTOR ADD
+ op_VACC uint32 = 0xE7F1 // VRR-c VECTOR ADD COMPUTE CARRY
+ op_VAC uint32 = 0xE7BB // VRR-d VECTOR ADD WITH CARRY
+ op_VACCC uint32 = 0xE7B9 // VRR-d VECTOR ADD WITH CARRY COMPUTE CARRY
+ op_VN uint32 = 0xE768 // VRR-c VECTOR AND
+ op_VNC uint32 = 0xE769 // VRR-c VECTOR AND WITH COMPLEMENT
+ op_VAVG uint32 = 0xE7F2 // VRR-c VECTOR AVERAGE
+ op_VAVGL uint32 = 0xE7F0 // VRR-c VECTOR AVERAGE LOGICAL
+ op_VCKSM uint32 = 0xE766 // VRR-c VECTOR CHECKSUM
+ op_VCEQ uint32 = 0xE7F8 // VRR-b VECTOR COMPARE EQUAL
+ op_VCH uint32 = 0xE7FB // VRR-b VECTOR COMPARE HIGH
+ op_VCHL uint32 = 0xE7F9 // VRR-b VECTOR COMPARE HIGH LOGICAL
+ op_VCLZ uint32 = 0xE753 // VRR-a VECTOR COUNT LEADING ZEROS
+ op_VCTZ uint32 = 0xE752 // VRR-a VECTOR COUNT TRAILING ZEROS
+ op_VEC uint32 = 0xE7DB // VRR-a VECTOR ELEMENT COMPARE
+ op_VECL uint32 = 0xE7D9 // VRR-a VECTOR ELEMENT COMPARE LOGICAL
+ op_VERIM uint32 = 0xE772 // VRI-d VECTOR ELEMENT ROTATE AND INSERT UNDER MASK
+ op_VERLL uint32 = 0xE733 // VRS-a VECTOR ELEMENT ROTATE LEFT LOGICAL
+ op_VERLLV uint32 = 0xE773 // VRR-c VECTOR ELEMENT ROTATE LEFT LOGICAL
+ op_VESLV uint32 = 0xE770 // VRR-c VECTOR ELEMENT SHIFT LEFT
+ op_VESL uint32 = 0xE730 // VRS-a VECTOR ELEMENT SHIFT LEFT
+ op_VESRA uint32 = 0xE73A // VRS-a VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
+ op_VESRAV uint32 = 0xE77A // VRR-c VECTOR ELEMENT SHIFT RIGHT ARITHMETIC
+ op_VESRL uint32 = 0xE738 // VRS-a VECTOR ELEMENT SHIFT RIGHT LOGICAL
+ op_VESRLV uint32 = 0xE778 // VRR-c VECTOR ELEMENT SHIFT RIGHT LOGICAL
+ op_VX uint32 = 0xE76D // VRR-c VECTOR EXCLUSIVE OR
+ op_VFAE uint32 = 0xE782 // VRR-b VECTOR FIND ANY ELEMENT EQUAL
+ op_VFEE uint32 = 0xE780 // VRR-b VECTOR FIND ELEMENT EQUAL
+ op_VFENE uint32 = 0xE781 // VRR-b VECTOR FIND ELEMENT NOT EQUAL
+ op_VFA uint32 = 0xE7E3 // VRR-c VECTOR FP ADD
+ op_WFK uint32 = 0xE7CA // VRR-a VECTOR FP COMPARE AND SIGNAL SCALAR
+ op_VFCE uint32 = 0xE7E8 // VRR-c VECTOR FP COMPARE EQUAL
+ op_VFCH uint32 = 0xE7EB // VRR-c VECTOR FP COMPARE HIGH
+ op_VFCHE uint32 = 0xE7EA // VRR-c VECTOR FP COMPARE HIGH OR EQUAL
+ op_WFC uint32 = 0xE7CB // VRR-a VECTOR FP COMPARE SCALAR
+ op_VCDG uint32 = 0xE7C3 // VRR-a VECTOR FP CONVERT FROM FIXED 64-BIT
+ op_VCDLG uint32 = 0xE7C1 // VRR-a VECTOR FP CONVERT FROM LOGICAL 64-BIT
+ op_VCGD uint32 = 0xE7C2 // VRR-a VECTOR FP CONVERT TO FIXED 64-BIT
+ op_VCLGD uint32 = 0xE7C0 // VRR-a VECTOR FP CONVERT TO LOGICAL 64-BIT
+ op_VFD uint32 = 0xE7E5 // VRR-c VECTOR FP DIVIDE
+ op_VLDE uint32 = 0xE7C4 // VRR-a VECTOR FP LOAD LENGTHENED
+ op_VLED uint32 = 0xE7C5 // VRR-a VECTOR FP LOAD ROUNDED
+ op_VFM uint32 = 0xE7E7 // VRR-c VECTOR FP MULTIPLY
+ op_VFMA uint32 = 0xE78F // VRR-e VECTOR FP MULTIPLY AND ADD
+ op_VFMS uint32 = 0xE78E // VRR-e VECTOR FP MULTIPLY AND SUBTRACT
+ op_VFPSO uint32 = 0xE7CC // VRR-a VECTOR FP PERFORM SIGN OPERATION
+ op_VFSQ uint32 = 0xE7CE // VRR-a VECTOR FP SQUARE ROOT
+ op_VFS uint32 = 0xE7E2 // VRR-c VECTOR FP SUBTRACT
+ op_VFTCI uint32 = 0xE74A // VRI-e VECTOR FP TEST DATA CLASS IMMEDIATE
+ op_VGFM uint32 = 0xE7B4 // VRR-c VECTOR GALOIS FIELD MULTIPLY SUM
+ op_VGFMA uint32 = 0xE7BC // VRR-d VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE
+ op_VGEF uint32 = 0xE713 // VRV VECTOR GATHER ELEMENT (32)
+ op_VGEG uint32 = 0xE712 // VRV VECTOR GATHER ELEMENT (64)
+ op_VGBM uint32 = 0xE744 // VRI-a VECTOR GENERATE BYTE MASK
+ op_VGM uint32 = 0xE746 // VRI-b VECTOR GENERATE MASK
+ op_VISTR uint32 = 0xE75C // VRR-a VECTOR ISOLATE STRING
+ op_VL uint32 = 0xE706 // VRX VECTOR LOAD
+ op_VLR uint32 = 0xE756 // VRR-a VECTOR LOAD
+ op_VLREP uint32 = 0xE705 // VRX VECTOR LOAD AND REPLICATE
+ op_VLC uint32 = 0xE7DE // VRR-a VECTOR LOAD COMPLEMENT
+ op_VLEH uint32 = 0xE701 // VRX VECTOR LOAD ELEMENT (16)
+ op_VLEF uint32 = 0xE703 // VRX VECTOR LOAD ELEMENT (32)
+ op_VLEG uint32 = 0xE702 // VRX VECTOR LOAD ELEMENT (64)
+ op_VLEB uint32 = 0xE700 // VRX VECTOR LOAD ELEMENT (8)
+ op_VLEIH uint32 = 0xE741 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (16)
+ op_VLEIF uint32 = 0xE743 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (32)
+ op_VLEIG uint32 = 0xE742 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (64)
+ op_VLEIB uint32 = 0xE740 // VRI-a VECTOR LOAD ELEMENT IMMEDIATE (8)
+ op_VFI uint32 = 0xE7C7 // VRR-a VECTOR LOAD FP INTEGER
+ op_VLGV uint32 = 0xE721 // VRS-c VECTOR LOAD GR FROM VR ELEMENT
+ op_VLLEZ uint32 = 0xE704 // VRX VECTOR LOAD LOGICAL ELEMENT AND ZERO
+ op_VLM uint32 = 0xE736 // VRS-a VECTOR LOAD MULTIPLE
+ op_VLP uint32 = 0xE7DF // VRR-a VECTOR LOAD POSITIVE
+ op_VLBB uint32 = 0xE707 // VRX VECTOR LOAD TO BLOCK BOUNDARY
+ op_VLVG uint32 = 0xE722 // VRS-b VECTOR LOAD VR ELEMENT FROM GR
+ op_VLVGP uint32 = 0xE762 // VRR-f VECTOR LOAD VR FROM GRS DISJOINT
+ op_VLL uint32 = 0xE737 // VRS-b VECTOR LOAD WITH LENGTH
+ op_VMX uint32 = 0xE7FF // VRR-c VECTOR MAXIMUM
+ op_VMXL uint32 = 0xE7FD // VRR-c VECTOR MAXIMUM LOGICAL
+ op_VMRH uint32 = 0xE761 // VRR-c VECTOR MERGE HIGH
+ op_VMRL uint32 = 0xE760 // VRR-c VECTOR MERGE LOW
+ op_VMN uint32 = 0xE7FE // VRR-c VECTOR MINIMUM
+ op_VMNL uint32 = 0xE7FC // VRR-c VECTOR MINIMUM LOGICAL
+ op_VMAE uint32 = 0xE7AE // VRR-d VECTOR MULTIPLY AND ADD EVEN
+ op_VMAH uint32 = 0xE7AB // VRR-d VECTOR MULTIPLY AND ADD HIGH
+ op_VMALE uint32 = 0xE7AC // VRR-d VECTOR MULTIPLY AND ADD LOGICAL EVEN
+ op_VMALH uint32 = 0xE7A9 // VRR-d VECTOR MULTIPLY AND ADD LOGICAL HIGH
+ op_VMALO uint32 = 0xE7AD // VRR-d VECTOR MULTIPLY AND ADD LOGICAL ODD
+ op_VMAL uint32 = 0xE7AA // VRR-d VECTOR MULTIPLY AND ADD LOW
+ op_VMAO uint32 = 0xE7AF // VRR-d VECTOR MULTIPLY AND ADD ODD
+ op_VME uint32 = 0xE7A6 // VRR-c VECTOR MULTIPLY EVEN
+ op_VMH uint32 = 0xE7A3 // VRR-c VECTOR MULTIPLY HIGH
+ op_VMLE uint32 = 0xE7A4 // VRR-c VECTOR MULTIPLY EVEN LOGICAL
+ op_VMLH uint32 = 0xE7A1 // VRR-c VECTOR MULTIPLY HIGH LOGICAL
+ op_VMLO uint32 = 0xE7A5 // VRR-c VECTOR MULTIPLY ODD LOGICAL
+ op_VML uint32 = 0xE7A2 // VRR-c VECTOR MULTIPLY LOW
+ op_VMO uint32 = 0xE7A7 // VRR-c VECTOR MULTIPLY ODD
+ op_VNO uint32 = 0xE76B // VRR-c VECTOR NOR
+ op_VO uint32 = 0xE76A // VRR-c VECTOR OR
+ op_VPK uint32 = 0xE794 // VRR-c VECTOR PACK
+ op_VPKLS uint32 = 0xE795 // VRR-b VECTOR PACK LOGICAL SATURATE
+ op_VPKS uint32 = 0xE797 // VRR-b VECTOR PACK SATURATE
+ op_VPERM uint32 = 0xE78C // VRR-e VECTOR PERMUTE
+ op_VPDI uint32 = 0xE784 // VRR-c VECTOR PERMUTE DOUBLEWORD IMMEDIATE
+ op_VPOPCT uint32 = 0xE750 // VRR-a VECTOR POPULATION COUNT
+ op_VREP uint32 = 0xE74D // VRI-c VECTOR REPLICATE
+ op_VREPI uint32 = 0xE745 // VRI-a VECTOR REPLICATE IMMEDIATE
+ op_VSCEF uint32 = 0xE71B // VRV VECTOR SCATTER ELEMENT (32)
+ op_VSCEG uint32 = 0xE71A // VRV VECTOR SCATTER ELEMENT (64)
+ op_VSEL uint32 = 0xE78D // VRR-e VECTOR SELECT
+ op_VSL uint32 = 0xE774 // VRR-c VECTOR SHIFT LEFT
+ op_VSLB uint32 = 0xE775 // VRR-c VECTOR SHIFT LEFT BY BYTE
+ op_VSLDB uint32 = 0xE777 // VRI-d VECTOR SHIFT LEFT DOUBLE BY BYTE
+ op_VSRA uint32 = 0xE77E // VRR-c VECTOR SHIFT RIGHT ARITHMETIC
+ op_VSRAB uint32 = 0xE77F // VRR-c VECTOR SHIFT RIGHT ARITHMETIC BY BYTE
+ op_VSRL uint32 = 0xE77C // VRR-c VECTOR SHIFT RIGHT LOGICAL
+ op_VSRLB uint32 = 0xE77D // VRR-c VECTOR SHIFT RIGHT LOGICAL BY BYTE
+ op_VSEG uint32 = 0xE75F // VRR-a VECTOR SIGN EXTEND TO DOUBLEWORD
+ op_VST uint32 = 0xE70E // VRX VECTOR STORE
+ op_VSTEH uint32 = 0xE709 // VRX VECTOR STORE ELEMENT (16)
+ op_VSTEF uint32 = 0xE70B // VRX VECTOR STORE ELEMENT (32)
+ op_VSTEG uint32 = 0xE70A // VRX VECTOR STORE ELEMENT (64)
+ op_VSTEB uint32 = 0xE708 // VRX VECTOR STORE ELEMENT (8)
+ op_VSTM uint32 = 0xE73E // VRS-a VECTOR STORE MULTIPLE
+ op_VSTL uint32 = 0xE73F // VRS-b VECTOR STORE WITH LENGTH
+ op_VSTRC uint32 = 0xE78A // VRR-d VECTOR STRING RANGE COMPARE
+ op_VS uint32 = 0xE7F7 // VRR-c VECTOR SUBTRACT
+ op_VSCBI uint32 = 0xE7F5 // VRR-c VECTOR SUBTRACT COMPUTE BORROW INDICATION
+ op_VSBCBI uint32 = 0xE7BD // VRR-d VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION
+ op_VSBI uint32 = 0xE7BF // VRR-d VECTOR SUBTRACT WITH BORROW INDICATION
+ op_VSUMG uint32 = 0xE765 // VRR-c VECTOR SUM ACROSS DOUBLEWORD
+ op_VSUMQ uint32 = 0xE767 // VRR-c VECTOR SUM ACROSS QUADWORD
+ op_VSUM uint32 = 0xE764 // VRR-c VECTOR SUM ACROSS WORD
+ op_VTM uint32 = 0xE7D8 // VRR-a VECTOR TEST UNDER MASK
+ op_VUPH uint32 = 0xE7D7 // VRR-a VECTOR UNPACK HIGH
+ op_VUPLH uint32 = 0xE7D5 // VRR-a VECTOR UNPACK LOGICAL HIGH
+ op_VUPLL uint32 = 0xE7D4 // VRR-a VECTOR UNPACK LOGICAL LOW
+ op_VUPL uint32 = 0xE7D6 // VRR-a VECTOR UNPACK LOW
+)
+
+// oclass returns the operand class of a, shifted down by one so that
+// an unset Class field (zero value) maps to -1 rather than a valid class.
+func oclass(a *obj.Addr) int {
+	return int(a.Class) - 1
+}
+
+// Add a relocation for the immediate in a RIL style instruction.
+// The addend will be adjusted as required.
+func addrilreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
+	if sym == nil {
+		ctxt.Diag("require symbol to apply relocation")
+	}
+	offset := int64(2) // relocation offset from start of instruction
+	rel := obj.Addrel(ctxt.Cursym)
+	rel.Off = int32(ctxt.Pc + offset) // immediate field starts 2 bytes into the RIL instruction
+	rel.Siz = 4
+	rel.Sym = sym
+	// Fold the field offset and size into the addend so the linker's
+	// PC-relative calculation is taken from the start of the instruction.
+	rel.Add = add + offset + int64(rel.Siz)
+	rel.Type = obj.R_PCRELDBL // PC-relative; "DBL" suggests the value is stored in halfwords (divided by 2) — matches RIL immediates
+	return rel
+}
+
+// addrilrelocoffset is like addrilreloc but for a RIL instruction that does
+// not start at the current Pc: offset is the distance from Pc to the start
+// of the instruction containing the immediate. The addend is adjusted as
+// required.
+func addrilrelocoffset(ctxt *obj.Link, sym *obj.LSym, add, offset int64) *obj.Reloc {
+	if sym == nil {
+		ctxt.Diag("require symbol to apply relocation")
+	}
+	offset += int64(2) // relocation offset from start of instruction
+	rel := obj.Addrel(ctxt.Cursym)
+	rel.Off = int32(ctxt.Pc + offset)
+	rel.Siz = 4
+	rel.Sym = sym
+	// As in addrilreloc: bias the addend so the PC-relative value is
+	// computed from the start of the instruction, not the immediate field.
+	rel.Add = add + offset + int64(rel.Siz)
+	rel.Type = obj.R_PCRELDBL
+	return rel
+}
+
+// Add a CALL relocation for the immediate in a RIL style instruction.
+// The addend will be adjusted as required.
+func addcallreloc(ctxt *obj.Link, sym *obj.LSym, add int64) *obj.Reloc {
+	if sym == nil {
+		ctxt.Diag("require symbol to apply relocation")
+	}
+	offset := int64(2) // relocation offset from start of instruction
+	rel := obj.Addrel(ctxt.Cursym)
+	rel.Off = int32(ctxt.Pc + offset) // immediate field starts 2 bytes into the RIL instruction
+	rel.Siz = 4
+	rel.Sym = sym
+	// Same addend bias as addrilreloc; only the relocation type differs.
+	rel.Add = add + offset + int64(rel.Siz)
+	rel.Type = obj.R_CALL // call relocation, so the linker can track the call graph / insert trampolines as needed
+	return rel
+}
+
+// branchMask returns the 4-bit condition-code mask (the M1 field of a
+// BRC/BRCL instruction) for the conditional branch p. Each bit selects a
+// condition code set by a preceding compare: 8=CC0 (equal), 4=CC1 (low),
+// 2=CC2 (high), 1=CC3 (overflow/ones).
+func branchMask(ctxt *obj.Link, p *obj.Prog) uint32 {
+	switch p.As {
+	case ABEQ, ACMPBEQ, ACMPUBEQ:
+		return 0x8 // CC0: equal
+	case ABGE, ACMPBGE, ACMPUBGE:
+		return 0xA // CC0|CC2: equal or high
+	case ABGT, ACMPBGT, ACMPUBGT:
+		return 0x2 // CC2: high
+	case ABLE, ACMPBLE, ACMPUBLE:
+		return 0xC // CC0|CC1: equal or low
+	case ABLT, ACMPBLT, ACMPUBLT:
+		return 0x4 // CC1: low
+	case ABNE, ACMPBNE, ACMPUBNE:
+		return 0x7 // CC1|CC2|CC3: anything but equal
+	case ABVC:
+		return 0x0 //needs extra instruction
+	case ABVS:
+		return 0x1 // CC3: overflow
+	}
+	ctxt.Diag("unknown conditional branch %v", p.As)
+	return 0xF // branch always; keeps encoding valid after the diagnostic
+}
+
+func asmout(ctxt *obj.Link, asm *[]byte) {
+ p := ctxt.Curp
+ o := oplook(ctxt, p)
+ ctxt.Printp = p
+
+ switch o.type_ {
+ default:
+ ctxt.Diag("unknown type %d", o.type_)
+
+ case 0: // PSEUDO OPS
+ break
+
+ case 1: // MOV REG TO REG
+ switch p.As {
+ default:
+ ctxt.Diag("unhandled operation: %v", p.As)
+ case AMOVD:
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ // sign extend
+ case AMOVW:
+ zRRE(op_LGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ case AMOVH:
+ zRRE(op_LGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ case AMOVB:
+ zRRE(op_LGBR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ // zero extend
+ case AMOVWZ:
+ zRRE(op_LLGFR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ case AMOVHZ:
+ zRRE(op_LLGHR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ case AMOVBZ:
+ zRRE(op_LLGCR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ // reverse bytes
+ case AMOVDBR:
+ zRRE(op_LRVGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ case AMOVWBR:
+ zRRE(op_LRVR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ // floating point
+ case AFMOVD, AFMOVS:
+ zRR(op_LDR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case 2: /* int/cr/fp op Rb,[Ra],Rd */
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ var opcode uint32
+
+ switch p.As {
+ default:
+ ctxt.Diag("invalid opcode")
+ case AADD:
+ opcode = op_AGRK
+ case AADDC:
+ opcode = op_ALGRK
+ case AADDE:
+ opcode = op_ALCGR
+ case AMULLW:
+ opcode = op_MSGFR
+ case AMULLD:
+ opcode = op_MSGR
+ case AMULHDU:
+ opcode = op_MLGR
+ case ADIVW:
+ opcode = op_DSGFR
+ case ADIVWU:
+ opcode = op_DLR
+ case ADIVD:
+ opcode = op_DSGR
+ case ADIVDU:
+ opcode = op_DLGR
+ case AFADD:
+ opcode = op_ADBR
+ case AFADDS:
+ opcode = op_AEBR
+ case AFSUB:
+ opcode = op_SDBR
+ case AFSUBS:
+ opcode = op_SEBR
+ case AFDIV:
+ opcode = op_DDBR
+ case AFDIVS:
+ opcode = op_DEBR
+ }
+
+ switch p.As {
+ default:
+
+ case AADD, AADDC:
+ zRRF(opcode, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
+
+ case AADDE, AMULLW, AMULLD:
+ if r == int(p.To.Reg) {
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else if p.From.Reg == p.To.Reg {
+ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
+ } else {
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case ADIVW, ADIVWU, ADIVD, ADIVDU:
+ if p.As == ADIVWU || p.As == ADIVDU {
+ zRRE(op_LGR, REGTMP, REGZERO, asm)
+ }
+ zRRE(op_LGR, REGTMP2, uint32(r), asm)
+ zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
+ zRRE(op_LGR, uint32(p.To.Reg), REGTMP2, asm)
+
+ case AMULHDU:
+ zRRE(op_LGR, REGTMP2, uint32(r), asm)
+ zRRE(opcode, REGTMP, uint32(p.From.Reg), asm)
+ zRRE(op_LGR, uint32(p.To.Reg), REGTMP, asm)
+
+ case AFADD, AFADDS:
+ if r == int(p.To.Reg) {
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else if p.From.Reg == p.To.Reg {
+ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
+ } else {
+ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case AFSUB, AFSUBS, AFDIV, AFDIVS:
+ if r == int(p.To.Reg) {
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else if p.From.Reg == p.To.Reg {
+ zRRE(op_LGDR, REGTMP, uint32(r), asm)
+ zRRE(opcode, uint32(r), uint32(p.From.Reg), asm)
+ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(op_LDGR, uint32(r), REGTMP, asm)
+ } else {
+ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ }
+
+ case 3: // MOV CONSTANT TO REG
+ v := vregoff(ctxt, &p.From)
+ switch p.As {
+ case AMOVBZ:
+ v = int64(uint8(v))
+ case AMOVHZ:
+ v = int64(uint16(v))
+ case AMOVWZ:
+ v = int64(uint32(v))
+ case AMOVB:
+ v = int64(int8(v))
+ case AMOVH:
+ v = int64(int16(v))
+ case AMOVW:
+ v = int64(int32(v))
+ }
+ if v&0xffff == v {
+ zRI(op_LLILL, uint32(p.To.Reg), uint32(v), asm)
+ } else if v&0xffff0000 == v {
+ zRI(op_LLILH, uint32(p.To.Reg), uint32(v>>16), asm)
+ } else if v&0xffff00000000 == v {
+ zRI(op_LLIHL, uint32(p.To.Reg), uint32(v>>32), asm)
+ } else if uint64(v)&0xffff000000000000 == uint64(v) {
+ zRI(op_LLIHH, uint32(p.To.Reg), uint32(v>>48), asm)
+ } else if int64(int16(v)) == v {
+ zRI(op_LGHI, uint32(p.To.Reg), uint32(v), asm)
+ } else if int64(int32(v)) == v {
+ zRIL(a, op_LGFI, uint32(p.To.Reg), uint32(v), asm)
+ } else if int64(uint32(v)) == v {
+ zRIL(a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
+ } else if uint64(v)&0xffffffff00000000 == uint64(v) {
+ zRIL(a, op_LLIHF, uint32(p.To.Reg), uint32(v>>32), asm)
+ } else {
+ zRIL(a, op_LLILF, uint32(p.To.Reg), uint32(v), asm)
+ zRIL(a, op_IIHF, uint32(p.To.Reg), uint32(v>>32), asm)
+ }
+
+ case 5: /* syscall */ // This might be right, assuming SVC is the same as Power's SC
+ zI(op_SVC, 0, asm)
+
+ case 6: /* logical op Rb,[Rs,]Ra; no literal */
+ if p.To.Reg == 0 {
+ ctxt.Diag("literal operation on R0\n%v", p)
+ }
+
+ switch p.As {
+ case AAND, AOR, AXOR:
+ var opcode1, opcode2 uint32
+ switch p.As {
+ default:
+ case AAND:
+ opcode1 = op_NGR
+ opcode2 = op_NGRK
+ case AOR:
+ opcode1 = op_OGR
+ opcode2 = op_OGRK
+ case AXOR:
+ opcode1 = op_XGR
+ opcode2 = op_XGRK
+ }
+
+ r := int(p.Reg)
+ if r == 0 {
+ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else {
+ zRRF(opcode2, uint32(r), 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case AANDN, AORN:
+ var opcode1, opcode2 uint32
+ switch p.As {
+ default:
+ case AANDN:
+ opcode1 = op_NGR
+ opcode2 = op_NGRK
+ case AORN:
+ opcode1 = op_OGR
+ opcode2 = op_OGRK
+ }
+
+ r := int(p.Reg)
+ if r == 0 {
+ zRRE(op_LCGR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
+ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else {
+ zRRE(op_LCGR, REGTMP, uint32(r), asm)
+ zRRF(opcode2, REGTMP, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case ANAND, ANOR:
+ var opcode1, opcode2 uint32
+ switch p.As {
+ default:
+ case ANAND:
+ opcode1 = op_NGR
+ opcode2 = op_NGRK
+ case ANOR:
+ opcode1 = op_OGR
+ opcode2 = op_OGRK
+ }
+
+ r := int(p.Reg)
+ if r == 0 {
+ zRRE(opcode1, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else {
+ zRRF(opcode2, uint32(r), 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ zRRE(op_LCGR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
+ }
+
+ case 7: // shift left/right and rotate left
+ d2 := vregoff(ctxt, &p.From)
+ b2 := p.From.Reg
+ r3 := p.Reg
+ if r3 == 0 {
+ r3 = p.To.Reg
+ }
+ r1 := p.To.Reg
+ var opcode uint32
+ switch p.As {
+ default:
+ case ASLD:
+ opcode = op_SLLG
+ case ASRD:
+ opcode = op_SRLG
+ case ASLW:
+ opcode = op_SLLK
+ case ASRW:
+ opcode = op_SRLK
+ case ARLL:
+ opcode = op_RLL
+ case ARLLG:
+ opcode = op_RLLG
+ case ASRAW:
+ opcode = op_SRAK
+ case ASRAD:
+ opcode = op_SRAG
+ }
+ zRSY(opcode, uint32(r1), uint32(r3), uint32(b2), uint32(d2), asm)
+
+ case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
+ r := int(p.Reg)
+
+ switch p.As {
+ default:
+ case ASUB:
+ if r == 0 {
+ zRRE(op_SGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else {
+ zRRF(op_SGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
+ }
+ case ASUBC:
+ if r == 0 {
+ zRRE(op_SLGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else {
+ zRRF(op_SLGRK, uint32(p.From.Reg), 0, uint32(p.To.Reg), uint32(r), asm)
+ }
+
+ case ASUBE:
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if r == int(p.To.Reg) {
+ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else if p.From.Reg == p.To.Reg {
+ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(op_SLBGR, uint32(p.To.Reg), REGTMP, asm)
+ } else {
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+ }
+
+ case 11: /* br/bl lbra */
+ v := int32(0)
+
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - p.Pc) >> 1)
+ }
+
+ if p.As == ABR && p.To.Sym == nil && int32(int16(v)) == v {
+ zRI(op_BRC, 0xF, uint32(v), asm)
+ } else {
+ if p.As == ABL {
+ zRIL(b, op_BRASL, uint32(REG_LR), uint32(v), asm)
+ } else {
+ zRIL(c, op_BRCL, 0xF, uint32(v), asm)
+ }
+ if p.To.Sym != nil {
+ addcallreloc(ctxt, p.To.Sym, p.To.Offset)
+ }
+ }
+
+ case 15: /* br/bl (r) */
+ r := p.To.Reg
+ if p.As == ABCL || p.As == ABL {
+ zRR(op_BASR, uint32(REG_LR), uint32(r), asm)
+ } else {
+ zRR(op_BCR, 0xF, uint32(r), asm)
+ }
+
+ case 17, /* bc bo,bi,lbra (same for now) */
+ 16: /* bc bo,bi,sbra */
+ v := int32(0)
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - p.Pc) >> 1)
+ }
+ mask := branchMask(ctxt, p)
+ if p.To.Sym == nil && int32(int16(v)) == v {
+ zRI(op_BRC, mask, uint32(v), asm)
+ } else {
+ zRIL(c, op_BRCL, mask, uint32(v), asm)
+ }
+ if p.To.Sym != nil {
+ addrilreloc(ctxt, p.To.Sym, p.To.Offset)
+ }
+
+ case 18: // br/bl r
+ switch oclass(&p.To) {
+ case C_REG:
+ if p.As == ABL {
+ zRR(op_BASR, uint32(REG_LR), uint32(p.To.Reg), asm)
+ } else {
+ zRR(op_BCR, 0xF, uint32(p.To.Reg), asm)
+ }
+ default:
+ ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
+ }
+
+ case 19: // MOV $sym+n(SB) TO REG
+ d := vregoff(ctxt, &p.From)
+ zRIL(b, op_LARL, uint32(p.To.Reg), 0, asm)
+ if d&1 != 0 {
+ zRX(op_LA, uint32(p.To.Reg), uint32(p.To.Reg), 0, 1, asm)
+ d -= 1
+ }
+ addrilreloc(ctxt, p.From.Sym, d)
+
+ case 22: /* add $lcon,r1,r2 ==> cau+or+add */ /* could do add/sub more efficiently */
+
+ if p.From.Sym != nil {
+ ctxt.Diag("%v is not supported", p)
+ }
+
+ v := vregoff(ctxt, &p.From)
+ r := p.Reg
+ if r == 0 {
+ r = p.To.Reg
+ }
+ switch p.As {
+ default:
+ case AADD:
+ if r == p.To.Reg {
+ zRIL(a, op_AGFI, uint32(p.To.Reg), uint32(v), asm)
+ } else if int64(int16(v)) == v {
+ zRIE(d, op_AGHIK, uint32(p.To.Reg), uint32(r), uint32(v), 0, 0, 0, 0, asm)
+ } else {
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ zRIL(a, op_AGFI, uint32(p.To.Reg), uint32(v), asm)
+ }
+ case AADDC:
+ if r != p.To.Reg {
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ }
+ zRIL(a, op_ALGFI, uint32(p.To.Reg), uint32(v), asm)
+ case AMULLW:
+ if r != p.To.Reg {
+ zRRE(op_LGR, uint32(p.To.Reg), uint32(r), asm)
+ }
+ zRIL(a, op_MSGFI, uint32(p.To.Reg), uint32(v), asm)
+ }
+
+ case 23: /* and $lcon,r1,r2 ==> cau+or+and */ /* masks could be done using rlnm etc. */
+
+ v := vregoff(ctxt, &p.From)
+ var opcode uint32
+ r := p.Reg
+ if r == 0 {
+ r = p.To.Reg
+ }
+ if r == p.To.Reg {
+ switch p.As {
+ default:
+ ctxt.Diag("%v is not supported", p)
+ case AAND:
+ if v >= 0 { // needs zero extend
+ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
+ zRRE(op_NGR, uint32(p.To.Reg), REGTMP, asm)
+ } else if int64(int16(v)) == v {
+ zRI(op_NILL, uint32(p.To.Reg), uint32(v), asm)
+ } else { // r.To.Reg & 0xffffffff00000000 & uint32(v)
+ zRIL(a, op_NILF, uint32(p.To.Reg), uint32(v), asm)
+ }
+ case AOR:
+ if int64(uint32(v)) != v { // needs sign extend
+ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
+ zRRE(op_OGR, uint32(p.To.Reg), REGTMP, asm)
+ } else if int64(uint16(v)) == v {
+ zRI(op_OILL, uint32(p.To.Reg), uint32(v), asm)
+ } else {
+ zRIL(a, op_OILF, uint32(p.To.Reg), uint32(v), asm)
+ }
+ case AXOR:
+ if int64(uint32(v)) != v { // needs sign extend
+ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
+ zRRE(op_XGR, uint32(p.To.Reg), REGTMP, asm)
+ } else {
+ zRIL(a, op_XILF, uint32(p.To.Reg), uint32(v), asm)
+ }
+ }
+ } else {
+ switch p.As {
+ default:
+ ctxt.Diag("%v is not supported", p)
+ case AAND:
+ opcode = op_NGRK
+ case AOR:
+ opcode = op_OGRK
+ case AXOR:
+ opcode = op_XGRK
+ }
+ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
+ zRRF(opcode, uint32(r), 0, uint32(p.To.Reg), REGTMP, asm)
+ }
+
+ case 26: // MOV LACON
+ v := regoff(ctxt, &p.From)
+ r := p.From.Reg
+ if r == 0 {
+ r = o.param
+ }
+ if v >= 0 && v < DISP12 {
+ zRX(op_LA, uint32(p.To.Reg), uint32(r), 0, uint32(v), asm)
+ } else if v >= -DISP20/2 && v < DISP20/2 {
+ zRXY(a, op_LAY, uint32(p.To.Reg), uint32(r), 0, uint32(v), asm)
+ } else {
+ zRIL(a, op_LGFI, REGTMP, uint32(v), asm)
+ zRX(op_LA, uint32(p.To.Reg), uint32(r), REGTMP, 0, asm)
+ }
+
+ case 31: /* dword */
+ wd := uint64(vregoff(ctxt, &p.From))
+ *asm = append(*asm,
+ uint8(wd>>56),
+ uint8(wd>>48),
+ uint8(wd>>40),
+ uint8(wd>>32),
+ uint8(wd>>24),
+ uint8(wd>>16),
+ uint8(wd>>8),
+ uint8(wd))
+
+ case 32: /* fmul frc,fra,frd */
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ var opcode uint32
+
+ switch p.As {
+ default:
+ ctxt.Diag("invalid opcode")
+ case AFMUL:
+ opcode = op_MDBR
+ case AFMULS:
+ opcode = op_MEEBR
+ }
+
+ if r == int(p.To.Reg) {
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ } else if p.From.Reg == p.To.Reg {
+ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
+ } else {
+ zRR(op_LDR, uint32(p.To.Reg), uint32(r), asm)
+ zRRE(opcode, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+ }
+
+ case 33: /* fabs [frb,]frd; fmr. frb,frd */
+ r := p.From.Reg
+ if oclass(&p.From) == C_NONE {
+ r = p.To.Reg
+ }
+ var opcode uint32
+ switch p.As {
+ default:
+ case AFABS:
+ opcode = op_LPDBR
+ case AFNABS:
+ opcode = op_LNDBR
+ case AFNEG:
+ opcode = op_LCDFR
+ case ALEDBR:
+ opcode = op_LEDBR
+ case ALDEBR:
+ opcode = op_LDEBR
+ case AFSQRT:
+ opcode = op_SQDBR
+ case AFSQRTS:
+ opcode = op_SQEBR
+ }
+ zRRE(opcode, uint32(p.To.Reg), uint32(r), asm)
+
+ case 34: /* FMADDx fra,frb,frc,frd (d=a*b+c); FSELx a<0? (d=b): (d=c) */
+
+ var opcode uint32
+
+ switch p.As {
+ default:
+ ctxt.Diag("invalid opcode")
+ case AFMADD:
+ opcode = op_MADBR
+ case AFMADDS:
+ opcode = op_MAEBR
+ case AFMSUB:
+ opcode = op_MSDBR
+ case AFMSUBS:
+ opcode = op_MSEBR
+ case AFNMADD:
+ opcode = op_MADBR
+ case AFNMADDS:
+ opcode = op_MAEBR
+ case AFNMSUB:
+ opcode = op_MSDBR
+ case AFNMSUBS:
+ opcode = op_MSEBR
+ }
+
+ zRR(op_LDR, uint32(p.To.Reg), uint32(p.Reg), asm)
+ zRRD(opcode, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From3.Reg), asm)
+
+ if p.As == AFNMADD || p.As == AFNMADDS || p.As == AFNMSUB || p.As == AFNMSUBS {
+ zRRE(op_LCDFR, uint32(p.To.Reg), uint32(p.To.Reg), asm)
+ }
+
+ case 35: // MOVE REG TO LAUTO/LOREG
+ d2 := regoff(ctxt, &p.To)
+ b2 := p.To.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ x2 := p.To.Index
+ if d2 < -DISP20/2 || d2 >= DISP20/2 {
+ zRIL(a, op_LGFI, REGTMP, uint32(d2), asm)
+ if x2 != 0 {
+ zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
+ }
+ x2 = REGTMP
+ d2 = 0
+ }
+ zRXY(0, zopstore(ctxt, p.As), uint32(p.From.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
+
+ case 36: // MOV LAUTO/LOREG TO REG
+ d2 := regoff(ctxt, &p.From)
+ b2 := p.From.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ x2 := p.From.Index
+ if d2 < -DISP20/2 || d2 >= DISP20/2 {
+ zRIL(a, op_LGFI, REGTMP, uint32(d2), asm)
+ if x2 != 0 {
+ zRX(op_LA, REGTMP, REGTMP, uint32(x2), 0, asm)
+ }
+ x2 = REGTMP
+ d2 = 0
+ }
+ zRXY(0, zopload(ctxt, p.As), uint32(p.To.Reg), uint32(x2), uint32(b2), uint32(d2), asm)
+
+ case 40: /* word and byte*/
+ wd := uint32(regoff(ctxt, &p.From))
+ if p.As == AWORD { //WORD
+ *asm = append(*asm, uint8(wd>>24), uint8(wd>>16), uint8(wd>>8), uint8(wd))
+ } else { //BYTE
+ *asm = append(*asm, uint8(wd))
+ }
+
+ case 47: /* op Ra, Rd; also op [Ra,] Rd */
+ switch p.As {
+ default:
+
+ case AADDME:
+ r := int(p.From.Reg)
+ if p.To.Reg == p.From.Reg {
+ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
+ r = REGTMP
+ }
+ zRIL(a, op_LGFI, uint32(p.To.Reg), 0xffffffff, asm) // p.To.Reg <- -1
+ zRRE(op_ALCGR, uint32(p.To.Reg), uint32(r), asm)
+
+ case AADDZE:
+ r := int(p.From.Reg)
+ if p.To.Reg == p.From.Reg {
+ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
+ r = REGTMP
+ }
+ zRRE(op_LGR, uint32(p.To.Reg), REGZERO, asm) // p.To.Reg <- 0
+ zRRE(op_ALCGR, uint32(p.To.Reg), uint32(r), asm)
+
+ case ASUBME:
+ r := int(p.From.Reg)
+ if p.To.Reg == p.From.Reg {
+ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
+ r = REGTMP
+ }
+ zRIL(a, op_LGFI, uint32(p.To.Reg), 0xffffffff, asm) // p.To.Reg <- -1
+ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(r), asm)
+
+ case ASUBZE:
+ r := int(p.From.Reg)
+ if p.To.Reg == p.From.Reg {
+ zRRE(op_LGR, REGTMP, uint32(p.From.Reg), asm)
+ r = REGTMP
+ }
+ zRRE(op_LGR, uint32(p.To.Reg), REGZERO, asm) // p.To.Reg <- 0
+ zRRE(op_SLBGR, uint32(p.To.Reg), uint32(r), asm)
+
+ case ANEG:
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ zRRE(op_LCGR, uint32(p.To.Reg), uint32(r), asm)
+ }
+
+ case 67: // AFMOVx $0, Fy -- move +0 into reg
+ var opcode uint32
+ switch p.As {
+ case AFMOVS:
+ opcode = op_LZER
+ case AFMOVD:
+ opcode = op_LZDR
+ }
+ zRRE(opcode, uint32(p.To.Reg), 0, asm)
+
+ case 68: /* ear arS,rD */
+ zRRE(op_EAR, uint32(p.To.Reg), uint32(p.From.Reg-REG_AR0), asm)
+
+ case 69: /* sar rS,arD */
+ zRRE(op_SAR, uint32(p.To.Reg-REG_AR0), uint32(p.From.Reg), asm)
+
+ case 70: /* [f]cmp r,r,cr*/
+ if p.Reg != 0 {
+ ctxt.Diag("unsupported nozero CC in Z")
+ }
+ if p.As == ACMPW || p.As == ACMPWU {
+ zRR(zoprr(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
+ } else {
+ zRRE(zoprre(ctxt, p.As), uint32(p.From.Reg), uint32(p.To.Reg), asm)
+ }
+
+ case 71: // cmp reg $constant
+ v := vregoff(ctxt, &p.To)
+ switch p.As {
+ case ACMP, ACMPW:
+ if int64(int32(v)) != v {
+ ctxt.Diag("%v overflows an int32", v)
+ }
+ case ACMPU, ACMPWU:
+ if int64(uint32(v)) != v {
+ ctxt.Diag("%v overflows a uint32", v)
+ }
+ }
+ zRIL(0, zopril(ctxt, p.As), uint32(p.From.Reg), uint32(regoff(ctxt, &p.To)), asm)
+
+ case 72: // MOV int32 -> s+o(r)(i*1)
+ v := regoff(ctxt, &p.From)
+ d := regoff(ctxt, &p.To)
+ r := p.To.Reg
+ x := p.To.Index
+ if r == 0 {
+ r = o.param
+ }
+ if p.From.Sym != nil {
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ if v&0x1 != 0 {
+ v -= 1
+ zRX(op_LA, REGTMP, REGTMP, 0, 1, asm)
+ }
+ addrilreloc(ctxt, p.From.Sym, int64(v))
+ if d < -DISP20/2 || d >= DISP20/2 {
+ zRIL(a, op_LGFI, REGTMP2, uint32(d), asm)
+ if x != 0 {
+ zRRE(op_AGR, REGTMP2, uint32(x), asm)
+ }
+ d = 0
+ x = REGTMP2
+ }
+ zRXY(0, zopstore(ctxt, p.As), REGTMP, uint32(x), uint32(r), uint32(d), asm)
+ } else if int32(int16(v)) == v && x == 0 {
+ if d < 0 || d >= DISP12 {
+ if r == REGTMP || r == REGTMP2 {
+ zRIL(a, op_AGFI, uint32(r), uint32(d), asm)
+ } else {
+ zRIL(a, op_LGFI, REGTMP, uint32(d), asm)
+ zRRE(op_AGR, REGTMP, uint32(r), asm)
+ r = REGTMP
+ }
+ d = 0
+ }
+ var opcode uint32
+ switch p.As {
+ case AMOVD:
+ opcode = op_MVGHI
+ case AMOVW, AMOVWZ:
+ opcode = op_MVHI
+ case AMOVH, AMOVHZ:
+ opcode = op_MVHHI
+ case AMOVB, AMOVBZ:
+ opcode = op_MVI
+ }
+ if opcode == op_MVI {
+ zSI(opcode, uint32(v), uint32(r), uint32(d), asm)
+ } else {
+ zSIL(opcode, uint32(r), uint32(d), uint32(v), asm)
+ }
+ } else {
+ zRIL(a, op_LGFI, REGTMP2, uint32(v), asm)
+ if d < -DISP20/2 || d >= DISP20/2 {
+ if r == REGTMP {
+ zRIL(a, op_AGFI, REGTMP, uint32(d), asm)
+ } else {
+ zRIL(a, op_LGFI, REGTMP, uint32(d), asm)
+ if x != 0 {
+ zRRE(op_AGR, REGTMP, uint32(x), asm)
+ }
+ x = REGTMP
+ }
+ d = 0
+ }
+ zRXY(0, zopstore(ctxt, p.As), REGTMP2, uint32(x), uint32(r), uint32(d), asm)
+ }
+
+ case 73: // MOV int32 -> addr
+ v := regoff(ctxt, &p.From)
+ d := regoff(ctxt, &p.To)
+ a := uint32(0)
+ if d&1 != 0 {
+ d -= 1
+ a = 1
+ }
+ zRIL(b, op_LARL, REGTMP, uint32(d), asm)
+ addrilreloc(ctxt, p.To.Sym, int64(d))
+ if p.From.Sym != nil {
+ zRIL(b, op_LARL, REGTMP2, 0, asm)
+ // NOTE(review): dropped 'a := uint32(0)' — it shadowed the outer odd-address adjustment 'a', so the store below wrote to addr-1 for odd-addressed symbols; confirm against upstream asmz.go
+ if v&0x1 != 0 {
+ v -= 1
+ zRX(op_LA, REGTMP2, REGTMP2, 0, 1, asm)
+ }
+ addrilrelocoffset(ctxt, p.From.Sym, int64(v), sizeRIL)
+ zRXY(0, zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
+ } else if int32(int16(v)) == v {
+ var opcode uint32
+ switch p.As {
+ case AMOVD:
+ opcode = op_MVGHI
+ case AMOVW, AMOVWZ:
+ opcode = op_MVHI
+ case AMOVH, AMOVHZ:
+ opcode = op_MVHHI
+ case AMOVB, AMOVBZ:
+ opcode = op_MVI
+ }
+ if opcode == op_MVI {
+ zSI(opcode, uint32(v), REGTMP, a, asm)
+ } else {
+ zSIL(opcode, REGTMP, a, uint32(v), asm)
+ }
+ } else {
+ zRIL(a, op_LGFI, REGTMP2, uint32(v), asm)
+ zRXY(0, zopstore(ctxt, p.As), REGTMP2, 0, REGTMP, a, asm)
+ }
+
+ case 74: // MOV REG TO sym+n(SB) (requires relocation)
+ i2 := regoff(ctxt, &p.To)
+ switch p.As {
+ case AMOVD:
+ zRIL(b, op_STGRL, uint32(p.From.Reg), 0, asm)
+ case AMOVW, AMOVWZ: // The zero extension doesn't affect store instructions
+ zRIL(b, op_STRL, uint32(p.From.Reg), 0, asm)
+ case AMOVH, AMOVHZ: // The zero extension doesn't affect store instructions
+ zRIL(b, op_STHRL, uint32(p.From.Reg), 0, asm)
+ case AMOVB, AMOVBZ: // The zero extension doesn't affect store instructions
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ adj := uint32(0) // adjustment needed for odd addresses
+ if i2&1 != 0 {
+ i2 -= 1
+ adj = 1
+ }
+ zRX(op_STC, uint32(p.From.Reg), 0, REGTMP, adj, asm)
+ case AFMOVD:
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ zRX(op_STD, uint32(p.From.Reg), 0, REGTMP, 0, asm)
+ case AFMOVS:
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ zRX(op_STE, uint32(p.From.Reg), 0, REGTMP, 0, asm)
+ }
+ addrilreloc(ctxt, p.To.Sym, int64(i2))
+
+ case 75: // MOV sym+n(SB) TO REG (requires relocation)
+ i2 := regoff(ctxt, &p.From)
+ switch p.As {
+ case AMOVD:
+ if i2&1 != 0 {
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ zRXY(0, op_LG, uint32(p.To.Reg), REGTMP, 0, 1, asm)
+ i2 -= 1
+ } else {
+ zRIL(b, op_LGRL, uint32(p.To.Reg), 0, asm)
+ }
+ case AMOVW:
+ zRIL(b, op_LGFRL, uint32(p.To.Reg), 0, asm)
+ case AMOVWZ:
+ zRIL(b, op_LLGFRL, uint32(p.To.Reg), 0, asm)
+ case AMOVH:
+ zRIL(b, op_LGHRL, uint32(p.To.Reg), 0, asm)
+ case AMOVHZ:
+ zRIL(b, op_LLGHRL, uint32(p.To.Reg), 0, asm)
+ case AMOVB, AMOVBZ:
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ adj := uint32(0) // adjustment needed for odd addresses
+ if i2&1 != 0 {
+ i2 -= 1
+ adj = 1
+ }
+ switch p.As {
+ case AMOVB:
+ zRXY(0, op_LGB, uint32(p.To.Reg), 0, REGTMP, adj, asm)
+ case AMOVBZ:
+ zRXY(0, op_LLGC, uint32(p.To.Reg), 0, REGTMP, adj, asm)
+ }
+ case AFMOVD:
+ zRIL(a, op_LARL, REGTMP, 0, asm)
+ zRX(op_LD, uint32(p.To.Reg), 0, REGTMP, 0, asm)
+ case AFMOVS:
+ zRIL(a, op_LARL, REGTMP, 0, asm)
+ zRX(op_LE, uint32(p.To.Reg), 0, REGTMP, 0, asm)
+ }
+ addrilreloc(ctxt, p.From.Sym, int64(i2))
+
+ case 77: /* syscall $scon */
+ if p.From.Offset > 255 || p.From.Offset < 1 {
+ ctxt.Diag("illegal system call; system call number out of range: %v", p)
+ zE(op_TRAP2, asm) // trap always
+ } else {
+ zI(op_SVC, uint32(p.From.Offset), asm)
+ }
+
+ case 78: /* undef */
+ /* "An instruction consisting entirely of binary 0s is guaranteed
+ always to be an illegal instruction." */
+ *asm = append(*asm, 0, 0, 0, 0)
+
+ case 79: /* cs,csg r1,r3,off(r2) -> compare & swap; if (r1 ==off(r2)) then off(r2)= r3 */
+ v := regoff(ctxt, &p.To)
+ if v < 0 {
+ v = 0
+ }
+ if p.As == ACS {
+ zRS(op_CS, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
+ } else if p.As == ACSG {
+ zRSY(op_CSG, uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg), uint32(v), asm)
+ }
+
+ case 81: /* SYNC-> BCR 14,0 */
+ zRR(op_BCR, 0xE, 0, asm)
+
+ case 82: /* conversion from GPR to FPR */
+ var opcode uint32
+ switch p.As {
+ default:
+ log.Fatalf("unexpected opcode %v", p.As)
+ case ACEFBRA:
+ opcode = op_CEFBRA
+ case ACDFBRA:
+ opcode = op_CDFBRA
+ case ACEGBRA:
+ opcode = op_CEGBRA
+ case ACDGBRA:
+ opcode = op_CDGBRA
+ case ACELFBR:
+ opcode = op_CELFBR
+ case ACDLFBR:
+ opcode = op_CDLFBR
+ case ACELGBR:
+ opcode = op_CELGBR
+ case ACDLGBR:
+ opcode = op_CDLGBR
+ }
+ /* set immediate operand M3 to 0 to use the default BFP rounding mode
+ (usually round to nearest, ties to even); M4 is reserved and must be 0 */
+ zRRF(opcode, 0, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+
+ case 83: /* conversion from FPR to GPR */
+ var opcode uint32
+ switch p.As {
+ default:
+ log.Fatalf("unexpected opcode %v", p.As)
+ case ACFEBRA:
+ opcode = op_CFEBRA
+ case ACFDBRA:
+ opcode = op_CFDBRA
+ case ACGEBRA:
+ opcode = op_CGEBRA
+ case ACGDBRA:
+ opcode = op_CGDBRA
+ case ACLFEBR:
+ opcode = op_CLFEBR
+ case ACLFDBR:
+ opcode = op_CLFDBR
+ case ACLGEBR:
+ opcode = op_CLGEBR
+ case ACLGDBR:
+ opcode = op_CLGDBR
+ }
+ /* set immediate operand M3 to 5 for rounding toward zero (required by Go spec); M4 is reserved and must be 0 */
+ zRRF(opcode, 5, 0, uint32(p.To.Reg), uint32(p.From.Reg), asm)
+
+ case 84: /* storage-and-storage operations (mvc, clc, xc, oc, nc) */
+ l := regoff(ctxt, p.From3)
+ if l < 1 || l > 256 {
+ ctxt.Diag("number of bytes (%v) not in range [1,256]", l)
+ }
+ if p.From.Index != 0 || p.To.Index != 0 {
+ ctxt.Diag("cannot use index reg")
+ }
+ b1 := p.To.Reg
+ b2 := p.From.Reg
+ if b1 == 0 {
+ b1 = o.param
+ }
+ if b2 == 0 {
+ b2 = o.param
+ }
+ d1 := regoff(ctxt, &p.To)
+ d2 := regoff(ctxt, &p.From)
+ if d1 < 0 || d1 >= DISP12 {
+ if b2 == REGTMP {
+ ctxt.Diag("REGTMP conflict")
+ }
+ if b1 != REGTMP {
+ zRRE(op_LGR, REGTMP, uint32(b1), asm)
+ }
+ zRIL(a, op_AGFI, REGTMP, uint32(d1), asm)
+ if d1 == d2 && b1 == b2 {
+ d2 = 0
+ b2 = REGTMP
+ }
+ d1 = 0
+ b1 = REGTMP
+ }
+ if d2 < 0 || d2 >= DISP12 {
+ if b1 == REGTMP2 {
+ ctxt.Diag("REGTMP2 conflict")
+ }
+ if b2 != REGTMP2 {
+ zRRE(op_LGR, REGTMP2, uint32(b2), asm)
+ }
+ zRIL(a, op_AGFI, REGTMP2, uint32(d2), asm)
+ d2 = 0
+ b2 = REGTMP2
+ }
+ var opcode uint32
+ switch p.As {
+ default:
+ ctxt.Diag("unexpected opcode %v", p.As)
+ case AMVC:
+ opcode = op_MVC
+ case ACLC:
+ opcode = op_CLC
+ // swap operand order for CLC so that it matches CMP
+ b1, b2 = b2, b1
+ d1, d2 = d2, d1
+ case AXC:
+ opcode = op_XC
+ case AOC:
+ opcode = op_OC
+ case ANC:
+ opcode = op_NC
+ }
+ zSS(a, opcode, uint32(l-1), 0, uint32(b1), uint32(d1), uint32(b2), uint32(d2), asm)
+
+ case 85: /* larl: load address relative long */
+ // When using larl directly, don't add a nop
+ v := regoff(ctxt, &p.From)
+ if p.From.Sym == nil {
+ if (v & 1) != 0 {
+ ctxt.Diag("cannot use LARL with odd offset: %v", v)
+ }
+ } else {
+ addrilreloc(ctxt, p.From.Sym, int64(v))
+ v = 0
+ }
+ zRIL(b, op_LARL, uint32(p.To.Reg), uint32(v>>1), asm)
+
+ case 86: /* lay?: load address */
+ d := vregoff(ctxt, &p.From)
+ x := p.From.Index
+ b := p.From.Reg
+ if b == 0 {
+ b = o.param
+ }
+ switch p.As {
+ case ALA:
+ zRX(op_LA, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
+ case ALAY:
+ zRXY(0, op_LAY, uint32(p.To.Reg), uint32(x), uint32(b), uint32(d), asm)
+ }
+
+ case 87: /* exrl: execute relative long */
+ v := vregoff(ctxt, &p.From)
+ if p.From.Sym == nil {
+ if v&1 != 0 {
+ ctxt.Diag("cannot use EXRL with odd offset: %v", v)
+ }
+ } else {
+ addrilreloc(ctxt, p.From.Sym, v)
+ v = 0
+ }
+ zRIL(b, op_EXRL, uint32(p.To.Reg), uint32(v>>1), asm)
+
+ case 88: /* stck[cef]?: store clock (comparator/extended/fast) */
+ var opcode uint32
+ switch p.As {
+ case ASTCK:
+ opcode = op_STCK
+ case ASTCKC:
+ opcode = op_STCKC
+ case ASTCKE:
+ opcode = op_STCKE
+ case ASTCKF:
+ opcode = op_STCKF
+ }
+ v := vregoff(ctxt, &p.To)
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = int(o.param)
+ }
+ zS(opcode, uint32(r), uint32(v), asm)
+
+ case 89:
+ var v int32
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - p.Pc) >> 1)
+ }
+ var opcode, opcode2 uint32
+ switch p.As {
+ case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
+ opcode = op_CGRJ
+ opcode2 = op_CGR
+ case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
+ opcode = op_CLGRJ
+ opcode2 = op_CLGR
+ }
+ mask := branchMask(ctxt, p)
+ if int32(int16(v)) != v {
+ zRRE(opcode2, uint32(p.From.Reg), uint32(p.Reg), asm)
+ zRIL(c, op_BRCL, mask, uint32(v-sizeRRE/2), asm)
+ } else {
+ zRIE(b, opcode, uint32(p.From.Reg), uint32(p.Reg), uint32(v), 0, 0, mask, 0, asm)
+ }
+
+ case 90:
+ var v int32
+ if p.Pcond != nil {
+ v = int32((p.Pcond.Pc - p.Pc) >> 1)
+ }
+ var opcode, opcode2 uint32
+ switch p.As {
+ case ACMPBEQ, ACMPBGE, ACMPBGT, ACMPBLE, ACMPBLT, ACMPBNE:
+ opcode = op_CGIJ
+ opcode2 = op_CGFI
+ case ACMPUBEQ, ACMPUBGE, ACMPUBGT, ACMPUBLE, ACMPUBLT, ACMPUBNE:
+ opcode = op_CLGIJ
+ opcode2 = op_CLGFI
+ }
+ mask := branchMask(ctxt, p)
+ if int32(int16(v)) != v {
+ zRIL(0, opcode2, uint32(p.From.Reg), uint32(regoff(ctxt, p.From3)), asm)
+ zRIL(c, op_BRCL, mask, uint32(v-sizeRIL/2), asm)
+ } else {
+ zRIE(c, opcode, uint32(p.From.Reg), mask, uint32(v), 0, 0, 0, uint32(regoff(ctxt, p.From3)), asm)
+ }
+
+ case 93: // GOT lookup
+ v := vregoff(ctxt, &p.To)
+ if v != 0 {
+ ctxt.Diag("invalid offset against GOT slot %v", p)
+ }
+ zRIL(b, op_LGRL, uint32(p.To.Reg), 0, asm)
+ rel := obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc + 2)
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Type = obj.R_GOTPCREL
+ rel.Add = 2 + int64(rel.Siz)
+
+ case 94: // TLS local exec model
+ zRIL(b, op_LARL, REGTMP, (sizeRIL+sizeRXY+sizeRI)>>1, asm)
+ zRXY(0, op_LG, uint32(p.To.Reg), REGTMP, 0, 0, asm)
+ zRI(op_BRC, 0xF, (sizeRI+8)>>1, asm)
+ *asm = append(*asm, 0, 0, 0, 0, 0, 0, 0, 0)
+ rel := obj.Addrel(ctxt.Cursym)
+ rel.Off = int32(ctxt.Pc + sizeRIL + sizeRXY + sizeRI)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Type = obj.R_TLS_LE
+ rel.Add = 0
+
+ case 95: // TLS initial exec model
+ // Assembly | Relocation symbol | Done Here?
+ // --------------------------------------------------------------
+ // ear %r11, %a0 | |
+ // sllg %r11, %r11, 32 | |
+ // ear %r11, %a1 | |
+ // larl %r10, @indntpoff | R_390_TLS_IEENT | Y
+ // lg %r10, 0(%r10) | R_390_TLS_LOAD (tag) | Y
+ // la %r10, 0(%r10, %r11) | |
+ // --------------------------------------------------------------
+
+ // R_390_TLS_IEENT
+ zRIL(b, op_LARL, REGTMP, 0, asm)
+ ieent := obj.Addrel(ctxt.Cursym)
+ ieent.Off = int32(ctxt.Pc + 2)
+ ieent.Siz = 4
+ ieent.Sym = p.From.Sym
+ ieent.Type = obj.R_TLS_IE
+ ieent.Add = 2 + int64(ieent.Siz)
+
+ // R_390_TLS_LOAD
+ zRXY(0, op_LG, uint32(p.To.Reg), REGTMP, 0, 0, asm)
+ // TODO(mundaym): add R_390_TLS_LOAD relocation here
+ // not strictly required but might allow the linker to optimize
+
+ case 96: // CLEAR macro
+ length := vregoff(ctxt, &p.From)
+ offset := vregoff(ctxt, &p.To)
+ reg := p.To.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ if length <= 0 {
+ ctxt.Diag("cannot CLEAR %d bytes, must be greater than 0", length)
+ }
+ for length > 0 {
+ if offset < 0 || offset >= DISP12 {
+ if offset >= -DISP20/2 && offset < DISP20/2 {
+ zRXY(0, op_LAY, REGTMP, uint32(reg), 0, uint32(offset), asm)
+ } else {
+ if reg != REGTMP {
+ zRRE(op_LGR, REGTMP, uint32(reg), asm)
+ }
+ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
+ }
+ reg = REGTMP
+ offset = 0
+ }
+ size := length
+ if size > 256 {
+ size = 256
+ }
+
+ switch size {
+ case 1:
+ zSI(op_MVI, 0, uint32(reg), uint32(offset), asm)
+ case 2:
+ zSIL(op_MVHHI, uint32(reg), uint32(offset), 0, asm)
+ case 4:
+ zSIL(op_MVHI, uint32(reg), uint32(offset), 0, asm)
+ case 8:
+ zSIL(op_MVGHI, uint32(reg), uint32(offset), 0, asm)
+ default:
+ zSS(a, op_XC, uint32(size-1), 0, uint32(reg), uint32(offset), uint32(reg), uint32(offset), asm)
+ }
+
+ length -= size
+ offset += size
+ }
+
+ case 97: // STORE MULTIPLE (STMG/STMY)
+ rstart := p.From.Reg
+ rend := p.Reg
+ offset := regoff(ctxt, &p.To)
+ reg := p.To.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ if offset < -DISP20/2 || offset >= DISP20/2 {
+ if reg != REGTMP {
+ zRRE(op_LGR, REGTMP, uint32(reg), asm)
+ }
+ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
+ reg = REGTMP
+ offset = 0
+ }
+ switch p.As {
+ case ASTMY:
+ if offset >= 0 && offset < DISP12 {
+ zRS(op_STM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ } else {
+ zRSY(op_STMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ }
+ case ASTMG:
+ zRSY(op_STMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ }
+
+ case 98: // LOAD MULTIPLE (LMG/LMY)
+ rstart := p.Reg
+ rend := p.To.Reg
+ offset := regoff(ctxt, &p.From)
+ reg := p.From.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ if offset < -DISP20/2 || offset >= DISP20/2 {
+ if reg != REGTMP {
+ zRRE(op_LGR, REGTMP, uint32(reg), asm)
+ }
+ zRIL(a, op_AGFI, REGTMP, uint32(offset), asm)
+ reg = REGTMP
+ offset = 0
+ }
+ switch p.As {
+ case ALMY:
+ if offset >= 0 && offset < DISP12 {
+ zRS(op_LM, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ } else {
+ zRSY(op_LMY, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ }
+ case ALMG:
+ zRSY(op_LMG, uint32(rstart), uint32(rend), uint32(reg), uint32(offset), asm)
+ }
+
+ case 100: // VRX STORE
+ op, m3, _ := vop(p.As)
+ if p.From3 != nil {
+ m3 = uint32(vregoff(ctxt, p.From3))
+ }
+ b2 := p.To.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ d2 := uint32(vregoff(ctxt, &p.To))
+ zVRX(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
+
+ case 101: // VRX LOAD
+ op, m3, _ := vop(p.As)
+ if p.From3 != nil {
+ m3 = uint32(vregoff(ctxt, p.From3))
+ }
+ b2 := p.From.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ d2 := uint32(vregoff(ctxt, &p.From))
+ zVRX(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
+
+ case 102: // VRV SCATTER
+ op, m3, _ := vop(p.As)
+ if p.From3 != nil {
+ m3 = uint32(vregoff(ctxt, p.From3))
+ }
+ b2 := p.To.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ d2 := uint32(vregoff(ctxt, &p.To))
+ zVRV(op, uint32(p.From.Reg), uint32(p.To.Index), uint32(b2), d2, m3, asm)
+
+ case 103: // VRV GATHER
+ op, m3, _ := vop(p.As)
+ if p.From3 != nil {
+ m3 = uint32(vregoff(ctxt, p.From3))
+ }
+ b2 := p.From.Reg
+ if b2 == 0 {
+ b2 = o.param
+ }
+ d2 := uint32(vregoff(ctxt, &p.From))
+ zVRV(op, uint32(p.To.Reg), uint32(p.From.Index), uint32(b2), d2, m3, asm)
+
+ case 104: // VRS SHIFT/ROTATE and LOAD GR FROM VR ELEMENT
+ op, m4, _ := vop(p.As)
+ fr := p.Reg
+ if fr == 0 {
+ fr = p.To.Reg
+ }
+ bits := uint32(vregoff(ctxt, &p.From))
+ zVRS(op, uint32(p.To.Reg), uint32(fr), uint32(p.From.Reg), bits, m4, asm)
+
+ case 105: // VRS STORE MULTIPLE
+ op, _, _ := vop(p.As)
+ offset := uint32(vregoff(ctxt, &p.To))
+ reg := p.To.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ zVRS(op, uint32(p.From.Reg), uint32(p.Reg), uint32(reg), offset, 0, asm)
+
+ case 106: // VRS LOAD MULTIPLE
+ op, _, _ := vop(p.As)
+ offset := uint32(vregoff(ctxt, &p.From))
+ reg := p.From.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ zVRS(op, uint32(p.Reg), uint32(p.To.Reg), uint32(reg), offset, 0, asm)
+
+ case 107: // VRS STORE WITH LENGTH
+ op, _, _ := vop(p.As)
+ offset := uint32(vregoff(ctxt, &p.To))
+ reg := p.To.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ zVRS(op, uint32(p.From.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
+
+ case 108: // VRS LOAD WITH LENGTH
+ op, _, _ := vop(p.As)
+ offset := uint32(vregoff(ctxt, &p.From))
+ reg := p.From.Reg
+ if reg == 0 {
+ reg = o.param
+ }
+ zVRS(op, uint32(p.To.Reg), uint32(p.From3.Reg), uint32(reg), offset, 0, asm)
+
+ case 109: // VRI-a instructions
+ op, _, _ := vop(p.As)
+ i2 := uint32(vregoff(ctxt, &p.From))
+ switch p.As {
+ case AVZERO:
+ i2 = 0
+ case AVONE:
+ i2 = 0xffff
+ }
+ m3 := uint32(0)
+ if p.From3 != nil {
+ m3 = uint32(vregoff(ctxt, p.From3))
+ }
+ zVRIa(op, uint32(p.To.Reg), i2, m3, asm)
+
+ case 110:
+ op, m4, _ := vop(p.As)
+ i2 := uint32(vregoff(ctxt, p.From3))
+ i3 := uint32(vregoff(ctxt, &p.From))
+ zVRIb(op, uint32(p.To.Reg), i2, i3, m4, asm)
+
+ case 111:
+ op, m4, _ := vop(p.As)
+ i2 := uint32(vregoff(ctxt, &p.From))
+ zVRIc(op, uint32(p.To.Reg), uint32(p.Reg), i2, m4, asm)
+
+ case 112:
+ op, m5, _ := vop(p.As)
+ i4 := uint32(vregoff(ctxt, p.From3))
+ zVRId(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), i4, m5, asm)
+
+ case 113:
+ op, m4, _ := vop(p.As)
+ m5 := singleElementMask(p.As)
+ i3 := uint32(vregoff(ctxt, &p.From))
+ zVRIe(op, uint32(p.To.Reg), uint32(p.Reg), i3, m5, m4, asm)
+
+ case 114: // VRR-a
+ op, m3, m5 := vop(p.As)
+ m4 := singleElementMask(p.As)
+ zVRRa(op, uint32(p.To.Reg), uint32(p.From.Reg), m5, m4, m3, asm)
+
+ case 115: // VRR-a COMPARE
+ op, m3, m5 := vop(p.As)
+ m4 := singleElementMask(p.As)
+ zVRRa(op, uint32(p.From.Reg), uint32(p.To.Reg), m5, m4, m3, asm)
+
+ case 116: // VRR-a
+
+ case 117: // VRR-b
+ op, m4, m5 := vop(p.As)
+ zVRRb(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), m5, m4, asm)
+
+ case 118: // VRR-c
+ op, m4, m6 := vop(p.As)
+ m5 := singleElementMask(p.As)
+ v3 := p.Reg
+ if v3 == 0 {
+ v3 = p.To.Reg
+ }
+ zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(v3), m6, m5, m4, asm)
+
+ case 119: // VRR-c SHIFT/ROTATE/DIVIDE/SUB (rhs value on the left, like SLD, DIV etc.)
+ op, m4, m6 := vop(p.As)
+ m5 := singleElementMask(p.As)
+ v2 := p.Reg
+ if v2 == 0 {
+ v2 = p.To.Reg
+ }
+ zVRRc(op, uint32(p.To.Reg), uint32(v2), uint32(p.From.Reg), m6, m5, m4, asm)
+
+ case 120: // VRR-d
+ op, m6, _ := vop(p.As)
+ m5 := singleElementMask(p.As)
+ v1 := uint32(p.To.Reg)
+ v2 := uint32(p.From3.Reg)
+ v3 := uint32(p.From.Reg)
+ v4 := uint32(p.Reg)
+ zVRRd(op, v1, v2, v3, m6, m5, v4, asm)
+
+ case 121: // VRR-e
+ op, m6, _ := vop(p.As)
+ m5 := singleElementMask(p.As)
+ v1 := uint32(p.To.Reg)
+ v2 := uint32(p.From3.Reg)
+ v3 := uint32(p.From.Reg)
+ v4 := uint32(p.Reg)
+ zVRRe(op, v1, v2, v3, m5, m6, v4, asm)
+
+ case 122: // VRR-f LOAD VRS FROM GRS DISJOINT
+ op, _, _ := vop(p.As)
+ zVRRf(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), asm)
+
+ case 123: // VPDI $m4, V2, V3, V1
+ op, _, _ := vop(p.As)
+ m4 := regoff(ctxt, p.From3)
+ zVRRc(op, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), 0, 0, uint32(m4), asm)
+ }
+}
+
+func vregoff(ctxt *obj.Link, a *obj.Addr) int64 {
+ ctxt.Instoffset = 0
+ if a != nil {
+ aclass(ctxt, a)
+ }
+ return ctxt.Instoffset
+}
+
+func regoff(ctxt *obj.Link, a *obj.Addr) int32 {
+ return int32(vregoff(ctxt, a))
+}
+
+/*
+ * load o(a), d
+ */
+func zopload(ctxt *obj.Link, a int16) uint32 {
+ switch a {
+ /* fixed point load */
+ case AMOVD:
+ return op_LG
+ case AMOVW:
+ return op_LGF
+ case AMOVWZ:
+ return op_LLGF
+ case AMOVH:
+ return op_LGH
+ case AMOVHZ:
+ return op_LLGH
+ case AMOVB:
+ return op_LGB
+ case AMOVBZ:
+ return op_LLGC
+
+ /* floating point load */
+ case AFMOVD:
+ return op_LDY
+ case AFMOVS:
+ return op_LEY
+
+ /* byte reversed load*/
+ case AMOVDBR:
+ return op_LRVG
+ case AMOVWBR:
+ return op_LRV
+ case AMOVHBR:
+ return op_LRVH
+ }
+
+ ctxt.Diag("unknown load opcode %v", obj.Aconv(int(a)))
+ return 0
+}
+
+/*
+ * store s,o(d)
+ */
+func zopstore(ctxt *obj.Link, a int16) uint32 {
+ switch a {
+ /* fixed point store */
+ case AMOVD:
+ return op_STG
+ case AMOVW, AMOVWZ:
+ return op_STY
+ case AMOVH, AMOVHZ:
+ return op_STHY
+ case AMOVB, AMOVBZ:
+ return op_STCY
+
+ /* floating point store */
+ case AFMOVD:
+ return op_STDY
+ case AFMOVS:
+ return op_STEY
+
+ /* byte reversed store */
+ case AMOVDBR:
+ return op_STRVG
+ case AMOVWBR:
+ return op_STRV
+ case AMOVHBR:
+ return op_STRVH
+ }
+
+ ctxt.Diag("unknown store opcode %v", obj.Aconv(int(a)))
+ return 0
+}
+
+func zoprre(ctxt *obj.Link, a int16) uint32 {
+ switch a {
+ case ACMP:
+ return op_CGR
+ case ACMPU:
+ return op_CLGR
+ case AFCMPO: //ordered
+ return op_KDBR
+ case AFCMPU: //unordered
+ return op_CDBR
+ case ACEBR:
+ return op_CEBR
+ }
+ ctxt.Diag("unknown rre opcode %v", obj.Aconv(int(a)))
+ return 0
+}
+
+func zoprr(ctxt *obj.Link, a int16) uint32 {
+ switch a {
+ case ACMPW:
+ return op_CR
+ case ACMPWU:
+ return op_CLR
+ }
+ ctxt.Diag("unknown rr opcode %v", obj.Aconv(int(a)))
+ return 0
+}
+
+func zopril(ctxt *obj.Link, a int16) uint32 {
+ switch a {
+ case ACMP:
+ return op_CGFI
+ case ACMPU:
+ return op_CLGFI
+ case ACMPW:
+ return op_CFI
+ case ACMPWU:
+ return op_CLFI
+ }
+ ctxt.Diag("unknown ril opcode %v", obj.Aconv(int(a)))
+ return 0
+}
+
+// z instructions sizes.
+const (
+ sizeE = 2
+ sizeI = 2
+ sizeIE = 4
+ sizeMII = 6
+ sizeRI = 4
+ sizeRI1 = 4
+ sizeRI2 = 4
+ sizeRI3 = 4
+ sizeRIE = 6
+ sizeRIE1 = 6
+ sizeRIE2 = 6
+ sizeRIE3 = 6
+ sizeRIE4 = 6
+ sizeRIE5 = 6
+ sizeRIE6 = 6
+ sizeRIL = 6
+ sizeRIL1 = 6
+ sizeRIL2 = 6
+ sizeRIL3 = 6
+ sizeRIS = 6
+ sizeRR = 2
+ sizeRRD = 4
+ sizeRRE = 4
+ sizeRRF = 4
+ sizeRRF1 = 4
+ sizeRRF2 = 4
+ sizeRRF3 = 4
+ sizeRRF4 = 4
+ sizeRRF5 = 4
+ sizeRRR = 2
+ sizeRRS = 6
+ sizeRS = 4
+ sizeRS1 = 4
+ sizeRS2 = 4
+ sizeRSI = 4
+ sizeRSL = 6
+ sizeRSY = 6
+ sizeRSY1 = 6
+ sizeRSY2 = 6
+ sizeRX = 4
+ sizeRX1 = 4
+ sizeRX2 = 4
+ sizeRXE = 6
+ sizeRXF = 6
+ sizeRXY = 6
+ sizeRXY1 = 6
+ sizeRXY2 = 6
+ sizeS = 4
+ sizeSI = 4
+ sizeSIL = 6
+ sizeSIY = 6
+ sizeSMI = 6
+ sizeSS = 6
+ sizeSS1 = 6
+ sizeSS2 = 6
+ sizeSS3 = 6
+ sizeSS4 = 6
+ sizeSS5 = 6
+ sizeSS6 = 6
+ sizeSSE = 6
+ sizeSSF = 6
+)
+
+// instruction format variations.
+const (
+ a = iota
+ b
+ c
+ d
+ e
+ f
+ g
+)
+
+func zE(op uint32, asm *[]byte) {
+ *asm = append(*asm, uint8(op>>8), uint8(op))
+}
+
+func zI(op, i1 uint32, asm *[]byte) {
+ *asm = append(*asm, uint8(op>>8), uint8(i1))
+}
+
+func zMII(op, m1, ri2, ri3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(m1)<<4)|uint8((ri2>>8)&0x0F),
+ uint8(ri2),
+ uint8(ri3>>16),
+ uint8(ri3>>8),
+ uint8(ri3))
+}
+
+func zRI(op, r1_m1, i2_ri2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1_m1)<<4)|(uint8(op)&0x0F),
+ uint8(i2_ri2>>8),
+ uint8(i2_ri2))
+}
+
+// Expected argument values for the instruction formats.
+//
+// Format a1 a2 a3 a4 a5 a6 a7
+// ------------------------------------
+// a r1, 0, i2, 0, 0, m3, 0
+// b r1, r2, ri4, 0, 0, m3, 0
+// c r1, m3, ri4, 0, 0, 0, i2
+// d r1, r3, i2, 0, 0, 0, 0
+// e r1, r3, ri2, 0, 0, 0, 0
+// f r1, r2, 0, i3, i4, 0, i5
+// g r1, m3, i2, 0, 0, 0, 0
+func zRIE(type_, op, r1, r2_m3_r3, i2_ri4_ri2, i3, i4, m3, i2_i5 uint32, asm *[]byte) {
+ *asm = append(*asm, uint8(op>>8), uint8(r1)<<4|uint8(r2_m3_r3&0x0F))
+
+ switch type_ {
+ default:
+ *asm = append(*asm, uint8(i2_ri4_ri2>>8), uint8(i2_ri4_ri2))
+ case f:
+ *asm = append(*asm, uint8(i3), uint8(i4))
+ }
+
+ switch type_ {
+ case a, b:
+ *asm = append(*asm, uint8(m3)<<4)
+ default:
+ *asm = append(*asm, uint8(i2_i5))
+ }
+
+ *asm = append(*asm, uint8(op))
+}
+
+func zRIL(type_, op, r1_m1, i2_ri2 uint32, asm *[]byte) {
+ if type_ == a || type_ == b {
+ r1_m1 = r1_m1 - obj.RBaseS390X // this is a register base
+ }
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1_m1)<<4)|(uint8(op)&0x0F),
+ uint8(i2_ri2>>24),
+ uint8(i2_ri2>>16),
+ uint8(i2_ri2>>8),
+ uint8(i2_ri2))
+}
+
+func zRIS(op, r1, m3, b4, d4, i2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(m3&0x0F),
+ (uint8(b4)<<4)|(uint8(d4>>8)&0x0F),
+ uint8(d4),
+ uint8(i2),
+ uint8(op))
+}
+
+func zRR(op, r1, r2 uint32, asm *[]byte) {
+ *asm = append(*asm, uint8(op>>8), (uint8(r1)<<4)|uint8(r2&0x0F))
+}
+
+func zRRD(op, r1, r3, r2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ uint8(r1)<<4,
+ (uint8(r3)<<4)|uint8(r2&0x0F))
+}
+
+func zRRE(op, r1, r2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ 0,
+ (uint8(r1)<<4)|uint8(r2&0x0F))
+}
+
+func zRRF(op, r3_m3, m4, r1, r2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ (uint8(r3_m3)<<4)|uint8(m4&0x0F),
+ (uint8(r1)<<4)|uint8(r2&0x0F))
+}
+
+func zRRS(op, r1, r2, b4, d4, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(r2&0x0F),
+ (uint8(b4)<<4)|uint8((d4>>8)&0x0F),
+ uint8(d4),
+ uint8(m3)<<4,
+ uint8(op))
+}
+
+func zRS(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(r3_m3&0x0F),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2))
+}
+
+func zRSI(op, r1, r3, ri2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(r3&0x0F),
+ uint8(ri2>>8),
+ uint8(ri2))
+}
+
+func zRSL(type_, op, l1, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(l1),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2),
+ uint8(op))
+}
+
+// (20b) d2 with (12b) dl2 and (8b) dh2.
+func zRSY(op, r1, r3_m3, b2, d2 uint32, asm *[]byte) {
+ dl2 := uint16(d2) & 0x0FFF
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(r3_m3&0x0F),
+ (uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
+ uint8(dl2),
+ uint8(d2>>12),
+ uint8(op))
+}
+
+func zRX(op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1_m1)<<4)|uint8(x2&0x0F),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2))
+}
+
+func zRXE(op, r1, x2, b2, d2, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1)<<4)|uint8(x2&0x0F),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2),
+ uint8(m3)<<4,
+ uint8(op))
+}
+
+func zRXF(op, r3, x2, b2, d2, m1 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r3)<<4)|uint8(x2&0x0F),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2),
+ uint8(m1)<<4,
+ uint8(op))
+}
+
+func zRXY(type_, op, r1_m1, x2, b2, d2 uint32, asm *[]byte) {
+ dl2 := uint16(d2) & 0x0FFF
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r1_m1)<<4)|uint8(x2&0x0F),
+ (uint8(b2)<<4)|(uint8(dl2>>8)&0x0F),
+ uint8(dl2),
+ uint8(d2>>12),
+ uint8(op))
+}
+
+func zS(op, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2))
+}
+
+func zSI(op, i2, b1, d1 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(i2),
+ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
+ uint8(d1))
+}
+
+func zSIL(op, b1, d1, i2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
+ uint8(d1),
+ uint8(i2>>8),
+ uint8(i2))
+}
+
+func zSIY(op, i2, b1, d1 uint32, asm *[]byte) {
+ dl1 := uint16(d1) & 0x0FFF
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(i2),
+ (uint8(b1)<<4)|(uint8(dl1>>8)&0x0F),
+ uint8(dl1),
+ uint8(d1>>12),
+ uint8(op))
+}
+
+func zSMI(op, m1, b3, d3, ri2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(m1)<<4,
+ (uint8(b3)<<4)|uint8((d3>>8)&0x0F),
+ uint8(d3),
+ uint8(ri2>>8),
+ uint8(ri2))
+}
+
+// Expected argument values for the instruction formats.
+//
+// Format a1 a2 a3 a4 a5 a6
+// -------------------------------
+// a l1, 0, b1, d1, b2, d2
+// b l1, l2, b1, d1, b2, d2
+// c l1, i3, b1, d1, b2, d2
+// d r1, r3, b1, d1, b2, d2
+// e r1, r3, b2, d2, b4, d4
+// f 0, l2, b1, d1, b2, d2
+func zSS(type_, op, l1_r1, l2_i3_r3, b1_b2, d1_d2, b2_b4, d2_d4 uint32, asm *[]byte) {
+ *asm = append(*asm, uint8(op>>8))
+
+ switch type_ {
+ case a:
+ *asm = append(*asm, uint8(l1_r1))
+ case b, c, d, e:
+ *asm = append(*asm, (uint8(l1_r1)<<4)|uint8(l2_i3_r3&0x0F))
+ case f:
+ *asm = append(*asm, uint8(l2_i3_r3))
+ }
+
+ *asm = append(*asm,
+ (uint8(b1_b2)<<4)|uint8((d1_d2>>8)&0x0F),
+ uint8(d1_d2),
+ (uint8(b2_b4)<<4)|uint8((d2_d4>>8)&0x0F),
+ uint8(d2_d4))
+}
+
+func zSSE(op, b1, d1, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(op),
+ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
+ uint8(d1),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2))
+}
+
+func zSSF(op, r3, b1, d1, b2, d2 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(r3)<<4)|(uint8(op)&0x0F),
+ (uint8(b1)<<4)|uint8((d1>>8)&0x0F),
+ uint8(d1),
+ (uint8(b2)<<4)|uint8((d2>>8)&0x0F),
+ uint8(d2))
+}
+
+func rxb(va, vb, vc, vd uint32) uint8 {
+ mask := uint8(0)
+ if va >= REG_V16 && va <= REG_V31 {
+ mask |= 0x8
+ }
+ if vb >= REG_V16 && vb <= REG_V31 {
+ mask |= 0x4
+ }
+ if vc >= REG_V16 && vc <= REG_V31 {
+ mask |= 0x2
+ }
+ if vd >= REG_V16 && vd <= REG_V31 {
+ mask |= 0x1
+ }
+ return mask
+}
+
+func zVRX(op, v1, x2, b2, d2, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(x2)&0xf),
+ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
+ uint8(d2),
+ (uint8(m3)<<4)|rxb(v1, 0, 0, 0),
+ uint8(op))
+}
+
+func zVRV(op, v1, v2, b2, d2, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
+ uint8(d2),
+ (uint8(m3)<<4)|rxb(v1, v2, 0, 0),
+ uint8(op))
+}
+
+func zVRS(op, v1, v3_r3, b2, d2, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v3_r3)&0xf),
+ (uint8(b2)<<4)|(uint8(d2>>8)&0xf),
+ uint8(d2),
+ (uint8(m4)<<4)|rxb(v1, v3_r3, 0, 0),
+ uint8(op))
+}
+
+func zVRRa(op, v1, v2, m5, m4, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ 0,
+ (uint8(m5)<<4)|(uint8(m4)&0xf),
+ (uint8(m3)<<4)|rxb(v1, v2, 0, 0),
+ uint8(op))
+}
+
+func zVRRb(op, v1, v2, v3, m5, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ uint8(v3)<<4,
+ uint8(m5)<<4,
+ (uint8(m4)<<4)|rxb(v1, v2, v3, 0),
+ uint8(op))
+}
+
+func zVRRc(op, v1, v2, v3, m6, m5, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ uint8(v3)<<4,
+ (uint8(m6)<<4)|(uint8(m5)&0xf),
+ (uint8(m4)<<4)|rxb(v1, v2, v3, 0),
+ uint8(op))
+}
+
+func zVRRd(op, v1, v2, v3, m5, m6, v4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ (uint8(v3)<<4)|(uint8(m5)&0xf),
+ uint8(m6)<<4,
+ (uint8(v4)<<4)|rxb(v1, v2, v3, v4),
+ uint8(op))
+}
+
+func zVRRe(op, v1, v2, v3, m6, m5, v4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ (uint8(v3)<<4)|(uint8(m6)&0xf),
+ uint8(m5),
+ (uint8(v4)<<4)|rxb(v1, v2, v3, v4),
+ uint8(op))
+}
+
+func zVRRf(op, v1, r2, r3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(r2)&0xf),
+ uint8(r3)<<4,
+ 0,
+ rxb(v1, 0, 0, 0),
+ uint8(op))
+}
+
+func zVRIa(op, v1, i2, m3 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(v1)<<4,
+ uint8(i2>>8),
+ uint8(i2),
+ (uint8(m3)<<4)|rxb(v1, 0, 0, 0),
+ uint8(op))
+}
+
+func zVRIb(op, v1, i2, i3, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ uint8(v1)<<4,
+ uint8(i2),
+ uint8(i3),
+ (uint8(m4)<<4)|rxb(v1, 0, 0, 0),
+ uint8(op))
+}
+
+func zVRIc(op, v1, v3, i2, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v3)&0xf),
+ uint8(i2>>8),
+ uint8(i2),
+ (uint8(m4)<<4)|rxb(v1, v3, 0, 0),
+ uint8(op))
+}
+
+func zVRId(op, v1, v2, v3, i4, m5 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ uint8(v3)<<4,
+ uint8(i4),
+ (uint8(m5)<<4)|rxb(v1, v2, v3, 0),
+ uint8(op))
+}
+
+func zVRIe(op, v1, v2, i3, m5, m4 uint32, asm *[]byte) {
+ *asm = append(*asm,
+ uint8(op>>8),
+ (uint8(v1)<<4)|(uint8(v2)&0xf),
+ uint8(i3>>4),
+ (uint8(i3)<<4)|(uint8(m5)&0xf),
+ (uint8(m4)<<4)|rxb(v1, v2, 0, 0),
+ uint8(op))
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/listz.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/listz.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/listz.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/listz.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,73 @@
+// Based on cmd/internal/obj/ppc64/list9.go.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+// init registers the s390x register and opcode name spaces with the
+// generic obj package so that registers and opcodes print symbolically.
+func init() {
+ obj.RegisterRegister(obj.RBaseS390X, REG_R0+1024, Rconv)
+ obj.RegisterOpcode(obj.ABaseS390X, Anames)
+}
+
+// Rconv formats an s390x register number as its assembly name:
+// R0-R15 (general), F0-F15 (floating point), AR0-AR15 (access),
+// V0-V31 (vector), "g" for the goroutine register, "NONE" for 0,
+// and a diagnostic "Rgok(n)" form for anything out of range.
+func Rconv(r int) string {
+ if r == 0 {
+ return "NONE"
+ }
+ if r == REGG {
+ // Special case.
+ return "g"
+ }
+ if REG_R0 <= r && r <= REG_R15 {
+ return fmt.Sprintf("R%d", r-REG_R0)
+ }
+ if REG_F0 <= r && r <= REG_F15 {
+ return fmt.Sprintf("F%d", r-REG_F0)
+ }
+ if REG_AR0 <= r && r <= REG_AR15 {
+ return fmt.Sprintf("AR%d", r-REG_AR0)
+ }
+ if REG_V0 <= r && r <= REG_V31 {
+ return fmt.Sprintf("V%d", r-REG_V0)
+ }
+ return fmt.Sprintf("Rgok(%d)", r-obj.RBaseS390X)
+}
+
+func DRconv(a int) string {
+ s := "C_??"
+ if a >= C_NONE && a <= C_NCLASS {
+ s = cnamesz[a]
+ }
+ var fp string
+ fp += s
+ return fp
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/objz.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/objz.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/objz.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/objz.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1024 @@
+// Based on cmd/internal/obj/ppc64/obj9.go.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/internal/obj"
+ "encoding/binary"
+ "fmt"
+ "math"
+)
+
+// progedit canonicalizes a single Prog as it is read in: branch targets
+// with symbols become TYPE_BRANCH, float and awkward 64-bit integer
+// constants are moved into deduplicated memory symbols, SUB-of-constant
+// becomes ADD-of-negated-constant, and (under -dynlink) global data
+// references are rewritten to go through the GOT.
+func progedit(ctxt *obj.Link, p *obj.Prog) {
+ p.From.Class = 0
+ p.To.Class = 0
+
+ // Rewrite BR/BL to symbol as TYPE_BRANCH.
+ switch p.As {
+ case ABR,
+ ABL,
+ obj.ARET,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ if p.To.Sym != nil {
+ p.To.Type = obj.TYPE_BRANCH
+ }
+ }
+
+ // Rewrite float constants to values stored in memory unless they are +0.
+ switch p.As {
+ case AFMOVS:
+ if p.From.Type == obj.TYPE_FCONST {
+ f32 := float32(p.From.Val.(float64))
+ i32 := math.Float32bits(f32)
+ if i32 == 0 { // +0
+ break
+ }
+ // The literal name encodes the bit pattern, so equal
+ // constants share one symbol.
+ literal := fmt.Sprintf("$f32.%08x", i32)
+ s := obj.Linklookup(ctxt, literal, 0)
+ s.Size = 4
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = s
+ p.From.Sym.Local = true
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+
+ case AFMOVD:
+ if p.From.Type == obj.TYPE_FCONST {
+ i64 := math.Float64bits(p.From.Val.(float64))
+ if i64 == 0 { // +0
+ break
+ }
+ literal := fmt.Sprintf("$f64.%016x", i64)
+ s := obj.Linklookup(ctxt, literal, 0)
+ s.Size = 8
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = s
+ p.From.Sym.Local = true
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+
+ // put constants not loadable by LOAD IMMEDIATE into memory
+ case AMOVD:
+ if p.From.Type == obj.TYPE_CONST {
+ val := p.From.Offset
+ // Keep the constant inline if it fits in a sign-extended
+ // or zero-extended 32-bit value, or occupies only the
+ // high 32 bits; otherwise spill it to a memory symbol.
+ if int64(int32(val)) != val &&
+ int64(uint32(val)) != val &&
+ int64(uint64(val)&(0xffffffff<<32)) != val {
+ literal := fmt.Sprintf("$i64.%016x", uint64(p.From.Offset))
+ s := obj.Linklookup(ctxt, literal, 0)
+ s.Size = 8
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = s
+ p.From.Sym.Local = true
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+ }
+ }
+
+ // Rewrite SUB constants into ADD.
+ switch p.As {
+ case ASUBC:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDC
+ }
+
+ case ASUB:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADD
+ }
+ }
+
+ if ctxt.Flag_dynlink {
+ rewriteToUseGot(ctxt, p)
+ }
+}
+
+// Rewrite p, if necessary, to access global data via the global offset table.
+func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog) {
+ // At the moment EXRL instructions are not emitted by the compiler and only reference local symbols in
+ // assembly code.
+ if p.As == AEXRL {
+ return
+ }
+
+ // We only care about global data: NAME_EXTERN means a global
+ // symbol in the Go sense, and p.Sym.Local is true for a few
+ // internally defined symbols.
+ if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
+ // MOVD $sym, Rx becomes MOVD sym@GOT, Rx
+ // MOVD $sym+, Rx becomes MOVD sym@GOT, Rx; ADD , Rx
+ if p.To.Type != obj.TYPE_REG || p.As != AMOVD {
+ ctxt.Diag("do not know how to handle LEA-type insn to non-register in %v with -dynlink", p)
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_GOTREF
+ q := p
+ if p.From.Offset != 0 {
+ q = obj.Appendp(ctxt, p)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = p.From.Offset
+ q.To = p.To
+ p.From.Offset = 0
+ }
+ }
+ if p.From3 != nil && p.From3.Name == obj.NAME_EXTERN {
+ ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ var source *obj.Addr
+ // MOVD sym, Ry becomes MOVD sym@GOT, REGTMP; MOVD (REGTMP), Ry
+ // MOVD Ry, sym becomes MOVD sym@GOT, REGTMP; MOVD Ry, (REGTMP)
+ // An addition may be inserted between the two MOVs if there is an offset.
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local {
+ if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
+ ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
+ }
+ source = &p.From
+ } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local {
+ source = &p.To
+ } else {
+ return
+ }
+ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+ return
+ }
+ if source.Sym.Type == obj.STLSBSS {
+ return
+ }
+ if source.Type != obj.TYPE_MEM {
+ ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ p1 := obj.Appendp(ctxt, p)
+ p2 := obj.Appendp(ctxt, p1)
+
+ p1.As = AMOVD
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Sym = source.Sym
+ p1.From.Name = obj.NAME_GOTREF
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = REGTMP
+
+ p2.As = p.As
+ p2.From = p.From
+ p2.To = p.To
+ if p.From.Name == obj.NAME_EXTERN {
+ p2.From.Reg = REGTMP
+ p2.From.Name = obj.NAME_NONE
+ p2.From.Sym = nil
+ } else if p.To.Name == obj.NAME_EXTERN {
+ p2.To.Reg = REGTMP
+ p2.To.Name = obj.NAME_NONE
+ p2.To.Sym = nil
+ } else {
+ return
+ }
+ obj.Nopout(p)
+}
+
+// preprocess rewrites cursym's instruction list into its final shape.
+// A first pass classifies instructions (setting LABEL/LEAF/BRANCH/SYNC
+// marks and stripping NOPs); a second pass materializes the function
+// prologue on ATEXT (frame allocation, LR save, stack-split check,
+// panic.argp fixup for wrapper functions) and expands RET into the
+// matching epilogue. The morestack call sequence for the stack-split
+// check is appended once, at the end of the function.
+func preprocess(ctxt *obj.Link, cursym *obj.LSym) {
+ // TODO(minux): add morestack short-cuts with small fixed frame-size.
+ ctxt.Cursym = cursym
+
+ if cursym.Text == nil || cursym.Text.Link == nil {
+ return
+ }
+
+ p := cursym.Text
+ textstksiz := p.To.Offset
+ if textstksiz == -8 {
+ // Compatibility hack.
+ p.From3.Offset |= obj.NOFRAME
+ textstksiz = 0
+ }
+ if textstksiz%8 != 0 {
+ ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
+ }
+ if p.From3.Offset&obj.NOFRAME != 0 {
+ if textstksiz != 0 {
+ ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
+ }
+ }
+
+ cursym.Args = p.To.Val.(int32)
+ cursym.Locals = int32(textstksiz)
+
+ /*
+ * find leaf subroutines
+ * strip NOPs
+ * expand RET
+ * expand BECOME pseudo
+ */
+ if ctxt.Debugvlog != 0 {
+ fmt.Fprintf(ctxt.Bso, "%5.2f noops\n", obj.Cputime())
+ }
+ ctxt.Bso.Flush()
+
+ // First pass: mark instructions. q always trails as the most recent
+ // non-NOP instruction so ANOP links can be spliced out.
+ var q *obj.Prog
+ var q1 *obj.Prog
+ for p := cursym.Text; p != nil; p = p.Link {
+ switch p.As {
+ /* too hard, just leave alone */
+ case obj.ATEXT:
+ q = p
+
+ p.Mark |= LABEL | LEAF | SYNC
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+
+ case ANOR:
+ q = p
+ if p.To.Type == obj.TYPE_REG {
+ if p.To.Reg == REGZERO {
+ p.Mark |= LABEL | SYNC
+ }
+ }
+
+ case ASYNC,
+ AWORD:
+ q = p
+ p.Mark |= LABEL | SYNC
+ continue
+
+ case AMOVW, AMOVWZ, AMOVD:
+ q = p
+ if p.From.Reg >= REG_RESERVED || p.To.Reg >= REG_RESERVED {
+ p.Mark |= LABEL | SYNC
+ }
+ continue
+
+ case AFABS,
+ AFADD,
+ AFDIV,
+ AFMADD,
+ AFMOVD,
+ AFMOVS,
+ AFMSUB,
+ AFMUL,
+ AFNABS,
+ AFNEG,
+ AFNMADD,
+ AFNMSUB,
+ ALEDBR,
+ ALDEBR,
+ AFSUB:
+ q = p
+
+ p.Mark |= FLOAT
+ continue
+
+ case ABL,
+ ABCL,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ // Any call makes this function a non-leaf.
+ cursym.Text.Mark &^= LEAF
+ fallthrough
+
+ case ABC,
+ ABEQ,
+ ABGE,
+ ABGT,
+ ABLE,
+ ABLT,
+ ABNE,
+ ABR,
+ ABVC,
+ ABVS,
+ ACMPBEQ,
+ ACMPBGE,
+ ACMPBGT,
+ ACMPBLE,
+ ACMPBLT,
+ ACMPBNE,
+ ACMPUBEQ,
+ ACMPUBGE,
+ ACMPUBGT,
+ ACMPUBLE,
+ ACMPUBLT,
+ ACMPUBNE:
+ p.Mark |= BRANCH
+ q = p
+ q1 = p.Pcond
+ if q1 != nil {
+ // Retarget the branch past any NOPs at its target.
+ for q1.As == obj.ANOP {
+ q1 = q1.Link
+ p.Pcond = q1
+ }
+
+ if q1.Mark&LEAF == 0 {
+ q1.Mark |= LABEL
+ }
+ } else {
+ p.Mark |= LABEL
+ }
+ q1 = p.Link
+ if q1 != nil {
+ q1.Mark |= LABEL
+ }
+ continue
+
+ case AFCMPO, AFCMPU:
+ q = p
+ p.Mark |= FCMP | FLOAT
+ continue
+
+ case obj.ARET:
+ q = p
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+ continue
+
+ case obj.ANOP:
+ q1 = p.Link
+ q.Link = q1 /* q is non-nop */
+ q1.Mark |= p.Mark
+ continue
+
+ default:
+ q = p
+ continue
+ }
+ }
+
+ // Second pass: expand the ATEXT prologue and each RET epilogue.
+ autosize := int32(0)
+ var o int
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ var pLast *obj.Prog
+ var pPre *obj.Prog
+ var pPreempt *obj.Prog
+ wasSplit := false
+ for p := cursym.Text; p != nil; p = p.Link {
+ pLast = p
+ o = int(p.As)
+ switch o {
+ case obj.ATEXT:
+ autosize = int32(textstksiz)
+
+ if p.Mark&LEAF != 0 && autosize == 0 && p.From3.Offset&obj.NOFRAME == 0 {
+ // A leaf function with no locals has no frame.
+ p.From3.Offset |= obj.NOFRAME
+ }
+
+ if p.From3.Offset&obj.NOFRAME == 0 {
+ // If there is a stack frame at all, it includes
+ // space to save the LR.
+ autosize += int32(ctxt.FixedFrameSize())
+ }
+
+ p.To.Offset = int64(autosize)
+
+ q = p
+
+ if p.From3.Offset&obj.NOSPLIT == 0 {
+ p, pPreempt = stacksplitPre(ctxt, p, autosize) // emit pre part of split check
+ pPre = p
+ wasSplit = true //need post part of split
+ }
+
+ if autosize != 0 {
+ q = obj.Appendp(ctxt, p)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_ADDR
+ q.From.Offset = int64(-autosize)
+ q.From.Reg = REGSP // not actually needed - REGSP is assumed if no reg is provided
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = autosize
+ } else if cursym.Text.Mark&LEAF == 0 {
+ // A very few functions that do not return to their caller
+ // (e.g. gogo) are not identified as leaves but still have
+ // no frame.
+ cursym.Text.Mark |= LEAF
+ }
+
+ if cursym.Text.Mark&LEAF != 0 {
+ cursym.Leaf = 1
+ break
+ }
+
+ // Non-leaf: save the link register at 0(SP).
+ q = obj.Appendp(ctxt, q)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_LR
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REGSP
+ q.To.Offset = 0
+
+ if cursym.Text.From3.Offset&obj.WRAPPER != 0 {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+ // MOVD g_panic(g), R3
+ // CMP R0, R3
+ // BEQ end
+ // MOVD panic_argp(R3), R4
+ // ADD $(autosize+8), R1, R5
+ // CMP R4, R5
+ // BNE end
+ // ADD $8, R1, R6
+ // MOVD R6, panic_argp(R3)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not a s390x NOP: it encodes to 0 instruction bytes.
+
+ q = obj.Appendp(ctxt, q)
+
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REGG
+ q.From.Offset = 4 * int64(ctxt.Arch.Ptrsize) // G.panic
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R3
+
+ q = obj.Appendp(ctxt, q)
+ q.As = ACMP
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R0
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R3
+
+ q = obj.Appendp(ctxt, q)
+ q.As = ABEQ
+ q.To.Type = obj.TYPE_BRANCH
+ p1 = q
+
+ q = obj.Appendp(ctxt, q)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REG_R3
+ q.From.Offset = 0 // Panic.argp
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R4
+
+ q = obj.Appendp(ctxt, q)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize) + ctxt.FixedFrameSize()
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R5
+
+ q = obj.Appendp(ctxt, q)
+ q.As = ACMP
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R4
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R5
+
+ q = obj.Appendp(ctxt, q)
+ q.As = ABNE
+ q.To.Type = obj.TYPE_BRANCH
+ p2 = q
+
+ q = obj.Appendp(ctxt, q)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = ctxt.FixedFrameSize()
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R6
+
+ q = obj.Appendp(ctxt, q)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R6
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REG_R3
+ q.To.Offset = 0 // Panic.argp
+
+ q = obj.Appendp(ctxt, q)
+
+ q.As = obj.ANOP
+ p1.Pcond = q
+ p2.Pcond = q
+ }
+
+ case obj.ARET:
+ if p.From.Type == obj.TYPE_CONST {
+ ctxt.Diag("using BECOME (%v) is not supported!", p)
+ break
+ }
+
+ retTarget := p.To.Sym
+
+ if cursym.Text.Mark&LEAF != 0 {
+ // Leaf epilogue: no saved LR to restore.
+ if autosize == 0 {
+ p.As = ABR
+ p.From = obj.Addr{}
+ if retTarget == nil {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+ } else {
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Sym = retTarget
+ }
+ p.Mark |= BRANCH
+ break
+ }
+
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autosize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = -autosize
+
+ q = obj.Appendp(ctxt, p)
+ q.As = ABR
+ q.From = obj.Addr{}
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+ q.Mark |= BRANCH
+ q.Spadj = autosize
+ break
+ }
+
+ // Non-leaf epilogue: reload LR, pop the frame, branch out.
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGSP
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+
+ q = p
+
+ if autosize != 0 {
+ q = obj.Appendp(ctxt, q)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = -autosize
+ }
+
+ q = obj.Appendp(ctxt, q)
+ q.As = ABR
+ q.From = obj.Addr{}
+ if retTarget == nil {
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+ } else {
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Sym = retTarget
+ }
+ q.Mark |= BRANCH
+ q.Spadj = autosize
+
+ case AADD:
+ // Track explicit SP adjustments for stack maps.
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
+ p.Spadj = int32(-p.From.Offset)
+ }
+ }
+ }
+ if wasSplit {
+ pLast = stacksplitPost(ctxt, pLast, pPre, pPreempt) // emit post part of split check
+ }
+}
+
+/*
+// instruction scheduling
+ if(debug['Q'] == 0)
+ return;
+
+ curtext = nil;
+ q = nil; // p - 1
+ q1 = firstp; // top of block
+ o = 0; // count of instructions
+ for(p = firstp; p != nil; p = p1) {
+ p1 = p->link;
+ o++;
+ if(p->mark & NOSCHED){
+ if(q1 != p){
+ sched(q1, q);
+ }
+ for(; p != nil; p = p->link){
+ if(!(p->mark & NOSCHED))
+ break;
+ q = p;
+ }
+ p1 = p;
+ q1 = p;
+ o = 0;
+ continue;
+ }
+ if(p->mark & (LABEL|SYNC)) {
+ if(q1 != p)
+ sched(q1, q);
+ q1 = p;
+ o = 1;
+ }
+ if(p->mark & (BRANCH|SYNC)) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ if(o >= NSCHED) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ q = p;
+ }
+*/
+// stacksplitPre emits the comparison half of the stack-split check after
+// p: load the stack guard from g, then compare it against SP (adjusted
+// for the frame size). It returns the last emitted Prog — a conditional
+// branch whose target is filled in later by stacksplitPost — and, for
+// very large frames only, the extra BEQ emitted for the StackPreempt
+// special case (nil otherwise).
+func stacksplitPre(ctxt *obj.Link, p *obj.Prog, framesize int32) (*obj.Prog, *obj.Prog) {
+ var q *obj.Prog
+
+ // MOVD g_stackguard(g), R3
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGG
+ p.From.Offset = 2 * int64(ctxt.Arch.Ptrsize) // G.stackguard0
+ if ctxt.Cursym.Cfunc != 0 {
+ p.From.Offset = 3 * int64(ctxt.Arch.Ptrsize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R3
+
+ q = nil
+ if framesize <= obj.StackSmall {
+ // small stack: SP < stackguard
+ // CMP stackguard, SP
+
+ //p.To.Type = obj.TYPE_REG
+ //p.To.Reg = REGSP
+
+ // q1: BLT done
+
+ p = obj.Appendp(ctxt, p)
+ //q1 = p
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R3
+ p.Reg = REGSP
+ p.As = ACMPUBGE
+ p.To.Type = obj.TYPE_BRANCH
+ //p = obj.Appendp(ctxt, p)
+
+ //p.As = ACMPU
+ //p.From.Type = obj.TYPE_REG
+ //p.From.Reg = REG_R3
+ //p.To.Type = obj.TYPE_REG
+ //p.To.Reg = REGSP
+
+ //p = obj.Appendp(ctxt, p)
+ //p.As = ABGE
+ //p.To.Type = obj.TYPE_BRANCH
+
+ } else if framesize <= obj.StackBig {
+ // large stack: SP-framesize < stackguard-StackSmall
+ // ADD $-framesize, SP, R4
+ // CMP stackguard, R4
+ p = obj.Appendp(ctxt, p)
+
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(-framesize)
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R4
+
+ p = obj.Appendp(ctxt, p)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R3
+ p.Reg = REG_R4
+ p.As = ACMPUBGE
+ p.To.Type = obj.TYPE_BRANCH
+
+ } else {
+ // Such a large stack we need to protect against wraparound.
+ // If SP is close to zero:
+ // SP-stackguard+StackGuard <= framesize + (StackGuard-StackSmall)
+ // The +StackGuard on both sides is required to keep the left side positive:
+ // SP is allowed to be slightly below stackguard. See stack.h.
+ //
+ // Preemption sets stackguard to StackPreempt, a very large value.
+ // That breaks the math above, so we have to check for that explicitly.
+ // // stackguard is R3
+ // CMP R3, $StackPreempt
+ // BEQ label-of-call-to-morestack
+ // ADD $StackGuard, SP, R4
+ // SUB R3, R4
+ // MOVD $(framesize+(StackGuard-StackSmall)), TEMP
+ // CMPUBGE TEMP, R4
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ACMP
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R3
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = obj.StackPreempt
+
+ p = obj.Appendp(ctxt, p)
+ // This BEQ is returned as pPreempt; its target is also patched
+ // by stacksplitPost.
+ q = p
+ p.As = ABEQ
+ p.To.Type = obj.TYPE_BRANCH
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = obj.StackGuard
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R4
+
+ p = obj.Appendp(ctxt, p)
+ p.As = ASUB
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R3
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R4
+
+ p = obj.Appendp(ctxt, p)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(framesize) + obj.StackGuard - obj.StackSmall
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGTMP
+
+ p = obj.Appendp(ctxt, p)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGTMP
+ p.Reg = REG_R4
+ p.As = ACMPUBGE
+ p.To.Type = obj.TYPE_BRANCH
+ }
+
+ return p, q
+}
+
+// stacksplitPost emits the morestack call sequence after p (the end of
+// the function) and patches the branch(es) produced by stacksplitPre to
+// target it: save LR in R5, BL the appropriate runtime.morestack
+// variant, then BR back to the first instruction of the function body.
+func stacksplitPost(ctxt *obj.Link, p *obj.Prog, pPre *obj.Prog, pPreempt *obj.Prog) *obj.Prog {
+
+ // MOVD LR, R5
+ p = obj.Appendp(ctxt, p)
+ pPre.Pcond = p
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R5
+ if pPreempt != nil {
+ pPreempt.Pcond = p
+ }
+
+ // BL runtime.morestack(SB)
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ABL
+ p.To.Type = obj.TYPE_BRANCH
+ if ctxt.Cursym.Cfunc != 0 {
+ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestackc", 0)
+ } else if ctxt.Cursym.Text.From3.Offset&obj.NEEDCTXT == 0 {
+ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack_noctxt", 0)
+ } else {
+ p.To.Sym = obj.Linklookup(ctxt, "runtime.morestack", 0)
+ }
+
+ // BR start
+ p = obj.Appendp(ctxt, p)
+
+ p.As = ABR
+ p.To.Type = obj.TYPE_BRANCH
+ p.Pcond = ctxt.Cursym.Text.Link
+ return p
+}
+
+// pc_cnt numbers instructions as xfol lays them out; the values are
+// provisional Pcs used only during branch following.
+var pc_cnt int64
+
+// follow rebuilds s.Text in fallthrough ("follow") order using xfol,
+// collecting the reordered list behind a dummy head Prog.
+func follow(ctxt *obj.Link, s *obj.LSym) {
+ ctxt.Cursym = s
+
+ pc_cnt = 0
+ firstp := ctxt.NewProg()
+ lastp := firstp
+ xfol(ctxt, s.Text, &lastp)
+ lastp.Link = nil
+ s.Text = firstp.Link
+}
+
+// relinv returns the conditional branch opcode that tests the opposite
+// condition of a, or 0 when a has no defined inverse.
+func relinv(a int) int {
+ for _, pair := range [...][2]int{
+ {ABEQ, ABNE},
+ {ABGE, ABLT},
+ {ABGT, ABLE},
+ {ABVC, ABVS},
+ } {
+ switch a {
+ case pair[0]:
+ return pair[1]
+ case pair[1]:
+ return pair[0]
+ }
+ }
+ return 0
+}
+
+// xfol appends the instruction stream starting at p to *last in
+// execution ("follow") order. Unconditional BRs to not-yet-placed code
+// are short-circuited by following the target directly; a short
+// (at most 4 instruction) already-placed target ending in BR/RET may be
+// duplicated in place — inverting a conditional branch via relinv —
+// to avoid emitting a jump. Placed instructions are marked FOLL and
+// given provisional Pcs from pc_cnt.
+func xfol(ctxt *obj.Link, p *obj.Prog, last **obj.Prog) {
+ var q *obj.Prog
+ var r *obj.Prog
+ var a int
+ var b int
+ var i int
+
+loop:
+ if p == nil {
+ return
+ }
+ a = int(p.As)
+ if a == ABR {
+ q = p.Pcond
+ // NOSCHED regions must keep their original order: place the BR
+ // as-is and recurse for the fallthrough.
+ if (p.Mark&NOSCHED != 0) || q != nil && (q.Mark&NOSCHED != 0) {
+ p.Mark |= FOLL
+ (*last).Link = p
+ *last = p
+ (*last).Pc = pc_cnt
+ pc_cnt += 1
+ p = p.Link
+ xfol(ctxt, p, last)
+ p = q
+ if p != nil && p.Mark&FOLL == 0 {
+ goto loop
+ }
+ return
+ }
+
+ if q != nil {
+ // Elide the BR: continue layout at its target.
+ p.Mark |= FOLL
+ p = q
+ if p.Mark&FOLL == 0 {
+ goto loop
+ }
+ }
+ }
+
+ if p.Mark&FOLL != 0 {
+ // p was already placed. Look ahead up to 4 instructions for a
+ // terminating BR/RET (or an invertible branch) worth duplicating.
+ i = 0
+ q = p
+ for ; i < 4; i, q = i+1, q.Link {
+ if q == *last || (q.Mark&NOSCHED != 0) {
+ break
+ }
+ b = 0 /* set */
+ a = int(q.As)
+ if a == obj.ANOP {
+ i--
+ continue
+ }
+
+ if a == ABR || a == obj.ARET {
+ goto copy
+ }
+ if q.Pcond == nil || (q.Pcond.Mark&FOLL != 0) {
+ continue
+ }
+ b = relinv(a)
+ if b == 0 {
+ continue
+ }
+
+ copy:
+ // Duplicate the run p..q; the final instruction either ends
+ // the stream (BR/RET) or becomes the inverted branch b.
+ for {
+ r = ctxt.NewProg()
+ *r = *p
+ if r.Mark&FOLL == 0 {
+ fmt.Printf("cant happen 1\n")
+ }
+ r.Mark |= FOLL
+ if p != q {
+ p = p.Link
+ (*last).Link = r
+ *last = r
+ (*last).Pc = pc_cnt
+ pc_cnt += 1
+ continue
+ }
+
+ (*last).Link = r
+ *last = r
+ (*last).Pc = pc_cnt
+ pc_cnt += 1
+ if a == ABR || a == obj.ARET {
+ return
+ }
+ r.As = int16(b)
+ r.Pcond = p.Link
+ r.Link = p.Pcond
+ if r.Link.Mark&FOLL == 0 {
+ xfol(ctxt, r.Link, last)
+ }
+ if r.Pcond.Mark&FOLL == 0 {
+ fmt.Printf("cant happen 2\n")
+ }
+ return
+ }
+ }
+
+ // Nothing worth duplicating: synthesize a BR back to the
+ // already-placed p.
+ a = ABR
+ q = ctxt.NewProg()
+ q.As = int16(a)
+ q.Lineno = p.Lineno
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Offset = p.Pc
+ q.Pcond = p
+ p = q
+ }
+
+ p.Mark |= FOLL
+ (*last).Link = p
+ *last = p
+ (*last).Pc = pc_cnt
+ pc_cnt += 1
+
+ if a == ABR || a == obj.ARET {
+ if p.Mark&NOSCHED != 0 {
+ p = p.Link
+ goto loop
+ }
+
+ return
+ }
+
+ if p.Pcond != nil {
+ // Conditional branch: lay out the fallthrough first, then
+ // continue at the branch target.
+ if a != ABL && p.Link != nil {
+ xfol(ctxt, p.Link, last)
+ p = p.Pcond
+ if p == nil || (p.Mark&FOLL != 0) {
+ return
+ }
+ goto loop
+ }
+ }
+
+ p = p.Link
+ goto loop
+}
+
+// unaryDst lists single-operand opcodes whose operand (held in p.To) is
+// written rather than read; it is exported to the generic obj package
+// via LinkArch.UnaryDst below.
+var unaryDst = map[int]bool{
+ ASTCK: true,
+ ASTCKC: true,
+ ASTCKE: true,
+ ASTCKF: true,
+ ANEG: true,
+ AVONE: true,
+ AVZERO: true,
+}
+
+// Links390x describes the s390x architecture to the generic obj
+// package: big-endian byte order, 8-byte pointers and registers, 2-byte
+// minimum instruction alignment, and the entry points for assembly
+// (spanz), prologue expansion (preprocess) and branch layout (follow).
+var Links390x = obj.LinkArch{
+ ByteOrder: binary.BigEndian,
+ Name: "s390x",
+ Thechar: 'z',
+ Preprocess: preprocess,
+ Assemble: spanz,
+ Follow: follow,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ Minlc: 2,
+ Ptrsize: 8,
+ Regsize: 8,
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/s390x/vector.go 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/vector.go
--- 1.6.3-1/src/cmd/internal/obj/s390x/vector.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/s390x/vector.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1057 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+// This file contains utility functions for use when
+// assembling vector instructions.
+
+// vop returns the opcode, element size and condition
+// setting for the given (possibly extended) mnemonic.
+func vop(as int16) (opcode, es, cs uint32) {
+ switch as {
+ default:
+ return 0, 0, 0
+ case AVA:
+ return op_VA, 0, 0
+ case AVAB:
+ return op_VA, 0, 0
+ case AVAH:
+ return op_VA, 1, 0
+ case AVAF:
+ return op_VA, 2, 0
+ case AVAG:
+ return op_VA, 3, 0
+ case AVAQ:
+ return op_VA, 4, 0
+ case AVACC:
+ return op_VACC, 0, 0
+ case AVACCB:
+ return op_VACC, 0, 0
+ case AVACCH:
+ return op_VACC, 1, 0
+ case AVACCF:
+ return op_VACC, 2, 0
+ case AVACCG:
+ return op_VACC, 3, 0
+ case AVACCQ:
+ return op_VACC, 4, 0
+ case AVAC:
+ return op_VAC, 0, 0
+ case AVACQ:
+ return op_VAC, 4, 0
+ case AVACCC:
+ return op_VACCC, 0, 0
+ case AVACCCQ:
+ return op_VACCC, 4, 0
+ case AVN:
+ return op_VN, 0, 0
+ case AVNC:
+ return op_VNC, 0, 0
+ case AVAVG:
+ return op_VAVG, 0, 0
+ case AVAVGB:
+ return op_VAVG, 0, 0
+ case AVAVGH:
+ return op_VAVG, 1, 0
+ case AVAVGF:
+ return op_VAVG, 2, 0
+ case AVAVGG:
+ return op_VAVG, 3, 0
+ case AVAVGL:
+ return op_VAVGL, 0, 0
+ case AVAVGLB:
+ return op_VAVGL, 0, 0
+ case AVAVGLH:
+ return op_VAVGL, 1, 0
+ case AVAVGLF:
+ return op_VAVGL, 2, 0
+ case AVAVGLG:
+ return op_VAVGL, 3, 0
+ case AVCKSM:
+ return op_VCKSM, 0, 0
+ case AVCEQ:
+ return op_VCEQ, 0, 0
+ case AVCEQB:
+ return op_VCEQ, 0, 0
+ case AVCEQH:
+ return op_VCEQ, 1, 0
+ case AVCEQF:
+ return op_VCEQ, 2, 0
+ case AVCEQG:
+ return op_VCEQ, 3, 0
+ case AVCEQBS:
+ return op_VCEQ, 0, 1
+ case AVCEQHS:
+ return op_VCEQ, 1, 1
+ case AVCEQFS:
+ return op_VCEQ, 2, 1
+ case AVCEQGS:
+ return op_VCEQ, 3, 1
+ case AVCH:
+ return op_VCH, 0, 0
+ case AVCHB:
+ return op_VCH, 0, 0
+ case AVCHH:
+ return op_VCH, 1, 0
+ case AVCHF:
+ return op_VCH, 2, 0
+ case AVCHG:
+ return op_VCH, 3, 0
+ case AVCHBS:
+ return op_VCH, 0, 1
+ case AVCHHS:
+ return op_VCH, 1, 1
+ case AVCHFS:
+ return op_VCH, 2, 1
+ case AVCHGS:
+ return op_VCH, 3, 1
+ case AVCHL:
+ return op_VCHL, 0, 0
+ case AVCHLB:
+ return op_VCHL, 0, 0
+ case AVCHLH:
+ return op_VCHL, 1, 0
+ case AVCHLF:
+ return op_VCHL, 2, 0
+ case AVCHLG:
+ return op_VCHL, 3, 0
+ case AVCHLBS:
+ return op_VCHL, 0, 1
+ case AVCHLHS:
+ return op_VCHL, 1, 1
+ case AVCHLFS:
+ return op_VCHL, 2, 1
+ case AVCHLGS:
+ return op_VCHL, 3, 1
+ case AVCLZ:
+ return op_VCLZ, 0, 0
+ case AVCLZB:
+ return op_VCLZ, 0, 0
+ case AVCLZH:
+ return op_VCLZ, 1, 0
+ case AVCLZF:
+ return op_VCLZ, 2, 0
+ case AVCLZG:
+ return op_VCLZ, 3, 0
+ case AVCTZ:
+ return op_VCTZ, 0, 0
+ case AVCTZB:
+ return op_VCTZ, 0, 0
+ case AVCTZH:
+ return op_VCTZ, 1, 0
+ case AVCTZF:
+ return op_VCTZ, 2, 0
+ case AVCTZG:
+ return op_VCTZ, 3, 0
+ case AVEC:
+ return op_VEC, 0, 0
+ case AVECB:
+ return op_VEC, 0, 0
+ case AVECH:
+ return op_VEC, 1, 0
+ case AVECF:
+ return op_VEC, 2, 0
+ case AVECG:
+ return op_VEC, 3, 0
+ case AVECL:
+ return op_VECL, 0, 0
+ case AVECLB:
+ return op_VECL, 0, 0
+ case AVECLH:
+ return op_VECL, 1, 0
+ case AVECLF:
+ return op_VECL, 2, 0
+ case AVECLG:
+ return op_VECL, 3, 0
+ case AVERIM:
+ return op_VERIM, 0, 0
+ case AVERIMB:
+ return op_VERIM, 0, 0
+ case AVERIMH:
+ return op_VERIM, 1, 0
+ case AVERIMF:
+ return op_VERIM, 2, 0
+ case AVERIMG:
+ return op_VERIM, 3, 0
+ case AVERLL:
+ return op_VERLL, 0, 0
+ case AVERLLB:
+ return op_VERLL, 0, 0
+ case AVERLLH:
+ return op_VERLL, 1, 0
+ case AVERLLF:
+ return op_VERLL, 2, 0
+ case AVERLLG:
+ return op_VERLL, 3, 0
+ case AVERLLV:
+ return op_VERLLV, 0, 0
+ case AVERLLVB:
+ return op_VERLLV, 0, 0
+ case AVERLLVH:
+ return op_VERLLV, 1, 0
+ case AVERLLVF:
+ return op_VERLLV, 2, 0
+ case AVERLLVG:
+ return op_VERLLV, 3, 0
+ case AVESLV:
+ return op_VESLV, 0, 0
+ case AVESLVB:
+ return op_VESLV, 0, 0
+ case AVESLVH:
+ return op_VESLV, 1, 0
+ case AVESLVF:
+ return op_VESLV, 2, 0
+ case AVESLVG:
+ return op_VESLV, 3, 0
+ case AVESL:
+ return op_VESL, 0, 0
+ case AVESLB:
+ return op_VESL, 0, 0
+ case AVESLH:
+ return op_VESL, 1, 0
+ case AVESLF:
+ return op_VESL, 2, 0
+ case AVESLG:
+ return op_VESL, 3, 0
+ case AVESRA:
+ return op_VESRA, 0, 0
+ case AVESRAB:
+ return op_VESRA, 0, 0
+ case AVESRAH:
+ return op_VESRA, 1, 0
+ case AVESRAF:
+ return op_VESRA, 2, 0
+ case AVESRAG:
+ return op_VESRA, 3, 0
+ case AVESRAV:
+ return op_VESRAV, 0, 0
+ case AVESRAVB:
+ return op_VESRAV, 0, 0
+ case AVESRAVH:
+ return op_VESRAV, 1, 0
+ case AVESRAVF:
+ return op_VESRAV, 2, 0
+ case AVESRAVG:
+ return op_VESRAV, 3, 0
+ case AVESRL:
+ return op_VESRL, 0, 0
+ case AVESRLB:
+ return op_VESRL, 0, 0
+ case AVESRLH:
+ return op_VESRL, 1, 0
+ case AVESRLF:
+ return op_VESRL, 2, 0
+ case AVESRLG:
+ return op_VESRL, 3, 0
+ case AVESRLV:
+ return op_VESRLV, 0, 0
+ case AVESRLVB:
+ return op_VESRLV, 0, 0
+ case AVESRLVH:
+ return op_VESRLV, 1, 0
+ case AVESRLVF:
+ return op_VESRLV, 2, 0
+ case AVESRLVG:
+ return op_VESRLV, 3, 0
+ case AVX:
+ return op_VX, 0, 0
+ case AVFAE:
+ return op_VFAE, 0, 0
+ case AVFAEB:
+ return op_VFAE, 0, 0
+ case AVFAEH:
+ return op_VFAE, 1, 0
+ case AVFAEF:
+ return op_VFAE, 2, 0
+ case AVFAEBS:
+ return op_VFAE, 0, 1
+ case AVFAEHS:
+ return op_VFAE, 1, 1
+ case AVFAEFS:
+ return op_VFAE, 2, 1
+ case AVFAEZB:
+ return op_VFAE, 0, 2
+ case AVFAEZH:
+ return op_VFAE, 1, 2
+ case AVFAEZF:
+ return op_VFAE, 2, 2
+ case AVFAEZBS:
+ return op_VFAE, 0, 3
+ case AVFAEZHS:
+ return op_VFAE, 1, 3
+ case AVFAEZFS:
+ return op_VFAE, 2, 3
+ case AVFEE:
+ return op_VFEE, 0, 0
+ case AVFEEB:
+ return op_VFEE, 0, 0
+ case AVFEEH:
+ return op_VFEE, 1, 0
+ case AVFEEF:
+ return op_VFEE, 2, 0
+ case AVFEEBS:
+ return op_VFEE, 0, 1
+ case AVFEEHS:
+ return op_VFEE, 1, 1
+ case AVFEEFS:
+ return op_VFEE, 2, 1
+ case AVFEEZB:
+ return op_VFEE, 0, 2
+ case AVFEEZH:
+ return op_VFEE, 1, 2
+ case AVFEEZF:
+ return op_VFEE, 2, 2
+ case AVFEEZBS:
+ return op_VFEE, 0, 3
+ case AVFEEZHS:
+ return op_VFEE, 1, 3
+ case AVFEEZFS:
+ return op_VFEE, 2, 3
+ case AVFENE:
+ return op_VFENE, 0, 0
+ case AVFENEB:
+ return op_VFENE, 0, 0
+ case AVFENEH:
+ return op_VFENE, 1, 0
+ case AVFENEF:
+ return op_VFENE, 2, 0
+ case AVFENEBS:
+ return op_VFENE, 0, 1
+ case AVFENEHS:
+ return op_VFENE, 1, 1
+ case AVFENEFS:
+ return op_VFENE, 2, 1
+ case AVFENEZB:
+ return op_VFENE, 0, 2
+ case AVFENEZH:
+ return op_VFENE, 1, 2
+ case AVFENEZF:
+ return op_VFENE, 2, 2
+ case AVFENEZBS:
+ return op_VFENE, 0, 3
+ case AVFENEZHS:
+ return op_VFENE, 1, 3
+ case AVFENEZFS:
+ return op_VFENE, 2, 3
+ case AVFA:
+ return op_VFA, 0, 0
+ case AVFADB:
+ return op_VFA, 3, 0
+ case AWFADB:
+ return op_VFA, 3, 0
+ case AWFK:
+ return op_WFK, 0, 0
+ case AWFKDB:
+ return op_WFK, 3, 0
+ case AVFCE:
+ return op_VFCE, 0, 0
+ case AVFCEDB:
+ return op_VFCE, 3, 0
+ case AVFCEDBS:
+ return op_VFCE, 3, 1
+ case AWFCEDB:
+ return op_VFCE, 3, 0
+ case AWFCEDBS:
+ return op_VFCE, 3, 1
+ case AVFCH:
+ return op_VFCH, 0, 0
+ case AVFCHDB:
+ return op_VFCH, 3, 0
+ case AVFCHDBS:
+ return op_VFCH, 3, 1
+ case AWFCHDB:
+ return op_VFCH, 3, 0
+ case AWFCHDBS:
+ return op_VFCH, 3, 1
+ case AVFCHE:
+ return op_VFCHE, 0, 0
+ case AVFCHEDB:
+ return op_VFCHE, 3, 0
+ case AVFCHEDBS:
+ return op_VFCHE, 3, 1
+ case AWFCHEDB:
+ return op_VFCHE, 3, 0
+ case AWFCHEDBS:
+ return op_VFCHE, 3, 1
+ case AWFC:
+ return op_WFC, 0, 0
+ case AWFCDB:
+ return op_WFC, 3, 0
+ case AVCDG:
+ return op_VCDG, 0, 0
+ case AVCDGB:
+ return op_VCDG, 3, 0
+ case AWCDGB:
+ return op_VCDG, 3, 0
+ case AVCDLG:
+ return op_VCDLG, 0, 0
+ case AVCDLGB:
+ return op_VCDLG, 3, 0
+ case AWCDLGB:
+ return op_VCDLG, 3, 0
+ case AVCGD:
+ return op_VCGD, 0, 0
+ case AVCGDB:
+ return op_VCGD, 3, 0
+ case AWCGDB:
+ return op_VCGD, 3, 0
+ case AVCLGD:
+ return op_VCLGD, 0, 0
+ case AVCLGDB:
+ return op_VCLGD, 3, 0
+ case AWCLGDB:
+ return op_VCLGD, 3, 0
+ case AVFD:
+ return op_VFD, 0, 0
+ case AVFDDB:
+ return op_VFD, 3, 0
+ case AWFDDB:
+ return op_VFD, 3, 0
+ case AVLDE:
+ return op_VLDE, 0, 0
+ case AVLDEB:
+ return op_VLDE, 2, 0
+ case AWLDEB:
+ return op_VLDE, 2, 0
+ case AVLED:
+ return op_VLED, 0, 0
+ case AVLEDB:
+ return op_VLED, 3, 0
+ case AWLEDB:
+ return op_VLED, 3, 0
+ case AVFM:
+ return op_VFM, 0, 0
+ case AVFMDB:
+ return op_VFM, 3, 0
+ case AWFMDB:
+ return op_VFM, 3, 0
+ case AVFMA:
+ return op_VFMA, 0, 0
+ case AVFMADB:
+ return op_VFMA, 3, 0
+ case AWFMADB:
+ return op_VFMA, 3, 0
+ case AVFMS:
+ return op_VFMS, 0, 0
+ case AVFMSDB:
+ return op_VFMS, 3, 0
+ case AWFMSDB:
+ return op_VFMS, 3, 0
+ case AVFPSO:
+ return op_VFPSO, 0, 0
+ case AVFPSODB:
+ return op_VFPSO, 3, 0
+ case AWFPSODB:
+ return op_VFPSO, 3, 0
+ case AVFLCDB:
+ return op_VFPSO, 3, 0
+ case AWFLCDB:
+ return op_VFPSO, 3, 0
+ case AVFLNDB:
+ return op_VFPSO, 3, 1
+ case AWFLNDB:
+ return op_VFPSO, 3, 1
+ case AVFLPDB:
+ return op_VFPSO, 3, 2
+ case AWFLPDB:
+ return op_VFPSO, 3, 2
+ case AVFSQ:
+ return op_VFSQ, 0, 0
+ case AVFSQDB:
+ return op_VFSQ, 3, 0
+ case AWFSQDB:
+ return op_VFSQ, 3, 0
+ case AVFS:
+ return op_VFS, 0, 0
+ case AVFSDB:
+ return op_VFS, 3, 0
+ case AWFSDB:
+ return op_VFS, 3, 0
+ case AVFTCI:
+ return op_VFTCI, 0, 0
+ case AVFTCIDB:
+ return op_VFTCI, 3, 0
+ case AWFTCIDB:
+ return op_VFTCI, 3, 0
+ case AVGFM:
+ return op_VGFM, 0, 0
+ case AVGFMB:
+ return op_VGFM, 0, 0
+ case AVGFMH:
+ return op_VGFM, 1, 0
+ case AVGFMF:
+ return op_VGFM, 2, 0
+ case AVGFMG:
+ return op_VGFM, 3, 0
+ case AVGFMA:
+ return op_VGFMA, 0, 0
+ case AVGFMAB:
+ return op_VGFMA, 0, 0
+ case AVGFMAH:
+ return op_VGFMA, 1, 0
+ case AVGFMAF:
+ return op_VGFMA, 2, 0
+ case AVGFMAG:
+ return op_VGFMA, 3, 0
+ case AVGEF:
+ return op_VGEF, 0, 0
+ case AVGEG:
+ return op_VGEG, 0, 0
+ case AVGBM:
+ return op_VGBM, 0, 0
+ case AVZERO:
+ return op_VGBM, 0, 0
+ case AVONE:
+ return op_VGBM, 0, 0
+ case AVGM:
+ return op_VGM, 0, 0
+ case AVGMB:
+ return op_VGM, 0, 0
+ case AVGMH:
+ return op_VGM, 1, 0
+ case AVGMF:
+ return op_VGM, 2, 0
+ case AVGMG:
+ return op_VGM, 3, 0
+ case AVISTR:
+ return op_VISTR, 0, 0
+ case AVISTRB:
+ return op_VISTR, 0, 0
+ case AVISTRH:
+ return op_VISTR, 1, 0
+ case AVISTRF:
+ return op_VISTR, 2, 0
+ case AVISTRBS:
+ return op_VISTR, 0, 1
+ case AVISTRHS:
+ return op_VISTR, 1, 1
+ case AVISTRFS:
+ return op_VISTR, 2, 1
+ case AVL:
+ return op_VL, 0, 0
+ case AVLR:
+ return op_VLR, 0, 0
+ case AVLREP:
+ return op_VLREP, 0, 0
+ case AVLREPB:
+ return op_VLREP, 0, 0
+ case AVLREPH:
+ return op_VLREP, 1, 0
+ case AVLREPF:
+ return op_VLREP, 2, 0
+ case AVLREPG:
+ return op_VLREP, 3, 0
+ case AVLC:
+ return op_VLC, 0, 0
+ case AVLCB:
+ return op_VLC, 0, 0
+ case AVLCH:
+ return op_VLC, 1, 0
+ case AVLCF:
+ return op_VLC, 2, 0
+ case AVLCG:
+ return op_VLC, 3, 0
+ case AVLEH:
+ return op_VLEH, 0, 0
+ case AVLEF:
+ return op_VLEF, 0, 0
+ case AVLEG:
+ return op_VLEG, 0, 0
+ case AVLEB:
+ return op_VLEB, 0, 0
+ case AVLEIH:
+ return op_VLEIH, 0, 0
+ case AVLEIF:
+ return op_VLEIF, 0, 0
+ case AVLEIG:
+ return op_VLEIG, 0, 0
+ case AVLEIB:
+ return op_VLEIB, 0, 0
+ case AVFI:
+ return op_VFI, 0, 0
+ case AVFIDB:
+ return op_VFI, 3, 0
+ case AWFIDB:
+ return op_VFI, 3, 0
+ case AVLGV:
+ return op_VLGV, 0, 0
+ case AVLGVB:
+ return op_VLGV, 0, 0
+ case AVLGVH:
+ return op_VLGV, 1, 0
+ case AVLGVF:
+ return op_VLGV, 2, 0
+ case AVLGVG:
+ return op_VLGV, 3, 0
+ case AVLLEZ:
+ return op_VLLEZ, 0, 0
+ case AVLLEZB:
+ return op_VLLEZ, 0, 0
+ case AVLLEZH:
+ return op_VLLEZ, 1, 0
+ case AVLLEZF:
+ return op_VLLEZ, 2, 0
+ case AVLLEZG:
+ return op_VLLEZ, 3, 0
+ case AVLM:
+ return op_VLM, 0, 0
+ case AVLP:
+ return op_VLP, 0, 0
+ case AVLPB:
+ return op_VLP, 0, 0
+ case AVLPH:
+ return op_VLP, 1, 0
+ case AVLPF:
+ return op_VLP, 2, 0
+ case AVLPG:
+ return op_VLP, 3, 0
+ case AVLBB:
+ return op_VLBB, 0, 0
+ case AVLVG:
+ return op_VLVG, 0, 0
+ case AVLVGB:
+ return op_VLVG, 0, 0
+ case AVLVGH:
+ return op_VLVG, 1, 0
+ case AVLVGF:
+ return op_VLVG, 2, 0
+ case AVLVGG:
+ return op_VLVG, 3, 0
+ case AVLVGP:
+ return op_VLVGP, 0, 0
+ case AVLL:
+ return op_VLL, 0, 0
+ case AVMX:
+ return op_VMX, 0, 0
+ case AVMXB:
+ return op_VMX, 0, 0
+ case AVMXH:
+ return op_VMX, 1, 0
+ case AVMXF:
+ return op_VMX, 2, 0
+ case AVMXG:
+ return op_VMX, 3, 0
+ case AVMXL:
+ return op_VMXL, 0, 0
+ case AVMXLB:
+ return op_VMXL, 0, 0
+ case AVMXLH:
+ return op_VMXL, 1, 0
+ case AVMXLF:
+ return op_VMXL, 2, 0
+ case AVMXLG:
+ return op_VMXL, 3, 0
+ case AVMRH:
+ return op_VMRH, 0, 0
+ case AVMRHB:
+ return op_VMRH, 0, 0
+ case AVMRHH:
+ return op_VMRH, 1, 0
+ case AVMRHF:
+ return op_VMRH, 2, 0
+ case AVMRHG:
+ return op_VMRH, 3, 0
+ case AVMRL:
+ return op_VMRL, 0, 0
+ case AVMRLB:
+ return op_VMRL, 0, 0
+ case AVMRLH:
+ return op_VMRL, 1, 0
+ case AVMRLF:
+ return op_VMRL, 2, 0
+ case AVMRLG:
+ return op_VMRL, 3, 0
+ case AVMN:
+ return op_VMN, 0, 0
+ case AVMNB:
+ return op_VMN, 0, 0
+ case AVMNH:
+ return op_VMN, 1, 0
+ case AVMNF:
+ return op_VMN, 2, 0
+ case AVMNG:
+ return op_VMN, 3, 0
+ case AVMNL:
+ return op_VMNL, 0, 0
+ case AVMNLB:
+ return op_VMNL, 0, 0
+ case AVMNLH:
+ return op_VMNL, 1, 0
+ case AVMNLF:
+ return op_VMNL, 2, 0
+ case AVMNLG:
+ return op_VMNL, 3, 0
+ case AVMAE:
+ return op_VMAE, 0, 0
+ case AVMAEB:
+ return op_VMAE, 0, 0
+ case AVMAEH:
+ return op_VMAE, 1, 0
+ case AVMAEF:
+ return op_VMAE, 2, 0
+ case AVMAH:
+ return op_VMAH, 0, 0
+ case AVMAHB:
+ return op_VMAH, 0, 0
+ case AVMAHH:
+ return op_VMAH, 1, 0
+ case AVMAHF:
+ return op_VMAH, 2, 0
+ case AVMALE:
+ return op_VMALE, 0, 0
+ case AVMALEB:
+ return op_VMALE, 0, 0
+ case AVMALEH:
+ return op_VMALE, 1, 0
+ case AVMALEF:
+ return op_VMALE, 2, 0
+ case AVMALH:
+ return op_VMALH, 0, 0
+ case AVMALHB:
+ return op_VMALH, 0, 0
+ case AVMALHH:
+ return op_VMALH, 1, 0
+ case AVMALHF:
+ return op_VMALH, 2, 0
+ case AVMALO:
+ return op_VMALO, 0, 0
+ case AVMALOB:
+ return op_VMALO, 0, 0
+ case AVMALOH:
+ return op_VMALO, 1, 0
+ case AVMALOF:
+ return op_VMALO, 2, 0
+ case AVMAL:
+ return op_VMAL, 0, 0
+ case AVMALB:
+ return op_VMAL, 0, 0
+ case AVMALHW:
+ return op_VMAL, 1, 0
+ case AVMALF:
+ return op_VMAL, 2, 0
+ case AVMAO:
+ return op_VMAO, 0, 0
+ case AVMAOB:
+ return op_VMAO, 0, 0
+ case AVMAOH:
+ return op_VMAO, 1, 0
+ case AVMAOF:
+ return op_VMAO, 2, 0
+ case AVME:
+ return op_VME, 0, 0
+ case AVMEB:
+ return op_VME, 0, 0
+ case AVMEH:
+ return op_VME, 1, 0
+ case AVMEF:
+ return op_VME, 2, 0
+ case AVMH:
+ return op_VMH, 0, 0
+ case AVMHB:
+ return op_VMH, 0, 0
+ case AVMHH:
+ return op_VMH, 1, 0
+ case AVMHF:
+ return op_VMH, 2, 0
+ case AVMLE:
+ return op_VMLE, 0, 0
+ case AVMLEB:
+ return op_VMLE, 0, 0
+ case AVMLEH:
+ return op_VMLE, 1, 0
+ case AVMLEF:
+ return op_VMLE, 2, 0
+ case AVMLH:
+ return op_VMLH, 0, 0
+ case AVMLHB:
+ return op_VMLH, 0, 0
+ case AVMLHH:
+ return op_VMLH, 1, 0
+ case AVMLHF:
+ return op_VMLH, 2, 0
+ case AVMLO:
+ return op_VMLO, 0, 0
+ case AVMLOB:
+ return op_VMLO, 0, 0
+ case AVMLOH:
+ return op_VMLO, 1, 0
+ case AVMLOF:
+ return op_VMLO, 2, 0
+ case AVML:
+ return op_VML, 0, 0
+ case AVMLB:
+ return op_VML, 0, 0
+ case AVMLHW:
+ return op_VML, 1, 0
+ case AVMLF:
+ return op_VML, 2, 0
+ case AVMO:
+ return op_VMO, 0, 0
+ case AVMOB:
+ return op_VMO, 0, 0
+ case AVMOH:
+ return op_VMO, 1, 0
+ case AVMOF:
+ return op_VMO, 2, 0
+ case AVNO:
+ return op_VNO, 0, 0
+ case AVNOT:
+ return op_VNO, 0, 0
+ case AVO:
+ return op_VO, 0, 0
+ case AVPK:
+ return op_VPK, 0, 0
+ case AVPKH:
+ return op_VPK, 1, 0
+ case AVPKF:
+ return op_VPK, 2, 0
+ case AVPKG:
+ return op_VPK, 3, 0
+ case AVPKLS:
+ return op_VPKLS, 0, 0
+ case AVPKLSH:
+ return op_VPKLS, 1, 0
+ case AVPKLSF:
+ return op_VPKLS, 2, 0
+ case AVPKLSG:
+ return op_VPKLS, 3, 0
+ case AVPKLSHS:
+ return op_VPKLS, 1, 1
+ case AVPKLSFS:
+ return op_VPKLS, 2, 1
+ case AVPKLSGS:
+ return op_VPKLS, 3, 1
+ case AVPKS:
+ return op_VPKS, 0, 0
+ case AVPKSH:
+ return op_VPKS, 1, 0
+ case AVPKSF:
+ return op_VPKS, 2, 0
+ case AVPKSG:
+ return op_VPKS, 3, 0
+ case AVPKSHS:
+ return op_VPKS, 1, 1
+ case AVPKSFS:
+ return op_VPKS, 2, 1
+ case AVPKSGS:
+ return op_VPKS, 3, 1
+ case AVPERM:
+ return op_VPERM, 0, 0
+ case AVPDI:
+ return op_VPDI, 0, 0
+ case AVPOPCT:
+ return op_VPOPCT, 0, 0
+ case AVREP:
+ return op_VREP, 0, 0
+ case AVREPB:
+ return op_VREP, 0, 0
+ case AVREPH:
+ return op_VREP, 1, 0
+ case AVREPF:
+ return op_VREP, 2, 0
+ case AVREPG:
+ return op_VREP, 3, 0
+ case AVREPI:
+ return op_VREPI, 0, 0
+ case AVREPIB:
+ return op_VREPI, 0, 0
+ case AVREPIH:
+ return op_VREPI, 1, 0
+ case AVREPIF:
+ return op_VREPI, 2, 0
+ case AVREPIG:
+ return op_VREPI, 3, 0
+ case AVSCEF:
+ return op_VSCEF, 0, 0
+ case AVSCEG:
+ return op_VSCEG, 0, 0
+ case AVSEL:
+ return op_VSEL, 0, 0
+ case AVSL:
+ return op_VSL, 0, 0
+ case AVSLB:
+ return op_VSLB, 0, 0
+ case AVSLDB:
+ return op_VSLDB, 0, 0
+ case AVSRA:
+ return op_VSRA, 0, 0
+ case AVSRAB:
+ return op_VSRAB, 0, 0
+ case AVSRL:
+ return op_VSRL, 0, 0
+ case AVSRLB:
+ return op_VSRLB, 0, 0
+ case AVSEG:
+ return op_VSEG, 0, 0
+ case AVSEGB:
+ return op_VSEG, 0, 0
+ case AVSEGH:
+ return op_VSEG, 1, 0
+ case AVSEGF:
+ return op_VSEG, 2, 0
+ case AVST:
+ return op_VST, 0, 0
+ case AVSTEH:
+ return op_VSTEH, 0, 0
+ case AVSTEF:
+ return op_VSTEF, 0, 0
+ case AVSTEG:
+ return op_VSTEG, 0, 0
+ case AVSTEB:
+ return op_VSTEB, 0, 0
+ case AVSTM:
+ return op_VSTM, 0, 0
+ case AVSTL:
+ return op_VSTL, 0, 0
+ case AVSTRC:
+ return op_VSTRC, 0, 0
+ case AVSTRCB:
+ return op_VSTRC, 0, 0
+ case AVSTRCH:
+ return op_VSTRC, 1, 0
+ case AVSTRCF:
+ return op_VSTRC, 2, 0
+ case AVSTRCBS:
+ return op_VSTRC, 0, 1
+ case AVSTRCHS:
+ return op_VSTRC, 1, 1
+ case AVSTRCFS:
+ return op_VSTRC, 2, 1
+ case AVSTRCZB:
+ return op_VSTRC, 0, 2
+ case AVSTRCZH:
+ return op_VSTRC, 1, 2
+ case AVSTRCZF:
+ return op_VSTRC, 2, 2
+ case AVSTRCZBS:
+ return op_VSTRC, 0, 3
+ case AVSTRCZHS:
+ return op_VSTRC, 1, 3
+ case AVSTRCZFS:
+ return op_VSTRC, 2, 3
+ case AVS:
+ return op_VS, 0, 0
+ case AVSB:
+ return op_VS, 0, 0
+ case AVSH:
+ return op_VS, 1, 0
+ case AVSF:
+ return op_VS, 2, 0
+ case AVSG:
+ return op_VS, 3, 0
+ case AVSQ:
+ return op_VS, 4, 0
+ case AVSCBI:
+ return op_VSCBI, 0, 0
+ case AVSCBIB:
+ return op_VSCBI, 0, 0
+ case AVSCBIH:
+ return op_VSCBI, 1, 0
+ case AVSCBIF:
+ return op_VSCBI, 2, 0
+ case AVSCBIG:
+ return op_VSCBI, 3, 0
+ case AVSCBIQ:
+ return op_VSCBI, 4, 0
+ case AVSBCBI:
+ return op_VSBCBI, 0, 0
+ case AVSBCBIQ:
+ return op_VSBCBI, 4, 0
+ case AVSBI:
+ return op_VSBI, 0, 0
+ case AVSBIQ:
+ return op_VSBI, 4, 0
+ case AVSUMG:
+ return op_VSUMG, 0, 0
+ case AVSUMGH:
+ return op_VSUMG, 1, 0
+ case AVSUMGF:
+ return op_VSUMG, 2, 0
+ case AVSUMQ:
+ return op_VSUMQ, 0, 0
+ case AVSUMQF:
+ return op_VSUMQ, 1, 0
+ case AVSUMQG:
+ return op_VSUMQ, 2, 0
+ case AVSUM:
+ return op_VSUM, 0, 0
+ case AVSUMB:
+ return op_VSUM, 0, 0
+ case AVSUMH:
+ return op_VSUM, 1, 0
+ case AVTM:
+ return op_VTM, 0, 0
+ case AVUPH:
+ return op_VUPH, 0, 0
+ case AVUPHB:
+ return op_VUPH, 0, 0
+ case AVUPHH:
+ return op_VUPH, 1, 0
+ case AVUPHF:
+ return op_VUPH, 2, 0
+ case AVUPLH:
+ return op_VUPLH, 0, 0
+ case AVUPLHB:
+ return op_VUPLH, 0, 0
+ case AVUPLHH:
+ return op_VUPLH, 1, 0
+ case AVUPLHF:
+ return op_VUPLH, 2, 0
+ case AVUPLL:
+ return op_VUPLL, 0, 0
+ case AVUPLLB:
+ return op_VUPLL, 0, 0
+ case AVUPLLH:
+ return op_VUPLL, 1, 0
+ case AVUPLLF:
+ return op_VUPLL, 2, 0
+ case AVUPL:
+ return op_VUPL, 0, 0
+ case AVUPLB:
+ return op_VUPL, 0, 0
+ case AVUPLHW:
+ return op_VUPL, 1, 0
+ case AVUPLF:
+ return op_VUPL, 2, 0
+ }
+}
+
+// singleElementMask returns the single element mask bits required for the
+// given instruction.
+func singleElementMask(as int16) uint32 {
+ switch as {
+ case AWFADB,
+ AWFK,
+ AWFKDB,
+ AWFCEDB,
+ AWFCEDBS,
+ AWFCHDB,
+ AWFCHDBS,
+ AWFCHEDB,
+ AWFCHEDBS,
+ AWFC,
+ AWFCDB,
+ AWCDGB,
+ AWCDLGB,
+ AWCGDB,
+ AWCLGDB,
+ AWFDDB,
+ AWLDEB,
+ AWLEDB,
+ AWFMDB,
+ AWFMADB,
+ AWFMSDB,
+ AWFPSODB,
+ AWFLCDB,
+ AWFLNDB,
+ AWFLPDB,
+ AWFSQDB,
+ AWFSDB,
+ AWFTCIDB,
+ AWFIDB:
+ return 8
+ }
+ return 0
+}
diff -pruN 1.6.3-1/src/cmd/internal/obj/util.go 1.6.3-1ubuntu1/src/cmd/internal/obj/util.go
--- 1.6.3-1/src/cmd/internal/obj/util.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/util.go 2016-07-21 13:36:09.000000000 +0000
@@ -530,6 +530,7 @@ const (
RBasePPC64 = 4 * 1024 // range [4k, 8k)
RBaseARM64 = 8 * 1024 // range [8k, 13k)
RBaseMIPS64 = 13 * 1024 // range [13k, 14k)
+ RBaseS390X = 14 * 1024 // range [14k, 15k)
)
// RegisterRegister binds a pretty-printer (Rconv) for register
@@ -590,6 +591,7 @@ const (
ABasePPC64
ABaseARM64
ABaseMIPS64
+ ABaseS390X
AMask = 1<<12 - 1 // AND with this to use the opcode as an array index.
)
diff -pruN 1.6.3-1/src/cmd/internal/obj/x86/obj6.go 1.6.3-1ubuntu1/src/cmd/internal/obj/x86/obj6.go
--- 1.6.3-1/src/cmd/internal/obj/x86/obj6.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/obj/x86/obj6.go 2016-07-21 13:36:09.000000000 +0000
@@ -1089,6 +1089,7 @@ func stacksplit(ctxt *obj.Link, p *obj.P
call.Mode = ctxt.Cursym.Text.Mode
call.As = obj.ACALL
call.To.Type = obj.TYPE_BRANCH
+ call.To.Name = obj.NAME_EXTERN
morestack := "runtime.morestack"
switch {
case ctxt.Cursym.Cfunc != 0:
@@ -1097,8 +1098,17 @@ func stacksplit(ctxt *obj.Link, p *obj.P
morestack = "runtime.morestack_noctxt"
}
call.To.Sym = obj.Linklookup(ctxt, morestack, 0)
+ // When compiling 386 code for dynamic linking, the call needs to be adjusted
+ // to follow PIC rules. This in turn can insert more instructions, so we need
+ // to keep track of the start of the call (where the jump will be to) and the
+ // end (which following instructions are appended to).
+ callend := call
+ progedit(ctxt, callend)
+ for ; callend.Link != nil; callend = callend.Link {
+ progedit(ctxt, callend.Link)
+ }
- jmp := obj.Appendp(ctxt, call)
+ jmp := obj.Appendp(ctxt, callend)
jmp.As = obj.AJMP
jmp.To.Type = obj.TYPE_BRANCH
jmp.Pcond = ctxt.Cursym.Text.Link
diff -pruN 1.6.3-1/src/cmd/internal/objfile/disasm.go 1.6.3-1ubuntu1/src/cmd/internal/objfile/disasm.go
--- 1.6.3-1/src/cmd/internal/objfile/disasm.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/objfile/disasm.go 2016-07-21 13:36:09.000000000 +0000
@@ -245,4 +245,5 @@ var byteOrders = map[string]binary.ByteO
"arm": binary.LittleEndian,
"ppc64": binary.BigEndian,
"ppc64le": binary.LittleEndian,
+ "s390x": binary.BigEndian,
}
diff -pruN 1.6.3-1/src/cmd/internal/objfile/elf.go 1.6.3-1ubuntu1/src/cmd/internal/objfile/elf.go
--- 1.6.3-1/src/cmd/internal/objfile/elf.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/internal/objfile/elf.go 2016-07-21 13:36:09.000000000 +0000
@@ -99,6 +99,8 @@ func (f *elfFile) goarch() string {
return "arm"
case elf.EM_PPC64:
return "ppc64"
+ case elf.EM_S390:
+ return "s390x"
}
return ""
}
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/arch.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/arch.go
--- 1.6.3-1/src/cmd/link/internal/ld/arch.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/arch.go 2016-07-21 13:36:09.000000000 +0000
@@ -86,3 +86,12 @@ var Linkmips64le = LinkArch{
Ptrsize: 8,
Regsize: 8,
}
+
+var Links390x = LinkArch{
+ ByteOrder: binary.BigEndian,
+ Name: "s390x",
+ Thechar: 'z',
+ Minlc: 2,
+ Ptrsize: 8,
+ Regsize: 8,
+}
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/data.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/data.go
--- 1.6.3-1/src/cmd/link/internal/ld/data.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/data.go 2016-07-21 13:36:09.000000000 +0000
@@ -148,6 +148,9 @@ func Addpcrelplus(ctxt *Link, s *LSym, t
r.Add = add
r.Type = obj.R_PCREL
r.Siz = 4
+ if Thearch.Thechar == 'z' {
+ r.Variant = RV_390_DBL
+ }
return i + int64(r.Siz)
}
@@ -364,6 +367,18 @@ func relocsym(s *LSym) {
Diag("unreachable sym in relocation: %s %s", s.Name, r.Sym.Name)
}
+ // TODO(mundaym): Move this conversion somewhere more appropriate.
+ // Ideally the obj relocations would support variants.
+ if Thearch.Thechar == 'z' {
+ switch r.Type {
+ case obj.R_PCRELDBL:
+ r.Type = obj.R_PCREL
+ r.Variant = RV_390_DBL
+ case obj.R_CALL:
+ r.Variant = RV_390_DBL
+ }
+ }
+
switch r.Type {
default:
switch siz {
@@ -454,7 +469,7 @@ func relocsym(s *LSym) {
o = r.Xadd
if Iself {
- if Thearch.Thechar == '6' {
+ if Thearch.Thechar == '6' || Thearch.Thechar == 'z' {
o = 0
}
} else if HEADTYPE == obj.Hdarwin {
@@ -514,7 +529,7 @@ func relocsym(s *LSym) {
o = r.Xadd
if Iself {
- if Thearch.Thechar == '6' {
+ if Thearch.Thechar == '6' || Thearch.Thechar == 'z' {
o = 0
}
} else if HEADTYPE == obj.Hdarwin {
@@ -1030,6 +1045,13 @@ func symalign(s *LSym) int32 {
if align < s.Align {
align = s.Align
}
+
+ // TODO(mundaym): Minalign should probably be a new attribute on 'Thearch'
+ if Thearch.Thechar == 'z' && align < 2 {
+ // Relative addressing requires a 2 byte alignment on s390x.
+ align = 2
+ }
+
return align
}
@@ -1159,6 +1181,7 @@ func dodata() {
for s := datap; s != nil; s = s.Next {
if int64(len(s.P)) > s.Size {
Diag("%s: initialize bounds (%d < %d)", s.Name, int64(s.Size), len(s.P))
+ s.Size = int64(len(s.P)) // hack to allow linking of asm into go // TODO(WGO)
}
}
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/dwarf.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/dwarf.go
--- 1.6.3-1/src/cmd/link/internal/ld/dwarf.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/dwarf.go 2016-07-21 13:36:09.000000000 +0000
@@ -1715,7 +1715,7 @@ func writelines() {
* Emit .debug_frame
*/
const (
- CIERESERVE = 16
+ CIERESERVE = 32
DATAALIGNMENTFACTOR = -4
)
@@ -1754,7 +1754,6 @@ func writeframes() {
uleb128put(int64(Thearch.Dwarfreglr)) // return_address_register
Cput(DW_CFA_def_cfa)
-
uleb128put(int64(Thearch.Dwarfregsp)) // register SP (**ABI-dependent, defined in l.h)
if haslinkregister() {
uleb128put(int64(0)) // offset
@@ -1762,14 +1761,21 @@ func writeframes() {
uleb128put(int64(Thearch.Ptrsize)) // offset
}
- Cput(DW_CFA_offset_extended)
- uleb128put(int64(Thearch.Dwarfreglr)) // return address
if haslinkregister() {
- uleb128put(int64(0) / DATAALIGNMENTFACTOR) // at cfa - 0
+ Cput(DW_CFA_same_value)
+ uleb128put(int64(Thearch.Dwarfreglr))
} else {
+ Cput(DW_CFA_offset_extended)
+ uleb128put(int64(Thearch.Dwarfreglr)) // return address
uleb128put(int64(-Thearch.Ptrsize) / DATAALIGNMENTFACTOR) // at cfa - x*4
}
+ if haslinkregister() {
+ Cput(DW_CFA_val_offset)
+ uleb128put(int64(Thearch.Dwarfregsp))
+ uleb128put(int64(0))
+ }
+
// 4 is to exclude the length field.
pad := CIERESERVE + frameo + 4 - Cpos()
@@ -1788,7 +1794,7 @@ func writeframes() {
fdeo := Cpos()
- // Emit a FDE, Section 6.4.1, starting wit a placeholder.
+ // Emit a FDE, Section 6.4.1, starting with a placeholder.
Thearch.Lput(0) // length, must be multiple of thearch.ptrsize
Thearch.Lput(0) // Pointer to the CIE above, at offset 0
addrput(0) // initial location
@@ -1807,6 +1813,21 @@ func writeframes() {
}
if haslinkregister() {
+ // TODO(bryanpkc): This is imprecise. In general, the instruction
+ // that stores the return address to the stack frame is not the
+ // same one that allocates the frame.
+ if pcsp.value > 0 {
+ // The return address is preserved at (CFA-frame_size)
+ // after a stack frame has been allocated.
+ Cput(DW_CFA_offset_extended_sf)
+ uleb128put(int64(Thearch.Dwarfreglr))
+ sleb128put(-int64(pcsp.value) / DATAALIGNMENTFACTOR)
+ } else {
+ // The return address is restored into the link register
+ // when a stack frame has been de-allocated.
+ Cput(DW_CFA_same_value)
+ uleb128put(int64(Thearch.Dwarfreglr))
+ }
putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(pcsp.value))
} else {
putpccfadelta(int64(nextpc)-int64(pcsp.pc), int64(Thearch.Ptrsize)+int64(pcsp.value))
@@ -2237,7 +2258,7 @@ func dwarfaddshstrings(shstrtab *LSym) {
elfstrdbg[ElfStrGDBScripts] = Addstring(shstrtab, ".debug_gdb_scripts")
if Linkmode == LinkExternal {
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
elfstrdbg[ElfStrRelDebugInfo] = Addstring(shstrtab, ".rela.debug_info")
elfstrdbg[ElfStrRelDebugAranges] = Addstring(shstrtab, ".rela.debug_aranges")
elfstrdbg[ElfStrRelDebugLine] = Addstring(shstrtab, ".rela.debug_line")
@@ -2290,7 +2311,7 @@ func dwarfaddelfsectionsyms() {
func dwarfaddelfrelocheader(elfstr int, shdata *ElfShdr, off int64, size int64) {
sh := newElfShdr(elfstrdbg[elfstr])
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
sh.type_ = SHT_RELA
default:
sh.type_ = SHT_REL
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/elf.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/elf.go
--- 1.6.3-1/src/cmd/link/internal/ld/elf.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/elf.go 2016-07-21 13:36:09.000000000 +0000
@@ -646,6 +646,68 @@ const (
R_SPARC_UA64 = 54
R_SPARC_UA16 = 55
+ R_390_NONE = 0
+ R_390_8 = 1
+ R_390_12 = 2
+ R_390_16 = 3
+ R_390_32 = 4
+ R_390_PC32 = 5
+ R_390_GOT12 = 6
+ R_390_GOT32 = 7
+ R_390_PLT32 = 8
+ R_390_COPY = 9
+ R_390_GLOB_DAT = 10
+ R_390_JMP_SLOT = 11
+ R_390_RELATIVE = 12
+ R_390_GOTOFF = 13
+ R_390_GOTPC = 14
+ R_390_GOT16 = 15
+ R_390_PC16 = 16
+ R_390_PC16DBL = 17
+ R_390_PLT16DBL = 18
+ R_390_PC32DBL = 19
+ R_390_PLT32DBL = 20
+ R_390_GOTPCDBL = 21
+ R_390_64 = 22
+ R_390_PC64 = 23
+ R_390_GOT64 = 24
+ R_390_PLT64 = 25
+ R_390_GOTENT = 26
+ R_390_GOTOFF16 = 27
+ R_390_GOTOFF64 = 28
+ R_390_GOTPLT12 = 29
+ R_390_GOTPLT16 = 30
+ R_390_GOTPLT32 = 31
+ R_390_GOTPLT64 = 32
+ R_390_GOTPLTENT = 33
+ R_390_GOTPLTOFF16 = 34
+ R_390_GOTPLTOFF32 = 35
+ R_390_GOTPLTOFF64 = 36
+ R_390_TLS_LOAD = 37
+ R_390_TLS_GDCALL = 38
+ R_390_TLS_LDCALL = 39
+ R_390_TLS_GD32 = 40
+ R_390_TLS_GD64 = 41
+ R_390_TLS_GOTIE12 = 42
+ R_390_TLS_GOTIE32 = 43
+ R_390_TLS_GOTIE64 = 44
+ R_390_TLS_LDM32 = 45
+ R_390_TLS_LDM64 = 46
+ R_390_TLS_IE32 = 47
+ R_390_TLS_IE64 = 48
+ R_390_TLS_IEENT = 49
+ R_390_TLS_LE32 = 50
+ R_390_TLS_LE64 = 51
+ R_390_TLS_LDO32 = 52
+ R_390_TLS_LDO64 = 53
+ R_390_TLS_DTPMOD = 54
+ R_390_TLS_DTPOFF = 55
+ R_390_TLS_TPOFF = 56
+ R_390_20 = 57
+ R_390_GOT20 = 58
+ R_390_GOTPLT20 = 59
+ R_390_TLS_GOTIE20 = 60
+
ARM_MAGIC_TRAMP_NUMBER = 0x5c000003
)
@@ -803,7 +865,7 @@ func Elfinit() {
switch Thearch.Thechar {
// 64-bit architectures
- case '9':
+ case '9', 'z':
if Ctxt.Arch.ByteOrder == binary.BigEndian {
ehdr.flags = 1 /* Version 1 ABI */
} else {
@@ -1380,13 +1442,25 @@ func elfdynhash() {
buckets[b] = uint32(sy.Dynid)
}
- Adduint32(Ctxt, s, uint32(nbucket))
- Adduint32(Ctxt, s, uint32(nsym))
- for i := 0; i < nbucket; i++ {
- Adduint32(Ctxt, s, buckets[i])
- }
- for i := 0; i < nsym; i++ {
- Adduint32(Ctxt, s, chain[i])
+ // s390x hash table entries are 8 bytes
+ if Thearch.Thechar == 'z' && elf64 {
+ Adduint64(Ctxt, s, uint64(nbucket))
+ Adduint64(Ctxt, s, uint64(nsym))
+ for i := 0; i < nbucket; i++ {
+ Adduint64(Ctxt, s, uint64(buckets[i]))
+ }
+ for i := 0; i < nsym; i++ {
+ Adduint64(Ctxt, s, uint64(chain[i]))
+ }
+ } else {
+ Adduint32(Ctxt, s, uint32(nbucket))
+ Adduint32(Ctxt, s, uint32(nsym))
+ for i := 0; i < nbucket; i++ {
+ Adduint32(Ctxt, s, buckets[i])
+ }
+ for i := 0; i < nsym; i++ {
+ Adduint32(Ctxt, s, chain[i])
+ }
}
// version symbols
@@ -1454,7 +1528,7 @@ func elfdynhash() {
}
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
sy := Linklookup(Ctxt, ".rela.plt", 0)
if sy.Size > 0 {
Elfwritedynent(s, DT_PLTREL, DT_RELA)
@@ -1594,7 +1668,7 @@ func elfshreloc(sect *Section) *ElfShdr
var prefix string
var typ int
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
prefix = ".rela"
typ = SHT_RELA
default:
@@ -1767,7 +1841,7 @@ func doelf() {
Debug['d'] = 1
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
Addstring(shstrtab, ".rela.text")
Addstring(shstrtab, ".rela.rodata")
Addstring(shstrtab, ".rela"+relro_prefix+".typelink")
@@ -1813,7 +1887,7 @@ func doelf() {
if hasinitarr {
Addstring(shstrtab, ".init_array")
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
Addstring(shstrtab, ".rela.init_array")
default:
Addstring(shstrtab, ".rel.init_array")
@@ -1840,7 +1914,7 @@ func doelf() {
Addstring(shstrtab, ".dynsym")
Addstring(shstrtab, ".dynstr")
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
Addstring(shstrtab, ".rela")
Addstring(shstrtab, ".rela.plt")
default:
@@ -1858,7 +1932,7 @@ func doelf() {
s.Type = obj.SELFROSECT
s.Reachable = true
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
s.Size += ELF64SYMSIZE
default:
s.Size += ELF32SYMSIZE
@@ -1876,7 +1950,7 @@ func doelf() {
/* relocation table */
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
s = Linklookup(Ctxt, ".rela", 0)
default:
s = Linklookup(Ctxt, ".rel", 0)
@@ -1921,7 +1995,7 @@ func doelf() {
Thearch.Elfsetupplt()
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
s = Linklookup(Ctxt, ".rela.plt", 0)
default:
s = Linklookup(Ctxt, ".rel.plt", 0)
@@ -1950,7 +2024,7 @@ func doelf() {
elfwritedynentsym(s, DT_SYMTAB, Linklookup(Ctxt, ".dynsym", 0))
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
Elfwritedynent(s, DT_SYMENT, ELF64SYMSIZE)
default:
Elfwritedynent(s, DT_SYMENT, ELF32SYMSIZE)
@@ -1958,7 +2032,7 @@ func doelf() {
elfwritedynentsym(s, DT_STRTAB, Linklookup(Ctxt, ".dynstr", 0))
elfwritedynentsymsize(s, DT_STRSZ, Linklookup(Ctxt, ".dynstr", 0))
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
elfwritedynentsym(s, DT_RELA, Linklookup(Ctxt, ".rela", 0))
elfwritedynentsymsize(s, DT_RELASZ, Linklookup(Ctxt, ".rela", 0))
Elfwritedynent(s, DT_RELAENT, ELF64RELASIZE)
@@ -1974,6 +2048,8 @@ func doelf() {
if Thearch.Thechar == '9' {
elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".plt", 0))
+ } else if Thearch.Thechar == 'z' {
+ elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got", 0))
} else {
elfwritedynentsym(s, DT_PLTGOT, Linklookup(Ctxt, ".got.plt", 0))
}
@@ -2069,6 +2145,8 @@ func Asmbelf(symo int64) {
eh.machine = EM_386
case '9':
eh.machine = EM_PPC64
+ case 'z':
+ eh.machine = EM_S390
}
elfreserve := int64(ELFRESERVE)
@@ -2254,7 +2332,7 @@ func Asmbelf(symo int64) {
}
switch eh.machine {
- case EM_X86_64, EM_PPC64, EM_AARCH64:
+ case EM_X86_64, EM_PPC64, EM_AARCH64, EM_S390:
sh := elfshname(".rela.plt")
sh.type_ = SHT_RELA
sh.flags = SHF_ALLOC
@@ -2303,6 +2381,8 @@ func Asmbelf(symo int64) {
sh.flags = SHF_ALLOC + SHF_EXECINSTR
if eh.machine == EM_X86_64 {
sh.entsize = 16
+ } else if eh.machine == EM_S390 {
+ sh.entsize = 32
} else if eh.machine == EM_PPC64 {
// On ppc64, this is just a table of addresses
// filled by the dynamic linker
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/ldelf.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/ldelf.go
--- 1.6.3-1/src/cmd/link/internal/ld/ldelf.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/ldelf.go 2016-07-21 13:36:09.000000000 +0000
@@ -586,6 +586,11 @@ func ldelf(f *obj.Biobuf, pkg string, le
Diag("%s: elf object but not ppc64", pn)
return
}
+ case 'z':
+ if elfobj.machine != ElfMachS390 || hdr.Ident[4] != ElfClass64 {
+ Diag("%s: elf object but not s390x", pn)
+ return
+ }
}
// load section list into memory.
@@ -778,6 +783,9 @@ func ldelf(f *obj.Biobuf, pkg string, le
continue
}
+ if strings.HasPrefix(sym.name, ".LASF") { // gcc on s390x does this
+ continue
+ }
Diag("%s: sym#%d: ignoring %s in section %d (type %d)", pn, i, sym.name, sym.shndx, sym.type_)
continue
}
@@ -1124,6 +1132,14 @@ func reltype(pn string, elftype int, siz
Diag("%s: unknown relocation type %d; compiled without -fpic?", pn, elftype)
fallthrough
+ case 'z' | R_390_NONE<<24,
+ 'z' | R_390_COPY<<24,
+ 'z' | R_390_JMP_SLOT<<24:
+ *siz = 0
+
+	case 'z' | R_390_8<<24:
+ *siz = 1
+
case '9' | R_PPC64_TOC16<<24,
'9' | R_PPC64_TOC16_LO<<24,
'9' | R_PPC64_TOC16_HI<<24,
@@ -1132,7 +1148,14 @@ func reltype(pn string, elftype int, siz
'9' | R_PPC64_TOC16_LO_DS<<24,
'9' | R_PPC64_REL16_LO<<24,
'9' | R_PPC64_REL16_HI<<24,
- '9' | R_PPC64_REL16_HA<<24:
+ '9' | R_PPC64_REL16_HA<<24,
+ 'z' | R_390_12<<24,
+ 'z' | R_390_16<<24,
+ 'z' | R_390_GOT12<<24,
+ 'z' | R_390_GOT16<<24,
+ 'z' | R_390_PC16<<24,
+ 'z' | R_390_PC16DBL<<24,
+ 'z' | R_390_PLT16DBL<<24:
*siz = 2
case '5' | R_ARM_ABS32<<24,
@@ -1160,11 +1183,27 @@ func reltype(pn string, elftype int, siz
'8' | R_386_GOTPC<<24,
'8' | R_386_GOT32X<<24,
'9' | R_PPC64_REL24<<24,
- '9' | R_PPC_REL32<<24:
+ '9' | R_PPC_REL32<<24,
+ 'z' | R_390_32<<24,
+ 'z' | R_390_PC32<<24,
+ 'z' | R_390_GOT32<<24,
+ 'z' | R_390_PLT32<<24,
+ 'z' | R_390_PC32DBL<<24,
+ 'z' | R_390_PLT32DBL<<24,
+ 'z' | R_390_GOTPCDBL<<24,
+ 'z' | R_390_GOTENT<<24:
*siz = 4
case '6' | R_X86_64_64<<24,
- '9' | R_PPC64_ADDR64<<24:
+ '9' | R_PPC64_ADDR64<<24,
+ 'z' | R_390_GLOB_DAT<<24,
+ 'z' | R_390_RELATIVE<<24,
+ 'z' | R_390_GOTOFF<<24,
+ 'z' | R_390_GOTPC<<24,
+ 'z' | R_390_64<<24,
+ 'z' | R_390_PC64<<24,
+ 'z' | R_390_GOT64<<24,
+ 'z' | R_390_PLT64<<24:
*siz = 8
}
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/lib.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/lib.go
--- 1.6.3-1/src/cmd/link/internal/ld/lib.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/lib.go 2016-07-21 13:36:09.000000000 +0000
@@ -329,7 +329,7 @@ func (mode *BuildMode) Set(s string) err
switch goos {
case "linux":
switch goarch {
- case "386", "amd64", "arm", "arm64", "ppc64le":
+ case "386", "amd64", "arm", "arm64", "ppc64le", "s390x":
default:
return badmode()
}
@@ -1173,6 +1173,24 @@ func hostlink() {
argv = append(argv, ldflag...)
+ if flag_race != 0 {
+ // On a system where the toolchain creates position independent
+ // executables by default, tsan initialization can fail. So we pass
+ // -no-pie here, but support for that flag is quite new and we test
+ // for its support first.
+ src := filepath.Join(tmpdir, "trivial.c")
+ if err := ioutil.WriteFile(src, []byte{}, 0666); err != nil {
+ Ctxt.Diag("WriteFile trivial.c failed: %v", err)
+ }
+ cmd := exec.Command(argv[0], "-c", "-no-pie", "trivial.c")
+ cmd.Dir = tmpdir
+ out, err := cmd.CombinedOutput()
+ supported := err == nil && !bytes.Contains(out, []byte("unrecognized"))
+ if supported {
+ argv = append(argv, "-no-pie")
+ }
+ }
+
for _, p := range strings.Fields(extldflags) {
argv = append(argv, p)
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/link.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/link.go
--- 1.6.3-1/src/cmd/link/internal/ld/link.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/link.go 2016-07-21 13:36:09.000000000 +0000
@@ -233,6 +233,7 @@ const (
RV_POWER_HI
RV_POWER_HA
RV_POWER_DS
+ RV_390_DBL
RV_CHECK_OVERFLOW = 1 << 8
RV_TYPE_MASK = RV_CHECK_OVERFLOW - 1
)
diff -pruN 1.6.3-1/src/cmd/link/internal/ld/symtab.go 1.6.3-1ubuntu1/src/cmd/link/internal/ld/symtab.go
--- 1.6.3-1/src/cmd/link/internal/ld/symtab.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/ld/symtab.go 2016-07-21 13:36:09.000000000 +0000
@@ -67,7 +67,7 @@ func putelfstr(s string) int {
func putelfsyment(off int, addr int64, size int64, info int, shndx int, other int) {
switch Thearch.Thechar {
- case '0', '6', '7', '9':
+ case '0', '6', '7', '9', 'z':
Thearch.Lput(uint32(off))
Cput(uint8(info))
Cput(uint8(other))
@@ -562,6 +562,7 @@ func symtab() {
adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
adduint(Ctxt, moduledata, uint64(len(Ctxt.Shlibs)))
}
+
// The rest of moduledata is zero initialized.
// When linking an object that does not contain the runtime we are
// creating the moduledata from scratch and it does not have a
diff -pruN 1.6.3-1/src/cmd/link/internal/s390x/asm.go 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/asm.go
--- 1.6.3-1/src/cmd/link/internal/s390x/asm.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/asm.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,643 @@
+// Inferno utils/5l/asm.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/internal/obj"
+ "cmd/link/internal/ld"
+ "debug/elf"
+ "fmt"
+ "log"
+)
+
+// gentext generates assembly to append the local moduledata to the global
+// moduledata linked list at initialization time. This is only done if the runtime
+// is in a different module.
+//
+// <go.link.addmoduledata>:
+//         larl  %r2, <local.moduledata>
+//         jg    <runtime.addmoduledata>
+// undef
+//
+// The job of appending the moduledata is delegated to runtime.addmoduledata.
+func gentext() {
+ if !ld.DynlinkingGo() {
+ return
+ }
+ addmoduledata := ld.Linklookup(ld.Ctxt, "runtime.addmoduledata", 0)
+ if addmoduledata.Type == obj.STEXT {
+ // we're linking a module containing the runtime -> no need for
+ // an init function
+ return
+ }
+ addmoduledata.Reachable = true
+ initfunc := ld.Linklookup(ld.Ctxt, "go.link.addmoduledata", 0)
+ initfunc.Type = obj.STEXT
+ initfunc.Local = true
+ initfunc.Reachable = true
+
+	// larl %r2, <local.moduledata> (Ctxt.Moduledata, via the R_PCREL/RV_390_DBL reloc below)
+ ld.Adduint8(ld.Ctxt, initfunc, 0xc0)
+ ld.Adduint8(ld.Ctxt, initfunc, 0x20)
+ lmd := ld.Addrel(initfunc)
+ lmd.Off = int32(initfunc.Size)
+ lmd.Siz = 4
+ lmd.Sym = ld.Ctxt.Moduledata
+ lmd.Type = obj.R_PCREL
+ lmd.Variant = ld.RV_390_DBL
+ lmd.Add = 2 + int64(lmd.Siz)
+ ld.Adduint32(ld.Ctxt, initfunc, 0)
+
+	// jg <runtime.addmoduledata> (via the R_CALL/RV_390_DBL reloc below)
+ ld.Adduint8(ld.Ctxt, initfunc, 0xc0)
+ ld.Adduint8(ld.Ctxt, initfunc, 0xf4)
+ rel := ld.Addrel(initfunc)
+ rel.Off = int32(initfunc.Size)
+ rel.Siz = 4
+ rel.Sym = ld.Linklookup(ld.Ctxt, "runtime.addmoduledata", 0)
+ rel.Type = obj.R_CALL
+ rel.Variant = ld.RV_390_DBL
+ rel.Add = 2 + int64(rel.Siz)
+ ld.Adduint32(ld.Ctxt, initfunc, 0)
+
+ // undef (for debugging)
+ ld.Adduint32(ld.Ctxt, initfunc, 0)
+
+ if ld.Ctxt.Etextp != nil {
+ ld.Ctxt.Etextp.Next = initfunc
+ } else {
+ ld.Ctxt.Textp = initfunc
+ }
+ ld.Ctxt.Etextp = initfunc
+ initarray_entry := ld.Linklookup(ld.Ctxt, "go.link.addmoduledatainit", 0)
+ initarray_entry.Reachable = true
+ initarray_entry.Local = true
+ initarray_entry.Type = obj.SINITARR
+ ld.Addaddr(ld.Ctxt, initarray_entry, initfunc)
+}
+
+func adddynrela(rel *ld.LSym, s *ld.LSym, r *ld.Reloc) {
+ log.Fatalf("adddynrela not implemented")
+}
+
+func adddynrel(s *ld.LSym, r *ld.Reloc) {
+ targ := r.Sym
+ ld.Ctxt.Cursym = s
+
+ switch r.Type {
+ default:
+ if r.Type >= 256 {
+ ld.Diag("unexpected relocation type %d", r.Type)
+ return
+ }
+
+ // Handle relocations found in ELF object files.
+ case 256 + ld.R_390_12,
+ 256 + ld.R_390_GOT12:
+ ld.Diag("s390x 12-bit relocations have not been implemented (relocation type %d)", r.Type-256)
+ return
+
+ case 256 + ld.R_390_8,
+ 256 + ld.R_390_16,
+ 256 + ld.R_390_32,
+ 256 + ld.R_390_64:
+ if targ.Type == obj.SDYNIMPORT {
+ ld.Diag("unexpected R_390_nn relocation for dynamic symbol %s", targ.Name)
+ }
+ r.Type = obj.R_ADDR
+ return
+
+ case 256 + ld.R_390_PC16,
+ 256 + ld.R_390_PC32,
+ 256 + ld.R_390_PC64:
+ if targ.Type == obj.SDYNIMPORT {
+ ld.Diag("unexpected R_390_PCnn relocation for dynamic symbol %s", targ.Name)
+ }
+ if targ.Type == 0 || targ.Type == obj.SXREF {
+ ld.Diag("unknown symbol %s in pcrel", targ.Name)
+ }
+ r.Type = obj.R_PCREL
+ r.Add += int64(r.Siz)
+ return
+
+ case 256 + ld.R_390_GOT16,
+ 256 + ld.R_390_GOT32,
+ 256 + ld.R_390_GOT64:
+ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
+ return
+
+ case 256 + ld.R_390_PLT16DBL,
+ 256 + ld.R_390_PLT32DBL:
+ r.Type = obj.R_PCREL
+ r.Variant = ld.RV_390_DBL
+ r.Add += int64(r.Siz)
+ if targ.Type == obj.SDYNIMPORT {
+ addpltsym(ld.Ctxt, targ)
+ r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ r.Add += int64(targ.Plt)
+ }
+ return
+
+ case 256 + ld.R_390_PLT32,
+ 256 + ld.R_390_PLT64:
+ r.Type = obj.R_PCREL
+ r.Add += int64(r.Siz)
+ if targ.Type == obj.SDYNIMPORT {
+ addpltsym(ld.Ctxt, targ)
+ r.Sym = ld.Linklookup(ld.Ctxt, ".plt", 0)
+ r.Add += int64(targ.Plt)
+ }
+ return
+
+ case 256 + ld.R_390_COPY:
+ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
+
+ case 256 + ld.R_390_GLOB_DAT:
+ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
+
+ case 256 + ld.R_390_JMP_SLOT:
+ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
+
+ case 256 + ld.R_390_RELATIVE:
+ ld.Diag("unimplemented S390x relocation: %v", r.Type-256)
+
+ case 256 + ld.R_390_GOTOFF:
+ if targ.Type == obj.SDYNIMPORT {
+ ld.Diag("unexpected R_390_GOTOFF relocation for dynamic symbol %s", targ.Name)
+ }
+ r.Type = obj.R_GOTOFF
+ return
+
+ case 256 + ld.R_390_GOTPC:
+ r.Type = obj.R_PCREL
+ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
+ r.Add += int64(r.Siz)
+ return
+
+ case 256 + ld.R_390_PC16DBL,
+ 256 + ld.R_390_PC32DBL:
+ r.Type = obj.R_PCREL
+ r.Variant = ld.RV_390_DBL
+ r.Add += int64(r.Siz)
+ if targ.Type == obj.SDYNIMPORT {
+ ld.Diag("unexpected R_390_PCnnDBL relocation for dynamic symbol %s", targ.Name)
+ }
+ return
+
+ case 256 + ld.R_390_GOTPCDBL:
+ r.Type = obj.R_PCREL
+ r.Variant = ld.RV_390_DBL
+ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
+ r.Add += int64(r.Siz)
+ return
+
+ case 256 + ld.R_390_GOTENT:
+ // if targ.Type != obj.SDYNIMPORT {
+ // ld.Diag("unexpected R_390_GOTENT relocation for non-dynamic symbol %s", targ.Name)
+ // }
+ addgotsym(targ)
+
+ r.Type = obj.R_PCREL
+ r.Variant = ld.RV_390_DBL
+ r.Sym = ld.Linklookup(ld.Ctxt, ".got", 0)
+ r.Add += int64(targ.Got)
+ r.Add += int64(r.Siz)
+ return
+ }
+ // Handle references to ELF symbols from our own object files.
+ if targ.Type != obj.SDYNIMPORT {
+ return
+ }
+
+ ld.Diag("unsupported relocation for dynamic symbol %s (type=%d stype=%d)", targ.Name, r.Type, targ.Type)
+}
+
+func elfreloc1(r *ld.Reloc, sectoff int64) int {
+ ld.Thearch.Vput(uint64(sectoff))
+
+ elfsym := r.Xsym.ElfsymForReloc()
+ switch r.Type {
+ default:
+ return -1
+
+ case obj.R_TLS_LE:
+ switch r.Siz {
+ default:
+ return -1
+ case 4:
+ // WARNING - silently ignored by linker in ELF64
+ ld.Thearch.Vput(ld.R_390_TLS_LE32 | uint64(elfsym)<<32)
+ case 8:
+ // WARNING - silently ignored by linker in ELF32
+ ld.Thearch.Vput(ld.R_390_TLS_LE64 | uint64(elfsym)<<32)
+ }
+
+ case obj.R_TLS_IE:
+ switch r.Siz {
+ default:
+ return -1
+ case 4:
+ ld.Thearch.Vput(ld.R_390_TLS_IEENT | uint64(elfsym)<<32)
+ }
+
+ case obj.R_ADDR:
+ switch r.Siz {
+ default:
+ return -1
+ case 4:
+ ld.Thearch.Vput(ld.R_390_32 | uint64(elfsym)<<32)
+ case 8:
+ ld.Thearch.Vput(ld.R_390_64 | uint64(elfsym)<<32)
+ }
+
+ case obj.R_GOTPCREL:
+ if r.Siz == 4 {
+ ld.Thearch.Vput(ld.R_390_GOTENT | uint64(elfsym)<<32)
+ } else {
+ return -1
+ }
+
+ case obj.R_PCREL, obj.R_PCRELDBL, obj.R_CALL:
+ elfrel := ld.R_390_NONE
+ isdbl := r.Variant&ld.RV_TYPE_MASK == ld.RV_390_DBL
+ // TODO(mundaym): all DBL style relocations should be signalled using the variant.
+ switch r.Type {
+ case obj.R_PCRELDBL, obj.R_CALL:
+ isdbl = true
+ }
+ if r.Xsym.Type == obj.SDYNIMPORT && (r.Xsym.ElfType == elf.STT_FUNC || r.Type == obj.R_CALL) {
+ if isdbl {
+ switch r.Siz {
+ case 2:
+ elfrel = ld.R_390_PLT16DBL
+ case 4:
+ elfrel = ld.R_390_PLT32DBL
+ }
+ } else {
+ switch r.Siz {
+ case 4:
+ elfrel = ld.R_390_PLT32
+ case 8:
+ elfrel = ld.R_390_PLT64
+ }
+ }
+ } else {
+ if isdbl {
+ switch r.Siz {
+ case 2:
+ elfrel = ld.R_390_PC16DBL
+ case 4:
+ elfrel = ld.R_390_PC32DBL
+ }
+ } else {
+ switch r.Siz {
+ case 2:
+ elfrel = ld.R_390_PC16
+ case 4:
+ elfrel = ld.R_390_PC32
+ case 8:
+ elfrel = ld.R_390_PC64
+ }
+ }
+ }
+ if elfrel == ld.R_390_NONE {
+ return -1 // unsupported size/dbl combination
+ }
+ ld.Thearch.Vput(uint64(elfrel) | uint64(elfsym)<<32)
+ }
+
+ ld.Thearch.Vput(uint64(r.Xadd))
+ return 0
+}
+
+func elfsetupplt() {
+ plt := ld.Linklookup(ld.Ctxt, ".plt", 0)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
+ if plt.Size == 0 {
+ // stg %r1,56(%r15)
+ ld.Adduint8(ld.Ctxt, plt, 0xe3)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Adduint8(ld.Ctxt, plt, 0xf0)
+ ld.Adduint8(ld.Ctxt, plt, 0x38)
+ ld.Adduint8(ld.Ctxt, plt, 0x00)
+ ld.Adduint8(ld.Ctxt, plt, 0x24)
+ // larl %r1,_GLOBAL_OFFSET_TABLE_
+ ld.Adduint8(ld.Ctxt, plt, 0xc0)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Addpcrelplus(ld.Ctxt, plt, got, 6)
+ // mvc 48(8,%r15),8(%r1)
+ ld.Adduint8(ld.Ctxt, plt, 0xd2)
+ ld.Adduint8(ld.Ctxt, plt, 0x07)
+ ld.Adduint8(ld.Ctxt, plt, 0xf0)
+ ld.Adduint8(ld.Ctxt, plt, 0x30)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Adduint8(ld.Ctxt, plt, 0x08)
+ // lg %r1,16(%r1)
+ ld.Adduint8(ld.Ctxt, plt, 0xe3)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Adduint8(ld.Ctxt, plt, 0x10)
+ ld.Adduint8(ld.Ctxt, plt, 0x00)
+ ld.Adduint8(ld.Ctxt, plt, 0x04)
+ // br %r1
+ ld.Adduint8(ld.Ctxt, plt, 0x07)
+ ld.Adduint8(ld.Ctxt, plt, 0xf1)
+ // nopr %r0
+ ld.Adduint8(ld.Ctxt, plt, 0x07)
+ ld.Adduint8(ld.Ctxt, plt, 0x00)
+ // nopr %r0
+ ld.Adduint8(ld.Ctxt, plt, 0x07)
+ ld.Adduint8(ld.Ctxt, plt, 0x00)
+ // nopr %r0
+ ld.Adduint8(ld.Ctxt, plt, 0x07)
+ ld.Adduint8(ld.Ctxt, plt, 0x00)
+
+ // assume got->size == 0 too
+ ld.Addaddrplus(ld.Ctxt, got, ld.Linklookup(ld.Ctxt, ".dynamic", 0), 0)
+
+ ld.Adduint64(ld.Ctxt, got, 0)
+ ld.Adduint64(ld.Ctxt, got, 0)
+ }
+}
+
+func machoreloc1(r *ld.Reloc, sectoff int64) int {
+ return -1
+}
+
+func archreloc(r *ld.Reloc, s *ld.LSym, val *int64) int {
+ if ld.Linkmode == ld.LinkExternal {
+ return -1
+ }
+
+ switch r.Type {
+ case obj.R_CONST:
+ *val = r.Add
+ return 0
+
+ case obj.R_GOTOFF:
+ *val = ld.Symaddr(r.Sym) + r.Add - ld.Symaddr(ld.Linklookup(ld.Ctxt, ".got", 0))
+ return 0
+ }
+
+ return -1
+}
+
+func archrelocvariant(r *ld.Reloc, s *ld.LSym, t int64) int64 {
+ switch r.Variant & ld.RV_TYPE_MASK {
+ default:
+ ld.Diag("unexpected relocation variant %d", r.Variant)
+ return t
+
+ case ld.RV_NONE:
+ return t
+
+ case ld.RV_390_DBL:
+ if (t & 1) != 0 {
+ ld.Diag("%s+%v is not 2-byte aligned", r.Sym.Name, r.Sym.Value)
+ }
+ return t >> 1
+ }
+}
+
+func addpltsym(ctxt *ld.Link, s *ld.LSym) {
+ if s.Plt >= 0 {
+ return
+ }
+
+ ld.Adddynsym(ctxt, s)
+
+ if ld.Iself {
+ plt := ld.Linklookup(ctxt, ".plt", 0)
+ got := ld.Linklookup(ctxt, ".got", 0)
+ rela := ld.Linklookup(ctxt, ".rela.plt", 0)
+ if plt.Size == 0 {
+ elfsetupplt()
+ }
+ // larl %r1,_GLOBAL_OFFSET_TABLE_+index
+
+ ld.Adduint8(ctxt, plt, 0xc0)
+ ld.Adduint8(ctxt, plt, 0x10)
+ ld.Addpcrelplus(ctxt, plt, got, got.Size+6) // need variant?
+
+ // add to got: pointer to current pos in plt
+ ld.Addaddrplus(ctxt, got, plt, plt.Size+8) // weird but correct
+ // lg %r1,0(%r1)
+ ld.Adduint8(ctxt, plt, 0xe3)
+ ld.Adduint8(ctxt, plt, 0x10)
+ ld.Adduint8(ctxt, plt, 0x10)
+ ld.Adduint8(ctxt, plt, 0x00)
+ ld.Adduint8(ctxt, plt, 0x00)
+ ld.Adduint8(ctxt, plt, 0x04)
+ // br %r1
+ ld.Adduint8(ctxt, plt, 0x07)
+ ld.Adduint8(ctxt, plt, 0xf1)
+ // basr %r1,%r0
+ ld.Adduint8(ctxt, plt, 0x0d)
+ ld.Adduint8(ctxt, plt, 0x10)
+ // lgf %r1,12(%r1)
+ ld.Adduint8(ctxt, plt, 0xe3)
+ ld.Adduint8(ctxt, plt, 0x10)
+ ld.Adduint8(ctxt, plt, 0x10)
+ ld.Adduint8(ctxt, plt, 0x0c)
+ ld.Adduint8(ctxt, plt, 0x00)
+ ld.Adduint8(ctxt, plt, 0x14)
+ // jg .plt
+ ld.Adduint8(ctxt, plt, 0xc0)
+ ld.Adduint8(ctxt, plt, 0xf4)
+
+ ld.Adduint32(ctxt, plt, uint32(-((plt.Size - 2) >> 1))) // roll-your-own relocation
+ //.plt index
+ ld.Adduint32(ctxt, plt, uint32(rela.Size)) // rela size before current entry
+
+ // rela
+ ld.Addaddrplus(ctxt, rela, got, got.Size-8)
+
+ ld.Adduint64(ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_JMP_SLOT))
+ ld.Adduint64(ctxt, rela, 0)
+
+ s.Plt = int32(plt.Size - 32)
+
+ } else {
+ ld.Diag("addpltsym: unsupported binary format")
+ }
+}
+
+func addgotsym(s *ld.LSym) {
+ if s.Got >= 0 {
+ return
+ }
+
+ ld.Adddynsym(ld.Ctxt, s)
+ got := ld.Linklookup(ld.Ctxt, ".got", 0)
+ s.Got = int32(got.Size)
+ ld.Adduint64(ld.Ctxt, got, 0)
+
+ if ld.Iself {
+ rela := ld.Linklookup(ld.Ctxt, ".rela", 0)
+ ld.Addaddrplus(ld.Ctxt, rela, got, int64(s.Got))
+ ld.Adduint64(ld.Ctxt, rela, ld.ELF64_R_INFO(uint32(s.Dynid), ld.R_390_GLOB_DAT))
+ ld.Adduint64(ld.Ctxt, rela, 0)
+ } else {
+ ld.Diag("addgotsym: unsupported binary format")
+ }
+}
+
+func asmb() {
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f asmb\n", obj.Cputime())
+ }
+ ld.Bso.Flush()
+
+ if ld.Iself {
+ ld.Asmbelfsetup()
+ }
+
+ sect := ld.Segtext.Sect
+ ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
+ ld.Codeblk(int64(sect.Vaddr), int64(sect.Length))
+ for sect = sect.Next; sect != nil; sect = sect.Next {
+ ld.Cseek(int64(sect.Vaddr - ld.Segtext.Vaddr + ld.Segtext.Fileoff))
+ ld.Datblk(int64(sect.Vaddr), int64(sect.Length))
+ }
+
+ if ld.Segrodata.Filelen > 0 {
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f rodatblk\n", obj.Cputime())
+ }
+ ld.Bso.Flush()
+
+ ld.Cseek(int64(ld.Segrodata.Fileoff))
+ ld.Datblk(int64(ld.Segrodata.Vaddr), int64(ld.Segrodata.Filelen))
+ }
+
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f datblk\n", obj.Cputime())
+ }
+ ld.Bso.Flush()
+
+ ld.Cseek(int64(ld.Segdata.Fileoff))
+ ld.Datblk(int64(ld.Segdata.Vaddr), int64(ld.Segdata.Filelen))
+
+ /* output symbol table */
+ ld.Symsize = 0
+
+ ld.Lcsize = 0
+ symo := uint32(0)
+ if ld.Debug['s'] == 0 {
+ // TODO: rationalize
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f sym\n", obj.Cputime())
+ }
+ ld.Bso.Flush()
+ switch ld.HEADTYPE {
+ default:
+ if ld.Iself {
+ symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
+ symo = uint32(ld.Rnd(int64(symo), int64(ld.INITRND)))
+ }
+
+ case obj.Hplan9:
+ symo = uint32(ld.Segdata.Fileoff + ld.Segdata.Filelen)
+ }
+
+ ld.Cseek(int64(symo))
+ switch ld.HEADTYPE {
+ default:
+ if ld.Iself {
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f elfsym\n", obj.Cputime())
+ }
+ ld.Asmelfsym()
+ ld.Cflush()
+ ld.Cwrite(ld.Elfstrdat)
+
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f dwarf\n", obj.Cputime())
+ }
+ ld.Dwarfemitdebugsections()
+
+ if ld.Linkmode == ld.LinkExternal {
+ ld.Elfemitreloc()
+ }
+ }
+
+ case obj.Hplan9:
+ ld.Asmplan9sym()
+ ld.Cflush()
+
+ sym := ld.Linklookup(ld.Ctxt, "pclntab", 0)
+ if sym != nil {
+ ld.Lcsize = int32(len(sym.P))
+ for i := 0; int32(i) < ld.Lcsize; i++ {
+ ld.Cput(uint8(sym.P[i]))
+ }
+
+ ld.Cflush()
+ }
+ }
+ }
+
+ ld.Ctxt.Cursym = nil
+ if ld.Debug['v'] != 0 {
+ fmt.Fprintf(&ld.Bso, "%5.2f header\n", obj.Cputime())
+ }
+ ld.Bso.Flush()
+ ld.Cseek(0)
+ switch ld.HEADTYPE {
+ default:
+ case obj.Hplan9: /* plan 9 */
+ ld.Thearch.Lput(0x647) /* magic */
+ ld.Thearch.Lput(uint32(ld.Segtext.Filelen)) /* sizes */
+ ld.Thearch.Lput(uint32(ld.Segdata.Filelen))
+ ld.Thearch.Lput(uint32(ld.Segdata.Length - ld.Segdata.Filelen))
+ ld.Thearch.Lput(uint32(ld.Symsize)) /* nsyms */
+ ld.Thearch.Lput(uint32(ld.Entryvalue())) /* va of entry */
+ ld.Thearch.Lput(0)
+ ld.Thearch.Lput(uint32(ld.Lcsize))
+
+ case obj.Hlinux,
+ obj.Hfreebsd,
+ obj.Hnetbsd,
+ obj.Hopenbsd,
+ obj.Hnacl:
+ ld.Asmbelf(int64(symo))
+ }
+
+ ld.Cflush()
+ if ld.Debug['c'] != 0 {
+ fmt.Printf("textsize=%d\n", ld.Segtext.Filelen)
+ fmt.Printf("datsize=%d\n", ld.Segdata.Filelen)
+ fmt.Printf("bsssize=%d\n", ld.Segdata.Length-ld.Segdata.Filelen)
+ fmt.Printf("symsize=%d\n", ld.Symsize)
+ fmt.Printf("lcsize=%d\n", ld.Lcsize)
+ fmt.Printf("total=%d\n", ld.Segtext.Filelen+ld.Segdata.Length+uint64(ld.Symsize)+uint64(ld.Lcsize))
+ }
+}
diff -pruN 1.6.3-1/src/cmd/link/internal/s390x/l.go 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/l.go
--- 1.6.3-1/src/cmd/link/internal/s390x/l.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/l.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,78 @@
+// Inferno utils/5l/asm.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/asm.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+// Writing object files.
+
+// cmd/9l/l.h from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+const (
+ thechar = 'z'
+ PtrSize = 8
+ IntSize = 8
+ RegSize = 8
+ MaxAlign = 32 // max data alignment
+ FuncAlign = 8
+ MINLC = 2
+)
+
+/* Used by ../internal/ld/dwarf.go */
+const (
+ DWARFREGSP = 15
+ DWARFREGLR = 14
+)
diff -pruN 1.6.3-1/src/cmd/link/internal/s390x/obj.go 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/obj.go
--- 1.6.3-1/src/cmd/link/internal/s390x/obj.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/internal/s390x/obj.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,115 @@
+// Inferno utils/5l/obj.c
+// http://code.google.com/p/inferno-os/source/browse/utils/5l/obj.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package s390x
+
+import (
+ "cmd/internal/obj"
+ "cmd/link/internal/ld"
+ "fmt"
+)
+
+// Reading object files.
+
+func Main() {
+ linkarchinit()
+ ld.Ldmain()
+}
+
+func linkarchinit() {
+ ld.Thestring = obj.Getgoarch()
+ ld.Thelinkarch = &ld.Links390x
+
+ ld.Thearch.Thechar = thechar
+ ld.Thearch.Ptrsize = ld.Thelinkarch.Ptrsize
+ ld.Thearch.Intsize = ld.Thelinkarch.Ptrsize
+ ld.Thearch.Regsize = ld.Thelinkarch.Regsize
+ ld.Thearch.Funcalign = FuncAlign
+ ld.Thearch.Maxalign = MaxAlign
+ ld.Thearch.Minlc = MINLC
+ ld.Thearch.Dwarfregsp = DWARFREGSP
+ ld.Thearch.Dwarfreglr = DWARFREGLR
+
+ ld.Thearch.Adddynrel = adddynrel
+ ld.Thearch.Archinit = archinit
+ ld.Thearch.Archreloc = archreloc
+ ld.Thearch.Archrelocvariant = archrelocvariant
+ ld.Thearch.Asmb = asmb // in asm.go
+ ld.Thearch.Elfreloc1 = elfreloc1
+ ld.Thearch.Elfsetupplt = elfsetupplt
+ ld.Thearch.Gentext = gentext
+ ld.Thearch.Machoreloc1 = machoreloc1
+ ld.Thearch.Lput = ld.Lputb
+ ld.Thearch.Wput = ld.Wputb
+ ld.Thearch.Vput = ld.Vputb
+
+ ld.Thearch.Linuxdynld = "/lib64/ld64.so.1"
+
+ // not relevant for s390x
+ ld.Thearch.Freebsddynld = "XXX"
+ ld.Thearch.Openbsddynld = "XXX"
+ ld.Thearch.Netbsddynld = "XXX"
+ ld.Thearch.Dragonflydynld = "XXX"
+ ld.Thearch.Solarisdynld = "XXX"
+}
+
+func archinit() {
+ // getgoextlinkenabled is based on GO_EXTLINK_ENABLED when
+ // Go was built; see ../../make.bash.
+ if ld.Linkmode == ld.LinkAuto && obj.Getgoextlinkenabled() == "0" {
+ ld.Linkmode = ld.LinkInternal
+ }
+
+ if ld.Buildmode == ld.BuildmodeCArchive || ld.Buildmode == ld.BuildmodeCShared || ld.DynlinkingGo() {
+ ld.Linkmode = ld.LinkExternal
+ }
+
+ switch ld.HEADTYPE {
+ default:
+ ld.Exitf("unknown -H option: %v", ld.HEADTYPE)
+
+ case obj.Hlinux: /* s390x elf */
+ ld.Elfinit()
+ ld.HEADR = ld.ELFRESERVE
+ if ld.INITTEXT == -1 {
+ ld.INITTEXT = 0x10000 + int64(ld.HEADR)
+ }
+ if ld.INITDAT == -1 {
+ ld.INITDAT = 0
+ }
+ if ld.INITRND == -1 {
+ ld.INITRND = 0x10000
+ }
+ }
+
+ if ld.INITDAT != 0 && ld.INITRND != 0 {
+ fmt.Printf("warning: -D0x%x is ignored because of -R0x%x\n", uint64(ld.INITDAT), uint32(ld.INITRND))
+ }
+}
diff -pruN 1.6.3-1/src/cmd/link/main.go 1.6.3-1ubuntu1/src/cmd/link/main.go
--- 1.6.3-1/src/cmd/link/main.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/link/main.go 2016-07-21 13:36:09.000000000 +0000
@@ -11,6 +11,7 @@ import (
"cmd/link/internal/arm64"
"cmd/link/internal/mips64"
"cmd/link/internal/ppc64"
+ "cmd/link/internal/s390x"
"cmd/link/internal/x86"
"fmt"
"os"
@@ -33,5 +34,7 @@ func main() {
mips64.Main()
case "ppc64", "ppc64le":
ppc64.Main()
+ case "s390x":
+ s390x.Main()
}
}
diff -pruN 1.6.3-1/src/cmd/objdump/objdump_test.go 1.6.3-1ubuntu1/src/cmd/objdump/objdump_test.go
--- 1.6.3-1/src/cmd/objdump/objdump_test.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/objdump/objdump_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -107,6 +107,8 @@ func TestDisasm(t *testing.T) {
t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
case "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559", runtime.GOARCH)
+ case "s390x":
+ t.Skipf("skipping on %s", runtime.GOARCH)
}
testDisasm(t)
}
@@ -123,6 +125,8 @@ func TestDisasmExtld(t *testing.T) {
t.Skipf("skipping on %s, issue 10106", runtime.GOARCH)
case "mips64", "mips64le":
t.Skipf("skipping on %s, issue 12559 and 12560", runtime.GOARCH)
+ case "s390x":
+ t.Skipf("skipping on %s", runtime.GOARCH)
}
// TODO(jsing): Reenable once openbsd/arm has external linking support.
if runtime.GOOS == "openbsd" && runtime.GOARCH == "arm" {
diff -pruN 1.6.3-1/src/cmd/vet/asmdecl.go 1.6.3-1ubuntu1/src/cmd/vet/asmdecl.go
--- 1.6.3-1/src/cmd/vet/asmdecl.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/cmd/vet/asmdecl.go 2016-07-21 13:36:09.000000000 +0000
@@ -65,6 +65,7 @@ var (
asmArchAmd64p32 = asmArch{"amd64p32", 4, 4, 8, false, "SP", false}
asmArchPpc64 = asmArch{"ppc64", 8, 8, 8, true, "R1", true}
asmArchPpc64LE = asmArch{"ppc64le", 8, 8, 8, false, "R1", true}
+ asmArchS390x = asmArch{"s390x", 8, 8, 8, true, "R15", true}
arches = []*asmArch{
&asmArch386,
@@ -74,6 +75,7 @@ var (
&asmArchAmd64p32,
&asmArchPpc64,
&asmArchPpc64LE,
+ &asmArchS390x,
}
)
diff -pruN 1.6.3-1/src/crypto/aes/asm_s390x.s 1.6.3-1ubuntu1/src/crypto/aes/asm_s390x.s
--- 1.6.3-1/src/crypto/aes/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/aes/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,97 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func hasAsm() bool
+// returns whether the AES-128, AES-192 and AES-256
+// cipher message functions are supported.
+TEXT ·hasAsm(SB),NOSPLIT,$16-1
+ XOR R0, R0 // set function code to 0 (query)
+ LA 8(R15), R1
+ WORD $0xB92E0024 // KM-Query
+
+ // check if bits 18-20 are set
+ MOVD 8(R15), R2
+ SRD $40, R2
+ AND $0x38, R2 // mask bits 18-20 (00111000)
+ CMPBNE R2, $0x38, notfound
+ MOVBZ $1, R1
+ MOVB R1, ret+0(FP)
+ RET
+notfound:
+ MOVBZ R0, ret+0(FP)
+ MOVD $0, 0(R0)
+ RET
+
+// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·encryptBlockAsm(SB),NOSPLIT,$0-32
+ MOVD nr+0(FP), R7
+ MOVD xk+8(FP), R1
+ MOVD dst+16(FP), R2
+ MOVD src+24(FP), R4
+ MOVD $16, R5
+ CMPBEQ R7, $14, aes256
+ CMPBEQ R7, $12, aes192
+aes128:
+ MOVBZ $18, R0
+ BR enc
+aes192:
+ MOVBZ $19, R0
+ BR enc
+aes256:
+ MOVBZ $20, R0
+enc:
+ WORD $0xB92E0024 // KM-AES
+ BVS enc
+ XOR R0, R0
+ RET
+
+// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
+TEXT ·decryptBlockAsm(SB),NOSPLIT,$0-32
+ MOVD nr+0(FP), R7
+ MOVD xk+8(FP), R1
+ MOVD dst+16(FP), R2
+ MOVD src+24(FP), R4
+ MOVD $16, R5
+ CMPBEQ R7, $14, aes256
+ CMPBEQ R7, $12, aes192
+aes128:
+ MOVBZ $(128+18), R0
+ BR dec
+aes192:
+ MOVBZ $(128+19), R0
+ BR dec
+aes256:
+ MOVBZ $(128+20), R0
+dec:
+ WORD $0xB92E0024 // KM-AES
+ BVS dec
+ XOR R0, R0
+ RET
+
+// func expandKeyAsm(nr int, key *byte, enc, dec *uint32)
+// We do NOT expand the keys here as the KM command just
+// expects the cryptographic key.
+// Instead just copy the needed bytes from the key into
+// the encryption/decryption expanded keys.
+TEXT ·expandKeyAsm(SB),NOSPLIT,$0-32
+ MOVD nr+0(FP), R1
+ MOVD key+8(FP), R2
+ MOVD enc+16(FP), R3
+ MOVD dec+24(FP), R4
+ CMPBEQ R1, $14, aes256
+ CMPBEQ R1, $12, aes192
+aes128:
+ MVC $(128/8), 0(R2), 0(R3)
+ MVC $(128/8), 0(R2), 0(R4)
+ RET
+aes192:
+ MVC $(192/8), 0(R2), 0(R3)
+ MVC $(192/8), 0(R2), 0(R4)
+ RET
+aes256:
+ MVC $(256/8), 0(R2), 0(R3)
+ MVC $(256/8), 0(R2), 0(R4)
+ RET
diff -pruN 1.6.3-1/src/crypto/aes/cipher_asm.go 1.6.3-1ubuntu1/src/crypto/aes/cipher_asm.go
--- 1.6.3-1/src/crypto/aes/cipher_asm.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/aes/cipher_asm.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64
+// +build amd64 s390x
package aes
diff -pruN 1.6.3-1/src/crypto/aes/cipher_generic.go 1.6.3-1ubuntu1/src/crypto/aes/cipher_generic.go
--- 1.6.3-1/src/crypto/aes/cipher_generic.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/aes/cipher_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64
+// +build !amd64,!s390x
package aes
@@ -17,11 +17,3 @@ func decryptBlock(xk []uint32, dst, src
func expandKey(key []byte, enc, dec []uint32) {
expandKeyGo(key, enc, dec)
}
-
-func hasGCMAsm() bool {
- return false
-}
-
-type aesCipherGCM struct {
- aesCipher
-}
diff -pruN 1.6.3-1/src/crypto/aes/gcm_generic.go 1.6.3-1ubuntu1/src/crypto/aes/gcm_generic.go
--- 1.6.3-1/src/crypto/aes/gcm_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/aes/gcm_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,15 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64
+
+package aes
+
+func hasGCMAsm() bool {
+ return false
+}
+
+type aesCipherGCM struct {
+ aesCipher
+}
diff -pruN 1.6.3-1/src/crypto/cipher/xor.go 1.6.3-1ubuntu1/src/crypto/cipher/xor.go
--- 1.6.3-1/src/crypto/cipher/xor.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/cipher/xor.go 2016-07-21 13:36:09.000000000 +0000
@@ -10,7 +10,7 @@ import (
)
const wordSize = int(unsafe.Sizeof(uintptr(0)))
-const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64"
+const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "s390x"
// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
diff -pruN 1.6.3-1/src/crypto/md5/md5block_decl.go 1.6.3-1ubuntu1/src/crypto/md5/md5block_decl.go
--- 1.6.3-1/src/crypto/md5/md5block_decl.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/md5/md5block_decl.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32 386 arm
+// +build amd64 amd64p32 386 arm s390x
package md5
diff -pruN 1.6.3-1/src/crypto/md5/md5block_generic.go 1.6.3-1ubuntu1/src/crypto/md5/md5block_generic.go
--- 1.6.3-1/src/crypto/md5/md5block_generic.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/md5/md5block_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!amd64p32,!386,!arm
+// +build !amd64,!amd64p32,!386,!arm,!s390x
package md5
diff -pruN 1.6.3-1/src/crypto/md5/md5block_s390x.s 1.6.3-1ubuntu1/src/crypto/md5/md5block_s390x.s
--- 1.6.3-1/src/crypto/md5/md5block_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/md5/md5block_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,177 @@
+// Adapted from md5block_amd64.s by the Go Authors.
+//
+// Original source:
+// http://www.zorinaq.com/papers/md5-amd64.html
+// http://www.zorinaq.com/papers/md5-amd64.tar.bz2
+//
+// Translated from Perl generating GNU assembly into
+// #defines generating 6a assembly by the Go Authors.
+//
+// MD5 optimized for AMD64.
+//
+// Author: Marc Bevand
+// Licence: I hereby disclaim the copyright on this code and place it
+// in the public domain.
+
+#include "textflag.h"
+
+TEXT ·block(SB),NOSPLIT,$16-32
+ MOVD dig+0(FP), R1
+ MOVD p+8(FP), R6
+ MOVD p_len+16(FP), R5
+ AND $-64, R5
+ LAY (R6)(R5*1), R7
+
+ LMY 0(R1), R2, R5
+ CMPBEQ R6, R7, end
+
+loop:
+ STMY R2, R5, tmp-16(SP)
+
+ MOVWBR 0(R6), R8
+ MOVWZ R5, R9
+
+#define ROUND1(a, b, c, d, index, const, shift) \
+ XOR c, R9; \
+ ADD $const, a; \
+ ADD R8, a; \
+ AND b, R9; \
+ XOR d, R9; \
+ MOVWBR (index*4)(R6), R8; \
+ ADD R9, a; \
+ RLL $shift, a; \
+ MOVWZ c, R9; \
+ ADD b, a
+
+ ROUND1(R2,R3,R4,R5, 1,0xd76aa478, 7);
+ ROUND1(R5,R2,R3,R4, 2,0xe8c7b756,12);
+ ROUND1(R4,R5,R2,R3, 3,0x242070db,17);
+ ROUND1(R3,R4,R5,R2, 4,0xc1bdceee,22);
+ ROUND1(R2,R3,R4,R5, 5,0xf57c0faf, 7);
+ ROUND1(R5,R2,R3,R4, 6,0x4787c62a,12);
+ ROUND1(R4,R5,R2,R3, 7,0xa8304613,17);
+ ROUND1(R3,R4,R5,R2, 8,0xfd469501,22);
+ ROUND1(R2,R3,R4,R5, 9,0x698098d8, 7);
+ ROUND1(R5,R2,R3,R4,10,0x8b44f7af,12);
+ ROUND1(R4,R5,R2,R3,11,0xffff5bb1,17);
+ ROUND1(R3,R4,R5,R2,12,0x895cd7be,22);
+ ROUND1(R2,R3,R4,R5,13,0x6b901122, 7);
+ ROUND1(R5,R2,R3,R4,14,0xfd987193,12);
+ ROUND1(R4,R5,R2,R3,15,0xa679438e,17);
+ ROUND1(R3,R4,R5,R2, 0,0x49b40821,22);
+
+ MOVWBR (1*4)(R6), R8
+ MOVWZ R5, R9
+ MOVWZ R5, R1
+
+#define ROUND2(a, b, c, d, index, const, shift) \
+ XOR $0xffffffff, R9; \ // NOTW R9
+ ADD $const, a; \
+ ADD R8, a; \
+ AND b, R1; \
+ AND c, R9; \
+ MOVWBR (index*4)(R6), R8; \
+ OR R9, R1; \
+ MOVWZ c, R9; \
+ ADD R1, a; \
+ MOVWZ c, R1; \
+ RLL $shift, a; \
+ ADD b, a
+
+ ROUND2(R2,R3,R4,R5, 6,0xf61e2562, 5);
+ ROUND2(R5,R2,R3,R4,11,0xc040b340, 9);
+ ROUND2(R4,R5,R2,R3, 0,0x265e5a51,14);
+ ROUND2(R3,R4,R5,R2, 5,0xe9b6c7aa,20);
+ ROUND2(R2,R3,R4,R5,10,0xd62f105d, 5);
+ ROUND2(R5,R2,R3,R4,15, 0x2441453, 9);
+ ROUND2(R4,R5,R2,R3, 4,0xd8a1e681,14);
+ ROUND2(R3,R4,R5,R2, 9,0xe7d3fbc8,20);
+ ROUND2(R2,R3,R4,R5,14,0x21e1cde6, 5);
+ ROUND2(R5,R2,R3,R4, 3,0xc33707d6, 9);
+ ROUND2(R4,R5,R2,R3, 8,0xf4d50d87,14);
+ ROUND2(R3,R4,R5,R2,13,0x455a14ed,20);
+ ROUND2(R2,R3,R4,R5, 2,0xa9e3e905, 5);
+ ROUND2(R5,R2,R3,R4, 7,0xfcefa3f8, 9);
+ ROUND2(R4,R5,R2,R3,12,0x676f02d9,14);
+ ROUND2(R3,R4,R5,R2, 0,0x8d2a4c8a,20);
+
+ MOVWBR (5*4)(R6), R8
+ MOVWZ R4, R9
+
+#define ROUND3(a, b, c, d, index, const, shift) \
+ ADD $const, a; \
+ ADD R8, a; \
+ MOVWBR (index*4)(R6), R8; \
+ XOR d, R9; \
+ XOR b, R9; \
+ ADD R9, a; \
+ RLL $shift, a; \
+ MOVWZ b, R9; \
+ ADD b, a
+
+ ROUND3(R2,R3,R4,R5, 8,0xfffa3942, 4);
+ ROUND3(R5,R2,R3,R4,11,0x8771f681,11);
+ ROUND3(R4,R5,R2,R3,14,0x6d9d6122,16);
+ ROUND3(R3,R4,R5,R2, 1,0xfde5380c,23);
+ ROUND3(R2,R3,R4,R5, 4,0xa4beea44, 4);
+ ROUND3(R5,R2,R3,R4, 7,0x4bdecfa9,11);
+ ROUND3(R4,R5,R2,R3,10,0xf6bb4b60,16);
+ ROUND3(R3,R4,R5,R2,13,0xbebfbc70,23);
+ ROUND3(R2,R3,R4,R5, 0,0x289b7ec6, 4);
+ ROUND3(R5,R2,R3,R4, 3,0xeaa127fa,11);
+ ROUND3(R4,R5,R2,R3, 6,0xd4ef3085,16);
+ ROUND3(R3,R4,R5,R2, 9, 0x4881d05,23);
+ ROUND3(R2,R3,R4,R5,12,0xd9d4d039, 4);
+ ROUND3(R5,R2,R3,R4,15,0xe6db99e5,11);
+ ROUND3(R4,R5,R2,R3, 2,0x1fa27cf8,16);
+ ROUND3(R3,R4,R5,R2, 0,0xc4ac5665,23);
+
+ MOVWBR (0*4)(R6), R8
+ MOVWZ $0xffffffff, R9
+ XOR R5, R9
+
+#define ROUND4(a, b, c, d, index, const, shift) \
+ ADD $const, a; \
+ ADD R8, a; \
+ OR b, R9; \
+ XOR c, R9; \
+ ADD R9, a; \
+ MOVWBR (index*4)(R6), R8; \
+ MOVWZ $0xffffffff, R9; \
+ RLL $shift, a; \
+ XOR c, R9; \
+ ADD b, a
+
+ ROUND4(R2,R3,R4,R5, 7,0xf4292244, 6);
+ ROUND4(R5,R2,R3,R4,14,0x432aff97,10);
+ ROUND4(R4,R5,R2,R3, 5,0xab9423a7,15);
+ ROUND4(R3,R4,R5,R2,12,0xfc93a039,21);
+ ROUND4(R2,R3,R4,R5, 3,0x655b59c3, 6);
+ ROUND4(R5,R2,R3,R4,10,0x8f0ccc92,10);
+ ROUND4(R4,R5,R2,R3, 1,0xffeff47d,15);
+ ROUND4(R3,R4,R5,R2, 8,0x85845dd1,21);
+ ROUND4(R2,R3,R4,R5,15,0x6fa87e4f, 6);
+ ROUND4(R5,R2,R3,R4, 6,0xfe2ce6e0,10);
+ ROUND4(R4,R5,R2,R3,13,0xa3014314,15);
+ ROUND4(R3,R4,R5,R2, 4,0x4e0811a1,21);
+ ROUND4(R2,R3,R4,R5,11,0xf7537e82, 6);
+ ROUND4(R5,R2,R3,R4, 2,0xbd3af235,10);
+ ROUND4(R4,R5,R2,R3, 9,0x2ad7d2bb,15);
+ ROUND4(R3,R4,R5,R2, 0,0xeb86d391,21);
+
+ MOVWZ tmp-16(SP), R1
+ ADD R1, R2
+ MOVWZ tmp-12(SP), R1
+ ADD R1, R3
+ MOVWZ tmp-8(SP), R1
+ ADD R1, R4
+ MOVWZ tmp-4(SP), R1
+ ADD R1, R5
+
+ ADD $64, R6
+ CMPBLT R6, R7, loop
+
+end:
+ MOVD dig+0(FP), R1
+ STMY R2, R5, 0(R1)
+ RET
diff -pruN 1.6.3-1/src/crypto/sha1/sha1block_decl.go 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_decl.go
--- 1.6.3-1/src/crypto/sha1/sha1block_decl.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_decl.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64 amd64p32 arm 386
+// +build amd64 amd64p32 arm 386 s390x
package sha1
diff -pruN 1.6.3-1/src/crypto/sha1/sha1block_generic.go 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_generic.go
--- 1.6.3-1/src/crypto/sha1/sha1block_generic.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build !amd64,!amd64p32,!386,!arm
+// +build !amd64,!amd64p32,!386,!arm,!s390x
package sha1
diff -pruN 1.6.3-1/src/crypto/sha1/sha1block_s390x.s 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_s390x.s
--- 1.6.3-1/src/crypto/sha1/sha1block_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha1/sha1block_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func block(dig *digest, p []byte)
+TEXT ·block(SB),NOSPLIT,$0-32
+start:
+ // Check that we have the SHA-1 function
+ MOVD ·kimdQueryResult(SB), R4
+ SRD $56, R4 // Get the first byte
+ AND $0x40, R4, R5 // Bit 1 for SHA-1
+ BNE hardware
+ AND $0x80, R4, R5 // Bit 0 for Query
+ BNE generic
+ MOVD $·kimdQueryResult(SB), R1
+ XOR R0, R0 // Query function code
+ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
+ BR start
+
+hardware:
+ MOVD dig+0(FP), R1
+ MOVD p_base+8(FP), R2
+ MOVD p_len+16(FP), R3
+ MOVBZ $1, R0 // SHA-1 function code
+kimd:
+ WORD $0xB93E0002 // KIMD R2
+ BVS kimd // interrupted -- continue
+done:
+ XOR R0, R0 // Restore R0
+ RET
+
+generic:
+ BR ·blockGeneric(SB)
+
+GLOBL ·kimdQueryResult(SB), NOPTR, $16
diff -pruN 1.6.3-1/src/crypto/sha256/sha256block_decl.go 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_decl.go
--- 1.6.3-1/src/crypto/sha256/sha256block_decl.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_decl.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64
+// +build 386 amd64 s390x
package sha256
diff -pruN 1.6.3-1/src/crypto/sha256/sha256block_generic.go 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_generic.go
--- 1.6.3-1/src/crypto/sha256/sha256block_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!386,!s390x
+
+package sha256
+
+var block = blockGeneric
diff -pruN 1.6.3-1/src/crypto/sha256/sha256block.go 1.6.3-1ubuntu1/src/crypto/sha256/sha256block.go
--- 1.6.3-1/src/crypto/sha256/sha256block.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha256/sha256block.go 2016-07-21 13:36:09.000000000 +0000
@@ -77,7 +77,7 @@ var _K = []uint32{
0xc67178f2,
}
-func block(dig *digest, p []byte) {
+func blockGeneric(dig *digest, p []byte) {
var w [64]uint32
h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
for len(p) >= chunk {
diff -pruN 1.6.3-1/src/crypto/sha256/sha256block_s390x.s 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_s390x.s
--- 1.6.3-1/src/crypto/sha256/sha256block_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha256/sha256block_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func block(dig *digest, p []byte)
+TEXT ·block(SB),NOSPLIT,$0-32
+start:
+ // Check that we have the SHA-256 function
+ MOVD ·kimdQueryResult(SB), R4
+ SRD $56, R4 // Get the first byte
+ AND $0x20, R4, R5 // Bit 2 for SHA-256
+ BNE hardware
+ AND $0x80, R4, R5 // Bit 0 for Query
+ BNE generic
+ MOVD $·kimdQueryResult(SB), R1
+ XOR R0, R0 // Query function code
+ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
+ BR start
+
+hardware:
+ MOVD dig+0(FP), R1
+ MOVD p_base+8(FP), R2
+ MOVD p_len+16(FP), R3
+ MOVBZ $2, R0 // SHA-256 function code
+kimd:
+ WORD $0xB93E0002 // KIMD R2
+ BVS kimd // interrupted -- continue
+done:
+ XOR R0, R0 // Restore R0
+ RET
+
+generic:
+ BR ·blockGeneric(SB)
+
+GLOBL ·kimdQueryResult(SB), NOPTR, $16
diff -pruN 1.6.3-1/src/crypto/sha512/sha512block_decl.go 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_decl.go
--- 1.6.3-1/src/crypto/sha512/sha512block_decl.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_decl.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build amd64
+// +build amd64 s390x
package sha512
diff -pruN 1.6.3-1/src/crypto/sha512/sha512block_generic.go 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_generic.go
--- 1.6.3-1/src/crypto/sha512/sha512block_generic.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,9 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!s390x
+
+package sha512
+
+var block = blockGeneric
diff -pruN 1.6.3-1/src/crypto/sha512/sha512block.go 1.6.3-1ubuntu1/src/crypto/sha512/sha512block.go
--- 1.6.3-1/src/crypto/sha512/sha512block.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha512/sha512block.go 2016-07-21 13:36:09.000000000 +0000
@@ -93,7 +93,7 @@ var _K = []uint64{
0x6c44198c4a475817,
}
-func block(dig *digest, p []byte) {
+func blockGeneric(dig *digest, p []byte) {
var w [80]uint64
h0, h1, h2, h3, h4, h5, h6, h7 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4], dig.h[5], dig.h[6], dig.h[7]
for len(p) >= chunk {
diff -pruN 1.6.3-1/src/crypto/sha512/sha512block_s390x.s 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_s390x.s
--- 1.6.3-1/src/crypto/sha512/sha512block_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/sha512/sha512block_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func block(dig *digest, p []byte)
+TEXT ·block(SB),NOSPLIT,$0-32
+start:
+ // Check that we have the SHA-512 function
+ MOVD ·kimdQueryResult(SB), R4
+ SRD $56, R4 // Get the first byte
+ AND $0x10, R4, R5 // Bit 3 for SHA-512
+ BNE hardware
+ AND $0x80, R4, R5 // Bit 0 for Query
+ BNE generic
+ MOVD $·kimdQueryResult(SB), R1
+ XOR R0, R0 // Query function code
+ WORD $0xB93E0006 // KIMD Query (R6 is ignored)
+ BR start
+
+hardware:
+ MOVD dig+0(FP), R1
+ MOVD p_base+8(FP), R2
+ MOVD p_len+16(FP), R3
+ MOVBZ $3, R0 // SHA-512 function code
+kimd:
+ WORD $0xB93E0002 // KIMD R2
+ BVS kimd // interrupted -- continue
+done:
+ XOR R0, R0 // Restore R0
+ RET
+
+generic:
+ BR ·blockGeneric(SB)
+
+GLOBL ·kimdQueryResult(SB), NOPTR, $16
diff -pruN 1.6.3-1/src/crypto/x509/sec1.go 1.6.3-1ubuntu1/src/crypto/x509/sec1.go
--- 1.6.3-1/src/crypto/x509/sec1.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/x509/sec1.go 2016-07-21 13:36:09.000000000 +0000
@@ -41,8 +41,8 @@ func MarshalECPrivateKey(key *ecdsa.Priv
}
privateKeyBytes := key.D.Bytes()
- paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen() + 7) / 8)
- copy(paddedPrivateKey[len(paddedPrivateKey) - len(privateKeyBytes):], privateKeyBytes)
+ paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8)
+ copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes)
return asn1.Marshal(ecPrivateKey{
Version: 1,
@@ -84,7 +84,7 @@ func parseECPrivateKey(namedCurveOID *as
priv.Curve = curve
priv.D = k
- privateKey := make([]byte, (curveOrder.BitLen() + 7) / 8)
+ privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
// Some private keys have leading zero padding. This is invalid
// according to [SEC1], but this code will ignore it.
@@ -98,7 +98,7 @@ func parseECPrivateKey(namedCurveOID *as
// Some private keys remove all leading zeros, this is also invalid
// according to [SEC1] but since OpenSSL used to do this, we ignore
// this too.
- copy(privateKey[len(privateKey) - len(privKey.PrivateKey):], privKey.PrivateKey)
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
return priv, nil
diff -pruN 1.6.3-1/src/crypto/x509/sec1_test.go 1.6.3-1ubuntu1/src/crypto/x509/sec1_test.go
--- 1.6.3-1/src/crypto/x509/sec1_test.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/crypto/x509/sec1_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -10,8 +10,8 @@ import (
"testing"
)
-var ecKeyTests = []struct{
- derHex string
+var ecKeyTests = []struct {
+ derHex string
shouldReserialize bool
}{
// Generated using:
diff -pruN 1.6.3-1/src/debug/elf/elf.go 1.6.3-1ubuntu1/src/debug/elf/elf.go
--- 1.6.3-1/src/debug/elf/elf.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/debug/elf/elf.go 2016-07-21 13:36:09.000000000 +0000
@@ -1725,6 +1725,140 @@ var rppc64Strings = []intName{
func (i R_PPC64) String() string { return stringName(uint32(i), rppc64Strings, false) }
func (i R_PPC64) GoString() string { return stringName(uint32(i), rppc64Strings, true) }
+// Relocation types for s390x processors.
+type R_390 int
+
+const (
+ R_390_NONE R_390 = 0
+ R_390_8 R_390 = 1
+ R_390_12 R_390 = 2
+ R_390_16 R_390 = 3
+ R_390_32 R_390 = 4
+ R_390_PC32 R_390 = 5
+ R_390_GOT12 R_390 = 6
+ R_390_GOT32 R_390 = 7
+ R_390_PLT32 R_390 = 8
+ R_390_COPY R_390 = 9
+ R_390_GLOB_DAT R_390 = 10
+ R_390_JMP_SLOT R_390 = 11
+ R_390_RELATIVE R_390 = 12
+ R_390_GOTOFF R_390 = 13
+ R_390_GOTPC R_390 = 14
+ R_390_GOT16 R_390 = 15
+ R_390_PC16 R_390 = 16
+ R_390_PC16DBL R_390 = 17
+ R_390_PLT16DBL R_390 = 18
+ R_390_PC32DBL R_390 = 19
+ R_390_PLT32DBL R_390 = 20
+ R_390_GOTPCDBL R_390 = 21
+ R_390_64 R_390 = 22
+ R_390_PC64 R_390 = 23
+ R_390_GOT64 R_390 = 24
+ R_390_PLT64 R_390 = 25
+ R_390_GOTENT R_390 = 26
+ R_390_GOTOFF16 R_390 = 27
+ R_390_GOTOFF64 R_390 = 28
+ R_390_GOTPLT12 R_390 = 29
+ R_390_GOTPLT16 R_390 = 30
+ R_390_GOTPLT32 R_390 = 31
+ R_390_GOTPLT64 R_390 = 32
+ R_390_GOTPLTENT R_390 = 33
+ R_390_GOTPLTOFF16 R_390 = 34
+ R_390_GOTPLTOFF32 R_390 = 35
+ R_390_GOTPLTOFF64 R_390 = 36
+ R_390_TLS_LOAD R_390 = 37
+ R_390_TLS_GDCALL R_390 = 38
+ R_390_TLS_LDCALL R_390 = 39
+ R_390_TLS_GD32 R_390 = 40
+ R_390_TLS_GD64 R_390 = 41
+ R_390_TLS_GOTIE12 R_390 = 42
+ R_390_TLS_GOTIE32 R_390 = 43
+ R_390_TLS_GOTIE64 R_390 = 44
+ R_390_TLS_LDM32 R_390 = 45
+ R_390_TLS_LDM64 R_390 = 46
+ R_390_TLS_IE32 R_390 = 47
+ R_390_TLS_IE64 R_390 = 48
+ R_390_TLS_IEENT R_390 = 49
+ R_390_TLS_LE32 R_390 = 50
+ R_390_TLS_LE64 R_390 = 51
+ R_390_TLS_LDO32 R_390 = 52
+ R_390_TLS_LDO64 R_390 = 53
+ R_390_TLS_DTPMOD R_390 = 54
+ R_390_TLS_DTPOFF R_390 = 55
+ R_390_TLS_TPOFF R_390 = 56
+ R_390_20 R_390 = 57
+ R_390_GOT20 R_390 = 58
+ R_390_GOTPLT20 R_390 = 59
+ R_390_TLS_GOTIE20 R_390 = 60
+)
+
+var r390Strings = []intName{
+ {0, "R_390_NONE"},
+ {1, "R_390_8"},
+ {2, "R_390_12"},
+ {3, "R_390_16"},
+ {4, "R_390_32"},
+ {5, "R_390_PC32"},
+ {6, "R_390_GOT12"},
+ {7, "R_390_GOT32"},
+ {8, "R_390_PLT32"},
+ {9, "R_390_COPY"},
+ {10, "R_390_GLOB_DAT"},
+ {11, "R_390_JMP_SLOT"},
+ {12, "R_390_RELATIVE"},
+ {13, "R_390_GOTOFF"},
+ {14, "R_390_GOTPC"},
+ {15, "R_390_GOT16"},
+ {16, "R_390_PC16"},
+ {17, "R_390_PC16DBL"},
+ {18, "R_390_PLT16DBL"},
+ {19, "R_390_PC32DBL"},
+ {20, "R_390_PLT32DBL"},
+ {21, "R_390_GOTPCDBL"},
+ {22, "R_390_64"},
+ {23, "R_390_PC64"},
+ {24, "R_390_GOT64"},
+ {25, "R_390_PLT64"},
+ {26, "R_390_GOTENT"},
+ {27, "R_390_GOTOFF16"},
+ {28, "R_390_GOTOFF64"},
+ {29, "R_390_GOTPLT12"},
+ {30, "R_390_GOTPLT16"},
+ {31, "R_390_GOTPLT32"},
+ {32, "R_390_GOTPLT64"},
+ {33, "R_390_GOTPLTENT"},
+ {34, "R_390_GOTPLTOFF16"},
+ {35, "R_390_GOTPLTOFF32"},
+ {36, "R_390_GOTPLTOFF64"},
+ {37, "R_390_TLS_LOAD"},
+ {38, "R_390_TLS_GDCALL"},
+ {39, "R_390_TLS_LDCALL"},
+ {40, "R_390_TLS_GD32"},
+ {41, "R_390_TLS_GD64"},
+ {42, "R_390_TLS_GOTIE12"},
+ {43, "R_390_TLS_GOTIE32"},
+ {44, "R_390_TLS_GOTIE64"},
+ {45, "R_390_TLS_LDM32"},
+ {46, "R_390_TLS_LDM64"},
+ {47, "R_390_TLS_IE32"},
+ {48, "R_390_TLS_IE64"},
+ {49, "R_390_TLS_IEENT"},
+ {50, "R_390_TLS_LE32"},
+ {51, "R_390_TLS_LE64"},
+ {52, "R_390_TLS_LDO32"},
+ {53, "R_390_TLS_LDO64"},
+ {54, "R_390_TLS_DTPMOD"},
+ {55, "R_390_TLS_DTPOFF"},
+ {56, "R_390_TLS_TPOFF"},
+ {57, "R_390_20"},
+ {58, "R_390_GOT20"},
+ {59, "R_390_GOTPLT20"},
+ {60, "R_390_TLS_GOTIE20"},
+}
+
+func (i R_390) String() string { return stringName(uint32(i), r390Strings, false) }
+func (i R_390) GoString() string { return stringName(uint32(i), r390Strings, true) }
+
// Relocation types for SPARC.
type R_SPARC int
diff -pruN 1.6.3-1/src/debug/elf/file.go 1.6.3-1ubuntu1/src/debug/elf/file.go
--- 1.6.3-1/src/debug/elf/file.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/debug/elf/file.go 2016-07-21 13:36:09.000000000 +0000
@@ -596,6 +596,8 @@ func (f *File) applyRelocations(dst []by
return f.applyRelocationsPPC64(dst, rels)
case f.Class == ELFCLASS64 && f.Machine == EM_MIPS:
return f.applyRelocationsMIPS64(dst, rels)
+ case f.Class == ELFCLASS64 && f.Machine == EM_S390:
+ return f.applyRelocationss390x(dst, rels)
default:
return errors.New("applyRelocations: not implemented")
}
@@ -908,6 +910,55 @@ func (f *File) applyRelocationsMIPS64(ds
}
}
+ return nil
+}
+
+func (f *File) applyRelocationss390x(dst []byte, rels []byte) error {
+ // 24 is the size of Rela64.
+ if len(rels)%24 != 0 {
+ return errors.New("length of relocation section is not a multiple of 24")
+ }
+
+ symbols, _, err := f.getSymbols(SHT_SYMTAB)
+ if err != nil {
+ return err
+ }
+
+ b := bytes.NewReader(rels)
+ var rela Rela64
+
+ for b.Len() > 0 {
+ binary.Read(b, f.ByteOrder, &rela)
+ symNo := rela.Info >> 32
+ t := R_390(rela.Info & 0xffff)
+
+ if symNo == 0 || symNo > uint64(len(symbols)) {
+ continue
+ }
+ sym := &symbols[symNo-1]
+ switch SymType(sym.Info & 0xf) {
+ case STT_SECTION, STT_NOTYPE:
+ break
+ default:
+ continue
+ }
+
+ switch t {
+ case R_390_64:
+ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ val := sym.Value + uint64(rela.Addend)
+ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], val)
+ case R_390_32:
+ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 {
+ continue
+ }
+ val := uint32(sym.Value) + uint32(rela.Addend)
+ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], val)
+ }
+ }
+
return nil
}
diff -pruN 1.6.3-1/src/debug/elf/file_test.go 1.6.3-1ubuntu1/src/debug/elf/file_test.go
--- 1.6.3-1/src/debug/elf/file_test.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/debug/elf/file_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -473,6 +473,25 @@ var relocationTests = []relocationTest{
},
},
{
+ "testdata/go-relocation-test-gcc531-s390x.obj",
+ []relocationTestEntry{
+ {0, &dwarf.Entry{
+ Offset: 0xb,
+ Tag: dwarf.TagCompileUnit,
+ Children: true,
+ Field: []dwarf.Field{
+ {Attr: dwarf.AttrProducer, Val: "GNU C11 5.3.1 20160316 -march=zEC12 -m64 -mzarch -g -fstack-protector-strong", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLanguage, Val: int64(12), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrName, Val: "hello.c", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrCompDir, Val: "/tmp", Class: dwarf.ClassString},
+ {Attr: dwarf.AttrLowpc, Val: uint64(0x0), Class: dwarf.ClassAddress},
+ {Attr: dwarf.AttrHighpc, Val: int64(58), Class: dwarf.ClassConstant},
+ {Attr: dwarf.AttrStmtList, Val: int64(0), Class: dwarf.ClassLinePtr},
+ },
+ }},
+ },
+ },
+ {
"testdata/go-relocation-test-gcc493-mips64le.obj",
[]relocationTestEntry{
{0, &dwarf.Entry{
Binary files 1.6.3-1/src/debug/elf/testdata/go-relocation-test-gcc531-s390x.obj and 1.6.3-1ubuntu1/src/debug/elf/testdata/go-relocation-test-gcc531-s390x.obj differ
diff -pruN 1.6.3-1/src/debug/gosym/pclntab.go 1.6.3-1ubuntu1/src/debug/gosym/pclntab.go
--- 1.6.3-1/src/debug/gosym/pclntab.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/debug/gosym/pclntab.go 2016-07-21 13:36:09.000000000 +0000
@@ -167,7 +167,7 @@ func (t *LineTable) go12Init() {
// Check header: 4-byte magic, two zeros, pc quantum, pointer size.
t.go12 = -1 // not Go 1.2 until proven otherwise
if len(t.Data) < 16 || t.Data[4] != 0 || t.Data[5] != 0 ||
- (t.Data[6] != 1 && t.Data[6] != 4) || // pc quantum
+ (t.Data[6] != 1 && t.Data[6] != 2 && t.Data[6] != 4) || // pc quantum
(t.Data[7] != 4 && t.Data[7] != 8) { // pointer size
return
}
diff -pruN 1.6.3-1/src/go/build/build.go 1.6.3-1ubuntu1/src/go/build/build.go
--- 1.6.3-1/src/go/build/build.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/go/build/build.go 2016-07-21 13:36:09.000000000 +0000
@@ -282,6 +282,7 @@ var cgoEnabled = map[string]bool{
"solaris/amd64": true,
"windows/386": true,
"windows/amd64": true,
+ "linux/s390x": true,
}
func defaultContext() Context {
diff -pruN 1.6.3-1/src/hash/crc32/crc32_generic.go 1.6.3-1ubuntu1/src/hash/crc32/crc32_generic.go
--- 1.6.3-1/src/hash/crc32/crc32_generic.go 2016-07-18 16:24:07.000000000 +0000
+++ 1.6.3-1ubuntu1/src/hash/crc32/crc32_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 arm arm64 mips64 mips64le ppc64 ppc64le
+// +build 386 arm arm64 mips64 mips64le ppc64 ppc64le s390x
package crc32
diff -pruN 1.6.3-1/src/internal/syscall/unix/getrandom_linux_s390x.go 1.6.3-1ubuntu1/src/internal/syscall/unix/getrandom_linux_s390x.go
--- 1.6.3-1/src/internal/syscall/unix/getrandom_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/internal/syscall/unix/getrandom_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,7 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package unix
+
+const randomTrap uintptr = 349
diff -pruN 1.6.3-1/src/math/big/arith_s390x.s 1.6.3-1ubuntu1/src/math/big/arith_s390x.s
--- 1.6.3-1/src/math/big/arith_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/math/big/arith_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,565 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !math_big_pure_go,s390x
+
+#include "textflag.h"
+
+// This file provides fast assembly versions for the elementary
+// arithmetic operations on vectors implemented in arith.go.
+
+TEXT ·mulWW(SB),NOSPLIT,$0
+ MOVD x+0(FP), R3
+ MOVD y+8(FP), R4
+ MULHDU R3, R4
+ MOVD R10, z1+16(FP)
+ MOVD R11, z0+24(FP)
+ RET
+
+// func divWW(x1, x0, y Word) (q, r Word)
+TEXT ·divWW(SB),NOSPLIT,$0
+ MOVD x1+0(FP), R10
+ MOVD x0+8(FP), R11
+ MOVD y+16(FP), R5
+ WORD $0xb98700a5 // dlgr r10,r5
+ MOVD R11, q+24(FP)
+ MOVD R10, r+32(FP)
+ RET
+
+// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
+// func addVV(z, x, y []Word) (c Word)
+TEXT ·addVV(SB),NOSPLIT,$0
+ MOVD z_len+8(FP), R3
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R9
+ MOVD z+0(FP), R2
+
+ MOVD $0, R4 // c = 0
+ MOVD $0, R0 // make sure it's zero
+ MOVD $0, R10 // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUB $4, R3 // n -= 4
+ BLT v1 // if n < 0 goto v1
+
+U1: // n >= 0
+ // regular loop body unrolled 4x
+ MOVD 0(R8)(R10*1), R5
+ MOVD 8(R8)(R10*1), R6
+ MOVD 16(R8)(R10*1), R7
+ MOVD 24(R8)(R10*1), R1
+ ADDC R4, R4 // restore CF
+ MOVD 0(R9)(R10*1), R11
+ ADDE R11, R5
+ MOVD 8(R9)(R10*1), R11
+ ADDE R11, R6
+ MOVD 16(R9)(R10*1), R11
+ ADDE R11, R7
+ MOVD 24(R9)(R10*1), R11
+ ADDE R11, R1
+ MOVD R0, R4
+ ADDE R4, R4 // save CF
+ NEG R4, R4
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R6, 8(R2)(R10*1)
+ MOVD R7, 16(R2)(R10*1)
+ MOVD R1, 24(R2)(R10*1)
+
+
+ ADD $32, R10 // i += 4
+ SUB $4, R3 // n -= 4
+ BGE U1 // if n >= 0 goto U1
+
+v1: ADD $4, R3 // n += 4
+ BLE E1 // if n <= 0 goto E1
+
+L1: // n > 0
+ ADDC R4, R4 // restore CF
+ MOVD 0(R8)(R10*1), R5
+ MOVD 0(R9)(R10*1), R11
+ ADDE R11, R5
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R0, R4
+ ADDE R4, R4 // save CF
+ NEG R4, R4
+
+ ADD $8, R10 // i++
+ SUB $1, R3 // n--
+ BGT L1 // if n > 0 goto L1
+
+E1: NEG R4, R4
+ MOVD R4, c+72(FP) // return c
+ RET
+
+// DI = R3, CX = R4, SI = r10, r8 = r8, r9=r9, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0) + use R11
+// func subVV(z, x, y []Word) (c Word)
+// (same as addVV except for SUBC/SUBE instead of ADDC/ADDE and label names)
+TEXT ·subVV(SB),NOSPLIT,$0
+ MOVD z_len+8(FP), R3
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R9
+ MOVD z+0(FP), R2
+
+ MOVD $0, R4 // c = 0
+ MOVD $0, R0 // make sure it's zero
+ MOVD $0, R10 // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUB $4, R3 // n -= 4
+ BLT v1 // if n < 0 goto v1
+
+U1: // n >= 0
+ // regular loop body unrolled 4x
+ MOVD 0(R8)(R10*1), R5
+ MOVD 8(R8)(R10*1), R6
+ MOVD 16(R8)(R10*1), R7
+ MOVD 24(R8)(R10*1), R1
+ MOVD R0, R11
+ SUBC R4, R11 // restore CF
+ MOVD 0(R9)(R10*1), R11
+ SUBE R11, R5
+ MOVD 8(R9)(R10*1), R11
+ SUBE R11, R6
+ MOVD 16(R9)(R10*1), R11
+ SUBE R11, R7
+ MOVD 24(R9)(R10*1), R11
+ SUBE R11, R1
+ MOVD R0, R4
+ SUBE R4, R4 // save CF
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R6, 8(R2)(R10*1)
+ MOVD R7, 16(R2)(R10*1)
+ MOVD R1, 24(R2)(R10*1)
+
+
+ ADD $32, R10 // i += 4
+ SUB $4, R3 // n -= 4
+ BGE U1 // if n >= 0 goto U1
+
+v1: ADD $4, R3 // n += 4
+ BLE E1 // if n <= 0 goto E1
+
+L1: // n > 0
+ MOVD R0, R11
+ SUBC R4, R11 // restore CF
+ MOVD 0(R8)(R10*1), R5
+ MOVD 0(R9)(R10*1), R11
+ SUBE R11, R5
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R0, R4
+ SUBE R4, R4 // save CF
+
+ ADD $8, R10 // i++
+ SUB $1, R3 // n--
+ BGT L1 // if n > 0 goto L1
+
+E1: NEG R4, R4
+ MOVD R4, c+72(FP) // return c
+ RET
+
+
+// func addVW(z, x []Word, y Word) (c Word)
+TEXT ·addVW(SB),NOSPLIT,$0
+//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
+ MOVD z_len+8(FP), R3
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R4 // c = y
+ MOVD z+0(FP), R2
+ MOVD $0, R0 // make sure it's 0
+ MOVD $0, R10 // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUB $4, R3 // n -= 4
+ BLT v4 // if n < 4 goto v4
+
+U4: // n >= 0
+ // regular loop body unrolled 4x
+ MOVD 0(R8)(R10*1), R5
+ MOVD 8(R8)(R10*1), R6
+ MOVD 16(R8)(R10*1), R7
+ MOVD 24(R8)(R10*1), R1
+ ADDC R4, R5
+ ADDE R0, R6
+ ADDE R0, R7
+ ADDE R0, R1
+ ADDE R0, R0
+ MOVD R0, R4 // save CF
+ SUB R0, R0
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R6, 8(R2)(R10*1)
+ MOVD R7, 16(R2)(R10*1)
+ MOVD R1, 24(R2)(R10*1)
+
+ ADD $32, R10 // i += 4 -> i +=32
+ SUB $4, R3 // n -= 4
+ BGE U4 // if n >= 0 goto U4
+
+v4: ADD $4, R3 // n += 4
+ BLE E4 // if n <= 0 goto E4
+
+L4: // n > 0
+ MOVD 0(R8)(R10*1), R5
+ ADDC R4, R5
+ ADDE R0, R0
+ MOVD R0, R4 // save CF
+ SUB R0, R0
+ MOVD R5, 0(R2)(R10*1)
+
+ ADD $8, R10 // i++
+ SUB $1, R3 // n--
+ BGT L4 // if n > 0 goto L4
+
+E4: MOVD R4, c+56(FP) // return c
+
+ RET
+
+//DI = R3, CX = R4, SI = r10, r8 = r8, r10 = r2 , r11 = r5, r12 = r6, r13 = r7, r14 = r1 (R0 set to 0)
+// func subVW(z, x []Word, y Word) (c Word)
+// (same as addVW except for SUBC/SUBE instead of ADDC/ADDE and label names)
+TEXT ·subVW(SB),NOSPLIT,$0
+ MOVD z_len+8(FP), R3
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R4 // c = y
+ MOVD z+0(FP), R2
+ MOVD $0, R0 // make sure it's 0
+ MOVD $0, R10 // i = 0
+
+ // s/JL/JMP/ below to disable the unrolled loop
+ SUB $4, R3 // n -= 4
+ BLT v4 // if n < 4 goto v4
+
+U4: // n >= 0
+ // regular loop body unrolled 4x
+ MOVD 0(R8)(R10*1), R5
+ MOVD 8(R8)(R10*1), R6
+ MOVD 16(R8)(R10*1), R7
+ MOVD 24(R8)(R10*1), R1
+ SUBC R4, R5 //SLGR -> SUBC
+ SUBE R0, R6 //SLBGR -> SUBE
+ SUBE R0, R7
+ SUBE R0, R1
+ SUBE R4, R4 // save CF
+ NEG R4, R4
+ MOVD R5, 0(R2)(R10*1)
+ MOVD R6, 8(R2)(R10*1)
+ MOVD R7, 16(R2)(R10*1)
+ MOVD R1, 24(R2)(R10*1)
+
+ ADD $32, R10 // i += 4 -> i +=32
+ SUB $4, R3 // n -= 4
+ BGE U4 // if n >= 0 goto U4
+
+v4: ADD $4, R3 // n += 4
+ BLE E4 // if n <= 0 goto E4
+
+L4: // n > 0
+ MOVD 0(R8)(R10*1), R5
+ SUBC R4, R5
+ SUBE R4, R4 // save CF
+ NEG R4, R4
+ MOVD R5, 0(R2)(R10*1)
+
+ ADD $8, R10 // i++
+ SUB $1, R3 // n--
+ BGT L4 // if n > 0 goto L4
+
+E4: MOVD R4, c+56(FP) // return c
+
+ RET
+
+// func shlVU(z, x []Word, s uint) (c Word)
+TEXT ·shlVU(SB),NOSPLIT,$0
+ MOVD z_len+8(FP), R5
+ SUB $1, R5 // n--
+ BLT X8b // n < 0 (n <= 0)
+
+ // n > 0
+ MOVD s+48(FP), R4
+ CMPBEQ R0, R4, Z80 //handle 0 case beq
+ MOVD $64, R6
+ CMPBEQ R6, R4, Z864 //handle 64 case beq
+ MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ SLD $3, R5 // n = n*8
+ SUB R4, R6, R7
+ MOVD (R8)(R5*1), R10 // w1 = x[i-1]
+ SRD R7, R10, R3
+ MOVD R3, c+56(FP)
+
+ MOVD $0, R1 // i = 0
+ BR E8
+
+ // i < n-1
+L8: MOVD R10, R3 // w = w1
+ MOVD -8(R8)(R5*1), R10 // w1 = x[i+1]
+
+ SLD R4, R3 // w<<s
+ SRD R7, R10, R6
+ OR R6, R3
+ MOVD R3, (R2)(R5*1) // z[i] = w<<s | w1>>ŝ
+ SUB $8, R5 // i--
+
+E8: CMPBGT R5, R0, L8 // i < n-1
+
+ // i >= n-1
+ // i >= n-1
+X8a: SLD R4, R10 // w1<<s
+ MOVD R10, (R2)(R5*1)
+ RET
+
+Z864: MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ SLD $3, R5 // n = n*8
+ MOVD (R8)(R5*1), R3 // w1 = x[n-1]
+ MOVD R3, c+56(FP) // z[i] = x[n-1]
+
+ BR E864
+
+ // i < n-1
+L864: MOVD -8(R8)(R5*1), R3
+
+ MOVD R3, (R2)(R5*1) // z[i] = x[n-1]
+ SUB $8, R5 // i--
+
+E864: CMPBGT R5, R0, L864 // i < n-1
+
+ MOVD R0, (R2) // z[n-1] = 0
+ RET
+
+
+// CX = R4, r8 = r8, r10 = r2 , r11 = r5, DX = r3, AX = r10 , BX = R1 , 64-count = r7 (R0 set to 0) temp = R6
+// func shrVU(z, x []Word, s uint) (c Word)
+TEXT ·shrVU(SB),NOSPLIT,$0
+ MOVD z_len+8(FP), R5
+ SUB $1, R5 // n--
+ BLT X9b // n < 0 (n <= 0)
+
+ // n > 0
+ MOVD s+48(FP), R4
+ CMPBEQ R0, R4, ZB0 //handle 0 case beq
+ MOVD $64, R6
+ CMPBEQ R6, R4, ZB64 //handle 64 case beq
+ MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ SLD $3, R5 // n = n*8
+ SUB R4, R6, R7
+ MOVD (R8), R10 // w1 = x[0]
+ SLD R7, R10, R3
+ MOVD R3, c+56(FP)
+
+ MOVD $0, R1 // i = 0
+ BR E9
+
+ // i < n-1
+L9: MOVD R10, R3 // w = w1
+ MOVD 8(R8)(R1*1), R10 // w1 = x[i+1]
+
+ SRD R4, R3 // w>>s
+ SLD R7, R10, R6 // w1<<ŝ
+ OR R6, R3 // w>>s|w1<<ŝ
+ MOVD R3, (R2)(R1*1) // z[i] = w>>s|w1<<ŝ
+ ADD $8, R1 // i++
+
+E9: CMPBLT R1, R5, L9 // i < n-1
+
+ // i >= n-1
+X9a: SRD R4, R10 // w1>>s
+ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
+ RET
+
+X9b: MOVD R0, c+56(FP)
+ RET
+
+ZB0: MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ SLD $3, R5 // n = n*8
+
+ MOVD (R8), R10 // w1 = x[0]
+ MOVD $0, R3 // R10 << 64
+ MOVD R3, c+56(FP)
+
+ MOVD $0, R1 // i = 0
+ BR E9Z
+
+ // i < n-1
+L9Z: MOVD R10, R3 // w = w1
+ MOVD 8(R8)(R1*1), R10 // w1 = x[i+1]
+
+ MOVD R3, (R2)(R1*1) // z[i] = w>>s | w1<<ŝ
+ ADD $8, R1 // i++
+
+E9Z: CMPBLT R1, R5, L9Z // i < n-1
+
+ // i >= n-1
+ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
+ RET
+
+ZB64: MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ SLD $3, R5 // n = n*8
+ MOVD (R8), R3 // w1 = x[0]
+ MOVD R3, c+56(FP)
+
+ MOVD $0, R1 // i = 0
+ BR E964
+
+ // i < n-1
+L964: MOVD 8(R8)(R1*1), R3 // w1 = x[i+1]
+
+ MOVD R3, (R2)(R1*1) // z[i] = w>>s | w1<<ŝ
+ ADD $8, R1 // i++
+
+E964: CMPBLT R1, R5, L964 // i < n-1
+
+ // i >= n-1
+ MOVD $0, R10 // w1>>s
+ MOVD R10, (R2)(R5*1) // z[n-1] = w1>>s
+ RET
+
+// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, DX = r3, AX = r6 , BX = R1 , (R0 set to 0) + use R11 + use R7 for i
+// func mulAddVWW(z, x []Word, y, r Word) (c Word)
+TEXT ·mulAddVWW(SB),NOSPLIT,$0
+ MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R9
+ MOVD r+56(FP), R4 // c = r
+ MOVD z_len+8(FP), R5
+ MOVD $0, R1 // i = 0
+ MOVD $0, R7 // i*8 = 0
+ MOVD $0, R0 // make sure it's zero
+ BR E5
+
+L5: MOVD (R8)(R1*1), R6
+ MULHDU R9, R6
+ ADDC R4, R11 //add to low order bits
+ ADDE R0, R6
+ MOVD R11, (R2)(R1*1)
+ MOVD R6, R4
+ ADD $8, R1 // i*8 + 8
+ ADD $1, R7 // i++
+
+E5: CMPBLT R7, R5, L5 // i < n
+
+ MOVD R4, c+64(FP)
+ RET
+
+// func addMulVVW(z, x []Word, y Word) (c Word)
+// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1 , (R0 set to 0) + use R11 + use R7 for i
+TEXT ·addMulVVW(SB),NOSPLIT,$0
+ MOVD z+0(FP), R2
+ MOVD x+24(FP), R8
+ MOVD y+48(FP), R9
+ MOVD z_len+8(FP), R5
+
+ MOVD $0, R1 // i*8 = 0
+ MOVD $0, R7 // i = 0
+ MOVD $0, R0 // make sure it's zero
+ MOVD $0, R4 // c = 0
+
+ MOVD R5, R12
+ AND $-2, R12
+ CMPBGE R5, $2, A6
+ BR E6
+
+A6: MOVD (R8)(R1*1), R6
+ MULHDU R9, R6
+ MOVD (R2)(R1*1), R10
+ ADDC R10, R11 //add to low order bits
+ ADDE R0, R6
+ ADDC R4, R11
+ ADDE R0, R6
+ MOVD R6, R4
+ MOVD R11, (R2)(R1*1)
+
+ MOVD (8)(R8)(R1*1), R6
+ MULHDU R9, R6
+ MOVD (8)(R2)(R1*1), R10
+ ADDC R10, R11 //add to low order bits
+ ADDE R0, R6
+ ADDC R4, R11
+ ADDE R0, R6
+ MOVD R6, R4
+ MOVD R11, (8)(R2)(R1*1)
+
+ ADD $16, R1 // i*8 + 8
+ ADD $2, R7 // i++
+
+ CMPBLT R7, R12, A6
+ BR E6
+
+L6: MOVD (R8)(R1*1), R6
+ MULHDU R9, R6
+ MOVD (R2)(R1*1), R10
+ ADDC R10, R11 //add to low order bits
+ ADDE R0, R6
+ ADDC R4, R11
+ ADDE R0, R6
+ MOVD R6, R4
+ MOVD R11, (R2)(R1*1)
+
+ ADD $8, R1 // i*8 + 8
+ ADD $1, R7 // i++
+
+E6: CMPBLT R7, R5, L6 // i < n
+
+ MOVD R4, c+56(FP)
+ RET
+
+// func divWVW(z []Word, xn Word, x []Word, y Word) (r Word)
+// CX = R4, r8 = r8, r9=r9, r10 = r2 , r11 = r5, AX = r11, DX = R6, r12=r12, BX = R1(*8) , (R0 set to 0) + use R11 + use R7 for i
+TEXT ·divWVW(SB),NOSPLIT,$0
+ MOVD z+0(FP), R2
+ MOVD xn+24(FP), R10 // r = xn
+ MOVD x+32(FP), R8
+ MOVD y+56(FP), R9
+ MOVD z_len+8(FP), R7 // i = z
+ SLD $3, R7, R1 // i*8
+ MOVD $0, R0 // make sure it's zero
+ BR E7
+
+L7: MOVD (R8)(R1*1), R11
+ WORD $0xB98700A9 //DLGR R10,R9
+ MOVD R11, (R2)(R1*1)
+
+E7: SUB $1, R7 // i--
+ SUB $8, R1
+ BGE L7 // i >= 0
+
+ MOVD R10, r+64(FP)
+ RET
+
+// func bitLen(x Word) (n int)
+TEXT ·bitLen(SB),NOSPLIT,$0
+ MOVD x+0(FP), R2
+ WORD $0xb9830022 // FLOGR R2,R2
+ MOVD $64, R3
+ SUB R2, R3
+ MOVD R3, n+8(FP)
+ RET
diff -pruN 1.6.3-1/src/math/dim_s390x.s 1.6.3-1ubuntu1/src/math/dim_s390x.s
--- 1.6.3-1/src/math/dim_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/math/dim_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,132 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on dim_amd64.s
+
+#include "textflag.h"
+
+#define PosInf 0x7FF0000000000000
+#define NaN 0x7FF8000000000001
+#define NegInf 0xFFF0000000000000
+
+// func Dim(x, y float64) float64
+TEXT ·Dim(SB),NOSPLIT,$0
+ // (+Inf, +Inf) special case
+ MOVD x+0(FP), R2
+ MOVD y+8(FP), R3
+ MOVD $PosInf, R4
+ CMPUBNE R4, R2, dim2
+ CMPUBEQ R4, R3, bothInf
+dim2: // (-Inf, -Inf) special case
+ MOVD $NegInf, R4
+ CMPUBNE R4, R2, dim3
+ CMPUBEQ R4, R3, bothInf
+dim3: // (NaN, x) or (x, NaN)
+ MOVD $~(1<<63), R5
+ MOVD $PosInf, R4
+ AND R5, R2 // x = |x|
+ CMPUBLT R4, R2, isDimNaN
+ AND R5, R3 // y = |y|
+ CMPUBLT R4, R3, isDimNaN
+
+ FMOVD x+0(FP), F1
+ FMOVD y+8(FP), F2
+ FSUB F2, F1
+ FMOVD $(0.0), F2
+ FCMPU F2, F1
+ BGE +3(PC)
+ FMOVD F1, ret+16(FP)
+ RET
+ FMOVD F2, ret+16(FP)
+ RET
+bothInf: // Dim(-Inf, -Inf) or Dim(+Inf, +Inf)
+isDimNaN:
+ MOVD $NaN, R4
+ MOVD R4, ret+16(FP)
+ RET
+
+// func ·Max(x, y float64) float64
+TEXT ·Max(SB),NOSPLIT,$0
+ // +Inf special cases
+ MOVD $PosInf, R4
+ MOVD x+0(FP), R8
+ CMPUBEQ R4, R8, isPosInf
+ MOVD y+8(FP), R9
+ CMPUBEQ R4, R9, isPosInf
+ // NaN special cases
+ MOVD $~(1<<63), R5 // bit mask
+ MOVD $PosInf, R4
+ MOVD R8, R2
+ AND R5, R2 // x = |x|
+ CMPUBLT R4, R2, isMaxNaN
+ MOVD R9, R3
+ AND R5, R3 // y = |y|
+ CMPUBLT R4, R3, isMaxNaN
+ // ±0 special cases
+ OR R3, R2
+ BEQ isMaxZero
+
+ FMOVD x+0(FP), F1
+ FMOVD y+8(FP), F2
+ FCMPU F2, F1
+ BGT +3(PC)
+ FMOVD F1, ret+16(FP)
+ RET
+ FMOVD F2, ret+16(FP)
+ RET
+isMaxNaN: // return NaN
+ MOVD $NaN, R4
+isPosInf: // return +Inf
+ MOVD R4, ret+16(FP)
+ RET
+isMaxZero:
+ MOVD $(1<<63), R4 // -0.0
+ CMPUBEQ R4, R8, +3(PC)
+ MOVD R8, ret+16(FP) // return 0
+ RET
+ MOVD R9, ret+16(FP) // return other 0
+ RET
+
+// func Min(x, y float64) float64
+TEXT ·Min(SB),NOSPLIT,$0
+ // -Inf special cases
+ MOVD $NegInf, R4
+ MOVD x+0(FP), R8
+ CMPUBEQ R4, R8, isNegInf
+ MOVD y+8(FP), R9
+ CMPUBEQ R4, R9, isNegInf
+ // NaN special cases
+ MOVD $~(1<<63), R5
+ MOVD $PosInf, R4
+ MOVD R8, R2
+ AND R5, R2 // x = |x|
+ CMPUBLT R4, R2, isMinNaN
+ MOVD R9, R3
+ AND R5, R3 // y = |y|
+ CMPUBLT R4, R3, isMinNaN
+ // ±0 special cases
+ OR R3, R2
+ BEQ isMinZero
+
+ FMOVD x+0(FP), F1
+ FMOVD y+8(FP), F2
+ FCMPU F2, F1
+ BLT +3(PC)
+ FMOVD F1, ret+16(FP)
+ RET
+ FMOVD F2, ret+16(FP)
+ RET
+isMinNaN: // return NaN
+ MOVD $NaN, R4
+isNegInf: // return -Inf
+ MOVD R4, ret+16(FP)
+ RET
+isMinZero:
+ MOVD $(1<<63), R4 // -0.0
+ CMPUBEQ R4, R8, +3(PC)
+ MOVD R9, ret+16(FP) // return other 0
+ RET
+ MOVD R8, ret+16(FP) // return -0
+ RET
+
diff -pruN 1.6.3-1/src/math/sqrt_s390x.s 1.6.3-1ubuntu1/src/math/sqrt_s390x.s
--- 1.6.3-1/src/math/sqrt_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/math/sqrt_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,12 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Sqrt(x float64) float64
+TEXT ·Sqrt(SB),NOSPLIT,$0
+ FMOVD x+0(FP), F1
+ FSQRT F1, F1
+ FMOVD F1, ret+8(FP)
+ RET
diff -pruN 1.6.3-1/src/math/stubs_s390x.s 1.6.3-1ubuntu1/src/math/stubs_s390x.s
--- 1.6.3-1/src/math/stubs_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/math/stubs_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,77 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "../runtime/textflag.h"
+
+TEXT ·Asin(SB),NOSPLIT,$0
+ BR ·asin(SB)
+
+TEXT ·Acos(SB),NOSPLIT,$0
+ BR ·acos(SB)
+
+TEXT ·Atan2(SB),NOSPLIT,$0
+ BR ·atan2(SB)
+
+TEXT ·Atan(SB),NOSPLIT,$0
+ BR ·atan(SB)
+
+TEXT ·Exp2(SB),NOSPLIT,$0
+ BR ·exp2(SB)
+
+TEXT ·Expm1(SB),NOSPLIT,$0
+ BR ·expm1(SB)
+
+TEXT ·Exp(SB),NOSPLIT,$0
+ BR ·exp(SB)
+
+TEXT ·Floor(SB),NOSPLIT,$0
+ BR ·floor(SB)
+
+TEXT ·Ceil(SB),NOSPLIT,$0
+ BR ·ceil(SB)
+
+TEXT ·Trunc(SB),NOSPLIT,$0
+ BR ·trunc(SB)
+
+TEXT ·Frexp(SB),NOSPLIT,$0
+ BR ·frexp(SB)
+
+TEXT ·Hypot(SB),NOSPLIT,$0
+ BR ·hypot(SB)
+
+TEXT ·Ldexp(SB),NOSPLIT,$0
+ BR ·ldexp(SB)
+
+TEXT ·Log10(SB),NOSPLIT,$0
+ BR ·log10(SB)
+
+TEXT ·Log2(SB),NOSPLIT,$0
+ BR ·log2(SB)
+
+TEXT ·Log1p(SB),NOSPLIT,$0
+ BR ·log1p(SB)
+
+TEXT ·Log(SB),NOSPLIT,$0
+ BR ·log(SB)
+
+TEXT ·Modf(SB),NOSPLIT,$0
+ BR ·modf(SB)
+
+TEXT ·Mod(SB),NOSPLIT,$0
+ BR ·mod(SB)
+
+TEXT ·Remainder(SB),NOSPLIT,$0
+ BR ·remainder(SB)
+
+TEXT ·Sincos(SB),NOSPLIT,$0
+ BR ·sincos(SB)
+
+TEXT ·Sin(SB),NOSPLIT,$0
+ BR ·sin(SB)
+
+TEXT ·Cos(SB),NOSPLIT,$0
+ BR ·cos(SB)
+
+TEXT ·Tan(SB),NOSPLIT,$0
+ BR ·tan(SB)
diff -pruN 1.6.3-1/src/net/http/fs_test.go 1.6.3-1ubuntu1/src/net/http/fs_test.go
--- 1.6.3-1/src/net/http/fs_test.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/net/http/fs_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -963,9 +963,9 @@ func TestLinuxSendfile(t *testing.T) {
syscalls := "sendfile,sendfile64"
switch runtime.GOARCH {
- case "mips64", "mips64le":
- // mips64 strace doesn't support sendfile64 and will error out
- // if we specify that with `-e trace='.
+ case "mips64", "mips64le", "s390x":
+ // strace on the above platforms doesn't support sendfile64
+ // and will error out if we specify that with `-e trace='.
syscalls = "sendfile"
}
diff -pruN 1.6.3-1/src/net/lookup_test.go 1.6.3-1ubuntu1/src/net/lookup_test.go
--- 1.6.3-1/src/net/lookup_test.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/net/lookup_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -626,6 +626,11 @@ func TestLookupPort(t *testing.T) {
t.Skipf("not supported on %s", runtime.GOOS)
}
+ switch runtime.GOARCH {
+ case "s390x":
+ t.Skipf("services not all known on %s", runtime.GOARCH)
+ }
+
for _, tt := range lookupPortTests {
if port, err := LookupPort(tt.network, tt.name); port != tt.port || (err == nil) != tt.ok {
t.Errorf("LookupPort(%q, %q) = %d, %v; want %d", tt.network, tt.name, port, err, tt.port)
diff -pruN 1.6.3-1/src/reflect/asm_s390x.s 1.6.3-1ubuntu1/src/reflect/asm_s390x.s
--- 1.6.3-1/src/reflect/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/reflect/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+#include "funcdata.h"
+
+// makeFuncStub is the code half of the function returned by MakeFunc.
+// See the comment on the declaration of makeFuncStub in makefunc.go
+// for more details.
+// No arg size here, runtime pulls arg map out of the func value.
+TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ BL ·callReflect(SB)
+ RET
+
+// methodValueCall is the code half of the function returned by makeMethodValue.
+// See the comment on the declaration of methodValueCall in makefunc.go
+// for more details.
+// No arg size here; runtime pulls arg map out of the func value.
+TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$16
+ NO_LOCAL_POINTERS
+ MOVD R12, 8(R15)
+ MOVD $argframe+0(FP), R3
+ MOVD R3, 16(R15)
+ BL ·callMethod(SB)
+ RET
diff -pruN 1.6.3-1/src/runtime/asm_386.s 1.6.3-1ubuntu1/src/runtime/asm_386.s
--- 1.6.3-1/src/runtime/asm_386.s 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/asm_386.s 2016-07-21 13:36:09.000000000 +0000
@@ -530,12 +530,17 @@ TEXT ·publicationBarrier(SB),NOSPLIT,$0
// called from deferreturn.
// 1. pop the caller
// 2. sub 5 bytes from the callers return
+// (when building for shared libraries, subtract 16 to cover load of GOT pointer into BX)
// 3. jmp to the argument
TEXT runtime·jmpdefer(SB), NOSPLIT, $0-8
MOVL fv+0(FP), DX // fn
MOVL argp+4(FP), BX // caller sp
LEAL -4(BX), SP // caller sp after CALL
+#ifdef GOBUILDMODE_shared
+ SUBL $16, (SP) // return to CALL again
+#else
SUBL $5, (SP) // return to CALL again
+#endif
MOVL 0(DX), BX
JMP BX // but first run the deferred function
diff -pruN 1.6.3-1/src/runtime/asm_s390x.s 1.6.3-1ubuntu1/src/runtime/asm_s390x.s
--- 1.6.3-1/src/runtime/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1129 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// Indicate the status of vector facility
+// -1: init value
+// 0: vector not installed
+// 1: vector installed and enabled
+// 2: vector installed but not enabled
+
+DATA runtime·vectorfacility+0x00(SB)/4, $-1
+GLOBL runtime·vectorfacility(SB), NOPTR, $4
+
+TEXT runtime·checkvectorfacility(SB),NOSPLIT,$32-0
+ MOVD $2, R0
+ MOVD R1, tmp-32(SP)
+ MOVD $x-24(SP), R1
+// STFLE 0(R1)
+ WORD $0xB2B01000
+ MOVBZ z-8(SP), R1
+ AND $0x40, R1
+ BNE vectorinstalled
+ MOVB $0, runtime·vectorfacility(SB) //Vector not installed
+ MOVD tmp-32(SP), R1
+ MOVD $0, R0
+ RET
+vectorinstalled:
+ // check if the vector instruction has been enabled
+ VLEIB $0, $0xF, V16
+ VLGVB $0, V16, R0
+ CMPBEQ R0, $0xF, vectorenabled
+ MOVB $2, runtime·vectorfacility(SB) //Vector installed but not enabled
+ MOVD tmp-32(SP), R1
+ MOVD $0, R0
+ RET
+vectorenabled:
+ MOVB $1, runtime·vectorfacility(SB) //Vector installed and enabled
+ MOVD tmp-32(SP), R1
+ MOVD $0, R0
+ RET
+
+TEXT runtime·rt0_go(SB),NOSPLIT,$0
+ // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
+ // C TLS base pointer in AR0:AR1
+
+ // initialize essential registers
+ XOR R0, R0
+
+ SUB $24, R15
+ MOVW R2, 8(R15) // argc
+ MOVD R3, 16(R15) // argv
+
+ // create istack out of the given (operating system) stack.
+ // _cgo_init may update stackguard.
+ MOVD $runtime·g0(SB), g
+ MOVD R15, R11
+ SUB $(64*1024), R11
+ MOVD R11, g_stackguard0(g)
+ MOVD R11, g_stackguard1(g)
+ MOVD R11, (g_stack+stack_lo)(g)
+ MOVD R15, (g_stack+stack_hi)(g)
+
+ // if there is a _cgo_init, call it using the gcc ABI.
+ MOVD _cgo_init(SB), R11
+ CMPBEQ R11, $0, nocgo
+ MOVW AR0, R4 // (AR0 << 32 | AR1) is the TLS base pointer; MOVD is translated to EAR
+ SLD $32, R4, R4
+ MOVW AR1, R4 // arg 2: TLS base pointer
+ MOVD $setg_gcc<>(SB), R3 // arg 1: setg
+ MOVD g, R2 // arg 0: G
+ // C functions expect 160 bytes of space on caller stack frame
+ // and an 8-byte aligned stack pointer
+ MOVD R15, R9 // save current stack (R9 is preserved in the Linux ABI)
+ SUB $160, R15 // reserve 160 bytes
+ MOVD $~7, R6
+ AND R6, R15 // 8-byte align
+ BL R11 // this call clobbers volatile registers according to Linux ABI (R0-R5, R14)
+ MOVD R9, R15 // restore stack
+ XOR R0, R0 // zero R0
+
+nocgo:
+ // update stackguard after _cgo_init
+ MOVD (g_stack+stack_lo)(g), R2
+ ADD $const__StackGuard, R2
+ MOVD R2, g_stackguard0(g)
+ MOVD R2, g_stackguard1(g)
+
+ // set the per-goroutine and per-mach "registers"
+ MOVD $runtime·m0(SB), R2
+
+ // save m->g0 = g0
+ MOVD g, m_g0(R2)
+ // save m0 to g0->m
+ MOVD R2, g_m(g)
+
+ BL runtime·check(SB)
+
+ // argc/argv are already prepared on stack
+ BL runtime·args(SB)
+ BL runtime·osinit(SB)
+ BL runtime·schedinit(SB)
+
+ // create a new goroutine to start program
+ MOVD $runtime·mainPC(SB), R2 // entry
+ SUB $24, R15
+ MOVD R2, 16(R15)
+ MOVD R0, 8(R15)
+ MOVD R0, 0(R15)
+ BL runtime·newproc(SB)
+ ADD $24, R15
+
+ // start this M
+ BL runtime·mstart(SB)
+
+ MOVD R0, 1(R0)
+ RET
+
+DATA runtime·mainPC+0(SB)/8,$runtime·main(SB)
+GLOBL runtime·mainPC(SB),RODATA,$8
+
+TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0
+ MOVD R0, 2(R0)
+ RET
+
+TEXT runtime·asminit(SB),NOSPLIT|NOFRAME,$0-0
+ RET
+
+/*
+ * go-routine
+ */
+
+// void gosave(Gobuf*)
+// save state in Gobuf; setjmp
+TEXT runtime·gosave(SB), NOSPLIT, $-8-8
+ MOVD buf+0(FP), R3
+ MOVD R15, gobuf_sp(R3)
+ MOVD LR, gobuf_pc(R3)
+ MOVD g, gobuf_g(R3)
+ MOVD $0, gobuf_lr(R3)
+ MOVD $0, gobuf_ret(R3)
+ MOVD $0, gobuf_ctxt(R3)
+ RET
+
+// void gogo(Gobuf*)
+// restore state from Gobuf; longjmp
+TEXT runtime·gogo(SB), NOSPLIT, $-8-8
+ MOVD buf+0(FP), R5
+ MOVD gobuf_g(R5), g // make sure g is not nil
+ BL runtime·save_g(SB)
+
+ MOVD 0(g), R4
+ MOVD gobuf_sp(R5), R15
+ MOVD gobuf_lr(R5), LR
+ MOVD gobuf_ret(R5), R3
+ MOVD gobuf_ctxt(R5), R12
+ MOVD $0, gobuf_sp(R5)
+ MOVD $0, gobuf_ret(R5)
+ MOVD $0, gobuf_lr(R5)
+ MOVD $0, gobuf_ctxt(R5)
+ CMP R0, R0 // set condition codes for == test, needed by stack split
+ MOVD gobuf_pc(R5), R6
+ BR (R6)
+
+// void mcall(fn func(*g))
+// Switch to m->g0's stack, call fn(g).
+// Fn must never return. It should gogo(&g->sched)
+// to keep running g.
+TEXT runtime·mcall(SB), NOSPLIT, $-8-8
+ // Save caller state in g->sched
+ MOVD R15, (g_sched+gobuf_sp)(g)
+ MOVD LR, (g_sched+gobuf_pc)(g)
+ MOVD R0, (g_sched+gobuf_lr)(g)
+ MOVD g, (g_sched+gobuf_g)(g)
+
+ // Switch to m->g0 & its stack, call fn.
+ MOVD g, R3
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), g
+ BL runtime·save_g(SB)
+ CMP g, R3
+ BNE 2(PC)
+ BR runtime·badmcall(SB)
+ MOVD fn+0(FP), R12 // context
+ MOVD 0(R12), R4 // code pointer
+ MOVD (g_sched+gobuf_sp)(g), R15 // sp = m->g0->sched.sp
+ SUB $16, R15
+ MOVD R3, 8(R15)
+ MOVD $0, 0(R15)
+ BL (R4)
+ BR runtime·badmcall2(SB)
+
+// systemstack_switch is a dummy routine that systemstack leaves at the bottom
+// of the G stack. We need to distinguish the routine that
+// lives at the bottom of the G stack from the one that lives
+// at the top of the system stack because the one at the top of
+// the system stack terminates the stack walk (see topofstack()).
+TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0
+ UNDEF
+ BL (LR) // make sure this function is not leaf
+ RET
+
+// func systemstack(fn func())
+TEXT runtime·systemstack(SB), NOSPLIT, $0-8
+ MOVD fn+0(FP), R3 // R3 = fn
+ MOVD R3, R12 // context
+ MOVD g_m(g), R4 // R4 = m
+
+ MOVD m_gsignal(R4), R5 // R5 = gsignal
+ CMPBEQ g, R5, noswitch
+
+ MOVD m_g0(R4), R5 // R5 = g0
+ CMPBEQ g, R5, noswitch
+
+ MOVD m_curg(R4), R6
+ CMPBEQ g, R6, switch
+
+ // Bad: g is not gsignal, not g0, not curg. What is it?
+ // Hide call from linker nosplit analysis.
+ MOVD $runtime·badsystemstack(SB), R3
+ BL (R3)
+
+switch:
+ // save our state in g->sched. Pretend to
+ // be systemstack_switch if the G stack is scanned.
+ MOVD $runtime·systemstack_switch(SB), R6
+ ADD $16, R6 // get past prologue
+ MOVD R6, (g_sched+gobuf_pc)(g)
+ MOVD R15, (g_sched+gobuf_sp)(g)
+ MOVD R0, (g_sched+gobuf_lr)(g)
+ MOVD g, (g_sched+gobuf_g)(g)
+
+ // switch to g0
+ MOVD R5, g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R3
+ // make it look like mstart called systemstack on g0, to stop traceback
+ SUB $8, R3
+ MOVD $runtime·mstart(SB), R4
+ MOVD R4, 0(R3)
+ MOVD R3, R15
+
+ // call target function
+ MOVD 0(R12), R3 // code pointer
+ BL (R3)
+
+ // switch back to g
+ MOVD g_m(g), R3
+ MOVD m_curg(R3), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R15
+ MOVD $0, (g_sched+gobuf_sp)(g)
+ RET
+
+noswitch:
+ // already on m stack, just call directly
+ MOVD 0(R12), R3 // code pointer
+ BL (R3)
+ RET
+
+/*
+ * support for morestack
+ */
+
+// Called during function prolog when more stack is needed.
+// Caller has already loaded:
+// R3: framesize, R4: argsize, R5: LR
+//
+// The traceback routines see morestack on a g0 as being
+// the top of a stack (for example, morestack calling newstack
+// calling the scheduler calling newm calling gc), so we must
+// record an argument size. For that purpose, it has no arguments.
+TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
+ // Cannot grow scheduler stack (m->g0).
+ MOVD g_m(g), R7
+ MOVD m_g0(R7), R8
+ CMPBNE g, R8, 2(PC)
+ BL runtime·abort(SB)
+
+ // Cannot grow signal stack (m->gsignal).
+ MOVD m_gsignal(R7), R8
+ CMP g, R8
+ BNE 2(PC)
+ BL runtime·abort(SB)
+
+ // Called from f.
+ // Set g->sched to context in f.
+ MOVD R12, (g_sched+gobuf_ctxt)(g)
+ MOVD R15, (g_sched+gobuf_sp)(g)
+ MOVD LR, R8
+ MOVD R8, (g_sched+gobuf_pc)(g)
+ MOVD R5, (g_sched+gobuf_lr)(g)
+
+ // Called from f.
+ // Set m->morebuf to f's caller.
+ MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
+ MOVD R15, (m_morebuf+gobuf_sp)(R7) // f's caller's SP
+ MOVD g, (m_morebuf+gobuf_g)(R7)
+
+ // Call newstack on m->g0's stack.
+ MOVD m_g0(R7), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R15
+ BL runtime·newstack(SB)
+
+ // Not reached, but make sure the return PC from the call to newstack
+ // is still in this function, and not the beginning of the next.
+ UNDEF
+
+TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0
+ MOVD $0, R12
+ BR runtime·morestack(SB)
+
+TEXT runtime·stackBarrier(SB),NOSPLIT,$0
+ // We came here via a RET to an overwritten LR.
+ // R3 may be live. Other registers are available.
+
+ // Get the original return PC, g.stkbar[g.stkbarPos].savedLRVal.
+ MOVD (g_stkbar+slice_array)(g), R4
+ MOVD g_stkbarPos(g), R5
+ MOVD $stkbar__size, R6
+ MULLD R5, R6
+ ADD R4, R6
+ MOVD stkbar_savedLRVal(R6), R6
+ // Record that this stack barrier was hit.
+ ADD $1, R5
+ MOVD R5, g_stkbarPos(g)
+ // Jump to the original return PC.
+ BR (R6)
+
+// reflectcall: call a function with the given argument list
+// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32).
+// we don't have variable-sized frames, so we use a small number
+// of constant-sized-frame functions to encode a few bits of size in the pc.
+// Caution: ugly multiline assembly macros in your future!
+
+#define DISPATCH(NAME,MAXSIZE) \
+ MOVD $MAXSIZE, R4; \
+ CMP R3, R4; \
+ BGT 3(PC); \
+ MOVD $NAME(SB), R5; \
+ BR (R5)
+// Note: can't just "BR NAME(SB)" - bad inlining results.
+
+TEXT reflect·call(SB), NOSPLIT, $0-0
+ BR ·reflectcall(SB)
+
+TEXT ·reflectcall(SB), NOSPLIT, $-8-32
+ MOVWZ argsize+24(FP), R3
+ // NOTE(rsc): No call16, because CALLFN needs four words
+ // of argument space to invoke callwritebarrier.
+ DISPATCH(runtime·call32, 32)
+ DISPATCH(runtime·call64, 64)
+ DISPATCH(runtime·call128, 128)
+ DISPATCH(runtime·call256, 256)
+ DISPATCH(runtime·call512, 512)
+ DISPATCH(runtime·call1024, 1024)
+ DISPATCH(runtime·call2048, 2048)
+ DISPATCH(runtime·call4096, 4096)
+ DISPATCH(runtime·call8192, 8192)
+ DISPATCH(runtime·call16384, 16384)
+ DISPATCH(runtime·call32768, 32768)
+ DISPATCH(runtime·call65536, 65536)
+ DISPATCH(runtime·call131072, 131072)
+ DISPATCH(runtime·call262144, 262144)
+ DISPATCH(runtime·call524288, 524288)
+ DISPATCH(runtime·call1048576, 1048576)
+ DISPATCH(runtime·call2097152, 2097152)
+ DISPATCH(runtime·call4194304, 4194304)
+ DISPATCH(runtime·call8388608, 8388608)
+ DISPATCH(runtime·call16777216, 16777216)
+ DISPATCH(runtime·call33554432, 33554432)
+ DISPATCH(runtime·call67108864, 67108864)
+ DISPATCH(runtime·call134217728, 134217728)
+ DISPATCH(runtime·call268435456, 268435456)
+ DISPATCH(runtime·call536870912, 536870912)
+ DISPATCH(runtime·call1073741824, 1073741824)
+ MOVD $runtime·badreflectcall(SB), R5
+ BR (R5)
+
+#define CALLFN(NAME,MAXSIZE) \
+TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \
+ NO_LOCAL_POINTERS; \
+ /* copy arguments to stack */ \
+ MOVD arg+16(FP), R3; \
+ MOVWZ argsize+24(FP), R4; \
+ MOVD R15, R5; \
+ ADD $(8-1), R5; \
+ SUB $1, R3; \
+ ADD R5, R4; \
+ CMP R5, R4; \
+ BEQ 6(PC); \
+ ADD $1, R3; \
+ ADD $1, R5; \
+ MOVBZ 0(R3), R6; \
+ MOVBZ R6, 0(R5); \
+ BR -6(PC); \
+ /* call function */ \
+ MOVD f+8(FP), R12; \
+ MOVD (R12), R8; \
+ PCDATA $PCDATA_StackMapIndex, $0; \
+ BL (R8); \
+ /* copy return values back */ \
+ MOVD arg+16(FP), R3; \
+ MOVWZ n+24(FP), R4; \
+ MOVWZ retoffset+28(FP), R6; \
+ MOVD R15, R5; \
+ ADD R6, R5; \
+ ADD R6, R3; \
+ SUB R6, R4; \
+ ADD $(8-1), R5; \
+ SUB $1, R3; \
+ ADD R5, R4; \
+loop: \
+ CMP R5, R4; \
+ BEQ end; \
+ ADD $1, R5; \
+ ADD $1, R3; \
+ MOVBZ 0(R5), R6; \
+ MOVBZ R6, 0(R3); \
+ BR loop; \
+end: \
+ /* execute write barrier updates */ \
+ MOVD argtype+0(FP), R7; \
+ MOVD arg+16(FP), R3; \
+ MOVWZ n+24(FP), R4; \
+ MOVWZ retoffset+28(FP), R6; \
+ MOVD R7, 8(R15); \
+ MOVD R3, 16(R15); \
+ MOVD R4, 24(R15); \
+ MOVD R6, 32(R15); \
+ BL runtime·callwritebarrier(SB); \
+ RET
+
+CALLFN(·call32, 32)
+CALLFN(·call64, 64)
+CALLFN(·call128, 128)
+CALLFN(·call256, 256)
+CALLFN(·call512, 512)
+CALLFN(·call1024, 1024)
+CALLFN(·call2048, 2048)
+CALLFN(·call4096, 4096)
+CALLFN(·call8192, 8192)
+CALLFN(·call16384, 16384)
+CALLFN(·call32768, 32768)
+CALLFN(·call65536, 65536)
+CALLFN(·call131072, 131072)
+CALLFN(·call262144, 262144)
+CALLFN(·call524288, 524288)
+CALLFN(·call1048576, 1048576)
+CALLFN(·call2097152, 2097152)
+CALLFN(·call4194304, 4194304)
+CALLFN(·call8388608, 8388608)
+CALLFN(·call16777216, 16777216)
+CALLFN(·call33554432, 33554432)
+CALLFN(·call67108864, 67108864)
+CALLFN(·call134217728, 134217728)
+CALLFN(·call268435456, 268435456)
+CALLFN(·call536870912, 536870912)
+CALLFN(·call1073741824, 1073741824)
+
+TEXT runtime·procyield(SB),NOSPLIT,$0-0
+ RET
+
+// void jmpdefer(fv, sp);
+// called from deferreturn.
+// 1. grab stored LR for caller
+// 2. sub 6 bytes to get back to BL deferreturn (size of BRASL instruction)
+// 3. BR to fn
+TEXT runtime·jmpdefer(SB),NOSPLIT|NOFRAME,$0-16
+ MOVD 0(R15), R1
+ SUB $6, R1, LR
+
+ MOVD fv+0(FP), R12
+ MOVD argp+8(FP), R15
+ SUB $8, R15
+ MOVD 0(R12), R3
+ BR (R3)
+
+// Save state of caller into g->sched. Smashes R31.
+TEXT gosave<>(SB),NOSPLIT|NOFRAME,$0
+ MOVD LR, (g_sched+gobuf_pc)(g)
+ MOVD R15, (g_sched+gobuf_sp)(g)
+ MOVD $0, (g_sched+gobuf_lr)(g)
+ MOVD $0, (g_sched+gobuf_ret)(g)
+ MOVD $0, (g_sched+gobuf_ctxt)(g)
+ RET
+
+// func asmcgocall(fn, arg unsafe.Pointer) int32
+// Call fn(arg) on the scheduler stack,
+// aligned appropriately for the gcc ABI.
+// See cgocall.go for more details.
+TEXT ·asmcgocall(SB),NOSPLIT,$0-20
+ // R2 = argc; R3 = argv; R11 = temp; R13 = g; R15 = stack pointer
+ // C TLS base pointer in AR0:AR1
+ MOVD fn+0(FP), R3
+ MOVD arg+8(FP), R4
+
+ MOVD R15, R2 // save original stack pointer
+ MOVD g, R5
+
+ // Figure out if we need to switch to m->g0 stack.
+ // We get called to create new OS threads too, and those
+ // come in on the m->g0 stack already.
+ MOVD g_m(g), R6
+ MOVD m_g0(R6), R6
+ CMPBEQ R6, g, g0
+ BL gosave<>(SB)
+ MOVD R6, g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R15
+
+ // Now on a scheduling stack (a pthread-created stack).
+g0:
+ // Save room for two of our pointers, plus 160 bytes of callee
+ // save area that lives on the caller stack.
+ SUB $176, R15
+ MOVD $~7, R6
+ AND R6, R15 // 8-byte alignment for gcc ABI
+ MOVD R5, 168(R15) // save old g on stack
+ MOVD (g_stack+stack_hi)(R5), R5
+ SUB R2, R5
+ MOVD R5, 160(R15) // save depth in old g stack (can't just save SP, as stack might be copied during a callback)
+ MOVD R0, 0(R15) // clear back chain pointer (TODO can we give it real back trace information?)
+ MOVD R4, R2 // arg in R2
+ BL R3 // can clobber: R0-R5, R14, F0-F3, F5, F7-F15
+
+ XOR R0, R0 // set R0 back to 0.
+ // Restore g, stack pointer.
+ MOVD 168(R15), g
+ BL runtime·save_g(SB)
+ MOVD (g_stack+stack_hi)(g), R5
+ MOVD 160(R15), R6
+ SUB R6, R5
+ MOVD R5, R15
+
+ MOVW R2, ret+16(FP)
+ RET
+
+// cgocallback(void (*fn)(void*), void *frame, uintptr framesize)
+// Turn the fn into a Go func (by taking its address) and call
+// cgocallback_gofunc.
+TEXT runtime·cgocallback(SB),NOSPLIT,$24-24
+ MOVD $fn+0(FP), R3
+ MOVD R3, 8(R15)
+ MOVD frame+8(FP), R3
+ MOVD R3, 16(R15)
+ MOVD framesize+16(FP), R3
+ MOVD R3, 24(R15)
+ MOVD $runtime·cgocallback_gofunc(SB), R3
+ BL (R3)
+ RET
+
+// cgocallback_gofunc(FuncVal*, void *frame, uintptr framesize)
+// See cgocall.go for more details.
+TEXT ·cgocallback_gofunc(SB),NOSPLIT,$16-24
+ NO_LOCAL_POINTERS
+
+ // Load m and g from thread-local storage.
+ MOVB runtime·iscgo(SB), R3
+ CMPBEQ R3, $0, nocgo
+ BL runtime·load_g(SB)
+
+nocgo:
+ // If g is nil, Go did not create the current thread.
+ // Call needm to obtain one for temporary use.
+ // In this case, we're running on the thread stack, so there's
+ // lots of space, but the linker doesn't know. Hide the call from
+ // the linker analysis by using an indirect call.
+ CMPBEQ g, $0, needm
+
+ MOVD g_m(g), R8
+ MOVD R8, savedm-8(SP)
+ BR havem
+
+needm:
+ MOVD g, savedm-8(SP) // g is zero, so is m.
+ MOVD $runtime·needm(SB), R3
+ BL (R3)
+
+ // Set m->sched.sp = SP, so that if a panic happens
+ // during the function we are about to execute, it will
+ // have a valid SP to run on the g0 stack.
+ // The next few lines (after the havem label)
+ // will save this SP onto the stack and then write
+ // the same SP back to m->sched.sp. That seems redundant,
+ // but if an unrecovered panic happens, unwindm will
+ // restore the g->sched.sp from the stack location
+ // and then systemstack will try to use it. If we don't set it here,
+ // that restored SP will be uninitialized (typically 0) and
+ // will not be usable.
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), R3
+ MOVD R15, (g_sched+gobuf_sp)(R3)
+
+havem:
+ // Now there's a valid m, and we're running on its m->g0.
+ // Save current m->g0->sched.sp on stack and then set it to SP.
+ // Save current sp in m->g0->sched.sp in preparation for
+ // switch back to m->curg stack.
+ // NOTE: unwindm knows that the saved g->sched.sp is at 8(R1) aka savedsp-16(SP).
+ MOVD m_g0(R8), R3
+ MOVD (g_sched+gobuf_sp)(R3), R4
+ MOVD R4, savedsp-16(SP)
+ MOVD R15, (g_sched+gobuf_sp)(R3)
+
+ // Switch to m->curg stack and call runtime.cgocallbackg.
+ // Because we are taking over the execution of m->curg
+ // but *not* resuming what had been running, we need to
+ // save that information (m->curg->sched) so we can restore it.
+ // We can restore m->curg->sched.sp easily, because calling
+ // runtime.cgocallbackg leaves SP unchanged upon return.
+ // To save m->curg->sched.pc, we push it onto the stack.
+ // This has the added benefit that it looks to the traceback
+ // routine like cgocallbackg is going to return to that
+ // PC (because the frame we allocate below has the same
+ // size as cgocallback_gofunc's frame declared above)
+ // so that the traceback will seamlessly trace back into
+ // the earlier calls.
+ //
+ // In the new goroutine, -16(SP) and -8(SP) are unused.
+ MOVD m_curg(R8), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R4 // prepare stack as R4
+ MOVD (g_sched+gobuf_pc)(g), R5
+ MOVD R5, -24(R4)
+ MOVD $-24(R4), R15
+ BL runtime·cgocallbackg(SB)
+
+ // Restore g->sched (== m->curg->sched) from saved values.
+ MOVD 0(R15), R5
+ MOVD R5, (g_sched+gobuf_pc)(g)
+ MOVD $24(R15), R4
+ MOVD R4, (g_sched+gobuf_sp)(g)
+
+ // Switch back to m->g0's stack and restore m->g0->sched.sp.
+ // (Unlike m->curg, the g0 goroutine never uses sched.pc,
+ // so we do not have to restore it.)
+ MOVD g_m(g), R8
+ MOVD m_g0(R8), g
+ BL runtime·save_g(SB)
+ MOVD (g_sched+gobuf_sp)(g), R15
+ MOVD savedsp-16(SP), R4
+ MOVD R4, (g_sched+gobuf_sp)(g)
+
+ // If the m on entry was nil, we called needm above to borrow an m
+ // for the duration of the call. Since the call is over, return it with dropm.
+ MOVD savedm-8(SP), R6
+ CMPBNE R6, $0, droppedm
+ MOVD $runtime·dropm(SB), R3
+ BL (R3)
+droppedm:
+
+ // Done!
+ RET
+
+// void setg(G*); set g. for use by needm.
+TEXT runtime·setg(SB), NOSPLIT, $0-8
+ MOVD gg+0(FP), g
+ // This only happens if iscgo, so jump straight to save_g
+ BL runtime·save_g(SB)
+ RET
+
+// void setg_gcc(G*); set g in C TLS.
+// Must obey the gcc calling convention.
+TEXT setg_gcc<>(SB),NOSPLIT|NOFRAME,$0-0
+ // The standard prologue clobbers LR (R14), which is callee-save in
+ // the C ABI, so we have to use NOFRAME and save LR ourselves.
+ MOVD LR, R1
+ // Also save g, R10, and R11 since they're callee-save in C ABI
+ MOVD R10, R3
+ MOVD g, R4
+ MOVD R11, R5
+
+ MOVD R2, g
+ BL runtime·save_g(SB)
+
+ MOVD R5, R11
+ MOVD R4, g
+ MOVD R3, R10
+ MOVD R1, LR
+ RET
+
+// func getcallerpc(argp unsafe.Pointer) uintptr
+// Returns the PC its caller will return to, i.e. the call site in the
+// caller's caller. If a stack barrier has been installed at that frame
+// (saved LR == stackBarrierPC), the original return PC is fetched from
+// the stack barrier bookkeeping via nextBarrierPC instead.
+TEXT runtime·getcallerpc(SB),NOSPLIT,$8-16
+ MOVD 16(R15), R3 // LR saved by caller
+ MOVD runtime·stackBarrierPC(SB), R4
+ CMPBNE R3, R4, nobar // no barrier at this frame, saved LR is the real PC
+ // Get original return PC.
+ BL runtime·nextBarrierPC(SB)
+ MOVD 8(R15), R3 // read nextBarrierPC's result from the outgoing argument area
+nobar:
+ MOVD R3, ret+8(FP)
+ RET
+
+// func setcallerpc(argp unsafe.Pointer, pc uintptr)
+// Overwrites the PC that setcallerpc's caller will return to.
+// Mirror image of getcallerpc: if a stack barrier is installed at the
+// caller's frame, the barrier's saved PC is updated rather than the
+// in-memory LR slot (which must keep pointing at stackBarrierPC).
+TEXT runtime·setcallerpc(SB),NOSPLIT,$8-16
+ MOVD pc+8(FP), R3
+ MOVD 16(R15), R4 // LR currently saved by the caller
+ MOVD runtime·stackBarrierPC(SB), R5
+ CMPBEQ R4, R5, setbar // a stack barrier is installed here
+ MOVD R3, 16(R15) // set LR in caller
+ RET
+setbar:
+ // Set the stack barrier return PC.
+ MOVD R3, 8(R15) // pass pc to setNextBarrierPC in the outgoing argument slot
+ BL runtime·setNextBarrierPC(SB)
+ RET
+
+TEXT runtime·getcallersp(SB),NOSPLIT,$0-16
+ MOVD argp+0(FP), R3
+ SUB $8, R3
+ MOVD R3, ret+8(FP)
+ RET
+
+TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW (R0), R0
+ UNDEF
+
+// int64 runtime·cputicks(void)
+TEXT runtime·cputicks(SB),NOSPLIT,$0-8
+ // The TOD clock on s390 counts from the year 1900 in ~250ps intervals.
+ // This means that since about 1972 the msb has been set, making the
+ // result of a call to STORE CLOCK (stck) a negative number.
+ // We clear the msb to make it positive.
+ STCK ret+0(FP) // serialises before and after call
+ MOVD ret+0(FP), R3 // R3 will wrap to 0 in the year 2043
+ SLD $1, R3
+ SRD $1, R3
+ MOVD R3, ret+0(FP)
+ RET
+
+// memhash_varlen(p unsafe.Pointer, h seed) uintptr
+// redirects to memhash(p, h, size) using the size
+// stored in the closure.
+TEXT runtime·memhash_varlen(SB),NOSPLIT,$40-24
+ GO_ARGS
+ NO_LOCAL_POINTERS
+ MOVD p+0(FP), R3
+ MOVD h+8(FP), R4
+ MOVD 8(R12), R5
+ MOVD R3, 8(R15)
+ MOVD R4, 16(R15)
+ MOVD R5, 24(R15)
+ BL runtime·memhash(SB)
+ MOVD 32(R15), R3
+ MOVD R3, ret+16(FP)
+ RET
+
+// AES hashing not implemented for s390x
+TEXT runtime·aeshash(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW (R0), R15
+TEXT runtime·aeshash32(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW (R0), R15
+TEXT runtime·aeshash64(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW (R0), R15
+TEXT runtime·aeshashstr(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW (R0), R15
+
+TEXT runtime·memeq(SB),NOSPLIT|NOFRAME,$0-25
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R5
+ MOVD size+16(FP), R6
+ LA ret+24(FP), R7
+ BR runtime·memeqbody(SB)
+
+// memequal_varlen(a, b unsafe.Pointer) bool
+TEXT runtime·memequal_varlen(SB),NOSPLIT|NOFRAME,$0-17
+ MOVD a+0(FP), R3
+ MOVD b+8(FP), R5
+ MOVD 8(R12), R6 // compiler stores size at offset 8 in the closure
+ LA ret+16(FP), R7
+ BR runtime·memeqbody(SB)
+
+// eqstring tests whether two strings are equal.
+// The compiler guarantees that strings passed
+// to eqstring have equal length.
+// See runtime_test.go:eqstring_generic for
+// equivalent Go code.
+TEXT runtime·eqstring(SB),NOSPLIT|NOFRAME,$0-33
+ MOVD s1str+0(FP), R3
+ MOVD s1len+8(FP), R6
+ MOVD s2str+16(FP), R5
+ LA ret+32(FP), R7
+ BR runtime·memeqbody(SB)
+
+TEXT bytes·Equal(SB),NOSPLIT|NOFRAME,$0-49
+ MOVD a_len+8(FP), R2
+ MOVD b_len+32(FP), R6
+ MOVD a+0(FP), R3
+ MOVD b+24(FP), R5
+ LA ret+48(FP), R7
+ CMPBNE R2, R6, notequal
+ BR runtime·memeqbody(SB)
+notequal:
+ MOVB $0, ret+48(FP)
+ RET
+
+// input:
+// R3 = a
+// R5 = b
+// R6 = len
+// R7 = address of output byte (stores 0 or 1 here)
+// a and b have the same length
+// memeqbody reports whether the size-byte regions at a and b are equal,
+// writing 1 (equal) or 0 (not equal) to the output byte at R7.
+// Inputs of 256 bytes or more are compared one 256-byte chunk at a time
+// with CLC; a residual chunk of 32..255 bytes is compared by
+// EXRL-executing memeqbodyclc with the residual length patched into the
+// CLC; inputs under 32 bytes take the "tiny" path of direct
+// word/halfword/byte compares.
+TEXT runtime·memeqbody(SB),NOSPLIT|NOFRAME,$0-0
+ CMPBEQ R3, R5, equal // identical pointers are trivially equal
+loop:
+ CMPBEQ R6, $0, equal
+ CMPBLT R6, $32, tiny
+ CMP R6, $256
+ BLT tail
+ CLC $256, 0(R3), 0(R5) // compare next 256-byte chunk
+ BNE notequal
+ SUB $256, R6
+ LA 256(R3), R3
+ LA 256(R5), R5
+ BR loop
+tail:
+ SUB $1, R6, R8 // CLC length operand encodes (bytes - 1)
+ EXRL $runtime·memeqbodyclc(SB), R8
+ BEQ equal
+notequal:
+ MOVB $0, 0(R7)
+ RET
+equal:
+ MOVB $1, 0(R7)
+ RET
+tiny:
+ MOVD $0, R2 // R2 = offset of next byte to compare
+ CMPBLT R6, $16, lt16
+ MOVD 0(R3), R8
+ MOVD 0(R5), R9
+ CMPBNE R8, R9, notequal
+ MOVD 8(R3), R8
+ MOVD 8(R5), R9
+ CMPBNE R8, R9, notequal
+ LA 16(R2), R2
+ SUB $16, R6
+lt16:
+ CMPBLT R6, $8, lt8
+ MOVD 0(R3)(R2*1), R8
+ MOVD 0(R5)(R2*1), R9
+ CMPBNE R8, R9, notequal
+ LA 8(R2), R2
+ SUB $8, R6
+lt8:
+ CMPBLT R6, $4, lt4
+ MOVWZ 0(R3)(R2*1), R8
+ MOVWZ 0(R5)(R2*1), R9
+ CMPBNE R8, R9, notequal
+ LA 4(R2), R2
+ SUB $4, R6
+lt4:
+// Compare the final 0-3 bytes one at a time, declaring equality as
+// soon as the remaining length runs out.
+#define CHECK(n) \
+ CMPBEQ R6, $n, equal \
+ MOVB n(R3)(R2*1), R8 \
+ MOVB n(R5)(R2*1), R9 \
+ CMPBNE R8, R9, notequal
+ CHECK(0)
+ CHECK(1)
+ CHECK(2)
+ CHECK(3)
+ BR equal
+
+TEXT runtime·memeqbodyclc(SB),NOSPLIT|NOFRAME,$0-0
+ CLC $1, 0(R3), 0(R5)
+ RET
+
+// func fastrand1() uint32
+// Cheap per-M pseudo-random number generator used internally by the
+// runtime (not cryptographically secure). State lives in m.fastrand.
+TEXT runtime·fastrand1(SB), NOSPLIT, $0-4
+ MOVD g_m(g), R4
+ MOVWZ m_fastrand(R4), R3
+ ADD R3, R3 // shift state left one bit
+ CMPW R3, $0
+ BGE 2(PC) // skip feedback XOR when the shifted-out bit was 0
+ XOR $0x88888eef, R3 // feedback constant of the generator
+ MOVW R3, m_fastrand(R4)
+ MOVW R3, ret+0(FP)
+ RET
+
+// func IndexByte(s []byte, c byte) int
+// Thin wrapper: marshals arguments into the registers expected by
+// indexbytebody and tail-jumps to it.
+TEXT bytes·IndexByte(SB),NOSPLIT,$0-40
+ MOVD s+0(FP), R3 // s => R3
+ MOVD s_len+8(FP), R4 // s_len => R4
+ MOVBZ c+24(FP), R5 // c => R5
+ MOVD $ret+32(FP), R2 // &ret => R2
+ BR runtime·indexbytebody(SB)
+
+// func IndexByte(s string, c byte) int
+// Thin wrapper: marshals arguments into the registers expected by
+// indexbytebody and tail-jumps to it.
+TEXT strings·IndexByte(SB),NOSPLIT,$0-32
+ MOVD s+0(FP), R3 // s => R3
+ MOVD s_len+8(FP), R4 // s_len => R4
+ MOVBZ c+16(FP), R5 // c => R5
+ MOVD $ret+24(FP), R2 // &ret => R2
+ BR runtime·indexbytebody(SB)
+
+// input:
+// R3: s
+// R4: s_len
+// R5: c -- byte sought
+// R2: &ret -- address to put index into
+TEXT runtime·indexbytebody(SB),NOSPLIT,$0
+ CMPBEQ R4, $0, notfound
+ MOVD R3, R6 // store base for later
+ ADD R3, R4, R8 // the address after the end of the string
+ //if the length is small, use loop; otherwise, use vector or srst search
+ CMPBGE R4, $16, large
+
+residual:
+ CMPBEQ R3, R8, notfound
+ MOVBZ 0(R3), R7
+ LA 1(R3), R3
+ CMPBNE R7, R5, residual
+
+found:
+ SUB R6, R3
+ SUB $1, R3
+ MOVD R3, 0(R2)
+ RET
+
+notfound:
+ MOVD $-1, 0(R2)
+ RET
+
+large:
+ MOVB runtime·vectorfacility(SB), R1
+ CMPBEQ R1, $-1, checkvector // vectorfacility = -1, vector not checked yet
+vectorchecked:
+ CMPBEQ R1, $1, vectorimpl // vectorfacility = 1, vector supported
+
+srstimpl: // vectorfacility != 1, not support or enable vector
+ MOVBZ R5, R0 // c needs to be in R0, leave until last minute as currently R0 is expected to be 0
+srstloop:
+ WORD $0xB25E0083 // srst %r8, %r3 (search the range [R3, R8))
+ BVS srstloop // interrupted - continue
+ BGT notfoundr0
+foundr0:
+ XOR R0, R0 // reset R0
+ SUB R6, R8 // remove base
+ MOVD R8, 0(R2)
+ RET
+notfoundr0:
+ XOR R0, R0 // reset R0
+ MOVD $-1, 0(R2)
+ RET
+
+vectorimpl:
+ //if the address is not 16byte aligned, use loop for the header
+ AND $15, R3, R8
+ CMPBGT R8, $0, notaligned
+
+aligned:
+ ADD R6, R4, R8
+ AND $-16, R8, R7
+ // replicate c across V17
+ VLVGB $0, R5, V19
+ VREPB $0, V19, V17
+
+vectorloop:
+ CMPBGE R3, R7, residual
+ VL 0(R3), V16 // load string to be searched into V16
+ ADD $16, R3
+ VFEEBS V16, V17, V18 // search V17 in V16 and set conditional code accordingly
+ BVS vectorloop
+
+ // when vector search found c in the string
+ VLGVB $7, V18, R7 // load 7th element of V18 containing index into R7
+ SUB $16, R3
+ SUB R6, R3
+ ADD R3, R7
+ MOVD R7, 0(R2)
+ RET
+
+notaligned:
+ AND $-16, R3, R8
+ ADD $16, R8
+notalignedloop:
+ CMPBEQ R3, R8, aligned
+ MOVBZ 0(R3), R7
+ LA 1(R3), R3
+ CMPBNE R7, R5, notalignedloop
+ BR found
+
+checkvector:
+ CALL runtime·checkvectorfacility(SB)
+ MOVB runtime·vectorfacility(SB), R1
+ BR vectorchecked
+
+TEXT runtime·return0(SB), NOSPLIT, $0
+ MOVW $0, R3
+ RET
+
+// Called from cgo wrappers, this function returns g->m->curg.stack.hi.
+// Must obey the gcc calling convention.
+TEXT _cgo_topofstack(SB),NOSPLIT|NOFRAME,$0
+ // g (R13), R10, R11 and LR (R14) are callee-save in the C ABI, so save them
+ MOVD g, R1
+ MOVD R10, R3
+ MOVD LR, R4
+ MOVD R11, R5
+
+ BL runtime·load_g(SB) // clobbers g (R13), R10, R11
+ MOVD g_m(g), R2
+ MOVD m_curg(R2), R2
+ MOVD (g_stack+stack_hi)(R2), R2
+
+ MOVD R1, g
+ MOVD R3, R10
+ MOVD R4, LR
+ MOVD R5, R11
+ RET
+
+// The top-most function running on a goroutine
+// returns to goexit+PCQuantum.
+TEXT runtime·goexit(SB),NOSPLIT|NOFRAME,$0-0
+ BYTE $0x07; BYTE $0x00; // 2-byte nop
+ BL runtime·goexit1(SB) // does not return
+ // traceback from goexit1 must hit code range of goexit
+ BYTE $0x07; BYTE $0x00; // 2-byte nop
+
+TEXT runtime·prefetcht0(SB),NOSPLIT,$0-8
+ RET
+
+TEXT runtime·prefetcht1(SB),NOSPLIT,$0-8
+ RET
+
+TEXT runtime·prefetcht2(SB),NOSPLIT,$0-8
+ RET
+
+TEXT runtime·prefetchnta(SB),NOSPLIT,$0-8
+ RET
+
+TEXT runtime·sigreturn(SB),NOSPLIT,$0-8
+ RET
+
+TEXT ·publicationBarrier(SB),NOSPLIT|NOFRAME,$0-0
+ SYNC
+ RET
+
+TEXT runtime·cmpstring(SB),NOSPLIT|NOFRAME,$0-40
+ MOVD s1_base+0(FP), R3
+ MOVD s1_len+8(FP), R4
+ MOVD s2_base+16(FP), R5
+ MOVD s2_len+24(FP), R6
+ LA ret+32(FP), R7
+ BR runtime·cmpbody(SB)
+
+TEXT bytes·Compare(SB),NOSPLIT|NOFRAME,$0-56
+ MOVD s1+0(FP), R3
+ MOVD s1+8(FP), R4
+ MOVD s2+24(FP), R5
+ MOVD s2+32(FP), R6
+ LA res+48(FP), R7
+ BR runtime·cmpbody(SB)
+
+// input:
+// R3 = a
+// R4 = alen
+// R5 = b
+// R6 = blen
+// R7 = address of output word (stores -1/0/1 here)
+// cmpbody lexically compares the regions a[0:alen] and b[0:blen] and
+// stores -1 (a < b), 0 (equal) or 1 (a > b) at the output address.
+// Chunks of 256 bytes are compared with a single CLC; the final short
+// chunk is compared by EXRL-executing cmpbodyclc with the residual
+// length patched into the CLC length field.
+TEXT runtime·cmpbody(SB),NOSPLIT|NOFRAME,$0-0
+ CMPBEQ R3, R5, cmplengths // same pointer: order decided by lengths alone
+ MOVD R4, R8
+ CMPBLE R4, R6, amin
+ MOVD R6, R8 // R8 = min(alen, blen)
+amin:
+ CMPBEQ R8, $0, cmplengths
+ CMP R8, $256
+ BLE tail
+loop:
+ CLC $256, 0(R3), 0(R5)
+ BGT gt
+ BLT lt
+ SUB $256, R8
+ LA 256(R3), R3 // advance a past the chunk just compared
+ LA 256(R5), R5 // keep b in step with a
+ CMP R8, $256
+ BGT loop
+tail:
+ SUB $1, R8 // CLC length operand encodes (bytes - 1)
+ EXRL $runtime·cmpbodyclc(SB), R8
+ BGT gt
+ BLT lt
+cmplengths:
+ // Common prefix is equal: the shorter operand sorts first.
+ CMP R4, R6
+ BEQ eq
+ BLT lt
+gt:
+ MOVD $1, 0(R7)
+ RET
+lt:
+ MOVD $-1, 0(R7)
+ RET
+eq:
+ MOVD $0, 0(R7)
+ RET
+
+TEXT runtime·cmpbodyclc(SB),NOSPLIT|NOFRAME,$0-0
+ CLC $1, 0(R3), 0(R5)
+ RET
+
+// This is called from .init_array and follows the platform, not Go, ABI.
+// We are overly conservative. We could only save the registers we use.
+// However, since this function is only called once per loaded module
+// performance is unimportant.
+TEXT runtime·addmoduledata(SB),NOSPLIT|NOFRAME,$0-0
+ // Save R6-R15, F0, F2, F4 and F6 in the
+ // register save area of the calling function
+ STMG R6, R15, 48(R15)
+ FMOVD F0, 128(R15)
+ FMOVD F2, 136(R15)
+ FMOVD F4, 144(R15)
+ FMOVD F6, 152(R15)
+
+ // append the argument (passed in R2, as per the ELF ABI) to the
+ // moduledata linked list.
+ MOVD runtime·lastmoduledatap(SB), R1
+ MOVD R2, moduledata_next(R1)
+ MOVD R2, runtime·lastmoduledatap(SB)
+
+ // Restore R6-R15, F0, F2, F4 and F6.
+ // Note the operand order: these FMOVDs load memory back into the
+ // floating point registers (the inverse of the register-to-memory
+ // stores in the save sequence above).
+ LMG 48(R15), R6, R15
+ FMOVD 128(R15), F0
+ FMOVD 136(R15), F2
+ FMOVD 144(R15), F4
+ FMOVD 152(R15), F6
+ RET
+
+TEXT ·checkASM(SB),NOSPLIT,$0-1
+ MOVB $1, ret+0(FP)
+ RET
diff -pruN 1.6.3-1/src/runtime/cgo/asm_s390x.s 1.6.3-1ubuntu1/src/runtime/cgo/asm_s390x.s
--- 1.6.3-1/src/runtime/cgo/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/cgo/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,44 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+/*
+ * void crosscall2(void (*fn)(void*, int32), void*, int32)
+ * Save registers and call fn with two arguments.
+ * crosscall2 obeys the C ABI; fn obeys the Go ABI.
+ */
+TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0
+ // Start with standard C stack frame layout and linkage
+
+ // Save R6-R15, F0, F2, F4 and F6 in the
+ // register save area of the calling function
+ STMG R6, R15, 48(R15)
+ FMOVD F0, 128(R15)
+ FMOVD F2, 136(R15)
+ FMOVD F4, 144(R15)
+ FMOVD F6, 152(R15)
+
+ // Initialize Go ABI environment
+ XOR R0, R0 // Go code expects R0 == 0
+ BL runtime·load_g(SB)
+
+ // Allocate 24 bytes on the stack
+ SUB $24, R15
+
+ MOVD R3, 8(R15) // arg1
+ MOVW R4, 16(R15) // arg2
+ BL (R2) // fn(arg1, arg2)
+
+ ADD $24, R15
+
+ // Restore R6-R15, F0, F2, F4 and F6.
+ // The FMOVDs here must be memory-to-register loads; storing in the
+ // other direction would leave the C caller's callee-saved floating
+ // point registers clobbered instead of restored (compare the ldy
+ // sequence in gcc_s390x.S crosscall_s390x).
+ LMG 48(R15), R6, R15
+ FMOVD 128(R15), F0
+ FMOVD 136(R15), F2
+ FMOVD 144(R15), F4
+ FMOVD 152(R15), F6
+
+ RET
+
diff -pruN 1.6.3-1/src/runtime/cgo/gcc_linux_s390x.c 1.6.3-1ubuntu1/src/runtime/cgo/gcc_linux_s390x.c
--- 1.6.3-1/src/runtime/cgo/gcc_linux_s390x.c 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/cgo/gcc_linux_s390x.c 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,68 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include <pthread.h>
+#include <string.h>
+#include <signal.h>
+#include "libcgo.h"
+
+static void *threadentry(void*);
+
+void (*x_cgo_inittls)(void **tlsg, void **tlsbase);
+static void (*setg_gcc)(void*);
+
+void
+x_cgo_init(G *g, void (*setg)(void*), void **tlsbase)
+{
+ pthread_attr_t attr;
+ size_t size;
+
+ setg_gcc = setg;
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ g->stacklo = (uintptr)&attr - size + 4096;
+ pthread_attr_destroy(&attr);
+}
+
+void
+_cgo_sys_thread_start(ThreadStart *ts)
+{
+ pthread_attr_t attr;
+ sigset_t ign, oset;
+ pthread_t p;
+ size_t size;
+ int err;
+
+ sigfillset(&ign);
+ pthread_sigmask(SIG_SETMASK, &ign, &oset);
+
+ pthread_attr_init(&attr);
+ pthread_attr_getstacksize(&attr, &size);
+ // Leave stacklo=0 and set stackhi=size; mstack will do the rest.
+ ts->g->stackhi = size;
+ err = pthread_create(&p, &attr, threadentry, ts);
+
+ pthread_sigmask(SIG_SETMASK, &oset, nil);
+
+ if (err != 0) {
+ fatalf("pthread_create failed: %s", strerror(err));
+ }
+}
+
+extern void crosscall_s390x(void (*fn)(void), void *g);
+
+static void*
+threadentry(void *v)
+{
+ ThreadStart ts;
+
+ ts = *(ThreadStart*)v;
+ free(v);
+
+ // Save g for this thread in C TLS
+ setg_gcc((void*)ts.g);
+
+ crosscall_s390x(ts.fn, (void*)ts.g);
+ return nil;
+}
diff -pruN 1.6.3-1/src/runtime/cgo/gcc_s390x.S 1.6.3-1ubuntu1/src/runtime/cgo/gcc_s390x.S
--- 1.6.3-1/src/runtime/cgo/gcc_s390x.S 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/cgo/gcc_s390x.S 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,46 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+ * void crosscall_s390x(void (*fn)(void), void *g)
+ *
+ * Calling into the go tool chain, where all registers are caller save.
+ * Called from standard s390x C ABI, where r6-r13, r15, and f0, f2, f4 and f6 are
+ * callee-save, so they must be saved explicitly.
+ */
+.globl crosscall_s390x
+crosscall_s390x:
+ /*
+ * save r6-r15, f0, f2, f4 and f6 in the
+ * register save area of the calling function
+ */
+ stmg %r6, %r15, 48(%r15)
+ stdy %f0, 128(%r15)
+ stdy %f2, 136(%r15)
+ stdy %f4, 144(%r15)
+ stdy %f6, 152(%r15)
+
+ /* set r0 to 0 */
+ xgr %r0, %r0
+
+ /* restore g pointer */
+ lgr %r13, %r3
+
+ /* grow stack 8 bytes and call fn */
+ agfi %r15, -8
+ basr %r14, %r2
+ agfi %r15, 8
+
+ /* restore registers */
+ lmg %r6, %r15, 48(%r15)
+ ldy %f0, 128(%r15)
+ ldy %f2, 136(%r15)
+ ldy %f4, 144(%r15)
+ ldy %f6, 152(%r15)
+
+ br %r14 /* restored by lmg */
+
+#ifdef __ELF__
+.section .note.GNU-stack,"",%progbits
+#endif
diff -pruN 1.6.3-1/src/runtime/cgocall.go 1.6.3-1ubuntu1/src/runtime/cgocall.go
--- 1.6.3-1/src/runtime/cgocall.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/cgocall.go 2016-07-21 13:36:09.000000000 +0000
@@ -239,8 +239,8 @@ func cgocallbackg1() {
case "386":
// On 386, stack frame is three words, plus caller PC.
cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
- case "ppc64", "ppc64le":
- // On ppc64, the callback arguments are in the arguments area of
+ case "ppc64", "ppc64le", "s390x":
+ // On ppc64 and s390x, the callback arguments are in the arguments area of
// cgocallback's stack frame. The stack looks like this:
// +--------------------+------------------------------+
// | | ... |
@@ -293,7 +293,7 @@ func unwindm(restore *bool) {
switch GOARCH {
default:
throw("unwindm not implemented")
- case "386", "amd64", "arm", "ppc64", "ppc64le":
+ case "386", "amd64", "arm", "ppc64", "ppc64le", "s390x":
sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
case "arm64":
sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
diff -pruN 1.6.3-1/src/runtime/defs_linux_s390x.go 1.6.3-1ubuntu1/src/runtime/defs_linux_s390x.go
--- 1.6.3-1/src/runtime/defs_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/defs_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,167 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _EINTR = 0x4
+ _EAGAIN = 0xb
+ _ENOMEM = 0xc
+
+ _PROT_NONE = 0x0
+ _PROT_READ = 0x1
+ _PROT_WRITE = 0x2
+ _PROT_EXEC = 0x4
+
+ _MAP_ANON = 0x20
+ _MAP_PRIVATE = 0x2
+ _MAP_FIXED = 0x10
+
+ _MADV_DONTNEED = 0x4
+ _MADV_HUGEPAGE = 0xe
+ _MADV_NOHUGEPAGE = 0xf
+
+ _SA_RESTART = 0x10000000
+ _SA_ONSTACK = 0x8000000
+ _SA_SIGINFO = 0x4
+
+ _SIGHUP = 0x1
+ _SIGINT = 0x2
+ _SIGQUIT = 0x3
+ _SIGILL = 0x4
+ _SIGTRAP = 0x5
+ _SIGABRT = 0x6
+ _SIGBUS = 0x7
+ _SIGFPE = 0x8
+ _SIGKILL = 0x9
+ _SIGUSR1 = 0xa
+ _SIGSEGV = 0xb
+ _SIGUSR2 = 0xc
+ _SIGPIPE = 0xd
+ _SIGALRM = 0xe
+ _SIGSTKFLT = 0x10
+ _SIGCHLD = 0x11
+ _SIGCONT = 0x12
+ _SIGSTOP = 0x13
+ _SIGTSTP = 0x14
+ _SIGTTIN = 0x15
+ _SIGTTOU = 0x16
+ _SIGURG = 0x17
+ _SIGXCPU = 0x18
+ _SIGXFSZ = 0x19
+ _SIGVTALRM = 0x1a
+ _SIGPROF = 0x1b
+ _SIGWINCH = 0x1c
+ _SIGIO = 0x1d
+ _SIGPWR = 0x1e
+ _SIGSYS = 0x1f
+
+ _FPE_INTDIV = 0x1
+ _FPE_INTOVF = 0x2
+ _FPE_FLTDIV = 0x3
+ _FPE_FLTOVF = 0x4
+ _FPE_FLTUND = 0x5
+ _FPE_FLTRES = 0x6
+ _FPE_FLTINV = 0x7
+ _FPE_FLTSUB = 0x8
+
+ _BUS_ADRALN = 0x1
+ _BUS_ADRERR = 0x2
+ _BUS_OBJERR = 0x3
+
+ _SEGV_MAPERR = 0x1
+ _SEGV_ACCERR = 0x2
+
+ _ITIMER_REAL = 0x0
+ _ITIMER_VIRTUAL = 0x1
+ _ITIMER_PROF = 0x2
+
+ _EPOLLIN = 0x1
+ _EPOLLOUT = 0x4
+ _EPOLLERR = 0x8
+ _EPOLLHUP = 0x10
+ _EPOLLRDHUP = 0x2000
+ _EPOLLET = 0x80000000
+ _EPOLL_CLOEXEC = 0x80000
+ _EPOLL_CTL_ADD = 0x1
+ _EPOLL_CTL_DEL = 0x2
+ _EPOLL_CTL_MOD = 0x3
+)
+
+type timespec struct {
+ tv_sec int64
+ tv_nsec int64
+}
+
+func (ts *timespec) set_sec(x int64) {
+ ts.tv_sec = x
+}
+
+func (ts *timespec) set_nsec(x int32) {
+ ts.tv_nsec = int64(x)
+}
+
+type timeval struct {
+ tv_sec int64
+ tv_usec int64
+}
+
+func (tv *timeval) set_usec(x int32) {
+ tv.tv_usec = int64(x)
+}
+
+type sigactiont struct {
+ sa_handler uintptr
+ sa_flags uint64
+ sa_restorer uintptr
+ sa_mask uint64
+}
+
+type siginfo struct {
+ si_signo int32
+ si_errno int32
+ si_code int32
+ // below here is a union; si_addr is the only field we use
+ si_addr uint64
+}
+
+type itimerval struct {
+ it_interval timeval
+ it_value timeval
+}
+
+type epollevent struct {
+ events uint32
+ pad_cgo_0 [4]byte
+ data [8]byte // unaligned uintptr
+}
+
+const (
+ _O_RDONLY = 0x0
+ _O_CLOEXEC = 0x80000
+ _SA_RESTORER = 0
+)
+
+type sigaltstackt struct {
+ ss_sp *byte
+ ss_flags int32
+ ss_size uintptr
+}
+
+type sigcontext struct {
+ psw_mask uint64
+ psw_addr uint64
+ gregs [16]uint64
+ aregs [16]uint32
+ fpc uint32
+ fpregs [16]uint64
+}
+
+type ucontext struct {
+ uc_flags uint64
+ uc_link *ucontext
+ uc_stack sigaltstackt
+ uc_mcontext sigcontext
+ uc_sigmask uint64
+}
diff -pruN 1.6.3-1/src/runtime/extern.go 1.6.3-1ubuntu1/src/runtime/extern.go
--- 1.6.3-1/src/runtime/extern.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/extern.go 2016-07-21 13:36:09.000000000 +0000
@@ -230,5 +230,5 @@ func Version() string {
const GOOS string = sys.TheGoos
// GOARCH is the running program's architecture target:
-// 386, amd64, or arm.
+// 386, amd64, arm, or s390x.
const GOARCH string = sys.TheGoarch
diff -pruN 1.6.3-1/src/runtime/gcinfo_test.go 1.6.3-1ubuntu1/src/runtime/gcinfo_test.go
--- 1.6.3-1/src/runtime/gcinfo_test.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/gcinfo_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -144,7 +144,7 @@ func infoBigStruct() []byte {
typeScalar, typeScalar, typeScalar, typeScalar, // t int; y uint16; u uint64
typePointer, typeScalar, // i string
}
- case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le":
+ case "arm64", "amd64", "mips64", "mips64le", "ppc64", "ppc64le", "s390x":
return []byte{
typePointer, // q *int
typeScalar, typeScalar, typeScalar, // w byte; e [17]byte
diff -pruN 1.6.3-1/src/runtime/hash64.go 1.6.3-1ubuntu1/src/runtime/hash64.go
--- 1.6.3-1/src/runtime/hash64.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/hash64.go 2016-07-21 13:36:09.000000000 +0000
@@ -6,7 +6,7 @@
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/
-// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le
+// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x
package runtime
diff -pruN 1.6.3-1/src/runtime/internal/atomic/asm_s390x.s 1.6.3-1ubuntu1/src/runtime/internal/atomic/asm_s390x.s
--- 1.6.3-1/src/runtime/internal/atomic/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/internal/atomic/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,194 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func Cas(ptr *uint32, old, new uint32) bool
+// Atomically:
+// if *ptr == old {
+// *val = new
+// return 1
+// } else {
+// return 0
+// }
+TEXT ·Cas(SB), NOSPLIT, $0-17
+ MOVD ptr+0(FP), R3
+ MOVWZ old+8(FP), R4
+ MOVWZ new+12(FP), R5
+ CS R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
+ BNE cas_fail
+ MOVB $1, ret+16(FP)
+ RET
+cas_fail:
+ MOVB $0, ret+16(FP)
+ RET
+
+// func Cas64(ptr *uint64, old, new uint64) bool
+// Atomically:
+// if *ptr == old {
+// *ptr = new
+// return 1
+// } else {
+// return 0
+// }
+TEXT ·Cas64(SB), NOSPLIT, $0-25
+ MOVD ptr+0(FP), R3
+ MOVD old+8(FP), R4
+ MOVD new+16(FP), R5
+ CSG R4, R5, 0(R3) // if (R4 == 0(R3)) then 0(R3)= R5
+ BNE cas64_fail
+ MOVB $1, ret+24(FP)
+ RET
+cas64_fail:
+ MOVB $0, ret+24(FP)
+ RET
+
+// func Casuintptr(ptr *uintptr, old, new uintptr) bool
+TEXT ·Casuintptr(SB), NOSPLIT, $0-25
+ BR ·Cas64(SB)
+
+// func Loaduintptr(ptr *uintptr) uintptr
+TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
+ BR ·Load64(SB)
+
+// func Loaduint(ptr *uint) uint
+TEXT ·Loaduint(SB), NOSPLIT, $0-16
+ BR ·Load64(SB)
+
+// func Storeuintptr(ptr *uintptr, new uintptr)
+TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
+ BR ·Store64(SB)
+
+// func Loadint64(ptr *int64) int64
+TEXT ·Loadint64(SB), NOSPLIT, $0-16
+ BR ·Load64(SB)
+
+// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
+ BR ·Xadd64(SB)
+
+// func Xaddint64(ptr *int64, delta int64) int64
+TEXT ·Xaddint64(SB), NOSPLIT, $0-16
+ BR ·Xadd64(SB)
+
+// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+// Atomically:
+// if *ptr == old {
+// *ptr = new
+// return 1
+// } else {
+// return 0
+// }
+TEXT ·Casp1(SB), NOSPLIT, $0-25
+ BR ·Cas64(SB)
+
+// func Xadd(ptr *uint32, delta int32) uint32
+// Atomically:
+// *ptr += delta
+// return *ptr
+TEXT ·Xadd(SB), NOSPLIT, $0-20
+ MOVD ptr+0(FP), R4
+ MOVW delta+8(FP), R5
+ MOVW (R4), R3
+repeat:
+ ADD R5, R3, R6
+ CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
+ BNE repeat
+ MOVW R6, ret+16(FP)
+ RET
+
+// func Xadd64(ptr *uint64, delta int64) uint64
+TEXT ·Xadd64(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R4
+ MOVD delta+8(FP), R5
+ MOVD (R4), R3
+repeat:
+ ADD R5, R3, R6
+ CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
+ BNE repeat
+ MOVD R6, ret+16(FP)
+ RET
+
+// func Xchg(ptr *uint32, new uint32) uint32
+TEXT ·Xchg(SB), NOSPLIT, $0-20
+ MOVD ptr+0(FP), R4
+ MOVW new+8(FP), R3
+ MOVW (R4), R6
+repeat:
+ CS R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
+ BNE repeat
+ MOVW R6, ret+16(FP)
+ RET
+
+// func Xchg64(ptr *uint64, new uint64) uint64
+TEXT ·Xchg64(SB), NOSPLIT, $0-24
+ MOVD ptr+0(FP), R4
+ MOVD new+8(FP), R3
+ MOVD (R4), R6
+repeat:
+ CSG R6, R3, (R4) // if R6==(R4) then (R4)=R3 else R6=(R4)
+ BNE repeat
+ MOVD R6, ret+16(FP)
+ RET
+
+// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
+ BR ·Xchg64(SB)
+
+// on s390x load & store are both atomic operations
+
+// func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
+TEXT ·Storep1(SB), NOSPLIT, $0-16
+ BR ·Store64(SB)
+
+// func Store(ptr *uint32, val uint32)
+TEXT ·Store(SB), NOSPLIT, $0-12
+ MOVD ptr+0(FP), R3
+ MOVW val+8(FP), R4
+ MOVW R4, 0(R3)
+ RET
+
+// func Store64(ptr *uint64, val uint64)
+TEXT ·Store64(SB), NOSPLIT, $0-16
+ MOVD ptr+0(FP), R3
+ MOVD val+8(FP), R4
+ MOVD R4, 0(R3)
+ RET
+
+// func Or8(ptr *uint8, val uint8)
+// Atomically: *ptr |= val.
+// There is no byte-sized compare-and-swap, so the byte is ORed in via
+// a 32-bit CS (compare-and-swap) loop on the containing aligned word;
+// the other byte lanes carry zero and are left unchanged by the OR.
+TEXT ·Or8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVBZ val+8(FP), R4
+ // Calculate shift.
+ AND $3, R3, R5
+ XOR $3, R5 // big endian - flip direction
+ SLD $3, R5 // MUL $8, R5
+ SLD R5, R4 // move val into the target byte lane
+ // Align ptr down to 4 bytes so we can use 32-bit load/store.
+ AND $-4, R3
+ MOVWZ 0(R3), R6
+again:
+ OR R4, R6, R7
+ CS R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
+ BNE again // lost the race; R6 now holds the fresh value, retry
+ RET
+
+// func And8(ptr *uint8, val uint8)
+// Atomically: *ptr &= val.
+// Implemented as a 32-bit CS (compare-and-swap) loop on the containing
+// aligned word; the other byte lanes of the mask are 0xff so the AND
+// leaves them unchanged.
+TEXT ·And8(SB), NOSPLIT, $0-9
+ MOVD ptr+0(FP), R3
+ MOVBZ val+8(FP), R4
+ // Calculate shift.
+ AND $3, R3, R5
+ XOR $3, R5 // big endian - flip direction
+ SLD $3, R5 // MUL $8, R5
+ OR $-256, R4 // create 0xffffffffffffffxx
+ RLLG R5, R4 // rotate so val lands in the target byte lane
+ // Align ptr down to 4 bytes so we can use 32-bit load/store.
+ AND $-4, R3
+ MOVWZ 0(R3), R6
+again:
+ AND R4, R6, R7
+ CS R6, R7, 0(R3) // if R6==(R3) then (R3)=R7 else R6=(R3)
+ BNE again // CS reloaded R6 with the current value; retry
+ RET
diff -pruN 1.6.3-1/src/runtime/internal/atomic/atomic_s390x.go 1.6.3-1ubuntu1/src/runtime/internal/atomic/atomic_s390x.go
--- 1.6.3-1/src/runtime/internal/atomic/atomic_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/internal/atomic/atomic_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,63 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package atomic
+
+import "unsafe"
+
+//go:nosplit
+//go:noinline
+func Load(ptr *uint32) uint32 {
+ return *ptr
+}
+
+//go:nosplit
+//go:noinline
+func Loadp(ptr unsafe.Pointer) unsafe.Pointer {
+ return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+//go:noinline
+func Load64(ptr *uint64) uint64 {
+ return *ptr
+}
+
+//go:noescape
+func And8(ptr *uint8, val uint8)
+
+//go:noescape
+func Or8(ptr *uint8, val uint8)
+
+// NOTE: Do not add atomicxor8 (XOR is not idempotent).
+
+//go:noescape
+func Xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func Xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
+
+//go:noescape
+func Xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func Xchg64(ptr *uint64, new uint64) uint64
+
+//go:noescape
+func Xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func Cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func Store(ptr *uint32, val uint32)
+
+//go:noescape
+func Store64(ptr *uint64, val uint64)
+
+// NO go:noescape annotation; see atomic_pointer.go.
+func Storep1(ptr unsafe.Pointer, val unsafe.Pointer)
diff -pruN 1.6.3-1/src/runtime/internal/sys/arch_s390x.go 1.6.3-1ubuntu1/src/runtime/internal/sys/arch_s390x.go
--- 1.6.3-1/src/runtime/internal/sys/arch_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/internal/sys/arch_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sys
+
+const (
+ TheChar = 'z'
+ BigEndian = 1
+ CacheLineSize = 256
+ PhysPageSize = 4096
+ PCQuantum = 2
+ Int64Align = 8
+ HugePageSize = 0
+ MinFrameSize = 8
+)
+
+type Uintreg uint64
diff -pruN 1.6.3-1/src/runtime/internal/sys/zgoarch_s390x.go 1.6.3-1ubuntu1/src/runtime/internal/sys/zgoarch_s390x.go
--- 1.6.3-1/src/runtime/internal/sys/zgoarch_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/internal/sys/zgoarch_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,26 @@
+// generated by gengoos.go using 'go generate'
+
+package sys
+
+const TheGoarch = `s390x`
+
+const Goarch386 = 0
+const GoarchAmd64 = 0
+const GoarchAmd64p32 = 0
+const GoarchArm = 0
+const GoarchArmbe = 0
+const GoarchArm64 = 0
+const GoarchArm64be = 0
+const GoarchPpc64 = 0
+const GoarchPpc64le = 0
+const GoarchMips = 0
+const GoarchMipsle = 0
+const GoarchMips64 = 0
+const GoarchMips64le = 0
+const GoarchMips64p32 = 0
+const GoarchMips64p32le = 0
+const GoarchPpc = 0
+const GoarchS390 = 0
+const GoarchS390x = 1
+const GoarchSparc = 0
+const GoarchSparc64 = 0
diff -pruN 1.6.3-1/src/runtime/lfstack_linux_s390x.go 1.6.3-1ubuntu1/src/runtime/lfstack_linux_s390x.go
--- 1.6.3-1/src/runtime/lfstack_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/lfstack_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,25 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// In addition to the 16 bits taken from the top, we can take 3 from the
+// bottom, because node must be pointer-aligned, giving a total of 19 bits
+// of count.
+const (
+ addrBits = 48
+ cntBits = 64 - addrBits + 3
+)
+
+func lfstackPack(node *lfnode, cnt uintptr) uint64 {
+ return uint64(uintptr(unsafe.Pointer(node)))<<(64-addrBits) | uint64(cnt&(1<<cntBits-1))
+}
+
+func lfstackUnpack(val uint64) (node *lfnode, cnt uintptr) {
+ node = (*lfnode)(unsafe.Pointer(uintptr(val >> cntBits << 3)))
+ cnt = uintptr(val & (1<<cntBits - 1))
+ return
+}
diff -pruN 1.6.3-1/src/runtime/os1_linux_s390x.go 1.6.3-1ubuntu1/src/runtime/os1_linux_s390x.go
--- 1.6.3-1/src/runtime/os1_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/os1_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,33 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+func sigaddset(mask *sigset, i int) {
+ if i > 64 {
+ throw("unexpected signal greater than 64")
+ }
+ *mask |= 1 << (uint(i) - 1)
+}
+
+func sigdelset(mask *sigset, i int) {
+ if i > 64 {
+ throw("unexpected signal greater than 64")
+ }
+ *mask &^= 1 << (uint(i) - 1)
+}
+
+func sigfillset(mask *uint64) {
+ *mask = ^uint64(0)
+}
+
+func sigcopyset(mask *sigset, m sigmask) {
+ *mask = sigset(uint64(m[0]) | uint64(m[1])<<32)
+}
diff -pruN 1.6.3-1/src/runtime/os2_linux_generic.go 1.6.3-1ubuntu1/src/runtime/os2_linux_generic.go
--- 1.6.3-1/src/runtime/os2_linux_generic.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/os2_linux_generic.go 2016-07-21 13:36:09.000000000 +0000
@@ -4,6 +4,7 @@
// +build !mips64
// +build !mips64le
+// +build !s390x
// +build linux
package runtime
diff -pruN 1.6.3-1/src/runtime/os2_linux_s390x.go 1.6.3-1ubuntu1/src/runtime/os2_linux_s390x.go
--- 1.6.3-1/src/runtime/os2_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/os2_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,22 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ _SS_DISABLE = 2
+ _NSIG = 65
+ _SI_USER = 0
+ _SIG_BLOCK = 0
+ _SIG_UNBLOCK = 1
+ _SIG_SETMASK = 2
+ _RLIMIT_AS = 9
+)
+
+type sigset uint64
+
+type rlimit struct {
+ rlim_cur uintptr
+ rlim_max uintptr
+}
diff -pruN 1.6.3-1/src/runtime/rt0_linux_s390x.s 1.6.3-1ubuntu1/src/runtime/rt0_linux_s390x.s
--- 1.6.3-1/src/runtime/rt0_linux_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/rt0_linux_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT _rt0_s390x_linux(SB),NOSPLIT|NOFRAME,$0
+ // In a statically linked binary, the stack contains argc,
+ // argv as argc string pointers followed by a NULL, envv as a
+ // sequence of string pointers followed by a NULL, and auxv.
+ // There is no TLS base pointer.
+ //
+ // TODO: Support dynamic linking entry point
+ MOVD 0(R15), R2 // argc
+ ADD $8, R15, R3 // argv
+ BR main(SB)
+
+TEXT main(SB),NOSPLIT|NOFRAME,$0
+ MOVD $runtime·rt0_go(SB), R11
+ BR R11
diff -pruN 1.6.3-1/src/runtime/runtime-gdb_test.go 1.6.3-1ubuntu1/src/runtime/runtime-gdb_test.go
--- 1.6.3-1/src/runtime/runtime-gdb_test.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/runtime-gdb_test.go 2016-07-21 13:36:09.000000000 +0000
@@ -107,7 +107,7 @@ func TestGdbPython(t *testing.T) {
// stack frames on RISC architectures.
canBackTrace := false
switch runtime.GOARCH {
- case "amd64", "386", "ppc64", "ppc64le", "arm", "arm64", "mips64", "mips64le":
+ case "amd64", "386", "ppc64", "ppc64le", "arm", "arm64", "mips64", "mips64le", "s390x":
canBackTrace = true
args = append(args,
"-ex", "echo BEGIN goroutine 2 bt\n",
diff -pruN 1.6.3-1/src/runtime/signal_linux_s390x.go 1.6.3-1ubuntu1/src/runtime/signal_linux_s390x.go
--- 1.6.3-1/src/runtime/signal_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/signal_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,50 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+type sigctxt struct {
+ info *siginfo
+ ctxt unsafe.Pointer
+}
+
+func (c *sigctxt) regs() *sigcontext {
+ return (*sigcontext)(unsafe.Pointer(&(*ucontext)(c.ctxt).uc_mcontext))
+}
+func (c *sigctxt) r0() uint64 { return c.regs().gregs[0] }
+func (c *sigctxt) r1() uint64 { return c.regs().gregs[1] }
+func (c *sigctxt) r2() uint64 { return c.regs().gregs[2] }
+func (c *sigctxt) r3() uint64 { return c.regs().gregs[3] }
+func (c *sigctxt) r4() uint64 { return c.regs().gregs[4] }
+func (c *sigctxt) r5() uint64 { return c.regs().gregs[5] }
+func (c *sigctxt) r6() uint64 { return c.regs().gregs[6] }
+func (c *sigctxt) r7() uint64 { return c.regs().gregs[7] }
+func (c *sigctxt) r8() uint64 { return c.regs().gregs[8] }
+func (c *sigctxt) r9() uint64 { return c.regs().gregs[9] }
+func (c *sigctxt) r10() uint64 { return c.regs().gregs[10] }
+func (c *sigctxt) r11() uint64 { return c.regs().gregs[11] }
+func (c *sigctxt) r12() uint64 { return c.regs().gregs[12] }
+func (c *sigctxt) r13() uint64 { return c.regs().gregs[13] }
+func (c *sigctxt) r14() uint64 { return c.regs().gregs[14] }
+func (c *sigctxt) r15() uint64 { return c.regs().gregs[15] }
+func (c *sigctxt) link() uint64 { return c.regs().gregs[14] }
+func (c *sigctxt) sp() uint64 { return c.regs().gregs[15] }
+func (c *sigctxt) pc() uint64 { return c.regs().psw_addr }
+func (c *sigctxt) sigcode() uint32 { return uint32(c.info.si_code) }
+func (c *sigctxt) sigaddr() uint64 { return c.info.si_addr }
+
+func (c *sigctxt) set_r0(x uint64) { c.regs().gregs[0] = x }
+func (c *sigctxt) set_r13(x uint64) { c.regs().gregs[13] = x }
+func (c *sigctxt) set_link(x uint64) { c.regs().gregs[14] = x }
+func (c *sigctxt) set_sp(x uint64) { c.regs().gregs[15] = x }
+func (c *sigctxt) set_pc(x uint64) { c.regs().psw_addr = x }
+func (c *sigctxt) set_sigcode(x uint32) { c.info.si_code = int32(x) }
+func (c *sigctxt) set_sigaddr(x uint64) {
+ *(*uintptr)(add(unsafe.Pointer(c.info), 2*sys.PtrSize)) = uintptr(x)
+}
diff -pruN 1.6.3-1/src/runtime/signal_s390x.go 1.6.3-1ubuntu1/src/runtime/signal_s390x.go
--- 1.6.3-1/src/runtime/signal_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/signal_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,170 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+package runtime
+
+import (
+ "runtime/internal/sys"
+ "unsafe"
+)
+
+func dumpregs(c *sigctxt) {
+ print("r0 ", hex(c.r0()), "\t")
+ print("r1 ", hex(c.r1()), "\n")
+ print("r2 ", hex(c.r2()), "\t")
+ print("r3 ", hex(c.r3()), "\n")
+ print("r4 ", hex(c.r4()), "\t")
+ print("r5 ", hex(c.r5()), "\n")
+ print("r6 ", hex(c.r6()), "\t")
+ print("r7 ", hex(c.r7()), "\n")
+ print("r8 ", hex(c.r8()), "\t")
+ print("r9 ", hex(c.r9()), "\n")
+ print("r10 ", hex(c.r10()), "\t")
+ print("r11 ", hex(c.r11()), "\n")
+ print("r12 ", hex(c.r12()), "\t")
+ print("r13 ", hex(c.r13()), "\n")
+ print("r14 ", hex(c.r14()), "\t")
+ print("r15 ", hex(c.r15()), "\n")
+ print("pc ", hex(c.pc()), "\t")
+ print("link ", hex(c.link()), "\n")
+}
+
+var crashing int32
+
+// May run during STW, so write barriers are not allowed.
+//
+//go:nowritebarrierrec
+func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) {
+ _g_ := getg()
+ c := &sigctxt{info, ctxt}
+
+ if sig == _SIGPROF {
+ sigprof(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp, _g_.m)
+ return
+ }
+ flags := int32(_SigThrow)
+ if sig < uint32(len(sigtable)) {
+ flags = sigtable[sig].flags
+ }
+ if c.sigcode() != _SI_USER && flags&_SigPanic != 0 {
+ // Make it look like a call to the signal func.
+ // Have to pass arguments out of band since
+ // augmenting the stack frame would break
+ // the unwinding code.
+ gp.sig = sig
+ gp.sigcode0 = uintptr(c.sigcode())
+ gp.sigcode1 = uintptr(c.sigaddr())
+ gp.sigpc = uintptr(c.pc())
+
+ // We arrange link, and pc to pretend the panicking
+ // function calls sigpanic directly.
+ // Always save LINK to stack so that panics in leaf
+ // functions are correctly handled. This smashes
+ // the stack frame but we're not going back there
+ // anyway.
+ sp := c.sp() - sys.MinFrameSize
+ c.set_sp(sp)
+ *(*uint64)(unsafe.Pointer(uintptr(sp))) = c.link()
+
+ pc := uintptr(gp.sigpc)
+
+ // If we don't recognize the PC as code
+ // but we do recognize the link register as code,
+ // then assume this was a call to non-code and treat like
+ // pc == 0, to make unwinding show the context.
+ if pc != 0 && findfunc(pc) == nil && findfunc(uintptr(c.link())) != nil {
+ pc = 0
+ }
+
+ // Don't bother saving PC if it's zero, which is
+ // probably a call to a nil func: the old link register
+ // is more useful in the stack trace.
+ if pc != 0 {
+ c.set_link(uint64(pc))
+ }
+
+ // In case we are panicking from external C code
+ c.set_r0(0)
+ c.set_r13(uint64(uintptr(unsafe.Pointer(gp))))
+ c.set_pc(uint64(funcPC(sigpanic)))
+ return
+ }
+
+ if c.sigcode() == _SI_USER || flags&_SigNotify != 0 {
+ if sigsend(sig) {
+ return
+ }
+ }
+
+ if c.sigcode() == _SI_USER && signal_ignored(sig) {
+ return
+ }
+
+ if flags&_SigKill != 0 {
+ dieFromSignal(int32(sig))
+ }
+
+ if flags&_SigThrow == 0 {
+ return
+ }
+
+ _g_.m.throwing = 1
+ _g_.m.caughtsig.set(gp)
+
+ if crashing == 0 {
+ startpanic()
+ }
+
+ if sig < uint32(len(sigtable)) {
+ print(sigtable[sig].name, "\n")
+ } else {
+ print("Signal ", sig, "\n")
+ }
+
+ print("PC=", hex(c.pc()), " m=", _g_.m.id, "\n")
+ if _g_.m.lockedg != nil && _g_.m.ncgo > 0 && gp == _g_.m.g0 {
+ print("signal arrived during cgo execution\n")
+ gp = _g_.m.lockedg
+ }
+ print("\n")
+
+ level, _, docrash := gotraceback()
+ if level > 0 {
+ goroutineheader(gp)
+ tracebacktrap(uintptr(c.pc()), uintptr(c.sp()), uintptr(c.link()), gp)
+ if crashing > 0 && gp != _g_.m.curg && _g_.m.curg != nil && readgstatus(_g_.m.curg)&^_Gscan == _Grunning {
+ // tracebackothers on original m skipped this one; trace it now.
+ goroutineheader(_g_.m.curg)
+ traceback(^uintptr(0), ^uintptr(0), 0, gp)
+ } else if crashing == 0 {
+ tracebackothers(gp)
+ print("\n")
+ }
+ dumpregs(c)
+ }
+
+ if docrash {
+ crashing++
+ if crashing < sched.mcount {
+ // There are other m's that need to dump their stacks.
+ // Relay SIGQUIT to the next m by sending it to the current process.
+ // All m's that have already received SIGQUIT have signal masks blocking
+ // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet.
+ // When the last m receives the SIGQUIT, it will fall through to the call to
+ // crash below. Just in case the relaying gets botched, each m involved in
+ // the relay sleeps for 5 seconds and then does the crash/exit itself.
+ // In expected operation, the last m has received the SIGQUIT and run
+ // crash/exit and the process is gone, all long before any of the
+ // 5-second sleeps have finished.
+ print("\n-----\n\n")
+ raiseproc(_SIGQUIT)
+ usleep(5 * 1000 * 1000)
+ }
+ crash()
+ }
+
+ exit(2)
+}
diff -pruN 1.6.3-1/src/runtime/sys_linux_s390x.s 1.6.3-1ubuntu1/src/runtime/sys_linux_s390x.s
--- 1.6.3-1/src/runtime/sys_linux_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/sys_linux_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,440 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// System calls and other system stuff for Linux s390x; see
+// /usr/include/asm-s390/unistd.h for the syscall number definitions.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "textflag.h"
+
+#define SYS_exit 1
+#define SYS_read 3
+#define SYS_write 4
+#define SYS_open 5
+#define SYS_close 6
+#define SYS_getpid 20
+#define SYS_kill 37
+#define SYS_fcntl 55
+#define SYS_gettimeofday 78
+#define SYS_select 142 // always returns -ENOSYS on s390x; use SYS_newselect
+#define SYS_mmap 90
+#define SYS_munmap 91
+#define SYS_setitimer 104
+#define SYS_clone 120
+#define SYS_newselect 142
+#define SYS_sched_yield 158
+#define SYS_rt_sigreturn 173
+#define SYS_rt_sigaction 174
+#define SYS_rt_sigprocmask 175
+#define SYS_sigaltstack 186
+#define SYS_ugetrlimit 191
+#define SYS_madvise 219
+#define SYS_mincore 218
+#define SYS_gettid 236
+#define SYS_tkill 237
+#define SYS_futex 238
+#define SYS_sched_getaffinity 240
+#define SYS_exit_group 248
+#define SYS_epoll_create 249
+#define SYS_epoll_ctl 250
+#define SYS_epoll_wait 251
+#define SYS_clock_gettime 260
+#define SYS_epoll_create1 327
+
+TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW code+0(FP), R2
+ MOVW $SYS_exit_group, R1
+ SYSCALL
+ RET
+
+TEXT runtime·exit1(SB),NOSPLIT|NOFRAME,$0-4
+ MOVW code+0(FP), R2
+ MOVW $SYS_exit, R1
+ SYSCALL
+ RET
+
+TEXT runtime·open(SB),NOSPLIT|NOFRAME,$0-20
+ MOVD name+0(FP), R2
+ MOVW mode+8(FP), R3
+ MOVW perm+12(FP), R4
+ MOVW $SYS_open, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVW $-1, R2
+ MOVW R2, ret+16(FP)
+ RET
+
+TEXT runtime·closefd(SB),NOSPLIT|NOFRAME,$0-12
+ MOVW fd+0(FP), R2
+ MOVW $SYS_close, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVW $-1, R2
+ MOVW R2, ret+8(FP)
+ RET
+
+TEXT runtime·write(SB),NOSPLIT|NOFRAME,$0-28
+ MOVD fd+0(FP), R2
+ MOVD p+8(FP), R3
+ MOVW n+16(FP), R4
+ MOVW $SYS_write, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVW $-1, R2
+ MOVW R2, ret+24(FP)
+ RET
+
+TEXT runtime·read(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW fd+0(FP), R2
+ MOVD p+8(FP), R3
+ MOVW n+16(FP), R4
+ MOVW $SYS_read, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVW $-1, R2
+ MOVW R2, ret+24(FP)
+ RET
+
+TEXT runtime·getrlimit(SB),NOSPLIT|NOFRAME,$0-20
+ MOVW kind+0(FP), R2
+ MOVD limit+8(FP), R3
+ MOVW $SYS_ugetrlimit, R1
+ SYSCALL
+ MOVW R2, ret+16(FP)
+ RET
+
+TEXT runtime·usleep(SB),NOSPLIT,$16-4
+ MOVW usec+0(FP), R2
+ MOVD R2, R4
+ MOVW $1000000, R3
+ DIVD R3, R2
+ MOVD R2, 8(R15)
+ MULLD R2, R3
+ SUB R3, R4
+ MOVD R4, 16(R15)
+
+ // select(0, 0, 0, 0, &tv)
+ MOVW $0, R2
+ MOVW $0, R3
+ MOVW $0, R4
+ MOVW $0, R5
+ ADD $8, R15, R6
+ MOVW $SYS_newselect, R1
+ SYSCALL
+ RET
+
+TEXT runtime·gettid(SB),NOSPLIT,$0-4
+ MOVW $SYS_gettid, R1
+ SYSCALL
+ MOVW R2, ret+0(FP)
+ RET
+
+TEXT runtime·raise(SB),NOSPLIT|NOFRAME,$0
+ MOVW $SYS_gettid, R1
+ SYSCALL
+ MOVW R2, R2 // arg 1 tid
+ MOVW sig+0(FP), R3 // arg 2
+ MOVW $SYS_tkill, R1
+ SYSCALL
+ RET
+
+TEXT runtime·raiseproc(SB),NOSPLIT|NOFRAME,$0
+ MOVW $SYS_getpid, R1
+ SYSCALL
+ MOVW R2, R2 // arg 1 pid
+ MOVW sig+0(FP), R3 // arg 2
+ MOVW $SYS_kill, R1
+ SYSCALL
+ RET
+
+TEXT runtime·setitimer(SB),NOSPLIT|NOFRAME,$0-24
+ MOVW mode+0(FP), R2
+ MOVD new+8(FP), R3
+ MOVD old+16(FP), R4
+ MOVW $SYS_setitimer, R1
+ SYSCALL
+ RET
+
+TEXT runtime·mincore(SB),NOSPLIT|NOFRAME,$0-28
+ MOVD addr+0(FP), R2
+ MOVD n+8(FP), R3
+ MOVD dst+16(FP), R4
+ MOVW $SYS_mincore, R1
+ SYSCALL
+ MOVW R2, ret+24(FP)
+ RET
+
+// func now() (sec int64, nsec int32)
+TEXT time·now(SB),NOSPLIT,$16
+ MOVD $0(R15), R2
+ MOVD $0, R3
+ MOVW $SYS_gettimeofday, R1
+ SYSCALL
+ MOVD 0(R15), R2 // sec
+ MOVD 8(R15), R4 // usec
+ MOVD $1000, R3
+ MULLD R3, R4
+ MOVD R2, sec+0(FP)
+ MOVW R4, nsec+8(FP)
+ RET
+
+TEXT runtime·nanotime(SB),NOSPLIT,$16
+ MOVW $1, R2 // CLOCK_MONOTONIC
+ MOVD $0(R15), R3
+ MOVW $SYS_clock_gettime, R1
+ SYSCALL
+ MOVD 0(R15), R2 // sec
+ MOVD 8(R15), R4 // nsec
+ // sec is in R2, nsec in R4
+ // return nsec in R2
+ MOVD $1000000000, R3
+ MULLD R3, R2
+ ADD R4, R2
+ MOVD R2, ret+0(FP)
+ RET
+
+TEXT runtime·rtsigprocmask(SB),NOSPLIT|NOFRAME,$0-28
+ MOVW sig+0(FP), R2
+ MOVD new+8(FP), R3
+ MOVD old+16(FP), R4
+ MOVW size+24(FP), R5
+ MOVW $SYS_rt_sigprocmask, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVD R0, 0(R0) // crash
+ RET
+
+TEXT runtime·rt_sigaction(SB),NOSPLIT|NOFRAME,$0-36
+ MOVD sig+0(FP), R2
+ MOVD new+8(FP), R3
+ MOVD old+16(FP), R4
+ MOVD size+24(FP), R5
+ MOVW $SYS_rt_sigaction, R1
+ SYSCALL
+ MOVW R2, ret+32(FP)
+ RET
+
+TEXT runtime·sigfwd(SB),NOSPLIT,$0-32
+ MOVW sig+8(FP), R2
+ MOVD info+16(FP), R3
+ MOVD ctx+24(FP), R4
+ MOVD fn+0(FP), R5
+ BL R5
+ RET
+
+TEXT runtime·sigtramp(SB),NOSPLIT,$64
+ // initialize essential registers (just in case)
+ XOR R0, R0
+
+ // this might be called in external code context,
+ // where g is not set.
+ MOVB runtime·iscgo(SB), R6
+ CMPBEQ R6, $0, 2(PC)
+ BL runtime·load_g(SB)
+
+ MOVW R2, 8(R15)
+ MOVD R3, 16(R15)
+ MOVD R4, 24(R15)
+ MOVD $runtime·sigtrampgo(SB), R5
+ BL R5
+ RET
+
+// func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
+TEXT runtime·mmap(SB),NOSPLIT,$48-40
+ MOVD addr+0(FP), R2
+ MOVD n+8(FP), R3
+ MOVW prot+16(FP), R4
+ MOVW flags+20(FP), R5
+ MOVW fd+24(FP), R6
+ MOVWZ off+28(FP), R7
+
+ // s390x uses old_mmap, so the arguments need to be placed into
+ // a struct and a pointer to the struct passed to mmap.
+ MOVD R2, addr-48(SP)
+ MOVD R3, n-40(SP)
+ MOVD R4, prot-32(SP)
+ MOVD R5, flags-24(SP)
+ MOVD R6, fd-16(SP)
+ MOVD R7, off-8(SP)
+
+ MOVD $addr-48(SP), R2
+ MOVW $SYS_mmap, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ NEG R2
+ MOVD R2, ret+32(FP)
+ RET
+
+TEXT runtime·munmap(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R2
+ MOVD n+8(FP), R3
+ MOVW $SYS_munmap, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVD R0, 0(R0) // crash
+ RET
+
+TEXT runtime·madvise(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R2
+ MOVD n+8(FP), R3
+ MOVW flags+16(FP), R4
+ MOVW $SYS_madvise, R1
+ SYSCALL
+ // ignore failure - maybe pages are locked
+ RET
+
+// int64 futex(int32 *uaddr, int32 op, int32 val,
+// struct timespec *timeout, int32 *uaddr2, int32 val2);
+TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0
+ MOVD addr+0(FP), R2
+ MOVW op+8(FP), R3
+ MOVW val+12(FP), R4
+ MOVD ts+16(FP), R5
+ MOVD addr2+24(FP), R6
+ MOVW val3+32(FP), R7
+ MOVW $SYS_futex, R1
+ SYSCALL
+ MOVW R2, ret+40(FP)
+ RET
+
+// int32 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void));
+TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0
+ MOVW flags+0(FP), R3
+ MOVD stk+8(FP), R2
+
+ // Copy mp, gp, fn off parent stack for use by child.
+ // Careful: Linux system call clobbers ???.
+ MOVD mm+16(FP), R7
+ MOVD gg+24(FP), R8
+ MOVD fn+32(FP), R9
+
+ MOVD R7, -8(R2)
+ MOVD R8, -16(R2)
+ MOVD R9, -24(R2)
+ MOVD $1234, R7
+ MOVD R7, -32(R2)
+
+ SYSCALL $SYS_clone
+
+ // In parent, return.
+ CMPBEQ R2, $0, 3(PC)
+ MOVW R2, ret+40(FP)
+ RET
+
+ // In child, on new stack.
+ // initialize essential registers
+ XOR R0, R0
+ MOVD -32(R15), R7
+ CMP R7, $1234
+ BEQ 2(PC)
+ MOVD R0, 0(R0)
+
+ // Initialize m->procid to Linux tid
+ SYSCALL $SYS_gettid
+
+ MOVD -24(R15), R9 // fn
+ MOVD -16(R15), R8 // g
+ MOVD -8(R15), R7 // m
+
+ CMPBEQ R7, $0, nog
+ CMP R8, $0
+ BEQ nog
+
+ MOVD R2, m_procid(R7)
+
+ // TODO: setup TLS.
+
+ // In child, set up new stack
+ MOVD R7, g_m(R8)
+ MOVD R8, g
+ //CALL runtime·stackcheck(SB)
+
+nog:
+ // Call fn
+ BL R9
+
+ // It shouldn't return. If it does, exit that thread.
+ MOVW $111, R2
+ MOVW $SYS_exit, R1
+ SYSCALL
+ BR -2(PC) // keep exiting
+
+TEXT runtime·sigaltstack(SB),NOSPLIT|NOFRAME,$0
+ MOVD new+0(FP), R2
+ MOVD old+8(FP), R3
+ MOVW $SYS_sigaltstack, R1
+ SYSCALL
+ MOVD $-4095, R3
+ CMPUBLT R2, R3, 2(PC)
+ MOVD R0, 0(R0) // crash
+ RET
+
+TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0
+ MOVW $SYS_sched_yield, R1
+ SYSCALL
+ RET
+
+TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0
+ MOVD pid+0(FP), R2
+ MOVD len+8(FP), R3
+ MOVD buf+16(FP), R4
+ MOVW $SYS_sched_getaffinity, R1
+ SYSCALL
+ MOVW R2, ret+24(FP)
+ RET
+
+// int32 runtime·epollcreate(int32 size);
+TEXT runtime·epollcreate(SB),NOSPLIT|NOFRAME,$0
+ MOVW size+0(FP), R2
+ MOVW $SYS_epoll_create, R1
+ SYSCALL
+ MOVW R2, ret+8(FP)
+ RET
+
+// int32 runtime·epollcreate1(int32 flags);
+TEXT runtime·epollcreate1(SB),NOSPLIT|NOFRAME,$0
+ MOVW flags+0(FP), R2
+ MOVW $SYS_epoll_create1, R1
+ SYSCALL
+ MOVW R2, ret+8(FP)
+ RET
+
+// func epollctl(epfd, op, fd int32, ev *epollEvent) int
+TEXT runtime·epollctl(SB),NOSPLIT|NOFRAME,$0
+ MOVW epfd+0(FP), R2
+ MOVW op+4(FP), R3
+ MOVW fd+8(FP), R4
+ MOVD ev+16(FP), R5
+ MOVW $SYS_epoll_ctl, R1
+ SYSCALL
+ MOVW R2, ret+24(FP)
+ RET
+
+// int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
+TEXT runtime·epollwait(SB),NOSPLIT|NOFRAME,$0
+ MOVW epfd+0(FP), R2
+ MOVD ev+8(FP), R3
+ MOVW nev+16(FP), R4
+ MOVW timeout+20(FP), R5
+ MOVW $SYS_epoll_wait, R1
+ SYSCALL
+ MOVW R2, ret+24(FP)
+ RET
+
+// void runtime·closeonexec(int32 fd);
+TEXT runtime·closeonexec(SB),NOSPLIT|NOFRAME,$0
+ MOVW fd+0(FP), R2 // fd
+ MOVD $2, R3 // F_SETFD
+ MOVD $1, R4 // FD_CLOEXEC
+ MOVW $SYS_fcntl, R1
+ SYSCALL
+ RET
diff -pruN 1.6.3-1/src/runtime/sys_s390x.go 1.6.3-1ubuntu1/src/runtime/sys_s390x.go
--- 1.6.3-1/src/runtime/sys_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/sys_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,48 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ throw("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+func rewindmorestack(buf *gobuf) {
+ var inst uint64
+ if buf.pc&1 == 0 && buf.pc != 0 {
+ inst = *(*uint64)(unsafe.Pointer(buf.pc))
+ //print("runtime: rewind pc=", hex(buf.pc), " to pc=", hex(inst), "\n");
+ if inst>>48 == 0xa7f4 {
+ inst >>= 32
+ inst &= 0xFFFF
+ offset := int64(int16(inst))
+ offset <<= 1
+ buf.pc += uintptr(offset)
+ return
+ } else if inst>>48 == 0xc0f4 {
+ inst >>= 16
+ inst = inst & 0xFFFFFFFF
+ //print("runtime: rewind inst1 = ",hex(inst),"\n")
+ inst = (inst << 1) & 0xFFFFFFFF
+ //print("runtime: rewind inst2 = ",hex(inst),"\n")
+ buf.pc += uintptr(int32(inst))
+ //print("runtime: rewind pc = ",hex(buf.pc),"\n")
+ return
+ }
+ }
+ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+ throw("runtime: misuse of rewindmorestack")
+}
diff -pruN 1.6.3-1/src/runtime/tls_s390x.s 1.6.3-1ubuntu1/src/runtime/tls_s390x.s
--- 1.6.3-1/src/runtime/tls_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/tls_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "go_asm.h"
+#include "go_tls.h"
+#include "funcdata.h"
+#include "textflag.h"
+
+// We have to resort to a TLS variable to save g (R13).
+// One reason is that external code might trigger
+// SIGSEGV, and our runtime.sigtramp doesn't even know we
+// are in external code; it will continue to use R13,
+// which might well result in another SIGSEGV.
+
+// save_g saves the g register into pthread-provided
+// thread-local memory, so that we can call externally compiled
+// s390x code that will overwrite this register.
+//
+// If !iscgo, this is a no-op.
+//
+// NOTE: setg_gcc<> assumes this clobbers only R10 and R11.
+TEXT runtime·save_g(SB),NOSPLIT|NOFRAME,$0-0
+ MOVB runtime·iscgo(SB), R10
+ CMPBEQ R10, $0, nocgo
+ MOVW AR0, R11
+ SLD $32, R11
+ MOVW AR1, R11
+ MOVD runtime·tls_g(SB), R10
+ MOVD g, 0(R10)(R11*1)
+nocgo:
+ RET
+
+// load_g loads the g register from pthread-provided
+// thread-local memory, for use after calling externally compiled
+// s390x code that overwrote those registers.
+//
+// This is never called directly from C code (it doesn't have to
+// follow the C ABI), but it may be called from a C context, where the
+// usual Go registers aren't set up.
+//
+// NOTE: _cgo_topofstack assumes this only clobbers g (R13), R10 and R11.
+TEXT runtime·load_g(SB),NOSPLIT|NOFRAME,$0-0
+ MOVW AR0, R11
+ SLD $32, R11
+ MOVW AR1, R11
+ MOVD runtime·tls_g(SB), R10
+ MOVD 0(R10)(R11*1), g
+ RET
+
+GLOBL runtime·tls_g+0(SB),TLSBSS,$8
diff -pruN 1.6.3-1/src/runtime/unaligned1.go 1.6.3-1ubuntu1/src/runtime/unaligned1.go
--- 1.6.3-1/src/runtime/unaligned1.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/runtime/unaligned1.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build 386 amd64 amd64p32 arm64
+// +build 386 amd64 amd64p32 arm64 s390x
package runtime
diff -pruN 1.6.3-1/src/sync/atomic/asm_s390x.s 1.6.3-1ubuntu1/src/sync/atomic/asm_s390x.s
--- 1.6.3-1/src/sync/atomic/asm_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/sync/atomic/asm_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,143 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·SwapInt32(SB),NOSPLIT,$0-20
+ BR ·SwapUint32(SB)
+
+TEXT ·SwapUint32(SB),NOSPLIT,$0-20
+ MOVD addr+0(FP), R3
+ MOVWZ new+8(FP), R4
+ MOVWZ (R3), R5
+repeat:
+ CS R5, R4, (R3) // if (R3)==R5 then (R3)=R4 else R5=(R3)
+ BNE repeat
+ MOVW R5, old+16(FP)
+ RET
+
+TEXT ·SwapInt64(SB),NOSPLIT,$0-24
+ BR ·SwapUint64(SB)
+
+TEXT ·SwapUint64(SB),NOSPLIT,$0-24
+ MOVD addr+0(FP), R3
+ MOVD new+8(FP), R4
+ MOVD (R3), R5
+repeat:
+ CSG R5, R4, (R3) // if (R3)==R5 then (R3)=R4 else R5=(R3)
+ BNE repeat
+ MOVD R5, old+16(FP)
+ RET
+
+TEXT ·SwapUintptr(SB),NOSPLIT,$0-24
+ BR ·SwapUint64(SB)
+
+TEXT ·CompareAndSwapInt32(SB),NOSPLIT,$0-17
+ BR ·CompareAndSwapUint32(SB)
+
+TEXT ·CompareAndSwapUint32(SB),NOSPLIT,$0-17
+ MOVD ptr+0(FP), R3
+ MOVWZ old+8(FP), R4
+ MOVWZ new+12(FP), R5
+ CS R4, R5, 0(R3) // if R4==(R3) then (R3)=R5 else R4=(R3)
+ BNE cas_fail
+ MOVB $1, ret+16(FP)
+ RET
+cas_fail:
+ MOVB $0, ret+16(FP)
+ RET
+
+TEXT ·CompareAndSwapUintptr(SB),NOSPLIT,$0-25
+ BR ·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapInt64(SB),NOSPLIT,$0-25
+ BR ·CompareAndSwapUint64(SB)
+
+TEXT ·CompareAndSwapUint64(SB),NOSPLIT,$0-25
+ MOVD ptr+0(FP), R3
+ MOVD old+8(FP), R4
+ MOVD new+16(FP), R5
+ CSG R4, R5, 0(R3) // if R4==(R3) then (R3)=R5 else R4=(R3)
+ BNE cas64_fail
+ MOVB $1, ret+24(FP)
+ RET
+cas64_fail:
+ MOVB $0, ret+24(FP)
+ RET
+
+TEXT ·AddInt32(SB),NOSPLIT,$0-20
+ BR ·AddUint32(SB)
+
+TEXT ·AddUint32(SB),NOSPLIT,$0-20
+ MOVD ptr+0(FP), R4
+ MOVWZ delta+8(FP), R5
+ MOVWZ (R4), R3
+repeat:
+ ADD R3, R5, R6
+ CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
+ BNE repeat
+ MOVW R6, ret+16(FP)
+ RET
+
+TEXT ·AddUintptr(SB),NOSPLIT,$0-24
+ BR ·AddUint64(SB)
+
+TEXT ·AddInt64(SB),NOSPLIT,$0-24
+ BR ·AddUint64(SB)
+
+TEXT ·AddUint64(SB),NOSPLIT,$0-24
+ MOVD ptr+0(FP), R4
+ MOVD delta+8(FP), R5
+ MOVD (R4), R3
+repeat:
+ ADD R3, R5, R6
+ CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4)
+ BNE repeat
+ MOVD R6, ret+16(FP)
+ RET
+
+TEXT ·LoadInt32(SB),NOSPLIT,$0-12
+ BR ·LoadUint32(SB)
+
+TEXT ·LoadUint32(SB),NOSPLIT,$0-12
+ MOVD addr+0(FP), R3
+ MOVW 0(R3), R4
+ MOVW R4, val+8(FP)
+ RET
+
+TEXT ·LoadInt64(SB),NOSPLIT,$0-16
+ BR ·LoadUint64(SB)
+
+TEXT ·LoadUint64(SB),NOSPLIT,$0-16
+ MOVD addr+0(FP), R3
+ MOVD 0(R3), R4
+ MOVD R4, val+8(FP)
+ RET
+
+TEXT ·LoadUintptr(SB),NOSPLIT,$0-16
+ BR ·LoadPointer(SB)
+
+TEXT ·LoadPointer(SB),NOSPLIT,$0-16
+ BR ·LoadUint64(SB)
+
+TEXT ·StoreInt32(SB),NOSPLIT,$0-12
+ BR ·StoreUint32(SB)
+
+TEXT ·StoreUint32(SB),NOSPLIT,$0-12
+ MOVD ptr+0(FP), R3
+ MOVW val+8(FP), R4
+ MOVW R4, 0(R3)
+ RET
+
+TEXT ·StoreInt64(SB),NOSPLIT,$0-16
+ BR ·StoreUint64(SB)
+
+TEXT ·StoreUint64(SB),NOSPLIT,$0-16
+ MOVD addr+0(FP), R3
+ MOVD val+8(FP), R4
+ MOVD R4, 0(R3)
+ RET
+
+TEXT ·StoreUintptr(SB),NOSPLIT,$0-16
+ BR ·StoreUint64(SB)
diff -pruN 1.6.3-1/src/syscall/asm_linux_s390x.s 1.6.3-1ubuntu1/src/syscall/asm_linux_s390x.s
--- 1.6.3-1/src/syscall/asm_linux_s390x.s 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/asm_linux_s390x.s 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,156 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+//
+// System calls for s390x, Linux
+//
+
+// func Syscall(trap int64, a1, a2, a3 int64) (r1, r2, err int64)
+TEXT ·Syscall(SB),NOSPLIT,$0-56
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok
+ MOVD $-1, r1+32(FP)
+ MOVD $0, r2+40(FP)
+ NEG R2, R2
+ MOVD R2, err+48(FP) // errno
+ BL runtime·exitsyscall(SB)
+ RET
+ok:
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ MOVD $0, err+48(FP) // errno
+ BL runtime·exitsyscall(SB)
+ RET
+
+// func Syscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+TEXT ·Syscall6(SB),NOSPLIT,$0-80
+ BL runtime·entersyscall(SB)
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD a4+32(FP), R5
+ MOVD a5+40(FP), R6
+ MOVD a6+48(FP), R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok6
+ MOVD $-1, r1+56(FP)
+ MOVD $0, r2+64(FP)
+ NEG R2, R2
+ MOVD R2, err+72(FP) // errno
+ BL runtime·exitsyscall(SB)
+ RET
+ok6:
+ MOVD R2, r1+56(FP)
+ MOVD R3, r2+64(FP)
+ MOVD $0, err+72(FP) // errno
+ BL runtime·exitsyscall(SB)
+ RET
+
+// func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2, err uintptr)
+TEXT ·RawSyscall(SB),NOSPLIT,$0-56
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok1
+ MOVD $-1, r1+32(FP)
+ MOVD $0, r2+40(FP)
+ NEG R2, R2
+ MOVD R2, err+48(FP) // errno
+ RET
+ok1:
+ MOVD R2, r1+32(FP)
+ MOVD R3, r2+40(FP)
+ MOVD $0, err+48(FP) // errno
+ RET
+
+// func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr)
+TEXT ·RawSyscall6(SB),NOSPLIT,$0-80
+ MOVD a1+8(FP), R2
+ MOVD a2+16(FP), R3
+ MOVD a3+24(FP), R4
+ MOVD a4+32(FP), R5
+ MOVD a5+40(FP), R6
+ MOVD a6+48(FP), R7
+ MOVD trap+0(FP), R1 // syscall entry
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, ok2
+ MOVD $-1, r1+56(FP)
+ MOVD $0, r2+64(FP)
+ NEG R2, R2
+ MOVD R2, err+72(FP) // errno
+ RET
+ok2:
+ MOVD R2, r1+56(FP)
+ MOVD R3, r2+64(FP)
+ MOVD $0, err+72(FP) // errno
+ RET
+
+#define SYS_SOCKETCALL 102 /* from zsysnum_linux_s390x.go */
+
+// func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err int)
+// Kernel interface gets call sub-number and pointer to a0.
+TEXT ·socketcall(SB),NOSPLIT,$0-72
+ BL runtime·entersyscall(SB)
+ MOVD $SYS_SOCKETCALL, R1 // syscall entry
+ MOVD call+0(FP), R2 // socket call number
+ MOVD $a0+8(FP), R3 // pointer to call arguments
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, oksock
+ MOVD $-1, n+56(FP)
+ NEG R2, R2
+ MOVD R2, err+64(FP)
+ BL runtime·exitsyscall(SB)
+ RET
+oksock:
+ MOVD R2, n+56(FP)
+ MOVD $0, err+64(FP)
+ CALL runtime·exitsyscall(SB)
+ RET
+
+// func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err int)
+// Kernel interface gets call sub-number and pointer to a0.
+TEXT ·rawsocketcall(SB),NOSPLIT,$0-72
+ MOVD $SYS_SOCKETCALL, R1 // syscall entry
+ MOVD call+0(FP), R2 // socket call number
+ MOVD $a0+8(FP), R3 // pointer to call arguments
+ MOVD $0, R4
+ MOVD $0, R5
+ MOVD $0, R6
+ MOVD $0, R7
+ SYSCALL
+ MOVD $0xfffffffffffff001, R8
+ CMPUBLT R2, R8, oksock1
+ MOVD $-1, n+56(FP)
+ NEG R2, R2
+ MOVD R2, err+64(FP)
+ RET
+oksock1:
+ MOVD R2, n+56(FP)
+ MOVD $0, err+64(FP)
+ RET
diff -pruN 1.6.3-1/src/syscall/exec_linux.go 1.6.3-1ubuntu1/src/syscall/exec_linux.go
--- 1.6.3-1/src/syscall/exec_linux.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/exec_linux.go 2016-07-21 13:36:09.000000000 +0000
@@ -7,6 +7,7 @@
package syscall
import (
+ "runtime"
"unsafe"
)
@@ -93,7 +94,11 @@ func forkAndExecInChild(argv0 *byte, arg
// About to call fork.
// No more allocation or calls of non-assembly functions.
runtime_BeforeFork()
- r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
+ if runtime.GOARCH == "s390x" {
+ r1, _, err1 = RawSyscall6(SYS_CLONE, 0, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0)
+ } else {
+ r1, _, err1 = RawSyscall6(SYS_CLONE, uintptr(SIGCHLD)|sys.Cloneflags, 0, 0, 0, 0, 0)
+ }
if err1 != 0 {
runtime_AfterFork()
return 0, err1
diff -pruN 1.6.3-1/src/syscall/mkall.sh 1.6.3-1ubuntu1/src/syscall/mkall.sh
--- 1.6.3-1/src/syscall/mkall.sh 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/mkall.sh 2016-07-21 13:36:09.000000000 +0000
@@ -207,6 +207,13 @@ linux_ppc64le)
mksysnum="./mksysnum_linux.pl $unistd_h"
mktypes="GOARCH=$GOARCH go tool cgo -godefs"
;;
+linux_s390x)
+ GOOSARCH_in=syscall_linux_s390x.go
+ unistd_h=/usr/include/asm/unistd.h
+ mkerrors="$mkerrors -m64"
+ mksysnum="./mksysnum_linux.pl $unistd_h"
+ mktypes="GOARCH=$GOARCH go tool cgo -godefs"
+ ;;
nacl_386)
mkerrors=""
mksyscall="./mksyscall.pl -l32 -nacl"
@@ -288,5 +295,5 @@ esac
if [ -n "$mksyscall" ]; then echo "$mksyscall $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi
if [ -n "$mksysctl" ]; then echo "$mksysctl |gofmt >$zsysctl"; fi
if [ -n "$mksysnum" ]; then echo "$mksysnum |gofmt >zsysnum_$GOOSARCH.go"; fi
- if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |gofmt >ztypes_$GOOSARCH.go"; fi
+ if [ -n "$mktypes" ]; then echo "$mktypes types_$GOOS.go |go run mkpost.go >ztypes_$GOOSARCH.go"; fi
) | $run
diff -pruN 1.6.3-1/src/syscall/mkpost.go 1.6.3-1ubuntu1/src/syscall/mkpost.go
--- 1.6.3-1/src/syscall/mkpost.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/mkpost.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,63 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// mkpost processes the output of cgo -godefs to
+// modify the generated types. It is used to clean up
+// the syscall API in an architecture specific manner.
+//
+// mkpost is run after cgo -godefs by mkall.sh.
+package main
+
+import (
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "os"
+ "regexp"
+)
+
+func main() {
+ b, err := ioutil.ReadAll(os.Stdin)
+ if err != nil {
+ log.Fatal(err)
+ }
+ s := string(b)
+
+ goarch := os.Getenv("GOARCH")
+ goos := os.Getenv("GOOS")
+ if goarch == "s390x" && goos == "linux" {
+ // Export the types of PtraceRegs fields.
+ re := regexp.MustCompile("ptrace(Psw|Fpregs|Per)")
+ s = re.ReplaceAllString(s, "Ptrace$1")
+
+ // Replace padding fields inserted by cgo with blank identifiers.
+ re = regexp.MustCompile("Pad_cgo[A-Za-z0-9_]*")
+ s = re.ReplaceAllString(s, "_")
+
+ // Replace other unwanted fields with blank identifiers.
+ re = regexp.MustCompile("X_[A-Za-z0-9_]*")
+ s = re.ReplaceAllString(s, "_")
+
+ // Force the type of RawSockaddr.Data to [14]int8 to match
+ // the existing gccgo API.
+ re = regexp.MustCompile("(Data\\s+\\[14\\])uint8")
+ s = re.ReplaceAllString(s, "${1}int8")
+ }
+
+ // gofmt
+ b, err = format.Source([]byte(s))
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Append this command to the header to show where the new file
+ // came from.
+ re := regexp.MustCompile("(cgo -godefs [a-zA-Z0-9_]+\\.go.*)")
+ s = re.ReplaceAllString(string(b), "$1 | go run mkpost.go")
+
+ fmt.Print(s)
+}
diff -pruN 1.6.3-1/src/syscall/mksyscall.pl 1.6.3-1ubuntu1/src/syscall/mksyscall.pl
--- 1.6.3-1/src/syscall/mksyscall.pl 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/mksyscall.pl 2016-07-21 13:36:09.000000000 +0000
@@ -100,7 +100,7 @@ while(<>) {
# Line must be of the form
# func Open(path string, mode int, perm int) (fd int, errno error)
# Split into name, in params, out params.
- if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$/) {
+ if(!/^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)_?SYS_[A-Z0-9_]+))?$/) {
print STDERR "$ARGV:$.: malformed //sys declaration\n";
$errors = 1;
next;
diff -pruN 1.6.3-1/src/syscall/syscall_linux.go 1.6.3-1ubuntu1/src/syscall/syscall_linux.go
--- 1.6.3-1/src/syscall/syscall_linux.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/syscall_linux.go 2016-07-21 13:36:09.000000000 +0000
@@ -301,7 +301,8 @@ func (sa *SockaddrUnix) sockaddr() (unsa
}
sa.raw.Family = AF_UNIX
for i := 0; i < n; i++ {
- sa.raw.Path[i] = int8(name[i])
+ bp := (*byte)(unsafe.Pointer(&sa.raw.Path[i]))
+ *bp = name[i]
}
// length is family (uint16), name, NUL.
sl := _Socklen(2)
diff -pruN 1.6.3-1/src/syscall/syscall_linux_s390x.go 1.6.3-1ubuntu1/src/syscall/syscall_linux_s390x.go
--- 1.6.3-1/src/syscall/syscall_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/syscall_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,299 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syscall
+
+import "unsafe"
+
+const (
+ _SYS_dup = SYS_DUP2
+ _SYS_getdents = SYS_GETDENTS64
+)
+
+//sys Dup2(oldfd int, newfd int) (err error)
+//sys Fchown(fd int, uid int, gid int) (err error)
+//sys Fstat(fd int, stat *Stat_t) (err error)
+//sys Fstatfs(fd int, buf *Statfs_t) (err error)
+//sys Ftruncate(fd int, length int64) (err error)
+//sysnb Getegid() (egid int)
+//sysnb Geteuid() (euid int)
+//sysnb Getgid() (gid int)
+//sysnb Getrlimit(resource int, rlim *Rlimit) (err error) = SYS_GETRLIMIT
+//sysnb Getuid() (uid int)
+//sysnb InotifyInit() (fd int, err error)
+//sys Lchown(path string, uid int, gid int) (err error)
+//sys Lstat(path string, stat *Stat_t) (err error)
+//sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
+//sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
+//sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
+//sys Setfsgid(gid int) (err error)
+//sys Setfsuid(uid int) (err error)
+//sysnb Setregid(rgid int, egid int) (err error)
+//sysnb Setresgid(rgid int, egid int, sgid int) (err error)
+//sysnb Setresuid(ruid int, euid int, suid int) (err error)
+//sysnb Setrlimit(resource int, rlim *Rlimit) (err error)
+//sysnb Setreuid(ruid int, euid int) (err error)
+//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error)
+//sys Stat(path string, stat *Stat_t) (err error)
+//sys Statfs(path string, buf *Statfs_t) (err error)
+//sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) = SYS_SYNC_FILE_RANGE
+//sys Truncate(path string, length int64) (err error)
+//sysnb getgroups(n int, list *_Gid_t) (nn int, err error)
+//sysnb setgroups(n int, list *_Gid_t) (err error)
+
+func Getpagesize() int { return 4096 }
+
+//sysnb Gettimeofday(tv *Timeval) (err error)
+
+func Time(t *Time_t) (tt Time_t, err error) {
+ var tv Timeval
+ err = Gettimeofday(&tv)
+ if err != nil {
+ return 0, err
+ }
+ if t != nil {
+ *t = Time_t(tv.Sec)
+ }
+ return Time_t(tv.Sec), nil
+}
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+ ts.Sec = nsec / 1e9
+ ts.Nsec = nsec % 1e9
+ return
+}
+
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+ nsec += 999 // round up to microsecond
+ tv.Sec = nsec / 1e9
+ tv.Usec = nsec % 1e9 / 1e3
+ return
+}
+
+func Pipe(p []int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, 0)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+//sysnb pipe2(p *[2]_C_int, flags int) (err error)
+
+func Pipe2(p []int, flags int) (err error) {
+ if len(p) != 2 {
+ return EINVAL
+ }
+ var pp [2]_C_int
+ err = pipe2(&pp, flags)
+ p[0] = int(pp[0])
+ p[1] = int(pp[1])
+ return
+}
+
+// Linux on s390x uses the old mmap interface, which requires arguments to be passed in a struct.
+// mmap2 also requires arguments to be passed in a struct; it is currently not exposed in <asm/unistd.h>.
+func mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error) {
+ mmap_args := [6]uintptr{addr, length, uintptr(prot), uintptr(flags), uintptr(fd), uintptr(offset)}
+ r0, _, e1 := Syscall(SYS_MMAP, uintptr(unsafe.Pointer(&mmap_args[0])), 0, 0)
+ use(unsafe.Pointer(&mmap_args[0]))
+ xaddr = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// On s390x Linux, all the socket calls go through an extra indirection.
+// The arguments to the underlying system call are the number below
+// and a pointer to an array of uintptr. We hide the pointer in the
+// socketcall assembly to avoid allocation on every system call.
+
+const (
+ // see linux/net.h
+ _SOCKET = 1
+ _BIND = 2
+ _CONNECT = 3
+ _LISTEN = 4
+ _ACCEPT = 5
+ _GETSOCKNAME = 6
+ _GETPEERNAME = 7
+ _SOCKETPAIR = 8
+ _SEND = 9
+ _RECV = 10
+ _SENDTO = 11
+ _RECVFROM = 12
+ _SHUTDOWN = 13
+ _SETSOCKOPT = 14
+ _GETSOCKOPT = 15
+ _SENDMSG = 16
+ _RECVMSG = 17
+ _ACCEPT4 = 18
+ _RECVMMSG = 19
+ _SENDMMSG = 20
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
+func rawsocketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (n int, err Errno)
+
+func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func accept4(s int, rsa *RawSockaddrAny, addrlen *_Socklen, flags int) (fd int, err error) {
+ fd, e := socketcall(_ACCEPT4, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), uintptr(flags), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockname(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETSOCKNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getpeername(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) {
+ _, e := rawsocketcall(_GETPEERNAME, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socketpair(domain int, typ int, flags int, fd *[2]int32) (err error) {
+ _, e := rawsocketcall(_SOCKETPAIR, uintptr(domain), uintptr(typ), uintptr(flags), uintptr(unsafe.Pointer(fd)), 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_BIND, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
+ _, e := socketcall(_CONNECT, uintptr(s), uintptr(addr), uintptr(addrlen), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func socket(domain int, typ int, proto int) (fd int, err error) {
+ fd, e := rawsocketcall(_SOCKET, uintptr(domain), uintptr(typ), uintptr(proto), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen) (err error) {
+ _, e := socketcall(_GETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) (err error) {
+ _, e := socketcall(_SETSOCKOPT, uintptr(s), uintptr(level), uintptr(name), uintptr(val), vallen, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvfrom(s int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ n, e := socketcall(_RECVFROM, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendto(s int, p []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) {
+ var base uintptr
+ if len(p) > 0 {
+ base = uintptr(unsafe.Pointer(&p[0]))
+ }
+ _, e := socketcall(_SENDTO, uintptr(s), base, uintptr(len(p)), uintptr(flags), uintptr(to), uintptr(addrlen))
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_RECVMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) {
+ n, e := socketcall(_SENDMSG, uintptr(s), uintptr(unsafe.Pointer(msg)), uintptr(flags), 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Listen(s int, n int) (err error) {
+ _, e := socketcall(_LISTEN, uintptr(s), uintptr(n), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func Shutdown(s, how int) (err error) {
+ _, e := socketcall(_SHUTDOWN, uintptr(s), uintptr(how), 0, 0, 0, 0)
+ if e != 0 {
+ err = e
+ }
+ return
+}
+
+func (r *PtraceRegs) PC() uint64 { return r.Psw.Addr }
+
+func (r *PtraceRegs) SetPC(pc uint64) { r.Psw.Addr = pc }
+
+func (iov *Iovec) SetLen(length int) {
+ iov.Len = uint64(length)
+}
+
+func (msghdr *Msghdr) SetControllen(length int) {
+ msghdr.Controllen = uint64(length)
+}
+
+func (cmsg *Cmsghdr) SetLen(length int) {
+ cmsg.Len = uint64(length)
+}
diff -pruN 1.6.3-1/src/syscall/types_linux.go 1.6.3-1ubuntu1/src/syscall/types_linux.go
--- 1.6.3-1/src/syscall/types_linux.go 2016-07-18 16:24:08.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/types_linux.go 2016-07-21 13:36:09.000000000 +0000
@@ -77,8 +77,8 @@ struct sockaddr_any {
// copied from /usr/include/linux/un.h
struct my_sockaddr_un {
sa_family_t sun_family;
-#if defined(__ARM_EABI__) || defined(__powerpc64__)
- // on ARM and PPC char is by default unsigned
+#if defined(__ARM_EABI__) || defined(__powerpc64__) || defined(__s390x__)
+ // on ARM, PPC and s390x char is by default unsigned
signed char sun_path[108];
#else
char sun_path[108];
@@ -93,10 +93,22 @@ typedef struct user_pt_regs PtraceRegs;
typedef struct pt_regs PtraceRegs;
#elif defined(__mips__)
typedef struct user PtraceRegs;
+#elif defined(__s390x__)
+typedef struct _user_regs_struct PtraceRegs;
#else
typedef struct user_regs_struct PtraceRegs;
#endif
+#if defined(__s390x__)
+typedef struct _user_psw_struct ptracePsw;
+typedef struct _user_fpregs_struct ptraceFpregs;
+typedef struct _user_per_struct ptracePer;
+#else
+typedef struct {} ptracePsw;
+typedef struct {} ptraceFpregs;
+typedef struct {} ptracePer;
+#endif
+
// The real epoll_event is a union, and godefs doesn't handle it well.
struct my_epoll_event {
uint32_t events;
@@ -105,7 +117,7 @@ struct my_epoll_event {
// alignment requirements of EABI
int32_t padFd;
#endif
-#ifdef __powerpc64__
+#if defined(__powerpc64__) || defined(__s390x__)
int32_t _padFd;
#endif
int32_t fd;
@@ -370,6 +382,13 @@ const SizeofInotifyEvent = C.sizeof_stru
// Register structures
type PtraceRegs C.PtraceRegs
+// Structures contained in PtraceRegs on s390x (exported by mkpost.go)
+type ptracePsw C.ptracePsw
+
+type ptraceFpregs C.ptraceFpregs
+
+type ptracePer C.ptracePer
+
// Misc
type FdSet C.fd_set
diff -pruN 1.6.3-1/src/syscall/zerrors_linux_s390x.go 1.6.3-1ubuntu1/src/syscall/zerrors_linux_s390x.go
--- 1.6.3-1/src/syscall/zerrors_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/zerrors_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1942 @@
+// mkerrors.sh -m64
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs -- -m64 _const.go
+
+// +build s390x,linux
+
+package syscall
+
+const (
+ AF_ALG = 0x26
+ AF_APPLETALK = 0x5
+ AF_ASH = 0x12
+ AF_ATMPVC = 0x8
+ AF_ATMSVC = 0x14
+ AF_AX25 = 0x3
+ AF_BLUETOOTH = 0x1f
+ AF_BRIDGE = 0x7
+ AF_CAIF = 0x25
+ AF_CAN = 0x1d
+ AF_DECnet = 0xc
+ AF_ECONET = 0x13
+ AF_FILE = 0x1
+ AF_IEEE802154 = 0x24
+ AF_INET = 0x2
+ AF_INET6 = 0xa
+ AF_IPX = 0x4
+ AF_IRDA = 0x17
+ AF_ISDN = 0x22
+ AF_IUCV = 0x20
+ AF_KEY = 0xf
+ AF_LLC = 0x1a
+ AF_LOCAL = 0x1
+ AF_MAX = 0x29
+ AF_NETBEUI = 0xd
+ AF_NETLINK = 0x10
+ AF_NETROM = 0x6
+ AF_NFC = 0x27
+ AF_PACKET = 0x11
+ AF_PHONET = 0x23
+ AF_PPPOX = 0x18
+ AF_RDS = 0x15
+ AF_ROSE = 0xb
+ AF_ROUTE = 0x10
+ AF_RXRPC = 0x21
+ AF_SECURITY = 0xe
+ AF_SNA = 0x16
+ AF_TIPC = 0x1e
+ AF_UNIX = 0x1
+ AF_UNSPEC = 0x0
+ AF_VSOCK = 0x28
+ AF_WANPIPE = 0x19
+ AF_X25 = 0x9
+ ARPHRD_6LOWPAN = 0x339
+ ARPHRD_ADAPT = 0x108
+ ARPHRD_APPLETLK = 0x8
+ ARPHRD_ARCNET = 0x7
+ ARPHRD_ASH = 0x30d
+ ARPHRD_ATM = 0x13
+ ARPHRD_AX25 = 0x3
+ ARPHRD_BIF = 0x307
+ ARPHRD_CAIF = 0x336
+ ARPHRD_CAN = 0x118
+ ARPHRD_CHAOS = 0x5
+ ARPHRD_CISCO = 0x201
+ ARPHRD_CSLIP = 0x101
+ ARPHRD_CSLIP6 = 0x103
+ ARPHRD_DDCMP = 0x205
+ ARPHRD_DLCI = 0xf
+ ARPHRD_ECONET = 0x30e
+ ARPHRD_EETHER = 0x2
+ ARPHRD_ETHER = 0x1
+ ARPHRD_EUI64 = 0x1b
+ ARPHRD_FCAL = 0x311
+ ARPHRD_FCFABRIC = 0x313
+ ARPHRD_FCPL = 0x312
+ ARPHRD_FCPP = 0x310
+ ARPHRD_FDDI = 0x306
+ ARPHRD_FRAD = 0x302
+ ARPHRD_HDLC = 0x201
+ ARPHRD_HIPPI = 0x30c
+ ARPHRD_HWX25 = 0x110
+ ARPHRD_IEEE1394 = 0x18
+ ARPHRD_IEEE802 = 0x6
+ ARPHRD_IEEE80211 = 0x321
+ ARPHRD_IEEE80211_PRISM = 0x322
+ ARPHRD_IEEE80211_RADIOTAP = 0x323
+ ARPHRD_IEEE802154 = 0x324
+ ARPHRD_IEEE802154_MONITOR = 0x325
+ ARPHRD_IEEE802_TR = 0x320
+ ARPHRD_INFINIBAND = 0x20
+ ARPHRD_IP6GRE = 0x337
+ ARPHRD_IPDDP = 0x309
+ ARPHRD_IPGRE = 0x30a
+ ARPHRD_IRDA = 0x30f
+ ARPHRD_LAPB = 0x204
+ ARPHRD_LOCALTLK = 0x305
+ ARPHRD_LOOPBACK = 0x304
+ ARPHRD_METRICOM = 0x17
+ ARPHRD_NETLINK = 0x338
+ ARPHRD_NETROM = 0x0
+ ARPHRD_NONE = 0xfffe
+ ARPHRD_PHONET = 0x334
+ ARPHRD_PHONET_PIPE = 0x335
+ ARPHRD_PIMREG = 0x30b
+ ARPHRD_PPP = 0x200
+ ARPHRD_PRONET = 0x4
+ ARPHRD_RAWHDLC = 0x206
+ ARPHRD_ROSE = 0x10e
+ ARPHRD_RSRVD = 0x104
+ ARPHRD_SIT = 0x308
+ ARPHRD_SKIP = 0x303
+ ARPHRD_SLIP = 0x100
+ ARPHRD_SLIP6 = 0x102
+ ARPHRD_TUNNEL = 0x300
+ ARPHRD_TUNNEL6 = 0x301
+ ARPHRD_VOID = 0xffff
+ ARPHRD_X25 = 0x10f
+ B0 = 0x0
+ B1000000 = 0x1008
+ B110 = 0x3
+ B115200 = 0x1002
+ B1152000 = 0x1009
+ B1200 = 0x9
+ B134 = 0x4
+ B150 = 0x5
+ B1500000 = 0x100a
+ B1800 = 0xa
+ B19200 = 0xe
+ B200 = 0x6
+ B2000000 = 0x100b
+ B230400 = 0x1003
+ B2400 = 0xb
+ B2500000 = 0x100c
+ B300 = 0x7
+ B3000000 = 0x100d
+ B3500000 = 0x100e
+ B38400 = 0xf
+ B4000000 = 0x100f
+ B460800 = 0x1004
+ B4800 = 0xc
+ B50 = 0x1
+ B500000 = 0x1005
+ B57600 = 0x1001
+ B576000 = 0x1006
+ B600 = 0x8
+ B75 = 0x2
+ B921600 = 0x1007
+ B9600 = 0xd
+ BPF_A = 0x10
+ BPF_ABS = 0x20
+ BPF_ADD = 0x0
+ BPF_ALU = 0x4
+ BPF_AND = 0x50
+ BPF_B = 0x10
+ BPF_DIV = 0x30
+ BPF_H = 0x8
+ BPF_IMM = 0x0
+ BPF_IND = 0x40
+ BPF_JA = 0x0
+ BPF_JEQ = 0x10
+ BPF_JGE = 0x30
+ BPF_JGT = 0x20
+ BPF_JMP = 0x5
+ BPF_JSET = 0x40
+ BPF_K = 0x0
+ BPF_LD = 0x0
+ BPF_LDX = 0x1
+ BPF_LEN = 0x80
+ BPF_LL_OFF = -0x200000
+ BPF_LSH = 0x60
+ BPF_MAJOR_VERSION = 0x1
+ BPF_MAXINSNS = 0x1000
+ BPF_MEM = 0x60
+ BPF_MEMWORDS = 0x10
+ BPF_MINOR_VERSION = 0x1
+ BPF_MISC = 0x7
+ BPF_MOD = 0x90
+ BPF_MSH = 0xa0
+ BPF_MUL = 0x20
+ BPF_NEG = 0x80
+ BPF_NET_OFF = -0x100000
+ BPF_OR = 0x40
+ BPF_RET = 0x6
+ BPF_RSH = 0x70
+ BPF_ST = 0x2
+ BPF_STX = 0x3
+ BPF_SUB = 0x10
+ BPF_TAX = 0x0
+ BPF_TXA = 0x80
+ BPF_W = 0x0
+ BPF_X = 0x8
+ BPF_XOR = 0xa0
+ BRKINT = 0x2
+ CFLUSH = 0xf
+ CLOCAL = 0x800
+ CLONE_CHILD_CLEARTID = 0x200000
+ CLONE_CHILD_SETTID = 0x1000000
+ CLONE_DETACHED = 0x400000
+ CLONE_FILES = 0x400
+ CLONE_FS = 0x200
+ CLONE_IO = 0x80000000
+ CLONE_NEWCGROUP = 0x2000000
+ CLONE_NEWIPC = 0x8000000
+ CLONE_NEWNET = 0x40000000
+ CLONE_NEWNS = 0x20000
+ CLONE_NEWPID = 0x20000000
+ CLONE_NEWUSER = 0x10000000
+ CLONE_NEWUTS = 0x4000000
+ CLONE_PARENT = 0x8000
+ CLONE_PARENT_SETTID = 0x100000
+ CLONE_PTRACE = 0x2000
+ CLONE_SETTLS = 0x80000
+ CLONE_SIGHAND = 0x800
+ CLONE_SYSVSEM = 0x40000
+ CLONE_THREAD = 0x10000
+ CLONE_UNTRACED = 0x800000
+ CLONE_VFORK = 0x4000
+ CLONE_VM = 0x100
+ CREAD = 0x80
+ CS5 = 0x0
+ CS6 = 0x10
+ CS7 = 0x20
+ CS8 = 0x30
+ CSIGNAL = 0xff
+ CSIZE = 0x30
+ CSTART = 0x11
+ CSTATUS = 0x0
+ CSTOP = 0x13
+ CSTOPB = 0x40
+ CSUSP = 0x1a
+ DT_BLK = 0x6
+ DT_CHR = 0x2
+ DT_DIR = 0x4
+ DT_FIFO = 0x1
+ DT_LNK = 0xa
+ DT_REG = 0x8
+ DT_SOCK = 0xc
+ DT_UNKNOWN = 0x0
+ DT_WHT = 0xe
+ ECHO = 0x8
+ ECHOCTL = 0x200
+ ECHOE = 0x10
+ ECHOK = 0x20
+ ECHOKE = 0x800
+ ECHONL = 0x40
+ ECHOPRT = 0x400
+ ENCODING_DEFAULT = 0x0
+ ENCODING_FM_MARK = 0x3
+ ENCODING_FM_SPACE = 0x4
+ ENCODING_MANCHESTER = 0x5
+ ENCODING_NRZ = 0x1
+ ENCODING_NRZI = 0x2
+ EPOLLERR = 0x8
+ EPOLLET = 0x80000000
+ EPOLLHUP = 0x10
+ EPOLLIN = 0x1
+ EPOLLMSG = 0x400
+ EPOLLONESHOT = 0x40000000
+ EPOLLOUT = 0x4
+ EPOLLPRI = 0x2
+ EPOLLRDBAND = 0x80
+ EPOLLRDHUP = 0x2000
+ EPOLLRDNORM = 0x40
+ EPOLLWAKEUP = 0x20000000
+ EPOLLWRBAND = 0x200
+ EPOLLWRNORM = 0x100
+ EPOLL_CLOEXEC = 0x80000
+ EPOLL_CTL_ADD = 0x1
+ EPOLL_CTL_DEL = 0x2
+ EPOLL_CTL_MOD = 0x3
+ ETH_P_1588 = 0x88f7
+ ETH_P_8021AD = 0x88a8
+ ETH_P_8021AH = 0x88e7
+ ETH_P_8021Q = 0x8100
+ ETH_P_80221 = 0x8917
+ ETH_P_802_2 = 0x4
+ ETH_P_802_3 = 0x1
+ ETH_P_802_3_MIN = 0x600
+ ETH_P_802_EX1 = 0x88b5
+ ETH_P_AARP = 0x80f3
+ ETH_P_AF_IUCV = 0xfbfb
+ ETH_P_ALL = 0x3
+ ETH_P_AOE = 0x88a2
+ ETH_P_ARCNET = 0x1a
+ ETH_P_ARP = 0x806
+ ETH_P_ATALK = 0x809b
+ ETH_P_ATMFATE = 0x8884
+ ETH_P_ATMMPOA = 0x884c
+ ETH_P_AX25 = 0x2
+ ETH_P_BATMAN = 0x4305
+ ETH_P_BPQ = 0x8ff
+ ETH_P_CAIF = 0xf7
+ ETH_P_CAN = 0xc
+ ETH_P_CANFD = 0xd
+ ETH_P_CONTROL = 0x16
+ ETH_P_CUST = 0x6006
+ ETH_P_DDCMP = 0x6
+ ETH_P_DEC = 0x6000
+ ETH_P_DIAG = 0x6005
+ ETH_P_DNA_DL = 0x6001
+ ETH_P_DNA_RC = 0x6002
+ ETH_P_DNA_RT = 0x6003
+ ETH_P_DSA = 0x1b
+ ETH_P_ECONET = 0x18
+ ETH_P_EDSA = 0xdada
+ ETH_P_FCOE = 0x8906
+ ETH_P_FIP = 0x8914
+ ETH_P_HDLC = 0x19
+ ETH_P_IEEE802154 = 0xf6
+ ETH_P_IEEEPUP = 0xa00
+ ETH_P_IEEEPUPAT = 0xa01
+ ETH_P_IP = 0x800
+ ETH_P_IPV6 = 0x86dd
+ ETH_P_IPX = 0x8137
+ ETH_P_IRDA = 0x17
+ ETH_P_LAT = 0x6004
+ ETH_P_LINK_CTL = 0x886c
+ ETH_P_LOCALTALK = 0x9
+ ETH_P_LOOP = 0x60
+ ETH_P_LOOPBACK = 0x9000
+ ETH_P_MOBITEX = 0x15
+ ETH_P_MPLS_MC = 0x8848
+ ETH_P_MPLS_UC = 0x8847
+ ETH_P_MVRP = 0x88f5
+ ETH_P_PAE = 0x888e
+ ETH_P_PAUSE = 0x8808
+ ETH_P_PHONET = 0xf5
+ ETH_P_PPPTALK = 0x10
+ ETH_P_PPP_DISC = 0x8863
+ ETH_P_PPP_MP = 0x8
+ ETH_P_PPP_SES = 0x8864
+ ETH_P_PRP = 0x88fb
+ ETH_P_PUP = 0x200
+ ETH_P_PUPAT = 0x201
+ ETH_P_QINQ1 = 0x9100
+ ETH_P_QINQ2 = 0x9200
+ ETH_P_QINQ3 = 0x9300
+ ETH_P_RARP = 0x8035
+ ETH_P_SCA = 0x6007
+ ETH_P_SLOW = 0x8809
+ ETH_P_SNAP = 0x5
+ ETH_P_TDLS = 0x890d
+ ETH_P_TEB = 0x6558
+ ETH_P_TIPC = 0x88ca
+ ETH_P_TRAILER = 0x1c
+ ETH_P_TR_802_2 = 0x11
+ ETH_P_TSN = 0x22f0
+ ETH_P_WAN_PPP = 0x7
+ ETH_P_WCCP = 0x883e
+ ETH_P_X25 = 0x805
+ ETH_P_XDSA = 0xf8
+ EXTA = 0xe
+ EXTB = 0xf
+ EXTPROC = 0x10000
+ FD_CLOEXEC = 0x1
+ FD_SETSIZE = 0x400
+ FLUSHO = 0x1000
+ F_DUPFD = 0x0
+ F_DUPFD_CLOEXEC = 0x406
+ F_EXLCK = 0x4
+ F_GETFD = 0x1
+ F_GETFL = 0x3
+ F_GETLEASE = 0x401
+ F_GETLK = 0x5
+ F_GETLK64 = 0x5
+ F_GETOWN = 0x9
+ F_GETOWN_EX = 0x10
+ F_GETPIPE_SZ = 0x408
+ F_GETSIG = 0xb
+ F_LOCK = 0x1
+ F_NOTIFY = 0x402
+ F_OFD_GETLK = 0x24
+ F_OFD_SETLK = 0x25
+ F_OFD_SETLKW = 0x26
+ F_OK = 0x0
+ F_RDLCK = 0x0
+ F_SETFD = 0x2
+ F_SETFL = 0x4
+ F_SETLEASE = 0x400
+ F_SETLK = 0x6
+ F_SETLK64 = 0x6
+ F_SETLKW = 0x7
+ F_SETLKW64 = 0x7
+ F_SETOWN = 0x8
+ F_SETOWN_EX = 0xf
+ F_SETPIPE_SZ = 0x407
+ F_SETSIG = 0xa
+ F_SHLCK = 0x8
+ F_TEST = 0x3
+ F_TLOCK = 0x2
+ F_ULOCK = 0x0
+ F_UNLCK = 0x2
+ F_WRLCK = 0x1
+ HUPCL = 0x400
+ ICANON = 0x2
+ ICMPV6_FILTER = 0x1
+ ICRNL = 0x100
+ IEXTEN = 0x8000
+ IFA_F_DADFAILED = 0x8
+ IFA_F_DEPRECATED = 0x20
+ IFA_F_HOMEADDRESS = 0x10
+ IFA_F_MANAGETEMPADDR = 0x100
+ IFA_F_MCAUTOJOIN = 0x400
+ IFA_F_NODAD = 0x2
+ IFA_F_NOPREFIXROUTE = 0x200
+ IFA_F_OPTIMISTIC = 0x4
+ IFA_F_PERMANENT = 0x80
+ IFA_F_SECONDARY = 0x1
+ IFA_F_STABLE_PRIVACY = 0x800
+ IFA_F_TEMPORARY = 0x1
+ IFA_F_TENTATIVE = 0x40
+ IFA_MAX = 0x8
+ IFF_ALLMULTI = 0x200
+ IFF_ATTACH_QUEUE = 0x200
+ IFF_AUTOMEDIA = 0x4000
+ IFF_BROADCAST = 0x2
+ IFF_DEBUG = 0x4
+ IFF_DETACH_QUEUE = 0x400
+ IFF_DORMANT = 0x20000
+ IFF_DYNAMIC = 0x8000
+ IFF_ECHO = 0x40000
+ IFF_LOOPBACK = 0x8
+ IFF_LOWER_UP = 0x10000
+ IFF_MASTER = 0x400
+ IFF_MULTICAST = 0x1000
+ IFF_MULTI_QUEUE = 0x100
+ IFF_NOARP = 0x80
+ IFF_NOFILTER = 0x1000
+ IFF_NOTRAILERS = 0x20
+ IFF_NO_PI = 0x1000
+ IFF_ONE_QUEUE = 0x2000
+ IFF_PERSIST = 0x800
+ IFF_POINTOPOINT = 0x10
+ IFF_PORTSEL = 0x2000
+ IFF_PROMISC = 0x100
+ IFF_RUNNING = 0x40
+ IFF_SLAVE = 0x800
+ IFF_TAP = 0x2
+ IFF_TUN = 0x1
+ IFF_TUN_EXCL = 0x8000
+ IFF_UP = 0x1
+ IFF_VNET_HDR = 0x4000
+ IFF_VOLATILE = 0x70c5a
+ IFNAMSIZ = 0x10
+ IGNBRK = 0x1
+ IGNCR = 0x80
+ IGNPAR = 0x4
+ IMAXBEL = 0x2000
+ INLCR = 0x40
+ INPCK = 0x10
+ IN_ACCESS = 0x1
+ IN_ALL_EVENTS = 0xfff
+ IN_ATTRIB = 0x4
+ IN_CLASSA_HOST = 0xffffff
+ IN_CLASSA_MAX = 0x80
+ IN_CLASSA_NET = 0xff000000
+ IN_CLASSA_NSHIFT = 0x18
+ IN_CLASSB_HOST = 0xffff
+ IN_CLASSB_MAX = 0x10000
+ IN_CLASSB_NET = 0xffff0000
+ IN_CLASSB_NSHIFT = 0x10
+ IN_CLASSC_HOST = 0xff
+ IN_CLASSC_NET = 0xffffff00
+ IN_CLASSC_NSHIFT = 0x8
+ IN_CLOEXEC = 0x80000
+ IN_CLOSE = 0x18
+ IN_CLOSE_NOWRITE = 0x10
+ IN_CLOSE_WRITE = 0x8
+ IN_CREATE = 0x100
+ IN_DELETE = 0x200
+ IN_DELETE_SELF = 0x400
+ IN_DONT_FOLLOW = 0x2000000
+ IN_EXCL_UNLINK = 0x4000000
+ IN_IGNORED = 0x8000
+ IN_ISDIR = 0x40000000
+ IN_LOOPBACKNET = 0x7f
+ IN_MASK_ADD = 0x20000000
+ IN_MODIFY = 0x2
+ IN_MOVE = 0xc0
+ IN_MOVED_FROM = 0x40
+ IN_MOVED_TO = 0x80
+ IN_MOVE_SELF = 0x800
+ IN_NONBLOCK = 0x800
+ IN_ONESHOT = 0x80000000
+ IN_ONLYDIR = 0x1000000
+ IN_OPEN = 0x20
+ IN_Q_OVERFLOW = 0x4000
+ IN_UNMOUNT = 0x2000
+ IPPROTO_AH = 0x33
+ IPPROTO_BEETPH = 0x5e
+ IPPROTO_COMP = 0x6c
+ IPPROTO_DCCP = 0x21
+ IPPROTO_DSTOPTS = 0x3c
+ IPPROTO_EGP = 0x8
+ IPPROTO_ENCAP = 0x62
+ IPPROTO_ESP = 0x32
+ IPPROTO_FRAGMENT = 0x2c
+ IPPROTO_GRE = 0x2f
+ IPPROTO_HOPOPTS = 0x0
+ IPPROTO_ICMP = 0x1
+ IPPROTO_ICMPV6 = 0x3a
+ IPPROTO_IDP = 0x16
+ IPPROTO_IGMP = 0x2
+ IPPROTO_IP = 0x0
+ IPPROTO_IPIP = 0x4
+ IPPROTO_IPV6 = 0x29
+ IPPROTO_MH = 0x87
+ IPPROTO_MTP = 0x5c
+ IPPROTO_NONE = 0x3b
+ IPPROTO_PIM = 0x67
+ IPPROTO_PUP = 0xc
+ IPPROTO_RAW = 0xff
+ IPPROTO_ROUTING = 0x2b
+ IPPROTO_RSVP = 0x2e
+ IPPROTO_SCTP = 0x84
+ IPPROTO_TCP = 0x6
+ IPPROTO_TP = 0x1d
+ IPPROTO_UDP = 0x11
+ IPPROTO_UDPLITE = 0x88
+ IPV6_2292DSTOPTS = 0x4
+ IPV6_2292HOPLIMIT = 0x8
+ IPV6_2292HOPOPTS = 0x3
+ IPV6_2292PKTINFO = 0x2
+ IPV6_2292PKTOPTIONS = 0x6
+ IPV6_2292RTHDR = 0x5
+ IPV6_ADDRFORM = 0x1
+ IPV6_ADD_MEMBERSHIP = 0x14
+ IPV6_AUTHHDR = 0xa
+ IPV6_CHECKSUM = 0x7
+ IPV6_DROP_MEMBERSHIP = 0x15
+ IPV6_DSTOPTS = 0x3b
+ IPV6_HOPLIMIT = 0x34
+ IPV6_HOPOPTS = 0x36
+ IPV6_IPSEC_POLICY = 0x22
+ IPV6_JOIN_ANYCAST = 0x1b
+ IPV6_JOIN_GROUP = 0x14
+ IPV6_LEAVE_ANYCAST = 0x1c
+ IPV6_LEAVE_GROUP = 0x15
+ IPV6_MTU = 0x18
+ IPV6_MTU_DISCOVER = 0x17
+ IPV6_MULTICAST_HOPS = 0x12
+ IPV6_MULTICAST_IF = 0x11
+ IPV6_MULTICAST_LOOP = 0x13
+ IPV6_NEXTHOP = 0x9
+ IPV6_PKTINFO = 0x32
+ IPV6_PMTUDISC_DO = 0x2
+ IPV6_PMTUDISC_DONT = 0x0
+ IPV6_PMTUDISC_INTERFACE = 0x4
+ IPV6_PMTUDISC_OMIT = 0x5
+ IPV6_PMTUDISC_PROBE = 0x3
+ IPV6_PMTUDISC_WANT = 0x1
+ IPV6_RECVDSTOPTS = 0x3a
+ IPV6_RECVERR = 0x19
+ IPV6_RECVHOPLIMIT = 0x33
+ IPV6_RECVHOPOPTS = 0x35
+ IPV6_RECVPKTINFO = 0x31
+ IPV6_RECVRTHDR = 0x38
+ IPV6_RECVTCLASS = 0x42
+ IPV6_ROUTER_ALERT = 0x16
+ IPV6_RTHDR = 0x39
+ IPV6_RTHDRDSTOPTS = 0x37
+ IPV6_RTHDR_LOOSE = 0x0
+ IPV6_RTHDR_STRICT = 0x1
+ IPV6_RTHDR_TYPE_0 = 0x0
+ IPV6_RXDSTOPTS = 0x3b
+ IPV6_RXHOPOPTS = 0x36
+ IPV6_TCLASS = 0x43
+ IPV6_UNICAST_HOPS = 0x10
+ IPV6_V6ONLY = 0x1a
+ IPV6_XFRM_POLICY = 0x23
+ IP_ADD_MEMBERSHIP = 0x23
+ IP_ADD_SOURCE_MEMBERSHIP = 0x27
+ IP_BLOCK_SOURCE = 0x26
+ IP_DEFAULT_MULTICAST_LOOP = 0x1
+ IP_DEFAULT_MULTICAST_TTL = 0x1
+ IP_DF = 0x4000
+ IP_DROP_MEMBERSHIP = 0x24
+ IP_DROP_SOURCE_MEMBERSHIP = 0x28
+ IP_FREEBIND = 0xf
+ IP_HDRINCL = 0x3
+ IP_IPSEC_POLICY = 0x10
+ IP_MAXPACKET = 0xffff
+ IP_MAX_MEMBERSHIPS = 0x14
+ IP_MF = 0x2000
+ IP_MINTTL = 0x15
+ IP_MSFILTER = 0x29
+ IP_MSS = 0x240
+ IP_MTU = 0xe
+ IP_MTU_DISCOVER = 0xa
+ IP_MULTICAST_ALL = 0x31
+ IP_MULTICAST_IF = 0x20
+ IP_MULTICAST_LOOP = 0x22
+ IP_MULTICAST_TTL = 0x21
+ IP_NODEFRAG = 0x16
+ IP_OFFMASK = 0x1fff
+ IP_OPTIONS = 0x4
+ IP_ORIGDSTADDR = 0x14
+ IP_PASSSEC = 0x12
+ IP_PKTINFO = 0x8
+ IP_PKTOPTIONS = 0x9
+ IP_PMTUDISC = 0xa
+ IP_PMTUDISC_DO = 0x2
+ IP_PMTUDISC_DONT = 0x0
+ IP_PMTUDISC_INTERFACE = 0x4
+ IP_PMTUDISC_OMIT = 0x5
+ IP_PMTUDISC_PROBE = 0x3
+ IP_PMTUDISC_WANT = 0x1
+ IP_RECVERR = 0xb
+ IP_RECVOPTS = 0x6
+ IP_RECVORIGDSTADDR = 0x14
+ IP_RECVRETOPTS = 0x7
+ IP_RECVTOS = 0xd
+ IP_RECVTTL = 0xc
+ IP_RETOPTS = 0x7
+ IP_RF = 0x8000
+ IP_ROUTER_ALERT = 0x5
+ IP_TOS = 0x1
+ IP_TRANSPARENT = 0x13
+ IP_TTL = 0x2
+ IP_UNBLOCK_SOURCE = 0x25
+ IP_UNICAST_IF = 0x32
+ IP_XFRM_POLICY = 0x11
+ ISIG = 0x1
+ ISTRIP = 0x20
+ IUTF8 = 0x4000
+ IXANY = 0x800
+ IXOFF = 0x1000
+ IXON = 0x400
+ LINUX_REBOOT_CMD_CAD_OFF = 0x0
+ LINUX_REBOOT_CMD_CAD_ON = 0x89abcdef
+ LINUX_REBOOT_CMD_HALT = 0xcdef0123
+ LINUX_REBOOT_CMD_KEXEC = 0x45584543
+ LINUX_REBOOT_CMD_POWER_OFF = 0x4321fedc
+ LINUX_REBOOT_CMD_RESTART = 0x1234567
+ LINUX_REBOOT_CMD_RESTART2 = 0xa1b2c3d4
+ LINUX_REBOOT_CMD_SW_SUSPEND = 0xd000fce2
+ LINUX_REBOOT_MAGIC1 = 0xfee1dead
+ LINUX_REBOOT_MAGIC2 = 0x28121969
+ LOCK_EX = 0x2
+ LOCK_NB = 0x4
+ LOCK_SH = 0x1
+ LOCK_UN = 0x8
+ MADV_DODUMP = 0x11
+ MADV_DOFORK = 0xb
+ MADV_DONTDUMP = 0x10
+ MADV_DONTFORK = 0xa
+ MADV_DONTNEED = 0x4
+ MADV_HUGEPAGE = 0xe
+ MADV_HWPOISON = 0x64
+ MADV_MERGEABLE = 0xc
+ MADV_NOHUGEPAGE = 0xf
+ MADV_NORMAL = 0x0
+ MADV_RANDOM = 0x1
+ MADV_REMOVE = 0x9
+ MADV_SEQUENTIAL = 0x2
+ MADV_UNMERGEABLE = 0xd
+ MADV_WILLNEED = 0x3
+ MAP_ANON = 0x20
+ MAP_ANONYMOUS = 0x20
+ MAP_DENYWRITE = 0x800
+ MAP_EXECUTABLE = 0x1000
+ MAP_FILE = 0x0
+ MAP_FIXED = 0x10
+ MAP_GROWSDOWN = 0x100
+ MAP_HUGETLB = 0x40000
+ MAP_HUGE_MASK = 0x3f
+ MAP_HUGE_SHIFT = 0x1a
+ MAP_LOCKED = 0x2000
+ MAP_NONBLOCK = 0x10000
+ MAP_NORESERVE = 0x4000
+ MAP_POPULATE = 0x8000
+ MAP_PRIVATE = 0x2
+ MAP_SHARED = 0x1
+ MAP_STACK = 0x20000
+ MAP_TYPE = 0xf
+ MCL_CURRENT = 0x1
+ MCL_FUTURE = 0x2
+ MNT_DETACH = 0x2
+ MNT_EXPIRE = 0x4
+ MNT_FORCE = 0x1
+ MSG_CMSG_CLOEXEC = 0x40000000
+ MSG_CONFIRM = 0x800
+ MSG_CTRUNC = 0x8
+ MSG_DONTROUTE = 0x4
+ MSG_DONTWAIT = 0x40
+ MSG_EOR = 0x80
+ MSG_ERRQUEUE = 0x2000
+ MSG_FASTOPEN = 0x20000000
+ MSG_FIN = 0x200
+ MSG_MORE = 0x8000
+ MSG_NOSIGNAL = 0x4000
+ MSG_OOB = 0x1
+ MSG_PEEK = 0x2
+ MSG_PROXY = 0x10
+ MSG_RST = 0x1000
+ MSG_SYN = 0x400
+ MSG_TRUNC = 0x20
+ MSG_TRYHARD = 0x4
+ MSG_WAITALL = 0x100
+ MSG_WAITFORONE = 0x10000
+ MS_ACTIVE = 0x40000000
+ MS_ASYNC = 0x1
+ MS_BIND = 0x1000
+ MS_DIRSYNC = 0x80
+ MS_INVALIDATE = 0x2
+ MS_I_VERSION = 0x800000
+ MS_KERNMOUNT = 0x400000
+ MS_MANDLOCK = 0x40
+ MS_MGC_MSK = 0xffff0000
+ MS_MGC_VAL = 0xc0ed0000
+ MS_MOVE = 0x2000
+ MS_NOATIME = 0x400
+ MS_NODEV = 0x4
+ MS_NODIRATIME = 0x800
+ MS_NOEXEC = 0x8
+ MS_NOSUID = 0x2
+ MS_NOUSER = -0x80000000
+ MS_POSIXACL = 0x10000
+ MS_PRIVATE = 0x40000
+ MS_RDONLY = 0x1
+ MS_REC = 0x4000
+ MS_RELATIME = 0x200000
+ MS_REMOUNT = 0x20
+ MS_RMT_MASK = 0x800051
+ MS_SHARED = 0x100000
+ MS_SILENT = 0x8000
+ MS_SLAVE = 0x80000
+ MS_STRICTATIME = 0x1000000
+ MS_SYNC = 0x4
+ MS_SYNCHRONOUS = 0x10
+ MS_UNBINDABLE = 0x20000
+ NAME_MAX = 0xff
+ NETLINK_ADD_MEMBERSHIP = 0x1
+ NETLINK_AUDIT = 0x9
+ NETLINK_BROADCAST_ERROR = 0x4
+ NETLINK_CAP_ACK = 0xa
+ NETLINK_CONNECTOR = 0xb
+ NETLINK_CRYPTO = 0x15
+ NETLINK_DNRTMSG = 0xe
+ NETLINK_DROP_MEMBERSHIP = 0x2
+ NETLINK_ECRYPTFS = 0x13
+ NETLINK_FIB_LOOKUP = 0xa
+ NETLINK_FIREWALL = 0x3
+ NETLINK_GENERIC = 0x10
+ NETLINK_INET_DIAG = 0x4
+ NETLINK_IP6_FW = 0xd
+ NETLINK_ISCSI = 0x8
+ NETLINK_KOBJECT_UEVENT = 0xf
+ NETLINK_LISTEN_ALL_NSID = 0x8
+ NETLINK_LIST_MEMBERSHIPS = 0x9
+ NETLINK_NETFILTER = 0xc
+ NETLINK_NFLOG = 0x5
+ NETLINK_NO_ENOBUFS = 0x5
+ NETLINK_PKTINFO = 0x3
+ NETLINK_RDMA = 0x14
+ NETLINK_ROUTE = 0x0
+ NETLINK_RX_RING = 0x6
+ NETLINK_SCSITRANSPORT = 0x12
+ NETLINK_SELINUX = 0x7
+ NETLINK_SOCK_DIAG = 0x4
+ NETLINK_TX_RING = 0x7
+ NETLINK_UNUSED = 0x1
+ NETLINK_USERSOCK = 0x2
+ NETLINK_XFRM = 0x6
+ NLA_ALIGNTO = 0x4
+ NLA_F_NESTED = 0x8000
+ NLA_F_NET_BYTEORDER = 0x4000
+ NLA_HDRLEN = 0x4
+ NLMSG_ALIGNTO = 0x4
+ NLMSG_DONE = 0x3
+ NLMSG_ERROR = 0x2
+ NLMSG_HDRLEN = 0x10
+ NLMSG_MIN_TYPE = 0x10
+ NLMSG_NOOP = 0x1
+ NLMSG_OVERRUN = 0x4
+ NLM_F_ACK = 0x4
+ NLM_F_APPEND = 0x800
+ NLM_F_ATOMIC = 0x400
+ NLM_F_CREATE = 0x400
+ NLM_F_DUMP = 0x300
+ NLM_F_DUMP_FILTERED = 0x20
+ NLM_F_DUMP_INTR = 0x10
+ NLM_F_ECHO = 0x8
+ NLM_F_EXCL = 0x200
+ NLM_F_MATCH = 0x200
+ NLM_F_MULTI = 0x2
+ NLM_F_REPLACE = 0x100
+ NLM_F_REQUEST = 0x1
+ NLM_F_ROOT = 0x100
+ NOFLSH = 0x80
+ OCRNL = 0x8
+ OFDEL = 0x80
+ OFILL = 0x40
+ ONLCR = 0x4
+ ONLRET = 0x20
+ ONOCR = 0x10
+ OPOST = 0x1
+ O_ACCMODE = 0x3
+ O_APPEND = 0x400
+ O_ASYNC = 0x2000
+ O_CLOEXEC = 0x80000
+ O_CREAT = 0x40
+ O_DIRECT = 0x4000
+ O_DIRECTORY = 0x10000
+ O_DSYNC = 0x1000
+ O_EXCL = 0x80
+ O_FSYNC = 0x101000
+ O_LARGEFILE = 0x0
+ O_NDELAY = 0x800
+ O_NOATIME = 0x40000
+ O_NOCTTY = 0x100
+ O_NOFOLLOW = 0x20000
+ O_NONBLOCK = 0x800
+ O_PATH = 0x200000
+ O_RDONLY = 0x0
+ O_RDWR = 0x2
+ O_RSYNC = 0x101000
+ O_SYNC = 0x101000
+ O_TMPFILE = 0x410000
+ O_TRUNC = 0x200
+ O_WRONLY = 0x1
+ PACKET_ADD_MEMBERSHIP = 0x1
+ PACKET_AUXDATA = 0x8
+ PACKET_BROADCAST = 0x1
+ PACKET_COPY_THRESH = 0x7
+ PACKET_DROP_MEMBERSHIP = 0x2
+ PACKET_FANOUT = 0x12
+ PACKET_FANOUT_CBPF = 0x6
+ PACKET_FANOUT_CPU = 0x2
+ PACKET_FANOUT_DATA = 0x16
+ PACKET_FANOUT_EBPF = 0x7
+ PACKET_FANOUT_FLAG_DEFRAG = 0x8000
+ PACKET_FANOUT_FLAG_ROLLOVER = 0x1000
+ PACKET_FANOUT_HASH = 0x0
+ PACKET_FANOUT_LB = 0x1
+ PACKET_FANOUT_QM = 0x5
+ PACKET_FANOUT_RND = 0x4
+ PACKET_FANOUT_ROLLOVER = 0x3
+ PACKET_FASTROUTE = 0x6
+ PACKET_HDRLEN = 0xb
+ PACKET_HOST = 0x0
+ PACKET_KERNEL = 0x7
+ PACKET_LOOPBACK = 0x5
+ PACKET_LOSS = 0xe
+ PACKET_MR_ALLMULTI = 0x2
+ PACKET_MR_MULTICAST = 0x0
+ PACKET_MR_PROMISC = 0x1
+ PACKET_MR_UNICAST = 0x3
+ PACKET_MULTICAST = 0x2
+ PACKET_ORIGDEV = 0x9
+ PACKET_OTHERHOST = 0x3
+ PACKET_OUTGOING = 0x4
+ PACKET_QDISC_BYPASS = 0x14
+ PACKET_RECV_OUTPUT = 0x3
+ PACKET_RESERVE = 0xc
+ PACKET_ROLLOVER_STATS = 0x15
+ PACKET_RX_RING = 0x5
+ PACKET_STATISTICS = 0x6
+ PACKET_TIMESTAMP = 0x11
+ PACKET_TX_HAS_OFF = 0x13
+ PACKET_TX_RING = 0xd
+ PACKET_TX_TIMESTAMP = 0x10
+ PACKET_USER = 0x6
+ PACKET_VERSION = 0xa
+ PACKET_VNET_HDR = 0xf
+ PARENB = 0x100
+ PARITY_CRC16_PR0 = 0x2
+ PARITY_CRC16_PR0_CCITT = 0x4
+ PARITY_CRC16_PR1 = 0x3
+ PARITY_CRC16_PR1_CCITT = 0x5
+ PARITY_CRC32_PR0_CCITT = 0x6
+ PARITY_CRC32_PR1_CCITT = 0x7
+ PARITY_DEFAULT = 0x0
+ PARITY_NONE = 0x1
+ PARMRK = 0x8
+ PARODD = 0x200
+ PENDIN = 0x4000
+ PRIO_PGRP = 0x1
+ PRIO_PROCESS = 0x0
+ PRIO_USER = 0x2
+ PROT_EXEC = 0x4
+ PROT_GROWSDOWN = 0x1000000
+ PROT_GROWSUP = 0x2000000
+ PROT_NONE = 0x0
+ PROT_READ = 0x1
+ PROT_WRITE = 0x2
+ PR_CAPBSET_DROP = 0x18
+ PR_CAPBSET_READ = 0x17
+ PR_CAP_AMBIENT = 0x2f
+ PR_CAP_AMBIENT_CLEAR_ALL = 0x4
+ PR_CAP_AMBIENT_IS_SET = 0x1
+ PR_CAP_AMBIENT_LOWER = 0x3
+ PR_CAP_AMBIENT_RAISE = 0x2
+ PR_ENDIAN_BIG = 0x0
+ PR_ENDIAN_LITTLE = 0x1
+ PR_ENDIAN_PPC_LITTLE = 0x2
+ PR_FPEMU_NOPRINT = 0x1
+ PR_FPEMU_SIGFPE = 0x2
+ PR_FP_EXC_ASYNC = 0x2
+ PR_FP_EXC_DISABLED = 0x0
+ PR_FP_EXC_DIV = 0x10000
+ PR_FP_EXC_INV = 0x100000
+ PR_FP_EXC_NONRECOV = 0x1
+ PR_FP_EXC_OVF = 0x20000
+ PR_FP_EXC_PRECISE = 0x3
+ PR_FP_EXC_RES = 0x80000
+ PR_FP_EXC_SW_ENABLE = 0x80
+ PR_FP_EXC_UND = 0x40000
+ PR_FP_MODE_FR = 0x1
+ PR_FP_MODE_FRE = 0x2
+ PR_GET_CHILD_SUBREAPER = 0x25
+ PR_GET_DUMPABLE = 0x3
+ PR_GET_ENDIAN = 0x13
+ PR_GET_FPEMU = 0x9
+ PR_GET_FPEXC = 0xb
+ PR_GET_FP_MODE = 0x2e
+ PR_GET_KEEPCAPS = 0x7
+ PR_GET_NAME = 0x10
+ PR_GET_NO_NEW_PRIVS = 0x27
+ PR_GET_PDEATHSIG = 0x2
+ PR_GET_SECCOMP = 0x15
+ PR_GET_SECUREBITS = 0x1b
+ PR_GET_THP_DISABLE = 0x2a
+ PR_GET_TID_ADDRESS = 0x28
+ PR_GET_TIMERSLACK = 0x1e
+ PR_GET_TIMING = 0xd
+ PR_GET_TSC = 0x19
+ PR_GET_UNALIGN = 0x5
+ PR_MCE_KILL = 0x21
+ PR_MCE_KILL_CLEAR = 0x0
+ PR_MCE_KILL_DEFAULT = 0x2
+ PR_MCE_KILL_EARLY = 0x1
+ PR_MCE_KILL_GET = 0x22
+ PR_MCE_KILL_LATE = 0x0
+ PR_MCE_KILL_SET = 0x1
+ PR_MPX_DISABLE_MANAGEMENT = 0x2c
+ PR_MPX_ENABLE_MANAGEMENT = 0x2b
+ PR_SET_CHILD_SUBREAPER = 0x24
+ PR_SET_DUMPABLE = 0x4
+ PR_SET_ENDIAN = 0x14
+ PR_SET_FPEMU = 0xa
+ PR_SET_FPEXC = 0xc
+ PR_SET_FP_MODE = 0x2d
+ PR_SET_KEEPCAPS = 0x8
+ PR_SET_MM = 0x23
+ PR_SET_MM_ARG_END = 0x9
+ PR_SET_MM_ARG_START = 0x8
+ PR_SET_MM_AUXV = 0xc
+ PR_SET_MM_BRK = 0x7
+ PR_SET_MM_END_CODE = 0x2
+ PR_SET_MM_END_DATA = 0x4
+ PR_SET_MM_ENV_END = 0xb
+ PR_SET_MM_ENV_START = 0xa
+ PR_SET_MM_EXE_FILE = 0xd
+ PR_SET_MM_MAP = 0xe
+ PR_SET_MM_MAP_SIZE = 0xf
+ PR_SET_MM_START_BRK = 0x6
+ PR_SET_MM_START_CODE = 0x1
+ PR_SET_MM_START_DATA = 0x3
+ PR_SET_MM_START_STACK = 0x5
+ PR_SET_NAME = 0xf
+ PR_SET_NO_NEW_PRIVS = 0x26
+ PR_SET_PDEATHSIG = 0x1
+ PR_SET_PTRACER = 0x59616d61
+ PR_SET_PTRACER_ANY = -0x1
+ PR_SET_SECCOMP = 0x16
+ PR_SET_SECUREBITS = 0x1c
+ PR_SET_THP_DISABLE = 0x29
+ PR_SET_TIMERSLACK = 0x1d
+ PR_SET_TIMING = 0xe
+ PR_SET_TSC = 0x1a
+ PR_SET_UNALIGN = 0x6
+ PR_TASK_PERF_EVENTS_DISABLE = 0x1f
+ PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMING_STATISTICAL = 0x0
+ PR_TIMING_TIMESTAMP = 0x1
+ PR_TSC_ENABLE = 0x1
+ PR_TSC_SIGSEGV = 0x2
+ PR_UNALIGN_NOPRINT = 0x1
+ PR_UNALIGN_SIGBUS = 0x2
+ PTRACE_ATTACH = 0x10
+ PTRACE_CONT = 0x7
+ PTRACE_DETACH = 0x11
+ PTRACE_DISABLE_TE = 0x5010
+ PTRACE_ENABLE_TE = 0x5009
+ PTRACE_EVENT_CLONE = 0x3
+ PTRACE_EVENT_EXEC = 0x4
+ PTRACE_EVENT_EXIT = 0x6
+ PTRACE_EVENT_FORK = 0x1
+ PTRACE_EVENT_SECCOMP = 0x7
+ PTRACE_EVENT_STOP = 0x80
+ PTRACE_EVENT_VFORK = 0x2
+ PTRACE_EVENT_VFORK_DONE = 0x5
+ PTRACE_GETEVENTMSG = 0x4201
+ PTRACE_GETREGS = 0xc
+ PTRACE_GETREGSET = 0x4204
+ PTRACE_GETSIGINFO = 0x4202
+ PTRACE_GETSIGMASK = 0x420a
+ PTRACE_GET_LAST_BREAK = 0x5006
+ PTRACE_INTERRUPT = 0x4207
+ PTRACE_KILL = 0x8
+ PTRACE_LISTEN = 0x4208
+ PTRACE_OLDSETOPTIONS = 0x15
+ PTRACE_O_EXITKILL = 0x100000
+ PTRACE_O_MASK = 0x3000ff
+ PTRACE_O_SUSPEND_SECCOMP = 0x200000
+ PTRACE_O_TRACECLONE = 0x8
+ PTRACE_O_TRACEEXEC = 0x10
+ PTRACE_O_TRACEEXIT = 0x40
+ PTRACE_O_TRACEFORK = 0x2
+ PTRACE_O_TRACESECCOMP = 0x80
+ PTRACE_O_TRACESYSGOOD = 0x1
+ PTRACE_O_TRACEVFORK = 0x4
+ PTRACE_O_TRACEVFORKDONE = 0x20
+ PTRACE_PEEKDATA = 0x2
+ PTRACE_PEEKDATA_AREA = 0x5003
+ PTRACE_PEEKSIGINFO = 0x4209
+ PTRACE_PEEKSIGINFO_SHARED = 0x1
+ PTRACE_PEEKTEXT = 0x1
+ PTRACE_PEEKTEXT_AREA = 0x5002
+ PTRACE_PEEKUSR = 0x3
+ PTRACE_PEEKUSR_AREA = 0x5000
+ PTRACE_PEEK_SYSTEM_CALL = 0x5007
+ PTRACE_POKEDATA = 0x5
+ PTRACE_POKEDATA_AREA = 0x5005
+ PTRACE_POKETEXT = 0x4
+ PTRACE_POKETEXT_AREA = 0x5004
+ PTRACE_POKEUSR = 0x6
+ PTRACE_POKEUSR_AREA = 0x5001
+ PTRACE_POKE_SYSTEM_CALL = 0x5008
+ PTRACE_PROT = 0x15
+ PTRACE_SECCOMP_GET_FILTER = 0x420c
+ PTRACE_SEIZE = 0x4206
+ PTRACE_SETOPTIONS = 0x4200
+ PTRACE_SETREGS = 0xd
+ PTRACE_SETREGSET = 0x4205
+ PTRACE_SETSIGINFO = 0x4203
+ PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SINGLEBLOCK = 0xc
+ PTRACE_SINGLESTEP = 0x9
+ PTRACE_SYSCALL = 0x18
+ PTRACE_TE_ABORT_RAND = 0x5011
+ PTRACE_TRACEME = 0x0
+ PT_ACR0 = 0x90
+ PT_ACR1 = 0x94
+ PT_ACR10 = 0xb8
+ PT_ACR11 = 0xbc
+ PT_ACR12 = 0xc0
+ PT_ACR13 = 0xc4
+ PT_ACR14 = 0xc8
+ PT_ACR15 = 0xcc
+ PT_ACR2 = 0x98
+ PT_ACR3 = 0x9c
+ PT_ACR4 = 0xa0
+ PT_ACR5 = 0xa4
+ PT_ACR6 = 0xa8
+ PT_ACR7 = 0xac
+ PT_ACR8 = 0xb0
+ PT_ACR9 = 0xb4
+ PT_CR_10 = 0x168
+ PT_CR_11 = 0x170
+ PT_CR_9 = 0x160
+ PT_ENDREGS = 0x1af
+ PT_FPC = 0xd8
+ PT_FPR0 = 0xe0
+ PT_FPR1 = 0xe8
+ PT_FPR10 = 0x130
+ PT_FPR11 = 0x138
+ PT_FPR12 = 0x140
+ PT_FPR13 = 0x148
+ PT_FPR14 = 0x150
+ PT_FPR15 = 0x158
+ PT_FPR2 = 0xf0
+ PT_FPR3 = 0xf8
+ PT_FPR4 = 0x100
+ PT_FPR5 = 0x108
+ PT_FPR6 = 0x110
+ PT_FPR7 = 0x118
+ PT_FPR8 = 0x120
+ PT_FPR9 = 0x128
+ PT_GPR0 = 0x10
+ PT_GPR1 = 0x18
+ PT_GPR10 = 0x60
+ PT_GPR11 = 0x68
+ PT_GPR12 = 0x70
+ PT_GPR13 = 0x78
+ PT_GPR14 = 0x80
+ PT_GPR15 = 0x88
+ PT_GPR2 = 0x20
+ PT_GPR3 = 0x28
+ PT_GPR4 = 0x30
+ PT_GPR5 = 0x38
+ PT_GPR6 = 0x40
+ PT_GPR7 = 0x48
+ PT_GPR8 = 0x50
+ PT_GPR9 = 0x58
+ PT_IEEE_IP = 0x1a8
+ PT_LASTOFF = 0x1a8
+ PT_ORIGGPR2 = 0xd0
+ PT_PSWADDR = 0x8
+ PT_PSWMASK = 0x0
+ RLIMIT_AS = 0x9
+ RLIMIT_CORE = 0x4
+ RLIMIT_CPU = 0x0
+ RLIMIT_DATA = 0x2
+ RLIMIT_FSIZE = 0x1
+ RLIMIT_NOFILE = 0x7
+ RLIMIT_STACK = 0x3
+ RLIM_INFINITY = -0x1
+ RTAX_ADVMSS = 0x8
+ RTAX_CC_ALGO = 0x10
+ RTAX_CWND = 0x7
+ RTAX_FEATURES = 0xc
+ RTAX_FEATURE_ALLFRAG = 0x8
+ RTAX_FEATURE_ECN = 0x1
+ RTAX_FEATURE_MASK = 0xf
+ RTAX_FEATURE_SACK = 0x2
+ RTAX_FEATURE_TIMESTAMP = 0x4
+ RTAX_HOPLIMIT = 0xa
+ RTAX_INITCWND = 0xb
+ RTAX_INITRWND = 0xe
+ RTAX_LOCK = 0x1
+ RTAX_MAX = 0x10
+ RTAX_MTU = 0x2
+ RTAX_QUICKACK = 0xf
+ RTAX_REORDERING = 0x9
+ RTAX_RTO_MIN = 0xd
+ RTAX_RTT = 0x4
+ RTAX_RTTVAR = 0x5
+ RTAX_SSTHRESH = 0x6
+ RTAX_UNSPEC = 0x0
+ RTAX_WINDOW = 0x3
+ RTA_ALIGNTO = 0x4
+ RTA_MAX = 0x16
+ RTCF_DIRECTSRC = 0x4000000
+ RTCF_DOREDIRECT = 0x1000000
+ RTCF_LOG = 0x2000000
+ RTCF_MASQ = 0x400000
+ RTCF_NAT = 0x800000
+ RTCF_VALVE = 0x200000
+ RTF_ADDRCLASSMASK = 0xf8000000
+ RTF_ADDRCONF = 0x40000
+ RTF_ALLONLINK = 0x20000
+ RTF_BROADCAST = 0x10000000
+ RTF_CACHE = 0x1000000
+ RTF_DEFAULT = 0x10000
+ RTF_DYNAMIC = 0x10
+ RTF_FLOW = 0x2000000
+ RTF_GATEWAY = 0x2
+ RTF_HOST = 0x4
+ RTF_INTERFACE = 0x40000000
+ RTF_IRTT = 0x100
+ RTF_LINKRT = 0x100000
+ RTF_LOCAL = 0x80000000
+ RTF_MODIFIED = 0x20
+ RTF_MSS = 0x40
+ RTF_MTU = 0x40
+ RTF_MULTICAST = 0x20000000
+ RTF_NAT = 0x8000000
+ RTF_NOFORWARD = 0x1000
+ RTF_NONEXTHOP = 0x200000
+ RTF_NOPMTUDISC = 0x4000
+ RTF_POLICY = 0x4000000
+ RTF_REINSTATE = 0x8
+ RTF_REJECT = 0x200
+ RTF_STATIC = 0x400
+ RTF_THROW = 0x2000
+ RTF_UP = 0x1
+ RTF_WINDOW = 0x80
+ RTF_XRESOLVE = 0x800
+ RTM_BASE = 0x10
+ RTM_DELACTION = 0x31
+ RTM_DELADDR = 0x15
+ RTM_DELADDRLABEL = 0x49
+ RTM_DELLINK = 0x11
+ RTM_DELMDB = 0x55
+ RTM_DELNEIGH = 0x1d
+ RTM_DELNSID = 0x59
+ RTM_DELQDISC = 0x25
+ RTM_DELROUTE = 0x19
+ RTM_DELRULE = 0x21
+ RTM_DELTCLASS = 0x29
+ RTM_DELTFILTER = 0x2d
+ RTM_F_CLONED = 0x200
+ RTM_F_EQUALIZE = 0x400
+ RTM_F_LOOKUP_TABLE = 0x1000
+ RTM_F_NOTIFY = 0x100
+ RTM_F_PREFIX = 0x800
+ RTM_GETACTION = 0x32
+ RTM_GETADDR = 0x16
+ RTM_GETADDRLABEL = 0x4a
+ RTM_GETANYCAST = 0x3e
+ RTM_GETDCB = 0x4e
+ RTM_GETLINK = 0x12
+ RTM_GETMDB = 0x56
+ RTM_GETMULTICAST = 0x3a
+ RTM_GETNEIGH = 0x1e
+ RTM_GETNEIGHTBL = 0x42
+ RTM_GETNETCONF = 0x52
+ RTM_GETNSID = 0x5a
+ RTM_GETQDISC = 0x26
+ RTM_GETROUTE = 0x1a
+ RTM_GETRULE = 0x22
+ RTM_GETTCLASS = 0x2a
+ RTM_GETTFILTER = 0x2e
+ RTM_MAX = 0x5b
+ RTM_NEWACTION = 0x30
+ RTM_NEWADDR = 0x14
+ RTM_NEWADDRLABEL = 0x48
+ RTM_NEWLINK = 0x10
+ RTM_NEWMDB = 0x54
+ RTM_NEWNDUSEROPT = 0x44
+ RTM_NEWNEIGH = 0x1c
+ RTM_NEWNEIGHTBL = 0x40
+ RTM_NEWNETCONF = 0x50
+ RTM_NEWNSID = 0x58
+ RTM_NEWPREFIX = 0x34
+ RTM_NEWQDISC = 0x24
+ RTM_NEWROUTE = 0x18
+ RTM_NEWRULE = 0x20
+ RTM_NEWTCLASS = 0x28
+ RTM_NEWTFILTER = 0x2c
+ RTM_NR_FAMILIES = 0x13
+ RTM_NR_MSGTYPES = 0x4c
+ RTM_SETDCB = 0x4f
+ RTM_SETLINK = 0x13
+ RTM_SETNEIGHTBL = 0x43
+ RTNH_ALIGNTO = 0x4
+ RTNH_COMPARE_MASK = 0x11
+ RTNH_F_DEAD = 0x1
+ RTNH_F_LINKDOWN = 0x10
+ RTNH_F_OFFLOAD = 0x8
+ RTNH_F_ONLINK = 0x4
+ RTNH_F_PERVASIVE = 0x2
+ RTN_MAX = 0xb
+ RTPROT_BABEL = 0x2a
+ RTPROT_BIRD = 0xc
+ RTPROT_BOOT = 0x3
+ RTPROT_DHCP = 0x10
+ RTPROT_DNROUTED = 0xd
+ RTPROT_GATED = 0x8
+ RTPROT_KERNEL = 0x2
+ RTPROT_MROUTED = 0x11
+ RTPROT_MRT = 0xa
+ RTPROT_NTK = 0xf
+ RTPROT_RA = 0x9
+ RTPROT_REDIRECT = 0x1
+ RTPROT_STATIC = 0x4
+ RTPROT_UNSPEC = 0x0
+ RTPROT_XORP = 0xe
+ RTPROT_ZEBRA = 0xb
+ RT_CLASS_DEFAULT = 0xfd
+ RT_CLASS_LOCAL = 0xff
+ RT_CLASS_MAIN = 0xfe
+ RT_CLASS_MAX = 0xff
+ RT_CLASS_UNSPEC = 0x0
+ RUSAGE_CHILDREN = -0x1
+ RUSAGE_SELF = 0x0
+ RUSAGE_THREAD = 0x1
+ SCM_CREDENTIALS = 0x2
+ SCM_RIGHTS = 0x1
+ SCM_TIMESTAMP = 0x1d
+ SCM_TIMESTAMPING = 0x25
+ SCM_TIMESTAMPNS = 0x23
+ SCM_WIFI_STATUS = 0x29
+ SHUT_RD = 0x0
+ SHUT_RDWR = 0x2
+ SHUT_WR = 0x1
+ SIOCADDDLCI = 0x8980
+ SIOCADDMULTI = 0x8931
+ SIOCADDRT = 0x890b
+ SIOCATMARK = 0x8905
+ SIOCDARP = 0x8953
+ SIOCDELDLCI = 0x8981
+ SIOCDELMULTI = 0x8932
+ SIOCDELRT = 0x890c
+ SIOCDEVPRIVATE = 0x89f0
+ SIOCDIFADDR = 0x8936
+ SIOCDRARP = 0x8960
+ SIOCGARP = 0x8954
+ SIOCGIFADDR = 0x8915
+ SIOCGIFBR = 0x8940
+ SIOCGIFBRDADDR = 0x8919
+ SIOCGIFCONF = 0x8912
+ SIOCGIFCOUNT = 0x8938
+ SIOCGIFDSTADDR = 0x8917
+ SIOCGIFENCAP = 0x8925
+ SIOCGIFFLAGS = 0x8913
+ SIOCGIFHWADDR = 0x8927
+ SIOCGIFINDEX = 0x8933
+ SIOCGIFMAP = 0x8970
+ SIOCGIFMEM = 0x891f
+ SIOCGIFMETRIC = 0x891d
+ SIOCGIFMTU = 0x8921
+ SIOCGIFNAME = 0x8910
+ SIOCGIFNETMASK = 0x891b
+ SIOCGIFPFLAGS = 0x8935
+ SIOCGIFSLAVE = 0x8929
+ SIOCGIFTXQLEN = 0x8942
+ SIOCGPGRP = 0x8904
+ SIOCGRARP = 0x8961
+ SIOCGSTAMP = 0x8906
+ SIOCGSTAMPNS = 0x8907
+ SIOCPROTOPRIVATE = 0x89e0
+ SIOCRTMSG = 0x890d
+ SIOCSARP = 0x8955
+ SIOCSIFADDR = 0x8916
+ SIOCSIFBR = 0x8941
+ SIOCSIFBRDADDR = 0x891a
+ SIOCSIFDSTADDR = 0x8918
+ SIOCSIFENCAP = 0x8926
+ SIOCSIFFLAGS = 0x8914
+ SIOCSIFHWADDR = 0x8924
+ SIOCSIFHWBROADCAST = 0x8937
+ SIOCSIFLINK = 0x8911
+ SIOCSIFMAP = 0x8971
+ SIOCSIFMEM = 0x8920
+ SIOCSIFMETRIC = 0x891e
+ SIOCSIFMTU = 0x8922
+ SIOCSIFNAME = 0x8923
+ SIOCSIFNETMASK = 0x891c
+ SIOCSIFPFLAGS = 0x8934
+ SIOCSIFSLAVE = 0x8930
+ SIOCSIFTXQLEN = 0x8943
+ SIOCSPGRP = 0x8902
+ SIOCSRARP = 0x8962
+ SOCK_CLOEXEC = 0x80000
+ SOCK_DCCP = 0x6
+ SOCK_DGRAM = 0x2
+ SOCK_NONBLOCK = 0x800
+ SOCK_PACKET = 0xa
+ SOCK_RAW = 0x3
+ SOCK_RDM = 0x4
+ SOCK_SEQPACKET = 0x5
+ SOCK_STREAM = 0x1
+ SOL_AAL = 0x109
+ SOL_ATM = 0x108
+ SOL_DECNET = 0x105
+ SOL_ICMPV6 = 0x3a
+ SOL_IP = 0x0
+ SOL_IPV6 = 0x29
+ SOL_IRDA = 0x10a
+ SOL_PACKET = 0x107
+ SOL_RAW = 0xff
+ SOL_SOCKET = 0x1
+ SOL_TCP = 0x6
+ SOL_X25 = 0x106
+ SOMAXCONN = 0x80
+ SO_ACCEPTCONN = 0x1e
+ SO_ATTACH_BPF = 0x32
+ SO_ATTACH_FILTER = 0x1a
+ SO_BINDTODEVICE = 0x19
+ SO_BPF_EXTENSIONS = 0x30
+ SO_BROADCAST = 0x6
+ SO_BSDCOMPAT = 0xe
+ SO_BUSY_POLL = 0x2e
+ SO_DEBUG = 0x1
+ SO_DETACH_BPF = 0x1b
+ SO_DETACH_FILTER = 0x1b
+ SO_DOMAIN = 0x27
+ SO_DONTROUTE = 0x5
+ SO_ERROR = 0x4
+ SO_GET_FILTER = 0x1a
+ SO_INCOMING_CPU = 0x31
+ SO_KEEPALIVE = 0x9
+ SO_LINGER = 0xd
+ SO_LOCK_FILTER = 0x2c
+ SO_MARK = 0x24
+ SO_MAX_PACING_RATE = 0x2f
+ SO_NOFCS = 0x2b
+ SO_NO_CHECK = 0xb
+ SO_OOBINLINE = 0xa
+ SO_PASSCRED = 0x10
+ SO_PASSSEC = 0x22
+ SO_PEEK_OFF = 0x2a
+ SO_PEERCRED = 0x11
+ SO_PEERNAME = 0x1c
+ SO_PEERSEC = 0x1f
+ SO_PRIORITY = 0xc
+ SO_PROTOCOL = 0x26
+ SO_RCVBUF = 0x8
+ SO_RCVBUFFORCE = 0x21
+ SO_RCVLOWAT = 0x12
+ SO_RCVTIMEO = 0x14
+ SO_REUSEADDR = 0x2
+ SO_REUSEPORT = 0xf
+ SO_RXQ_OVFL = 0x28
+ SO_SECURITY_AUTHENTICATION = 0x16
+ SO_SECURITY_ENCRYPTION_NETWORK = 0x18
+ SO_SECURITY_ENCRYPTION_TRANSPORT = 0x17
+ SO_SELECT_ERR_QUEUE = 0x2d
+ SO_SNDBUF = 0x7
+ SO_SNDBUFFORCE = 0x20
+ SO_SNDLOWAT = 0x13
+ SO_SNDTIMEO = 0x15
+ SO_TIMESTAMP = 0x1d
+ SO_TIMESTAMPING = 0x25
+ SO_TIMESTAMPNS = 0x23
+ SO_TYPE = 0x3
+ SO_WIFI_STATUS = 0x29
+ S_BLKSIZE = 0x200
+ S_IEXEC = 0x40
+ S_IFBLK = 0x6000
+ S_IFCHR = 0x2000
+ S_IFDIR = 0x4000
+ S_IFIFO = 0x1000
+ S_IFLNK = 0xa000
+ S_IFMT = 0xf000
+ S_IFREG = 0x8000
+ S_IFSOCK = 0xc000
+ S_IREAD = 0x100
+ S_IRGRP = 0x20
+ S_IROTH = 0x4
+ S_IRUSR = 0x100
+ S_IRWXG = 0x38
+ S_IRWXO = 0x7
+ S_IRWXU = 0x1c0
+ S_ISGID = 0x400
+ S_ISUID = 0x800
+ S_ISVTX = 0x200
+ S_IWGRP = 0x10
+ S_IWOTH = 0x2
+ S_IWRITE = 0x80
+ S_IWUSR = 0x80
+ S_IXGRP = 0x8
+ S_IXOTH = 0x1
+ S_IXUSR = 0x40
+ TCFLSH = 0x540b
+ TCIFLUSH = 0x0
+ TCIOFLUSH = 0x2
+ TCOFLUSH = 0x1
+ TCP_CONGESTION = 0xd
+ TCP_COOKIE_IN_ALWAYS = 0x1
+ TCP_COOKIE_MAX = 0x10
+ TCP_COOKIE_MIN = 0x8
+ TCP_COOKIE_OUT_NEVER = 0x2
+ TCP_COOKIE_PAIR_SIZE = 0x20
+ TCP_COOKIE_TRANSACTIONS = 0xf
+ TCP_CORK = 0x3
+ TCP_DEFER_ACCEPT = 0x9
+ TCP_FASTOPEN = 0x17
+ TCP_INFO = 0xb
+ TCP_KEEPCNT = 0x6
+ TCP_KEEPIDLE = 0x4
+ TCP_KEEPINTVL = 0x5
+ TCP_LINGER2 = 0x8
+ TCP_MAXSEG = 0x2
+ TCP_MAXWIN = 0xffff
+ TCP_MAX_WINSHIFT = 0xe
+ TCP_MD5SIG = 0xe
+ TCP_MD5SIG_MAXKEYLEN = 0x50
+ TCP_MSS = 0x200
+ TCP_MSS_DEFAULT = 0x218
+ TCP_MSS_DESIRED = 0x4c4
+ TCP_NODELAY = 0x1
+ TCP_QUEUE_SEQ = 0x15
+ TCP_QUICKACK = 0xc
+ TCP_REPAIR = 0x13
+ TCP_REPAIR_OPTIONS = 0x16
+ TCP_REPAIR_QUEUE = 0x14
+ TCP_SYNCNT = 0x7
+ TCP_S_DATA_IN = 0x4
+ TCP_S_DATA_OUT = 0x8
+ TCP_THIN_DUPACK = 0x11
+ TCP_THIN_LINEAR_TIMEOUTS = 0x10
+ TCP_TIMESTAMP = 0x18
+ TCP_USER_TIMEOUT = 0x12
+ TCP_WINDOW_CLAMP = 0xa
+ TCSAFLUSH = 0x2
+ TIOCCBRK = 0x5428
+ TIOCCONS = 0x541d
+ TIOCEXCL = 0x540c
+ TIOCGDEV = 0x80045432
+ TIOCGETD = 0x5424
+ TIOCGEXCL = 0x80045440
+ TIOCGICOUNT = 0x545d
+ TIOCGLCKTRMIOS = 0x5456
+ TIOCGPGRP = 0x540f
+ TIOCGPKT = 0x80045438
+ TIOCGPTLCK = 0x80045439
+ TIOCGPTN = 0x80045430
+ TIOCGRS485 = 0x542e
+ TIOCGSERIAL = 0x541e
+ TIOCGSID = 0x5429
+ TIOCGSOFTCAR = 0x5419
+ TIOCGWINSZ = 0x5413
+ TIOCINQ = 0x541b
+ TIOCLINUX = 0x541c
+ TIOCMBIC = 0x5417
+ TIOCMBIS = 0x5416
+ TIOCMGET = 0x5415
+ TIOCMIWAIT = 0x545c
+ TIOCMSET = 0x5418
+ TIOCM_CAR = 0x40
+ TIOCM_CD = 0x40
+ TIOCM_CTS = 0x20
+ TIOCM_DSR = 0x100
+ TIOCM_DTR = 0x2
+ TIOCM_LE = 0x1
+ TIOCM_RI = 0x80
+ TIOCM_RNG = 0x80
+ TIOCM_RTS = 0x4
+ TIOCM_SR = 0x10
+ TIOCM_ST = 0x8
+ TIOCNOTTY = 0x5422
+ TIOCNXCL = 0x540d
+ TIOCOUTQ = 0x5411
+ TIOCPKT = 0x5420
+ TIOCPKT_DATA = 0x0
+ TIOCPKT_DOSTOP = 0x20
+ TIOCPKT_FLUSHREAD = 0x1
+ TIOCPKT_FLUSHWRITE = 0x2
+ TIOCPKT_IOCTL = 0x40
+ TIOCPKT_NOSTOP = 0x10
+ TIOCPKT_START = 0x8
+ TIOCPKT_STOP = 0x4
+ TIOCSBRK = 0x5427
+ TIOCSCTTY = 0x540e
+ TIOCSERCONFIG = 0x5453
+ TIOCSERGETLSR = 0x5459
+ TIOCSERGETMULTI = 0x545a
+ TIOCSERGSTRUCT = 0x5458
+ TIOCSERGWILD = 0x5454
+ TIOCSERSETMULTI = 0x545b
+ TIOCSERSWILD = 0x5455
+ TIOCSER_TEMT = 0x1
+ TIOCSETD = 0x5423
+ TIOCSIG = 0x40045436
+ TIOCSLCKTRMIOS = 0x5457
+ TIOCSPGRP = 0x5410
+ TIOCSPTLCK = 0x40045431
+ TIOCSRS485 = 0x542f
+ TIOCSSERIAL = 0x541f
+ TIOCSSOFTCAR = 0x541a
+ TIOCSTI = 0x5412
+ TIOCSWINSZ = 0x5414
+ TIOCVHANGUP = 0x5437
+ TOSTOP = 0x100
+ TUNATTACHFILTER = 0x401054d5
+ TUNDETACHFILTER = 0x401054d6
+ TUNGETFEATURES = 0x800454cf
+ TUNGETFILTER = 0x801054db
+ TUNGETIFF = 0x800454d2
+ TUNGETSNDBUF = 0x800454d3
+ TUNGETVNETBE = 0x800454df
+ TUNGETVNETHDRSZ = 0x800454d7
+ TUNGETVNETLE = 0x800454dd
+ TUNSETDEBUG = 0x400454c9
+ TUNSETGROUP = 0x400454ce
+ TUNSETIFF = 0x400454ca
+ TUNSETIFINDEX = 0x400454da
+ TUNSETLINK = 0x400454cd
+ TUNSETNOCSUM = 0x400454c8
+ TUNSETOFFLOAD = 0x400454d0
+ TUNSETOWNER = 0x400454cc
+ TUNSETPERSIST = 0x400454cb
+ TUNSETQUEUE = 0x400454d9
+ TUNSETSNDBUF = 0x400454d4
+ TUNSETTXFILTER = 0x400454d1
+ TUNSETVNETBE = 0x400454de
+ TUNSETVNETHDRSZ = 0x400454d8
+ TUNSETVNETLE = 0x400454dc
+ VDISCARD = 0xd
+ VEOF = 0x4
+ VEOL = 0xb
+ VEOL2 = 0x10
+ VERASE = 0x2
+ VINTR = 0x0
+ VKILL = 0x3
+ VLNEXT = 0xf
+ VMIN = 0x6
+ VQUIT = 0x1
+ VREPRINT = 0xc
+ VSTART = 0x8
+ VSTOP = 0x9
+ VSUSP = 0xa
+ VSWTC = 0x7
+ VT0 = 0x0
+ VT1 = 0x4000
+ VTDLY = 0x4000
+ VTIME = 0x5
+ VWERASE = 0xe
+ WALL = 0x40000000
+ WCLONE = 0x80000000
+ WCONTINUED = 0x8
+ WEXITED = 0x4
+ WNOHANG = 0x1
+ WNOTHREAD = 0x20000000
+ WNOWAIT = 0x1000000
+ WORDSIZE = 0x40
+ WSTOPPED = 0x2
+ WUNTRACED = 0x2
+)
+
+// Errors
+const (
+ E2BIG = Errno(0x7)
+ EACCES = Errno(0xd)
+ EADDRINUSE = Errno(0x62)
+ EADDRNOTAVAIL = Errno(0x63)
+ EADV = Errno(0x44)
+ EAFNOSUPPORT = Errno(0x61)
+ EAGAIN = Errno(0xb)
+ EALREADY = Errno(0x72)
+ EBADE = Errno(0x34)
+ EBADF = Errno(0x9)
+ EBADFD = Errno(0x4d)
+ EBADMSG = Errno(0x4a)
+ EBADR = Errno(0x35)
+ EBADRQC = Errno(0x38)
+ EBADSLT = Errno(0x39)
+ EBFONT = Errno(0x3b)
+ EBUSY = Errno(0x10)
+ ECANCELED = Errno(0x7d)
+ ECHILD = Errno(0xa)
+ ECHRNG = Errno(0x2c)
+ ECOMM = Errno(0x46)
+ ECONNABORTED = Errno(0x67)
+ ECONNREFUSED = Errno(0x6f)
+ ECONNRESET = Errno(0x68)
+ EDEADLK = Errno(0x23)
+ EDEADLOCK = Errno(0x23)
+ EDESTADDRREQ = Errno(0x59)
+ EDOM = Errno(0x21)
+ EDOTDOT = Errno(0x49)
+ EDQUOT = Errno(0x7a)
+ EEXIST = Errno(0x11)
+ EFAULT = Errno(0xe)
+ EFBIG = Errno(0x1b)
+ EHOSTDOWN = Errno(0x70)
+ EHOSTUNREACH = Errno(0x71)
+ EHWPOISON = Errno(0x85)
+ EIDRM = Errno(0x2b)
+ EILSEQ = Errno(0x54)
+ EINPROGRESS = Errno(0x73)
+ EINTR = Errno(0x4)
+ EINVAL = Errno(0x16)
+ EIO = Errno(0x5)
+ EISCONN = Errno(0x6a)
+ EISDIR = Errno(0x15)
+ EISNAM = Errno(0x78)
+ EKEYEXPIRED = Errno(0x7f)
+ EKEYREJECTED = Errno(0x81)
+ EKEYREVOKED = Errno(0x80)
+ EL2HLT = Errno(0x33)
+ EL2NSYNC = Errno(0x2d)
+ EL3HLT = Errno(0x2e)
+ EL3RST = Errno(0x2f)
+ ELIBACC = Errno(0x4f)
+ ELIBBAD = Errno(0x50)
+ ELIBEXEC = Errno(0x53)
+ ELIBMAX = Errno(0x52)
+ ELIBSCN = Errno(0x51)
+ ELNRNG = Errno(0x30)
+ ELOOP = Errno(0x28)
+ EMEDIUMTYPE = Errno(0x7c)
+ EMFILE = Errno(0x18)
+ EMLINK = Errno(0x1f)
+ EMSGSIZE = Errno(0x5a)
+ EMULTIHOP = Errno(0x48)
+ ENAMETOOLONG = Errno(0x24)
+ ENAVAIL = Errno(0x77)
+ ENETDOWN = Errno(0x64)
+ ENETRESET = Errno(0x66)
+ ENETUNREACH = Errno(0x65)
+ ENFILE = Errno(0x17)
+ ENOANO = Errno(0x37)
+ ENOBUFS = Errno(0x69)
+ ENOCSI = Errno(0x32)
+ ENODATA = Errno(0x3d)
+ ENODEV = Errno(0x13)
+ ENOENT = Errno(0x2)
+ ENOEXEC = Errno(0x8)
+ ENOKEY = Errno(0x7e)
+ ENOLCK = Errno(0x25)
+ ENOLINK = Errno(0x43)
+ ENOMEDIUM = Errno(0x7b)
+ ENOMEM = Errno(0xc)
+ ENOMSG = Errno(0x2a)
+ ENONET = Errno(0x40)
+ ENOPKG = Errno(0x41)
+ ENOPROTOOPT = Errno(0x5c)
+ ENOSPC = Errno(0x1c)
+ ENOSR = Errno(0x3f)
+ ENOSTR = Errno(0x3c)
+ ENOSYS = Errno(0x26)
+ ENOTBLK = Errno(0xf)
+ ENOTCONN = Errno(0x6b)
+ ENOTDIR = Errno(0x14)
+ ENOTEMPTY = Errno(0x27)
+ ENOTNAM = Errno(0x76)
+ ENOTRECOVERABLE = Errno(0x83)
+ ENOTSOCK = Errno(0x58)
+ ENOTSUP = Errno(0x5f)
+ ENOTTY = Errno(0x19)
+ ENOTUNIQ = Errno(0x4c)
+ ENXIO = Errno(0x6)
+ EOPNOTSUPP = Errno(0x5f)
+ EOVERFLOW = Errno(0x4b)
+ EOWNERDEAD = Errno(0x82)
+ EPERM = Errno(0x1)
+ EPFNOSUPPORT = Errno(0x60)
+ EPIPE = Errno(0x20)
+ EPROTO = Errno(0x47)
+ EPROTONOSUPPORT = Errno(0x5d)
+ EPROTOTYPE = Errno(0x5b)
+ ERANGE = Errno(0x22)
+ EREMCHG = Errno(0x4e)
+ EREMOTE = Errno(0x42)
+ EREMOTEIO = Errno(0x79)
+ ERESTART = Errno(0x55)
+ ERFKILL = Errno(0x84)
+ EROFS = Errno(0x1e)
+ ESHUTDOWN = Errno(0x6c)
+ ESOCKTNOSUPPORT = Errno(0x5e)
+ ESPIPE = Errno(0x1d)
+ ESRCH = Errno(0x3)
+ ESRMNT = Errno(0x45)
+ ESTALE = Errno(0x74)
+ ESTRPIPE = Errno(0x56)
+ ETIME = Errno(0x3e)
+ ETIMEDOUT = Errno(0x6e)
+ ETOOMANYREFS = Errno(0x6d)
+ ETXTBSY = Errno(0x1a)
+ EUCLEAN = Errno(0x75)
+ EUNATCH = Errno(0x31)
+ EUSERS = Errno(0x57)
+ EWOULDBLOCK = Errno(0xb)
+ EXDEV = Errno(0x12)
+ EXFULL = Errno(0x36)
+)
+
+// Signals
+const (
+ SIGABRT = Signal(0x6)
+ SIGALRM = Signal(0xe)
+ SIGBUS = Signal(0x7)
+ SIGCHLD = Signal(0x11)
+ SIGCLD = Signal(0x11)
+ SIGCONT = Signal(0x12)
+ SIGFPE = Signal(0x8)
+ SIGHUP = Signal(0x1)
+ SIGILL = Signal(0x4)
+ SIGINT = Signal(0x2)
+ SIGIO = Signal(0x1d)
+ SIGIOT = Signal(0x6)
+ SIGKILL = Signal(0x9)
+ SIGPIPE = Signal(0xd)
+ SIGPOLL = Signal(0x1d)
+ SIGPROF = Signal(0x1b)
+ SIGPWR = Signal(0x1e)
+ SIGQUIT = Signal(0x3)
+ SIGSEGV = Signal(0xb)
+ SIGSTKFLT = Signal(0x10)
+ SIGSTOP = Signal(0x13)
+ SIGSYS = Signal(0x1f)
+ SIGTERM = Signal(0xf)
+ SIGTRAP = Signal(0x5)
+ SIGTSTP = Signal(0x14)
+ SIGTTIN = Signal(0x15)
+ SIGTTOU = Signal(0x16)
+ SIGUNUSED = Signal(0x1f)
+ SIGURG = Signal(0x17)
+ SIGUSR1 = Signal(0xa)
+ SIGUSR2 = Signal(0xc)
+ SIGVTALRM = Signal(0x1a)
+ SIGWINCH = Signal(0x1c)
+ SIGXCPU = Signal(0x18)
+ SIGXFSZ = Signal(0x19)
+)
+
+// Error table
+var errors = [...]string{
+ 1: "operation not permitted",
+ 2: "no such file or directory",
+ 3: "no such process",
+ 4: "interrupted system call",
+ 5: "input/output error",
+ 6: "no such device or address",
+ 7: "argument list too long",
+ 8: "exec format error",
+ 9: "bad file descriptor",
+ 10: "no child processes",
+ 11: "resource temporarily unavailable",
+ 12: "cannot allocate memory",
+ 13: "permission denied",
+ 14: "bad address",
+ 15: "block device required",
+ 16: "device or resource busy",
+ 17: "file exists",
+ 18: "invalid cross-device link",
+ 19: "no such device",
+ 20: "not a directory",
+ 21: "is a directory",
+ 22: "invalid argument",
+ 23: "too many open files in system",
+ 24: "too many open files",
+ 25: "inappropriate ioctl for device",
+ 26: "text file busy",
+ 27: "file too large",
+ 28: "no space left on device",
+ 29: "illegal seek",
+ 30: "read-only file system",
+ 31: "too many links",
+ 32: "broken pipe",
+ 33: "numerical argument out of domain",
+ 34: "numerical result out of range",
+ 35: "resource deadlock avoided",
+ 36: "file name too long",
+ 37: "no locks available",
+ 38: "function not implemented",
+ 39: "directory not empty",
+ 40: "too many levels of symbolic links",
+ 42: "no message of desired type",
+ 43: "identifier removed",
+ 44: "channel number out of range",
+ 45: "level 2 not synchronized",
+ 46: "level 3 halted",
+ 47: "level 3 reset",
+ 48: "link number out of range",
+ 49: "protocol driver not attached",
+ 50: "no CSI structure available",
+ 51: "level 2 halted",
+ 52: "invalid exchange",
+ 53: "invalid request descriptor",
+ 54: "exchange full",
+ 55: "no anode",
+ 56: "invalid request code",
+ 57: "invalid slot",
+ 59: "bad font file format",
+ 60: "device not a stream",
+ 61: "no data available",
+ 62: "timer expired",
+ 63: "out of streams resources",
+ 64: "machine is not on the network",
+ 65: "package not installed",
+ 66: "object is remote",
+ 67: "link has been severed",
+ 68: "advertise error",
+ 69: "srmount error",
+ 70: "communication error on send",
+ 71: "protocol error",
+ 72: "multihop attempted",
+ 73: "RFS specific error",
+ 74: "bad message",
+ 75: "value too large for defined data type",
+ 76: "name not unique on network",
+ 77: "file descriptor in bad state",
+ 78: "remote address changed",
+ 79: "can not access a needed shared library",
+ 80: "accessing a corrupted shared library",
+ 81: ".lib section in a.out corrupted",
+ 82: "attempting to link in too many shared libraries",
+ 83: "cannot exec a shared library directly",
+ 84: "invalid or incomplete multibyte or wide character",
+ 85: "interrupted system call should be restarted",
+ 86: "streams pipe error",
+ 87: "too many users",
+ 88: "socket operation on non-socket",
+ 89: "destination address required",
+ 90: "message too long",
+ 91: "protocol wrong type for socket",
+ 92: "protocol not available",
+ 93: "protocol not supported",
+ 94: "socket type not supported",
+ 95: "operation not supported",
+ 96: "protocol family not supported",
+ 97: "address family not supported by protocol",
+ 98: "address already in use",
+ 99: "cannot assign requested address",
+ 100: "network is down",
+ 101: "network is unreachable",
+ 102: "network dropped connection on reset",
+ 103: "software caused connection abort",
+ 104: "connection reset by peer",
+ 105: "no buffer space available",
+ 106: "transport endpoint is already connected",
+ 107: "transport endpoint is not connected",
+ 108: "cannot send after transport endpoint shutdown",
+ 109: "too many references: cannot splice",
+ 110: "connection timed out",
+ 111: "connection refused",
+ 112: "host is down",
+ 113: "no route to host",
+ 114: "operation already in progress",
+ 115: "operation now in progress",
+ 116: "stale file handle",
+ 117: "structure needs cleaning",
+ 118: "not a XENIX named type file",
+ 119: "no XENIX semaphores available",
+ 120: "is a named type file",
+ 121: "remote I/O error",
+ 122: "disk quota exceeded",
+ 123: "no medium found",
+ 124: "wrong medium type",
+ 125: "operation canceled",
+ 126: "required key not available",
+ 127: "key has expired",
+ 128: "key has been revoked",
+ 129: "key was rejected by service",
+ 130: "owner died",
+ 131: "state not recoverable",
+ 132: "operation not possible due to RF-kill",
+ 133: "memory page has hardware error",
+}
+
+// Signal table
+var signals = [...]string{
+ 1: "hangup",
+ 2: "interrupt",
+ 3: "quit",
+ 4: "illegal instruction",
+ 5: "trace/breakpoint trap",
+ 6: "aborted",
+ 7: "bus error",
+ 8: "floating point exception",
+ 9: "killed",
+ 10: "user defined signal 1",
+ 11: "segmentation fault",
+ 12: "user defined signal 2",
+ 13: "broken pipe",
+ 14: "alarm clock",
+ 15: "terminated",
+ 16: "stack fault",
+ 17: "child exited",
+ 18: "continued",
+ 19: "stopped (signal)",
+ 20: "stopped",
+ 21: "stopped (tty input)",
+ 22: "stopped (tty output)",
+ 23: "urgent I/O condition",
+ 24: "CPU time limit exceeded",
+ 25: "file size limit exceeded",
+ 26: "virtual timer expired",
+ 27: "profiling timer expired",
+ 28: "window changed",
+ 29: "I/O possible",
+ 30: "power failure",
+ 31: "bad system call",
+}
diff -pruN 1.6.3-1/src/syscall/zsyscall_linux_s390x.go 1.6.3-1ubuntu1/src/syscall/zsyscall_linux_s390x.go
--- 1.6.3-1/src/syscall/zsyscall_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/zsyscall_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,1578 @@
+// mksyscall.pl syscall_linux.go syscall_linux_s390x.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// +build s390x,linux
+
+package syscall
+
+import "unsafe"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func linkat(olddirfd int, oldpath string, newdirfd int, newpath string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_LINKAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func openat(dirfd int, path string, flags int, mode uint32) (fd int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall6(SYS_OPENAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mode), 0, 0)
+ use(unsafe.Pointer(_p0))
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlinkat(dirfd int, path string, buf []byte) (n int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(buf) > 0 {
+ _p1 = unsafe.Pointer(&buf[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_READLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(buf)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_SYMLINKAT, uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)))
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func unlinkat(dirfd int, path string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UNLINKAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimes(path string, times *[2]Timeval) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMES, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func utimensat(dirfd int, path string, times *[2]Timespec) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIMENSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(times)))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func futimesat(dirfd int, path *byte, times *[2]Timeval) (err error) {
+ _, _, e1 := Syscall(SYS_FUTIMESAT, uintptr(dirfd), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(times)))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getcwd(buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_GETCWD, uintptr(_p0), uintptr(len(buf)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err error) {
+ r0, _, e1 := Syscall6(SYS_WAIT4, uintptr(pid), uintptr(unsafe.Pointer(wstatus)), uintptr(options), uintptr(unsafe.Pointer(rusage)), 0, 0)
+ wpid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+ _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(arg)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_REBOOT, uintptr(magic1), uintptr(magic2), uintptr(cmd), uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func mount(source string, target string, fstype string, flags uintptr, data *byte) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(source)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ var _p2 *byte
+ _p2, err = BytePtrFromString(fstype)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MOUNT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(_p2)), uintptr(flags), uintptr(unsafe.Pointer(data)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ use(unsafe.Pointer(_p2))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Acct(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_ACCT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Adjtimex(buf *Timex) (state int, err error) {
+ r0, _, e1 := Syscall(SYS_ADJTIMEX, uintptr(unsafe.Pointer(buf)), 0, 0)
+ state = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chdir(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Chroot(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_CHROOT, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Close(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup(oldfd int) (fd int, err error) {
+ r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup3(oldfd int, newfd int, flags int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP3, uintptr(oldfd), uintptr(newfd), uintptr(flags))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate(size int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE, uintptr(size), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCreate1(flag int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_EPOLL_CREATE1, uintptr(flag), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollCtl(epfd int, op int, fd int, event *EpollEvent) (err error) {
+ _, _, e1 := RawSyscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(events) > 0 {
+ _p0 = unsafe.Pointer(&events[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_EPOLL_WAIT, uintptr(epfd), uintptr(_p0), uintptr(len(events)), uintptr(msec), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Exit(code int) {
+ Syscall(SYS_EXIT_GROUP, uintptr(code), 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FACCESSAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fallocate(fd int, mode uint32, off int64, len int64) (err error) {
+ _, _, e1 := Syscall6(SYS_FALLOCATE, uintptr(fd), uintptr(mode), uintptr(off), uintptr(len), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchdir(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmod(fd int, mode uint32) (err error) {
+ _, _, e1 := Syscall(SYS_FCHMOD, uintptr(fd), uintptr(mode), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_FCHOWNAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func fcntl(fd int, cmd int, arg int) (val int, err error) {
+ r0, _, e1 := Syscall(SYS_FCNTL, uintptr(fd), uintptr(cmd), uintptr(arg))
+ val = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fdatasync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FDATASYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Flock(fd int, how int) (err error) {
+ _, _, e1 := Syscall(SYS_FLOCK, uintptr(fd), uintptr(how), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fsync(fd int) (err error) {
+ _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getdents(fd int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(_SYS_getdents, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpgid(pid int) (pgid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETPGID, uintptr(pid), 0, 0)
+ pgid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpid() (pid int) {
+ r0, _, _ := RawSyscall(SYS_GETPID, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getppid() (ppid int) {
+ r0, _, _ := RawSyscall(SYS_GETPPID, 0, 0, 0)
+ ppid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getpriority(which int, who int) (prio int, err error) {
+ r0, _, e1 := Syscall(SYS_GETPRIORITY, uintptr(which), uintptr(who), 0)
+ prio = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrusage(who int, rusage *Rusage) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettid() (tid int) {
+ r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0)
+ tid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getxattr(path string, attr string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(dest) > 0 {
+ _p2 = unsafe.Pointer(&dest[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(pathname)
+ if err != nil {
+ return
+ }
+ r0, _, e1 := Syscall(SYS_INOTIFY_ADD_WATCH, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(mask))
+ use(unsafe.Pointer(_p0))
+ watchdesc = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit1(flags int) (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT1, uintptr(flags), 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyRmWatch(fd int, watchdesc uint32) (success int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_RM_WATCH, uintptr(fd), uintptr(watchdesc), 0)
+ success = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Kill(pid int, sig Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_KILL, uintptr(pid), uintptr(sig), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Klogctl(typ int, buf []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(_p0), uintptr(len(buf)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Listxattr(path string, dest []byte) (sz int, err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 unsafe.Pointer
+ if len(dest) > 0 {
+ _p1 = unsafe.Pointer(&dest[0])
+ } else {
+ _p1 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
+ use(unsafe.Pointer(_p0))
+ sz = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mkdirat(dirfd int, path string, mode uint32) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_MKDIRAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mknodat(dirfd int, path string, mode uint32, dev int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_MKNODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(dev), 0, 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Nanosleep(time *Timespec, leftover *Timespec) (err error) {
+ _, _, e1 := Syscall(SYS_NANOSLEEP, uintptr(unsafe.Pointer(time)), uintptr(unsafe.Pointer(leftover)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pause() (err error) {
+ _, _, e1 := Syscall(SYS_PAUSE, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func PivotRoot(newroot string, putold string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(newroot)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(putold)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_PIVOT_ROOT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) {
+ _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func read(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Removexattr(path string, attr string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(oldpath)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(newpath)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall6(SYS_RENAMEAT, uintptr(olddirfd), uintptr(unsafe.Pointer(_p0)), uintptr(newdirfd), uintptr(unsafe.Pointer(_p1)), 0, 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setdomainname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETDOMAINNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sethostname(p []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_SETHOSTNAME, uintptr(_p0), uintptr(len(p)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpgid(pid int, pgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETPGID, uintptr(pid), uintptr(pgid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setsid() (pid int, err error) {
+ r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0)
+ pid = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Settimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setpriority(which int, who int, prio int) (err error) {
+ _, _, e1 := Syscall(SYS_SETPRIORITY, uintptr(which), uintptr(who), uintptr(prio))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ var _p1 *byte
+ _p1, err = BytePtrFromString(attr)
+ if err != nil {
+ return
+ }
+ var _p2 unsafe.Pointer
+ if len(data) > 0 {
+ _p2 = unsafe.Pointer(&data[0])
+ } else {
+ _p2 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ use(unsafe.Pointer(_p1))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sync() {
+ Syscall(SYS_SYNC, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Sysinfo(info *Sysinfo_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SYSINFO, uintptr(unsafe.Pointer(info)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tee(rfd int, wfd int, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_TEE, uintptr(rfd), uintptr(wfd), uintptr(len), uintptr(flags), 0, 0)
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Tgkill(tgid int, tid int, sig Signal) (err error) {
+ _, _, e1 := RawSyscall(SYS_TGKILL, uintptr(tgid), uintptr(tid), uintptr(sig))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Times(tms *Tms) (ticks uintptr, err error) {
+ r0, _, e1 := RawSyscall(SYS_TIMES, uintptr(unsafe.Pointer(tms)), 0, 0)
+ ticks = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Umask(mask int) (oldmask int) {
+ r0, _, _ := RawSyscall(SYS_UMASK, uintptr(mask), 0, 0)
+ oldmask = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Uname(buf *Utsname) (err error) {
+ _, _, e1 := RawSyscall(SYS_UNAME, uintptr(unsafe.Pointer(buf)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unmount(target string, flags int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(target)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UMOUNT2, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Unshare(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_UNSHARE, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ustat(dev int, ubuf *Ustat_t) (err error) {
+ _, _, e1 := Syscall(SYS_USTAT, uintptr(dev), uintptr(unsafe.Pointer(ubuf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Utime(path string, buf *Utimbuf) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_UTIME, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func write(fd int, p []byte) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func exitThread(code int) (err error) {
+ _, _, e1 := Syscall(SYS_EXIT, uintptr(code), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func readlen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_READ, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writelen(fd int, p *byte, np int) (n int, err error) {
+ r0, _, e1 := Syscall(SYS_WRITE, uintptr(fd), uintptr(unsafe.Pointer(p)), uintptr(np))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func munmap(addr uintptr, length uintptr) (err error) {
+ _, _, e1 := Syscall(SYS_MUNMAP, uintptr(addr), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Madvise(b []byte, advice int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MADVISE, uintptr(_p0), uintptr(len(b)), uintptr(advice))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mprotect(b []byte, prot int) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MPROTECT, uintptr(_p0), uintptr(len(b)), uintptr(prot))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlock(b []byte) (err error) {
+ var _p0 unsafe.Pointer
+ if len(b) > 0 {
+ _p0 = unsafe.Pointer(&b[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ _, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Mlockall(flags int) (err error) {
+ _, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Munlockall() (err error) {
+ _, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Dup2(oldfd int, newfd int) (err error) {
+ _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fchown(fd int, uid int, gid int) (err error) {
+ _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstat(fd int, stat *Stat_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Fstatfs(fd int, buf *Statfs_t) (err error) {
+ _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(buf)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Ftruncate(fd int, length int64) (err error) {
+ _, _, e1 := Syscall(SYS_FTRUNCATE, uintptr(fd), uintptr(length), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getegid() (egid int) {
+ r0, _, _ := RawSyscall(SYS_GETEGID, 0, 0, 0)
+ egid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Geteuid() (euid int) {
+ r0, _, _ := RawSyscall(SYS_GETEUID, 0, 0, 0)
+ euid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getgid() (gid int) {
+ r0, _, _ := RawSyscall(SYS_GETGID, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getrlimit(resource int, rlim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Getuid() (uid int) {
+ r0, _, _ := RawSyscall(SYS_GETUID, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func InotifyInit() (fd int, err error) {
+ r0, _, e1 := RawSyscall(SYS_INOTIFY_INIT, 0, 0, 0)
+ fd = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lchown(path string, uid int, gid int) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LCHOWN, uintptr(unsafe.Pointer(_p0)), uintptr(uid), uintptr(gid))
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Lstat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_LSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pread(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PREAD64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(p) > 0 {
+ _p0 = unsafe.Pointer(&p[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := Syscall6(SYS_PWRITE64, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Seek(fd int, offset int64, whence int) (off int64, err error) {
+ r0, _, e1 := Syscall(SYS_LSEEK, uintptr(fd), uintptr(offset), uintptr(whence))
+ off = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+ r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+ r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
+ written = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsgid(gid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSGID, uintptr(gid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setfsuid(uid int) (err error) {
+ _, _, e1 := Syscall(SYS_SETFSUID, uintptr(uid), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setregid(rgid int, egid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREGID, uintptr(rgid), uintptr(egid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresgid(rgid int, egid int, sgid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESGID, uintptr(rgid), uintptr(egid), uintptr(sgid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setresuid(ruid int, euid int, suid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRESUID, uintptr(ruid), uintptr(euid), uintptr(suid))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setrlimit(resource int, rlim *Rlimit) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETRLIMIT, uintptr(resource), uintptr(unsafe.Pointer(rlim)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Setreuid(ruid int, euid int) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETREUID, uintptr(ruid), uintptr(euid), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) {
+ r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags))
+ n = int64(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Stat(path string, stat *Stat_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Statfs(path string, buf *Statfs_t) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_STATFS, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(buf)), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func SyncFileRange(fd int, off int64, n int64, flags int) (err error) {
+ _, _, e1 := Syscall6(SYS_SYNC_FILE_RANGE, uintptr(fd), uintptr(off), uintptr(n), uintptr(flags), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Truncate(path string, length int64) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := Syscall(SYS_TRUNCATE, uintptr(unsafe.Pointer(_p0)), uintptr(length), 0)
+ use(unsafe.Pointer(_p0))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func getgroups(n int, list *_Gid_t) (nn int, err error) {
+ r0, _, e1 := RawSyscall(SYS_GETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ nn = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setgroups(n int, list *_Gid_t) (err error) {
+ _, _, e1 := RawSyscall(SYS_SETGROUPS, uintptr(n), uintptr(unsafe.Pointer(list)), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func Gettimeofday(tv *Timeval) (err error) {
+ _, _, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tv)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pipe2(p *[2]_C_int, flags int) (err error) {
+ _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff -pruN 1.6.3-1/src/syscall/zsysnum_linux_s390x.go 1.6.3-1ubuntu1/src/syscall/zsysnum_linux_s390x.go
--- 1.6.3-1/src/syscall/zsysnum_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/zsysnum_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,328 @@
+// mksysnum_linux.pl /usr/include/asm/unistd.h
+// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT
+
+// +build s390x,linux
+
+package syscall
+
+const (
+ SYS_EXIT = 1
+ SYS_FORK = 2
+ SYS_READ = 3
+ SYS_WRITE = 4
+ SYS_OPEN = 5
+ SYS_CLOSE = 6
+ SYS_RESTART_SYSCALL = 7
+ SYS_CREAT = 8
+ SYS_LINK = 9
+ SYS_UNLINK = 10
+ SYS_EXECVE = 11
+ SYS_CHDIR = 12
+ SYS_MKNOD = 14
+ SYS_CHMOD = 15
+ SYS_LSEEK = 19
+ SYS_GETPID = 20
+ SYS_MOUNT = 21
+ SYS_UMOUNT = 22
+ SYS_PTRACE = 26
+ SYS_ALARM = 27
+ SYS_PAUSE = 29
+ SYS_UTIME = 30
+ SYS_ACCESS = 33
+ SYS_NICE = 34
+ SYS_SYNC = 36
+ SYS_KILL = 37
+ SYS_RENAME = 38
+ SYS_MKDIR = 39
+ SYS_RMDIR = 40
+ SYS_DUP = 41
+ SYS_PIPE = 42
+ SYS_TIMES = 43
+ SYS_BRK = 45
+ SYS_SIGNAL = 48
+ SYS_ACCT = 51
+ SYS_UMOUNT2 = 52
+ SYS_IOCTL = 54
+ SYS_FCNTL = 55
+ SYS_SETPGID = 57
+ SYS_UMASK = 60
+ SYS_CHROOT = 61
+ SYS_USTAT = 62
+ SYS_DUP2 = 63
+ SYS_GETPPID = 64
+ SYS_GETPGRP = 65
+ SYS_SETSID = 66
+ SYS_SIGACTION = 67
+ SYS_SIGSUSPEND = 72
+ SYS_SIGPENDING = 73
+ SYS_SETHOSTNAME = 74
+ SYS_SETRLIMIT = 75
+ SYS_GETRUSAGE = 77
+ SYS_GETTIMEOFDAY = 78
+ SYS_SETTIMEOFDAY = 79
+ SYS_SYMLINK = 83
+ SYS_READLINK = 85
+ SYS_USELIB = 86
+ SYS_SWAPON = 87
+ SYS_REBOOT = 88
+ SYS_READDIR = 89
+ SYS_MMAP = 90
+ SYS_MUNMAP = 91
+ SYS_TRUNCATE = 92
+ SYS_FTRUNCATE = 93
+ SYS_FCHMOD = 94
+ SYS_GETPRIORITY = 96
+ SYS_SETPRIORITY = 97
+ SYS_STATFS = 99
+ SYS_FSTATFS = 100
+ SYS_SOCKETCALL = 102
+ SYS_SYSLOG = 103
+ SYS_SETITIMER = 104
+ SYS_GETITIMER = 105
+ SYS_STAT = 106
+ SYS_LSTAT = 107
+ SYS_FSTAT = 108
+ SYS_LOOKUP_DCOOKIE = 110
+ SYS_VHANGUP = 111
+ SYS_IDLE = 112
+ SYS_WAIT4 = 114
+ SYS_SWAPOFF = 115
+ SYS_SYSINFO = 116
+ SYS_IPC = 117
+ SYS_FSYNC = 118
+ SYS_SIGRETURN = 119
+ SYS_CLONE = 120
+ SYS_SETDOMAINNAME = 121
+ SYS_UNAME = 122
+ SYS_ADJTIMEX = 124
+ SYS_MPROTECT = 125
+ SYS_SIGPROCMASK = 126
+ SYS_CREATE_MODULE = 127
+ SYS_INIT_MODULE = 128
+ SYS_DELETE_MODULE = 129
+ SYS_GET_KERNEL_SYMS = 130
+ SYS_QUOTACTL = 131
+ SYS_GETPGID = 132
+ SYS_FCHDIR = 133
+ SYS_BDFLUSH = 134
+ SYS_SYSFS = 135
+ SYS_PERSONALITY = 136
+ SYS_AFS_SYSCALL = 137
+ SYS_GETDENTS = 141
+ SYS_FLOCK = 143
+ SYS_MSYNC = 144
+ SYS_READV = 145
+ SYS_WRITEV = 146
+ SYS_GETSID = 147
+ SYS_FDATASYNC = 148
+ SYS__SYSCTL = 149
+ SYS_MLOCK = 150
+ SYS_MUNLOCK = 151
+ SYS_MLOCKALL = 152
+ SYS_MUNLOCKALL = 153
+ SYS_SCHED_SETPARAM = 154
+ SYS_SCHED_GETPARAM = 155
+ SYS_SCHED_SETSCHEDULER = 156
+ SYS_SCHED_GETSCHEDULER = 157
+ SYS_SCHED_YIELD = 158
+ SYS_SCHED_GET_PRIORITY_MAX = 159
+ SYS_SCHED_GET_PRIORITY_MIN = 160
+ SYS_SCHED_RR_GET_INTERVAL = 161
+ SYS_NANOSLEEP = 162
+ SYS_MREMAP = 163
+ SYS_QUERY_MODULE = 167
+ SYS_POLL = 168
+ SYS_NFSSERVCTL = 169
+ SYS_PRCTL = 172
+ SYS_RT_SIGRETURN = 173
+ SYS_RT_SIGACTION = 174
+ SYS_RT_SIGPROCMASK = 175
+ SYS_RT_SIGPENDING = 176
+ SYS_RT_SIGTIMEDWAIT = 177
+ SYS_RT_SIGQUEUEINFO = 178
+ SYS_RT_SIGSUSPEND = 179
+ SYS_PREAD64 = 180
+ SYS_PWRITE64 = 181
+ SYS_GETCWD = 183
+ SYS_CAPGET = 184
+ SYS_CAPSET = 185
+ SYS_SIGALTSTACK = 186
+ SYS_SENDFILE = 187
+ SYS_GETPMSG = 188
+ SYS_PUTPMSG = 189
+ SYS_VFORK = 190
+ SYS_PIVOT_ROOT = 217
+ SYS_MINCORE = 218
+ SYS_MADVISE = 219
+ SYS_GETDENTS64 = 220
+ SYS_READAHEAD = 222
+ SYS_SETXATTR = 224
+ SYS_LSETXATTR = 225
+ SYS_FSETXATTR = 226
+ SYS_GETXATTR = 227
+ SYS_LGETXATTR = 228
+ SYS_FGETXATTR = 229
+ SYS_LISTXATTR = 230
+ SYS_LLISTXATTR = 231
+ SYS_FLISTXATTR = 232
+ SYS_REMOVEXATTR = 233
+ SYS_LREMOVEXATTR = 234
+ SYS_FREMOVEXATTR = 235
+ SYS_GETTID = 236
+ SYS_TKILL = 237
+ SYS_FUTEX = 238
+ SYS_SCHED_SETAFFINITY = 239
+ SYS_SCHED_GETAFFINITY = 240
+ SYS_TGKILL = 241
+ SYS_IO_SETUP = 243
+ SYS_IO_DESTROY = 244
+ SYS_IO_GETEVENTS = 245
+ SYS_IO_SUBMIT = 246
+ SYS_IO_CANCEL = 247
+ SYS_EXIT_GROUP = 248
+ SYS_EPOLL_CREATE = 249
+ SYS_EPOLL_CTL = 250
+ SYS_EPOLL_WAIT = 251
+ SYS_SET_TID_ADDRESS = 252
+ SYS_FADVISE64 = 253
+ SYS_TIMER_CREATE = 254
+ SYS_TIMER_SETTIME = 255
+ SYS_TIMER_GETTIME = 256
+ SYS_TIMER_GETOVERRUN = 257
+ SYS_TIMER_DELETE = 258
+ SYS_CLOCK_SETTIME = 259
+ SYS_CLOCK_GETTIME = 260
+ SYS_CLOCK_GETRES = 261
+ SYS_CLOCK_NANOSLEEP = 262
+ SYS_STATFS64 = 265
+ SYS_FSTATFS64 = 266
+ SYS_REMAP_FILE_PAGES = 267
+ SYS_MBIND = 268
+ SYS_GET_MEMPOLICY = 269
+ SYS_SET_MEMPOLICY = 270
+ SYS_MQ_OPEN = 271
+ SYS_MQ_UNLINK = 272
+ SYS_MQ_TIMEDSEND = 273
+ SYS_MQ_TIMEDRECEIVE = 274
+ SYS_MQ_NOTIFY = 275
+ SYS_MQ_GETSETATTR = 276
+ SYS_KEXEC_LOAD = 277
+ SYS_ADD_KEY = 278
+ SYS_REQUEST_KEY = 279
+ SYS_KEYCTL = 280
+ SYS_WAITID = 281
+ SYS_IOPRIO_SET = 282
+ SYS_IOPRIO_GET = 283
+ SYS_INOTIFY_INIT = 284
+ SYS_INOTIFY_ADD_WATCH = 285
+ SYS_INOTIFY_RM_WATCH = 286
+ SYS_MIGRATE_PAGES = 287
+ SYS_OPENAT = 288
+ SYS_MKDIRAT = 289
+ SYS_MKNODAT = 290
+ SYS_FCHOWNAT = 291
+ SYS_FUTIMESAT = 292
+ SYS_UNLINKAT = 294
+ SYS_RENAMEAT = 295
+ SYS_LINKAT = 296
+ SYS_SYMLINKAT = 297
+ SYS_READLINKAT = 298
+ SYS_FCHMODAT = 299
+ SYS_FACCESSAT = 300
+ SYS_PSELECT6 = 301
+ SYS_PPOLL = 302
+ SYS_UNSHARE = 303
+ SYS_SET_ROBUST_LIST = 304
+ SYS_GET_ROBUST_LIST = 305
+ SYS_SPLICE = 306
+ SYS_SYNC_FILE_RANGE = 307
+ SYS_TEE = 308
+ SYS_VMSPLICE = 309
+ SYS_MOVE_PAGES = 310
+ SYS_GETCPU = 311
+ SYS_EPOLL_PWAIT = 312
+ SYS_UTIMES = 313
+ SYS_FALLOCATE = 314
+ SYS_UTIMENSAT = 315
+ SYS_SIGNALFD = 316
+ SYS_TIMERFD = 317
+ SYS_EVENTFD = 318
+ SYS_TIMERFD_CREATE = 319
+ SYS_TIMERFD_SETTIME = 320
+ SYS_TIMERFD_GETTIME = 321
+ SYS_SIGNALFD4 = 322
+ SYS_EVENTFD2 = 323
+ SYS_INOTIFY_INIT1 = 324
+ SYS_PIPE2 = 325
+ SYS_DUP3 = 326
+ SYS_EPOLL_CREATE1 = 327
+ SYS_PREADV = 328
+ SYS_PWRITEV = 329
+ SYS_RT_TGSIGQUEUEINFO = 330
+ SYS_PERF_EVENT_OPEN = 331
+ SYS_FANOTIFY_INIT = 332
+ SYS_FANOTIFY_MARK = 333
+ SYS_PRLIMIT64 = 334
+ SYS_NAME_TO_HANDLE_AT = 335
+ SYS_OPEN_BY_HANDLE_AT = 336
+ SYS_CLOCK_ADJTIME = 337
+ SYS_SYNCFS = 338
+ SYS_SETNS = 339
+ SYS_PROCESS_VM_READV = 340
+ SYS_PROCESS_VM_WRITEV = 341
+ SYS_S390_RUNTIME_INSTR = 342
+ SYS_KCMP = 343
+ SYS_FINIT_MODULE = 344
+ SYS_SCHED_SETATTR = 345
+ SYS_SCHED_GETATTR = 346
+ SYS_RENAMEAT2 = 347
+ SYS_SECCOMP = 348
+ SYS_GETRANDOM = 349
+ SYS_MEMFD_CREATE = 350
+ SYS_BPF = 351
+ SYS_S390_PCI_MMIO_WRITE = 352
+ SYS_S390_PCI_MMIO_READ = 353
+ SYS_EXECVEAT = 354
+ SYS_USERFAULTFD = 355
+ SYS_MEMBARRIER = 356
+ SYS_RECVMMSG = 357
+ SYS_SENDMMSG = 358
+ SYS_SOCKET = 359
+ SYS_SOCKETPAIR = 360
+ SYS_BIND = 361
+ SYS_CONNECT = 362
+ SYS_LISTEN = 363
+ SYS_ACCEPT4 = 364
+ SYS_GETSOCKOPT = 365
+ SYS_SETSOCKOPT = 366
+ SYS_GETSOCKNAME = 367
+ SYS_GETPEERNAME = 368
+ SYS_SENDTO = 369
+ SYS_SENDMSG = 370
+ SYS_RECVFROM = 371
+ SYS_RECVMSG = 372
+ SYS_SHUTDOWN = 373
+ SYS_MLOCK2 = 374
+ SYS_SELECT = 142
+ SYS_GETRLIMIT = 191
+ SYS_LCHOWN = 198
+ SYS_GETUID = 199
+ SYS_GETGID = 200
+ SYS_GETEUID = 201
+ SYS_GETEGID = 202
+ SYS_SETREUID = 203
+ SYS_SETREGID = 204
+ SYS_GETGROUPS = 205
+ SYS_SETGROUPS = 206
+ SYS_FCHOWN = 207
+ SYS_SETRESUID = 208
+ SYS_GETRESUID = 209
+ SYS_SETRESGID = 210
+ SYS_GETRESGID = 211
+ SYS_CHOWN = 212
+ SYS_SETUID = 213
+ SYS_SETGID = 214
+ SYS_SETFSUID = 215
+ SYS_SETFSGID = 216
+ SYS_NEWFSTATAT = 293
+)
diff -pruN 1.6.3-1/src/syscall/ztypes_linux_s390x.go 1.6.3-1ubuntu1/src/syscall/ztypes_linux_s390x.go
--- 1.6.3-1/src/syscall/ztypes_linux_s390x.go 1970-01-01 00:00:00.000000000 +0000
+++ 1.6.3-1ubuntu1/src/syscall/ztypes_linux_s390x.go 2016-07-21 13:36:09.000000000 +0000
@@ -0,0 +1,622 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs types_linux.go | go run mkpost.go
+
+// +build s390x,linux
+
+package syscall
+
+const (
+ sizeofPtr = 0x8
+ sizeofShort = 0x2
+ sizeofInt = 0x4
+ sizeofLong = 0x8
+ sizeofLongLong = 0x8
+ PathMax = 0x1000
+)
+
+type (
+ _C_short int16
+ _C_int int32
+ _C_long int64
+ _C_long_long int64
+)
+
+type Timespec struct {
+ Sec int64
+ Nsec int64
+}
+
+type Timeval struct {
+ Sec int64
+ Usec int64
+}
+
+type Timex struct {
+ Modes uint32
+ _ [4]byte
+ Offset int64
+ Freq int64
+ Maxerror int64
+ Esterror int64
+ Status int32
+ _ [4]byte
+ Constant int64
+ Precision int64
+ Tolerance int64
+ Time Timeval
+ Tick int64
+ Ppsfreq int64
+ Jitter int64
+ Shift int32
+ _ [4]byte
+ Stabil int64
+ Jitcnt int64
+ Calcnt int64
+ Errcnt int64
+ Stbcnt int64
+ Tai int32
+ _ [44]byte
+}
+
+type Time_t int64
+
+type Tms struct {
+ Utime int64
+ Stime int64
+ Cutime int64
+ Cstime int64
+}
+
+type Utimbuf struct {
+ Actime int64
+ Modtime int64
+}
+
+type Rusage struct {
+ Utime Timeval
+ Stime Timeval
+ Maxrss int64
+ Ixrss int64
+ Idrss int64
+ Isrss int64
+ Minflt int64
+ Majflt int64
+ Nswap int64
+ Inblock int64
+ Oublock int64
+ Msgsnd int64
+ Msgrcv int64
+ Nsignals int64
+ Nvcsw int64
+ Nivcsw int64
+}
+
+type Rlimit struct {
+ Cur uint64
+ Max uint64
+}
+
+type _Gid_t uint32
+
+type Stat_t struct {
+ Dev uint64
+ Ino uint64
+ Nlink uint64
+ Mode uint32
+ Uid uint32
+ Gid uint32
+ _ int32
+ Rdev uint64
+ Size int64
+ Atim Timespec
+ Mtim Timespec
+ Ctim Timespec
+ Blksize int64
+ Blocks int64
+ _ [3]int64
+}
+
+type Statfs_t struct {
+ Type uint32
+ Bsize uint32
+ Blocks uint64
+ Bfree uint64
+ Bavail uint64
+ Files uint64
+ Ffree uint64
+ Fsid Fsid
+ Namelen uint32
+ Frsize uint32
+ Flags uint32
+ Spare [4]uint32
+ _ [4]byte
+}
+
+type Dirent struct {
+ Ino uint64
+ Off int64
+ Reclen uint16
+ Type uint8
+ Name [256]uint8
+ _ [5]byte
+}
+
+type Fsid struct {
+ _ [2]int32
+}
+
+type Flock_t struct {
+ Type int16
+ Whence int16
+ _ [4]byte
+ Start int64
+ Len int64
+ Pid int32
+ _ [4]byte
+}
+
+type RawSockaddrInet4 struct {
+ Family uint16
+ Port uint16
+ Addr [4]byte /* in_addr */
+ Zero [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+ Family uint16
+ Port uint16
+ Flowinfo uint32
+ Addr [16]byte /* in6_addr */
+ Scope_id uint32
+}
+
+type RawSockaddrUnix struct {
+ Family uint16
+ Path [108]int8
+}
+
+type RawSockaddrLinklayer struct {
+ Family uint16
+ Protocol uint16
+ Ifindex int32
+ Hatype uint16
+ Pkttype uint8
+ Halen uint8
+ Addr [8]uint8
+}
+
+type RawSockaddrNetlink struct {
+ Family uint16
+ Pad uint16
+ Pid uint32
+ Groups uint32
+}
+
+type RawSockaddr struct {
+ Family uint16
+ Data [14]int8
+}
+
+type RawSockaddrAny struct {
+ Addr RawSockaddr
+ Pad [96]uint8
+}
+
+type _Socklen uint32
+
+type Linger struct {
+ Onoff int32
+ Linger int32
+}
+
+type Iovec struct {
+ Base *byte
+ Len uint64
+}
+
+type IPMreq struct {
+ Multiaddr [4]byte /* in_addr */
+ Interface [4]byte /* in_addr */
+}
+
+type IPMreqn struct {
+ Multiaddr [4]byte /* in_addr */
+ Address [4]byte /* in_addr */
+ Ifindex int32
+}
+
+type IPv6Mreq struct {
+ Multiaddr [16]byte /* in6_addr */
+ Interface uint32
+}
+
+type Msghdr struct {
+ Name *byte
+ Namelen uint32
+ _ [4]byte
+ Iov *Iovec
+ Iovlen uint64
+ Control *byte
+ Controllen uint64
+ Flags int32
+ _ [4]byte
+}
+
+type Cmsghdr struct {
+ Len uint64
+ Level int32
+ Type int32
+}
+
+type Inet4Pktinfo struct {
+ Ifindex int32
+ Spec_dst [4]byte /* in_addr */
+ Addr [4]byte /* in_addr */
+}
+
+type Inet6Pktinfo struct {
+ Addr [16]byte /* in6_addr */
+ Ifindex uint32
+}
+
+type IPv6MTUInfo struct {
+ Addr RawSockaddrInet6
+ Mtu uint32
+}
+
+type ICMPv6Filter struct {
+ Data [8]uint32
+}
+
+type Ucred struct {
+ Pid int32
+ Uid uint32
+ Gid uint32
+}
+
+type TCPInfo struct {
+ State uint8
+ Ca_state uint8
+ Retransmits uint8
+ Probes uint8
+ Backoff uint8
+ Options uint8
+ _ [2]byte
+ Rto uint32
+ Ato uint32
+ Snd_mss uint32
+ Rcv_mss uint32
+ Unacked uint32
+ Sacked uint32
+ Lost uint32
+ Retrans uint32
+ Fackets uint32
+ Last_data_sent uint32
+ Last_ack_sent uint32
+ Last_data_recv uint32
+ Last_ack_recv uint32
+ Pmtu uint32
+ Rcv_ssthresh uint32
+ Rtt uint32
+ Rttvar uint32
+ Snd_ssthresh uint32
+ Snd_cwnd uint32
+ Advmss uint32
+ Reordering uint32
+ Rcv_rtt uint32
+ Rcv_space uint32
+ Total_retrans uint32
+}
+
+const (
+ SizeofSockaddrInet4 = 0x10
+ SizeofSockaddrInet6 = 0x1c
+ SizeofSockaddrAny = 0x70
+ SizeofSockaddrUnix = 0x6e
+ SizeofSockaddrLinklayer = 0x14
+ SizeofSockaddrNetlink = 0xc
+ SizeofLinger = 0x8
+ SizeofIPMreq = 0x8
+ SizeofIPMreqn = 0xc
+ SizeofIPv6Mreq = 0x14
+ SizeofMsghdr = 0x38
+ SizeofCmsghdr = 0x10
+ SizeofInet4Pktinfo = 0xc
+ SizeofInet6Pktinfo = 0x14
+ SizeofIPv6MTUInfo = 0x20
+ SizeofICMPv6Filter = 0x20
+ SizeofUcred = 0xc
+ SizeofTCPInfo = 0x68
+)
+
+const (
+ IFA_UNSPEC = 0x0
+ IFA_ADDRESS = 0x1
+ IFA_LOCAL = 0x2
+ IFA_LABEL = 0x3
+ IFA_BROADCAST = 0x4
+ IFA_ANYCAST = 0x5
+ IFA_CACHEINFO = 0x6
+ IFA_MULTICAST = 0x7
+ IFLA_UNSPEC = 0x0
+ IFLA_ADDRESS = 0x1
+ IFLA_BROADCAST = 0x2
+ IFLA_IFNAME = 0x3
+ IFLA_MTU = 0x4
+ IFLA_LINK = 0x5
+ IFLA_QDISC = 0x6
+ IFLA_STATS = 0x7
+ IFLA_COST = 0x8
+ IFLA_PRIORITY = 0x9
+ IFLA_MASTER = 0xa
+ IFLA_WIRELESS = 0xb
+ IFLA_PROTINFO = 0xc
+ IFLA_TXQLEN = 0xd
+ IFLA_MAP = 0xe
+ IFLA_WEIGHT = 0xf
+ IFLA_OPERSTATE = 0x10
+ IFLA_LINKMODE = 0x11
+ IFLA_LINKINFO = 0x12
+ IFLA_NET_NS_PID = 0x13
+ IFLA_IFALIAS = 0x14
+ IFLA_MAX = 0x27
+ RT_SCOPE_UNIVERSE = 0x0
+ RT_SCOPE_SITE = 0xc8
+ RT_SCOPE_LINK = 0xfd
+ RT_SCOPE_HOST = 0xfe
+ RT_SCOPE_NOWHERE = 0xff
+ RT_TABLE_UNSPEC = 0x0
+ RT_TABLE_COMPAT = 0xfc
+ RT_TABLE_DEFAULT = 0xfd
+ RT_TABLE_MAIN = 0xfe
+ RT_TABLE_LOCAL = 0xff
+ RT_TABLE_MAX = 0xffffffff
+ RTA_UNSPEC = 0x0
+ RTA_DST = 0x1
+ RTA_SRC = 0x2
+ RTA_IIF = 0x3
+ RTA_OIF = 0x4
+ RTA_GATEWAY = 0x5
+ RTA_PRIORITY = 0x6
+ RTA_PREFSRC = 0x7
+ RTA_METRICS = 0x8
+ RTA_MULTIPATH = 0x9
+ RTA_FLOW = 0xb
+ RTA_CACHEINFO = 0xc
+ RTA_TABLE = 0xf
+ RTN_UNSPEC = 0x0
+ RTN_UNICAST = 0x1
+ RTN_LOCAL = 0x2
+ RTN_BROADCAST = 0x3
+ RTN_ANYCAST = 0x4
+ RTN_MULTICAST = 0x5
+ RTN_BLACKHOLE = 0x6
+ RTN_UNREACHABLE = 0x7
+ RTN_PROHIBIT = 0x8
+ RTN_THROW = 0x9
+ RTN_NAT = 0xa
+ RTN_XRESOLVE = 0xb
+ RTNLGRP_NONE = 0x0
+ RTNLGRP_LINK = 0x1
+ RTNLGRP_NOTIFY = 0x2
+ RTNLGRP_NEIGH = 0x3
+ RTNLGRP_TC = 0x4
+ RTNLGRP_IPV4_IFADDR = 0x5
+ RTNLGRP_IPV4_MROUTE = 0x6
+ RTNLGRP_IPV4_ROUTE = 0x7
+ RTNLGRP_IPV4_RULE = 0x8
+ RTNLGRP_IPV6_IFADDR = 0x9
+ RTNLGRP_IPV6_MROUTE = 0xa
+ RTNLGRP_IPV6_ROUTE = 0xb
+ RTNLGRP_IPV6_IFINFO = 0xc
+ RTNLGRP_IPV6_PREFIX = 0x12
+ RTNLGRP_IPV6_RULE = 0x13
+ RTNLGRP_ND_USEROPT = 0x14
+ SizeofNlMsghdr = 0x10
+ SizeofNlMsgerr = 0x14
+ SizeofRtGenmsg = 0x1
+ SizeofNlAttr = 0x4
+ SizeofRtAttr = 0x4
+ SizeofIfInfomsg = 0x10
+ SizeofIfAddrmsg = 0x8
+ SizeofRtMsg = 0xc
+ SizeofRtNexthop = 0x8
+)
+
+type NlMsghdr struct {
+ Len uint32
+ Type uint16
+ Flags uint16
+ Seq uint32
+ Pid uint32
+}
+
+type NlMsgerr struct {
+ Error int32
+ Msg NlMsghdr
+}
+
+type RtGenmsg struct {
+ Family uint8
+}
+
+type NlAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type RtAttr struct {
+ Len uint16
+ Type uint16
+}
+
+type IfInfomsg struct {
+ Family uint8
+ _ uint8
+ Type uint16
+ Index int32
+ Flags uint32
+ Change uint32
+}
+
+type IfAddrmsg struct {
+ Family uint8
+ Prefixlen uint8
+ Flags uint8
+ Scope uint8
+ Index uint32
+}
+
+type RtMsg struct {
+ Family uint8
+ Dst_len uint8
+ Src_len uint8
+ Tos uint8
+ Table uint8
+ Protocol uint8
+ Scope uint8
+ Type uint8
+ Flags uint32
+}
+
+type RtNexthop struct {
+ Len uint16
+ Flags uint8
+ Hops uint8
+ Ifindex int32
+}
+
+const (
+ SizeofSockFilter = 0x8
+ SizeofSockFprog = 0x10
+)
+
+type SockFilter struct {
+ Code uint16
+ Jt uint8
+ Jf uint8
+ K uint32
+}
+
+type SockFprog struct {
+ Len uint16
+ _ [6]byte
+ Filter *SockFilter
+}
+
+type InotifyEvent struct {
+ Wd int32
+ Mask uint32
+ Cookie uint32
+ Len uint32
+}
+
+const SizeofInotifyEvent = 0x10
+
+type PtraceRegs struct {
+ Psw PtracePsw
+ Gprs [16]uint64
+ Acrs [16]uint32
+ Orig_gpr2 uint64
+ Fp_regs PtraceFpregs
+ Per_info PtracePer
+ Ieee_instruction_pointer uint64
+}
+
+type PtracePsw struct {
+ Mask uint64
+ Addr uint64
+}
+
+type PtraceFpregs struct {
+ Fpc uint32
+ _ [4]byte
+ Fprs [16]float64
+}
+
+type PtracePer struct {
+ Control_regs [0]uint64
+ _ [24]byte
+ _ [8]byte
+ Starting_addr uint64
+ Ending_addr uint64
+ Perc_atmid uint16
+ _ [6]byte
+ Address uint64
+ Access_id uint8
+ _ [7]byte
+}
+
+type FdSet struct {
+ Bits [16]int64
+}
+
+type Sysinfo_t struct {
+ Uptime int64
+ Loads [3]uint64
+ Totalram uint64
+ Freeram uint64
+ Sharedram uint64
+ Bufferram uint64
+ Totalswap uint64
+ Freeswap uint64
+ Procs uint16
+ Pad uint16
+ _ [4]byte
+ Totalhigh uint64
+ Freehigh uint64
+ Unit uint32
+ _ [0]uint8
+ _ [4]byte
+}
+
+type Utsname struct {
+ Sysname [65]uint8
+ Nodename [65]uint8
+ Release [65]uint8
+ Version [65]uint8
+ Machine [65]uint8
+ Domainname [65]uint8
+}
+
+type Ustat_t struct {
+ Tfree int32
+ _ [4]byte
+ Tinode uint64
+ Fname [6]uint8
+ Fpack [6]uint8
+ _ [4]byte
+}
+
+type EpollEvent struct {
+ Events uint32
+ _ int32
+ Fd int32
+ Pad int32
+}
+
+const (
+ _AT_FDCWD = -0x64
+ _AT_REMOVEDIR = 0x200
+ _AT_SYMLINK_NOFOLLOW = 0x100
+)
+
+type Termios struct {
+ Iflag uint32
+ Oflag uint32
+ Cflag uint32
+ Lflag uint32
+ Line uint8
+ Cc [32]uint8
+ _ [3]byte
+ Ispeed uint32
+ Ospeed uint32
+}
+
+const (
+ IUCLC = 0x200
+ OLCUC = 0x2
+ TCGETS = 0x5401
+ TCSETS = 0x5402
+ XCASE = 0x4
+)
diff -pruN 1.6.3-1/test/fixedbugs/issue11656.go 1.6.3-1ubuntu1/test/fixedbugs/issue11656.go
--- 1.6.3-1/test/fixedbugs/issue11656.go 2016-07-18 16:24:09.000000000 +0000
+++ 1.6.3-1ubuntu1/test/fixedbugs/issue11656.go 2016-07-21 13:36:09.000000000 +0000
@@ -65,6 +65,8 @@ func f(n int) {
binary.BigEndian.PutUint32(ill, 0x00000034) // trap
case "mips64le":
binary.LittleEndian.PutUint32(ill, 0x00000034) // trap
+ case "s390x":
+ binary.BigEndian.PutUint32(ill, 0) // undefined
default:
// Just leave it as 0 and hope for the best.
}
diff -pruN 1.6.3-1/test/init1.go 1.6.3-1ubuntu1/test/init1.go
--- 1.6.3-1/test/init1.go 2016-07-18 16:24:09.000000000 +0000
+++ 1.6.3-1ubuntu1/test/init1.go 2016-07-21 13:36:09.000000000 +0000
@@ -40,7 +40,7 @@ func init() {
sys1, numGC1 := memstats.Sys, memstats.NumGC
if sys1-sys >= N*MB || numGC1 == numGC {
println("allocated 1000 chunks of", MB, "and used ", sys1-sys, "memory")
- println("numGC went", numGC, "to", numGC)
+ println("numGC went", numGC, "to", numGC1)
panic("init1")
}
}
diff -pruN 1.6.3-1/test/nilptr3.go 1.6.3-1ubuntu1/test/nilptr3.go
--- 1.6.3-1/test/nilptr3.go 2016-07-18 16:24:09.000000000 +0000
+++ 1.6.3-1ubuntu1/test/nilptr3.go 2016-07-21 13:36:09.000000000 +0000
@@ -2,7 +2,8 @@
// Fails on ppc64x because of incomplete optimization.
// See issues 9058.
// Same reason for mips64x.
-// +build !ppc64,!ppc64le,!mips64,!mips64le
+// Same reason for s390x.
+// +build !ppc64,!ppc64le,!mips64,!mips64le,!s390x
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
diff -pruN 1.6.3-1/test/nosplit.go 1.6.3-1ubuntu1/test/nosplit.go
--- 1.6.3-1/test/nosplit.go 2016-07-18 16:24:09.000000000 +0000
+++ 1.6.3-1ubuntu1/test/nosplit.go 2016-07-21 13:36:09.000000000 +0000
@@ -275,6 +275,9 @@ TestCases:
case "amd64":
ptrSize = 8
fmt.Fprintf(&buf, "#define REGISTER AX\n")
+ case "s390x":
+ ptrSize = 8
+ fmt.Fprintf(&buf, "#define REGISTER R10\n")
default:
fmt.Fprintf(&buf, "#define REGISTER AX\n")
}