Created
July 18, 2022 12:03
-
-
Save slp/d60da6d61517aa9d15271921943382d2 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
From 96d52b6e40060c104cd4e08c2fab73ad1a899fba Mon Sep 17 00:00:00 2001 | |
From: Sergio Lopez <[email protected]> | |
Date: Mon, 18 Jul 2022 13:51:24 +0200 | |
Subject: [PATCH 1/4] Update vendor of containers/storage | |
Backport 39880517a19ffde1ca2fca5f29cb051309e56773 from the main | |
branch. | |
Signed-off-by: Sergio Lopez <[email protected]> | |
--- | |
go.mod | 6 +- | |
go.sum | 12 + | |
.../Microsoft/hcsshim/internal/hcs/system.go | 188 ++- | |
.../hcsshim/internal/hns/hnspolicy.go | 9 +- | |
.../hcsshim/internal/jobobject/iocp.go | 111 ++ | |
.../hcsshim/internal/jobobject/jobobject.go | 499 +++++++ | |
.../hcsshim/internal/jobobject/limits.go | 315 +++++ | |
.../Microsoft/hcsshim/internal/queue/mq.go | 111 ++ | |
.../Microsoft/hcsshim/internal/winapi/iocp.go | 3 - | |
.../hcsshim/internal/winapi/jobobject.go | 9 +- | |
.../hcsshim/internal/winapi/process.go | 57 + | |
.../hcsshim/internal/winapi/winapi.go | 2 +- | |
.../internal/winapi/zsyscall_windows.go | 22 +- | |
.../github.com/containers/storage/.cirrus.yml | 8 +- | |
vendor/github.com/containers/storage/Makefile | 4 +- | |
vendor/github.com/containers/storage/VERSION | 2 +- | |
.../storage/drivers/chown_darwin.go | 109 ++ | |
.../containers/storage/drivers/chown_unix.go | 4 +- | |
.../storage/drivers/driver_darwin.go | 14 + | |
.../storage/drivers/driver_linux.go | 24 +- | |
.../storage/drivers/driver_unsupported.go | 2 +- | |
.../containers/storage/drivers/fsdiff.go | 9 + | |
.../storage/drivers/overlay/overlay.go | 8 +- | |
.../containers/storage/drivers/vfs/driver.go | 5 + | |
vendor/github.com/containers/storage/go.mod | 8 +- | |
vendor/github.com/containers/storage/go.sum | 19 +- | |
.../containers/storage/pkg/archive/archive.go | 30 +- | |
.../storage/pkg/chrootarchive/archive.go | 9 - | |
.../pkg/chrootarchive/archive_darwin.go | 21 + | |
.../storage/pkg/chrootarchive/archive_unix.go | 2 +- | |
.../storage/pkg/chrootarchive/chroot_linux.go | 7 + | |
.../storage/pkg/chrootarchive/chroot_unix.go | 2 +- | |
.../storage/pkg/chrootarchive/diff_darwin.go | 41 + | |
.../storage/pkg/chrootarchive/diff_unix.go | 2 +- | |
.../storage/pkg/chrootarchive/init_darwin.go | 4 + | |
.../storage/pkg/chrootarchive/init_unix.go | 2 +- | |
.../storage/pkg/chunked/storage_linux.go | 20 +- | |
.../containers/storage/pkg/idtools/idtools.go | 44 +- | |
.../storage/pkg/mount/mounter_freebsd.go | 3 + | |
.../storage/pkg/mount/mounter_unsupported.go | 4 +- | |
.../storage/pkg/system/meminfo_freebsd.go | 84 ++ | |
.../storage/pkg/system/meminfo_unsupported.go | 3 +- | |
.../storage/pkg/system/xattrs_darwin.go | 84 ++ | |
.../storage/pkg/system/xattrs_unsupported.go | 2 +- | |
.../containers/storage/pkg/unshare/unshare.c | 2 +- | |
.../containers/storage/pkg/unshare/unshare.go | 24 +- | |
.../storage/pkg/unshare/unshare_cgo.go | 3 +- | |
.../storage/pkg/unshare/unshare_darwin.go | 53 + | |
.../storage/pkg/unshare/unshare_freebsd.c | 76 ++ | |
.../storage/pkg/unshare/unshare_freebsd.go | 179 +++ | |
.../storage/pkg/unshare/unshare_linux.go | 30 +- | |
.../pkg/unshare/unshare_unsupported.go | 8 +- | |
.../pkg/unshare/unshare_unsupported_cgo.go | 3 +- | |
.../containers/storage/storage.conf | 26 + | |
.../containers/storage/storage.conf-freebsd | 4 +- | |
vendor/github.com/containers/storage/store.go | 16 + | |
.../containers/storage/types/options.go | 36 +- | |
.../storage/types/options_darwin.go | 17 + | |
.../storage/types/options_freebsd.go | 17 + | |
.../containers/storage/types/options_linux.go | 17 + | |
.../storage/types/options_windows.go | 17 + | |
.../containers/storage/types/utils.go | 2 +- | |
.../github.com/klauspost/compress/README.md | 33 + | |
.../klauspost/compress/flate/deflate.go | 36 +- | |
.../klauspost/compress/flate/fast_encoder.go | 2 +- | |
.../klauspost/compress/flate/inflate_gen.go | 100 +- | |
.../klauspost/compress/huff0/autogen.go | 5 - | |
.../klauspost/compress/huff0/bitreader.go | 10 - | |
.../klauspost/compress/huff0/bitwriter.go | 115 -- | |
.../klauspost/compress/huff0/bytereader.go | 10 - | |
.../klauspost/compress/huff0/compress.go | 1 + | |
.../klauspost/compress/huff0/decompress.go | 113 +- | |
.../compress/huff0/decompress_8b_amd64.s | 488 ------- | |
.../compress/huff0/decompress_8b_amd64.s.in | 197 --- | |
.../compress/huff0/decompress_amd64.go | 177 ++- | |
.../compress/huff0/decompress_amd64.s | 1179 +++++++++++------ | |
.../compress/huff0/decompress_amd64.s.in | 195 --- | |
.../compress/huff0/decompress_generic.go | 102 ++ | |
.../klauspost/compress/zstd/bitreader.go | 7 - | |
.../klauspost/compress/zstd/bitwriter.go | 76 -- | |
.../klauspost/compress/zstd/blockdec.go | 31 +- | |
.../klauspost/compress/zstd/bytebuf.go | 4 - | |
.../klauspost/compress/zstd/bytereader.go | 6 - | |
.../klauspost/compress/zstd/decoder.go | 107 +- | |
.../compress/zstd/decoder_options.go | 9 + | |
.../klauspost/compress/zstd/enc_better.go | 8 +- | |
.../klauspost/compress/zstd/enc_dfast.go | 10 +- | |
.../klauspost/compress/zstd/encoder.go | 2 +- | |
.../klauspost/compress/zstd/framedec.go | 57 +- | |
.../klauspost/compress/zstd/fse_decoder.go | 40 - | |
.../klauspost/compress/zstd/fse_encoder.go | 23 - | |
.../klauspost/compress/zstd/fuzz.go | 11 - | |
.../klauspost/compress/zstd/fuzz_none.go | 11 - | |
.../klauspost/compress/zstd/hash.go | 6 - | |
.../klauspost/compress/zstd/seqdec.go | 102 +- | |
.../klauspost/compress/zstd/seqdec_amd64.go | 16 +- | |
.../klauspost/compress/zstd/seqdec_amd64.s | 726 ++++++---- | |
.../github.com/klauspost/compress/zstd/zip.go | 9 +- | |
.../klauspost/compress/zstd/zstd.go | 11 - | |
.../assert/assertion_compare_can_convert.go | 2 +- | |
vendor/gopkg.in/yaml.v3/decode.go | 78 +- | |
vendor/gopkg.in/yaml.v3/parserc.go | 11 +- | |
vendor/modules.txt | 14 +- | |
103 files changed, 4049 insertions(+), 2545 deletions(-) | |
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go | |
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go | |
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go | |
create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go | |
delete mode 100644 vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go | |
create mode 100644 vendor/github.com/containers/storage/drivers/chown_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/drivers/driver_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c | |
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go | |
create mode 100644 vendor/github.com/containers/storage/types/options_darwin.go | |
create mode 100644 vendor/github.com/containers/storage/types/options_freebsd.go | |
create mode 100644 vendor/github.com/containers/storage/types/options_linux.go | |
create mode 100644 vendor/github.com/containers/storage/types/options_windows.go | |
delete mode 100644 vendor/github.com/klauspost/compress/huff0/autogen.go | |
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s | |
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in | |
delete mode 100644 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in | |
delete mode 100644 vendor/github.com/klauspost/compress/zstd/fuzz.go | |
delete mode 100644 vendor/github.com/klauspost/compress/zstd/fuzz_none.go | |
diff --git a/go.mod b/go.mod | |
index d3c34cbb..df4eacf4 100644 | |
--- a/go.mod | |
+++ b/go.mod | |
@@ -8,7 +8,7 @@ require ( | |
github.com/containers/common v0.48.0 | |
github.com/containers/image/v5 v5.21.1 | |
github.com/containers/ocicrypt v1.1.4-0.20220428134531-566b808bdf6f | |
- github.com/containers/storage v1.40.3 | |
+ github.com/containers/storage v1.41.1-0.20220606145428-2b2fb9fa246d | |
github.com/docker/distribution v2.8.1+incompatible | |
github.com/docker/docker v20.10.14+incompatible | |
github.com/docker/go-units v0.4.0 | |
@@ -23,7 +23,7 @@ require ( | |
github.com/onsi/gomega v1.19.0 | |
github.com/opencontainers/go-digest v1.0.0 | |
github.com/opencontainers/image-spec v1.0.3-0.20211202193544-a5463b7f9c84 | |
- github.com/opencontainers/runc v1.1.1 | |
+ github.com/opencontainers/runc v1.1.2 | |
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 | |
github.com/opencontainers/runtime-tools v0.9.0 | |
github.com/opencontainers/selinux v1.10.1 | |
@@ -33,7 +33,7 @@ require ( | |
github.com/sirupsen/logrus v1.8.1 | |
github.com/spf13/cobra v1.4.0 | |
github.com/spf13/pflag v1.0.5 | |
- github.com/stretchr/testify v1.7.1 | |
+ github.com/stretchr/testify v1.7.2 | |
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 | |
go.etcd.io/bbolt v1.3.6 | |
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 | |
diff --git a/go.sum b/go.sum | |
index 56e51c4e..3d1fb783 100644 | |
--- a/go.sum | |
+++ b/go.sum | |
@@ -90,6 +90,8 @@ github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX | |
github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= | |
github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= | |
github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= | |
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= | |
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= | |
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= | |
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= | |
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= | |
@@ -302,6 +304,8 @@ github.com/containers/storage v1.37.0/go.mod h1:kqeJeS0b7DO2ZT1nVWs0XufrmPFbgV3c | |
github.com/containers/storage v1.40.0/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= | |
github.com/containers/storage v1.40.3 h1:T/0oGWjAQJuwYv6VeJiOSQNxaiOqhnwtDqsG3VmvLrE= | |
github.com/containers/storage v1.40.3/go.mod h1:zUyPC3CFIGR1OhY1CKkffxgw9+LuH76PGvVcFj38dgs= | |
+github.com/containers/storage v1.41.1-0.20220606145428-2b2fb9fa246d h1:2SDCpmiaV0Dl/lTt4ERAfsilXq5qGXPGGHq2GbpS7eE= | |
+github.com/containers/storage v1.41.1-0.20220606145428-2b2fb9fa246d/go.mod h1:vP+mAlcZPE0ZzPtYK//uQxIX+40QZjnMKlTkwxaTwlY= | |
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= | |
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= | |
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= | |
@@ -633,6 +637,8 @@ github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47e | |
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= | |
github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= | |
github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | |
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= | |
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | |
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= | |
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= | |
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= | |
@@ -775,6 +781,8 @@ github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04s | |
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= | |
github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU= | |
github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= | |
+github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= | |
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= | |
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
@@ -916,6 +924,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ | |
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= | |
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= | |
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= | |
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= | |
github.com/sylabs/sif/v2 v2.7.0 h1:VFzN8alnJ/3n1JA0K9DyUtfSzezWgWrzLDcYGhgBskk= | |
github.com/sylabs/sif/v2 v2.7.0/go.mod h1:TiyBWsgWeh5yBeQFNuQnvROwswqK7YJT8JA1L53bsXQ= | |
@@ -1527,6 +1537,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C | |
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= | |
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | |
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= | |
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= | |
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go | |
index 75499c96..1d45a703 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go | |
@@ -4,17 +4,22 @@ import ( | |
"context" | |
"encoding/json" | |
"errors" | |
+ "fmt" | |
"strings" | |
"sync" | |
"syscall" | |
+ "time" | |
"github.com/Microsoft/hcsshim/internal/cow" | |
"github.com/Microsoft/hcsshim/internal/hcs/schema1" | |
hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" | |
+ "github.com/Microsoft/hcsshim/internal/jobobject" | |
"github.com/Microsoft/hcsshim/internal/log" | |
+ "github.com/Microsoft/hcsshim/internal/logfields" | |
"github.com/Microsoft/hcsshim/internal/oc" | |
"github.com/Microsoft/hcsshim/internal/timeout" | |
"github.com/Microsoft/hcsshim/internal/vmcompute" | |
+ "github.com/sirupsen/logrus" | |
"go.opencensus.io/trace" | |
) | |
@@ -28,7 +33,8 @@ type System struct { | |
waitBlock chan struct{} | |
waitError error | |
exitError error | |
- os, typ string | |
+ os, typ, owner string | |
+ startTime time.Time | |
} | |
func newSystem(id string) *System { | |
@@ -38,6 +44,11 @@ func newSystem(id string) *System { | |
} | |
} | |
+// Implementation detail for silo naming, this should NOT be relied upon very heavily. | |
+func siloNameFmt(containerID string) string { | |
+ return fmt.Sprintf(`\Container_%s`, containerID) | |
+} | |
+ | |
// CreateComputeSystem creates a new compute system with the given configuration but does not start it. | |
func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface interface{}) (_ *System, err error) { | |
operation := "hcs::CreateComputeSystem" | |
@@ -127,6 +138,7 @@ func (computeSystem *System) getCachedProperties(ctx context.Context) error { | |
} | |
computeSystem.typ = strings.ToLower(props.SystemType) | |
computeSystem.os = strings.ToLower(props.RuntimeOSType) | |
+ computeSystem.owner = strings.ToLower(props.Owner) | |
if computeSystem.os == "" && computeSystem.typ == "container" { | |
// Pre-RS5 HCS did not return the OS, but it only supported containers | |
// that ran Windows. | |
@@ -195,7 +207,7 @@ func (computeSystem *System) Start(ctx context.Context) (err error) { | |
if err != nil { | |
return makeSystemError(computeSystem, operation, err, events) | |
} | |
- | |
+ computeSystem.startTime = time.Now() | |
return nil | |
} | |
@@ -324,11 +336,115 @@ func (computeSystem *System) Properties(ctx context.Context, types ...schema1.Pr | |
return properties, nil | |
} | |
-// PropertiesV2 returns the requested container properties targeting a V2 schema container. | |
-func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (*hcsschema.Properties, error) { | |
- computeSystem.handleLock.RLock() | |
- defer computeSystem.handleLock.RUnlock() | |
+// queryInProc handles querying for container properties without reaching out to HCS. `props` | |
+// will be updated to contain any data returned from the queries present in `types`. If any properties | |
+// failed to be queried they will be tallied up and returned in as the first return value. Failures on | |
+// query are NOT considered errors; the only failure case for this method is if the containers job object | |
+// cannot be opened. | |
+func (computeSystem *System) queryInProc(ctx context.Context, props *hcsschema.Properties, types []hcsschema.PropertyType) ([]hcsschema.PropertyType, error) { | |
+ // In the future we can make use of some new functionality in the HCS that allows you | |
+ // to pass a job object for HCS to use for the container. Currently, the only way we'll | |
+ // be able to open the job/silo is if we're running as SYSTEM. | |
+ jobOptions := &jobobject.Options{ | |
+ UseNTVariant: true, | |
+ Name: siloNameFmt(computeSystem.id), | |
+ } | |
+ job, err := jobobject.Open(ctx, jobOptions) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ defer job.Close() | |
+ | |
+ var fallbackQueryTypes []hcsschema.PropertyType | |
+ for _, propType := range types { | |
+ switch propType { | |
+ case hcsschema.PTStatistics: | |
+ // Handle a bad caller asking for the same type twice. No use in re-querying if this is | |
+ // filled in already. | |
+ if props.Statistics == nil { | |
+ props.Statistics, err = computeSystem.statisticsInProc(job) | |
+ if err != nil { | |
+ log.G(ctx).WithError(err).Warn("failed to get statistics in-proc") | |
+ | |
+ fallbackQueryTypes = append(fallbackQueryTypes, propType) | |
+ } | |
+ } | |
+ default: | |
+ fallbackQueryTypes = append(fallbackQueryTypes, propType) | |
+ } | |
+ } | |
+ | |
+ return fallbackQueryTypes, nil | |
+} | |
+ | |
+// statisticsInProc emulates what HCS does to grab statistics for a given container with a small | |
+// change to make grabbing the private working set total much more efficient. | |
+func (computeSystem *System) statisticsInProc(job *jobobject.JobObject) (*hcsschema.Statistics, error) { | |
+ // Start timestamp for these stats before we grab them to match HCS | |
+ timestamp := time.Now() | |
+ | |
+ memInfo, err := job.QueryMemoryStats() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ processorInfo, err := job.QueryProcessorStats() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ storageInfo, err := job.QueryStorageStats() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ // This calculates the private working set more efficiently than HCS does. HCS calls NtQuerySystemInformation | |
+ // with the class SystemProcessInformation which returns an array containing system information for *every* | |
+ // process running on the machine. They then grab the pids that are running in the container and filter down | |
+ // the entries in the array to only what's running in that silo and start tallying up the total. This doesn't | |
+ // work well as performance should get worse if more processess are running on the machine in general and not | |
+ // just in the container. All of the additional information besides the WorkingSetPrivateSize field is ignored | |
+ // as well which isn't great and is wasted work to fetch. | |
+ // | |
+ // HCS only let's you grab statistics in an all or nothing fashion, so we can't just grab the private | |
+ // working set ourselves and ask for everything else seperately. The optimization we can make here is | |
+ // to open the silo ourselves and do the same queries for the rest of the info, as well as calculating | |
+ // the private working set in a more efficient manner by: | |
+ // | |
+ // 1. Find the pids running in the silo | |
+ // 2. Get a process handle for every process (only need PROCESS_QUERY_LIMITED_INFORMATION access) | |
+ // 3. Call NtQueryInformationProcess on each process with the class ProcessVmCounters | |
+ // 4. Tally up the total using the field PrivateWorkingSetSize in VM_COUNTERS_EX2. | |
+ privateWorkingSet, err := job.QueryPrivateWorkingSet() | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ return &hcsschema.Statistics{ | |
+ Timestamp: timestamp, | |
+ ContainerStartTime: computeSystem.startTime, | |
+ Uptime100ns: uint64(time.Since(computeSystem.startTime).Nanoseconds()) / 100, | |
+ Memory: &hcsschema.MemoryStats{ | |
+ MemoryUsageCommitBytes: memInfo.JobMemory, | |
+ MemoryUsageCommitPeakBytes: memInfo.PeakJobMemoryUsed, | |
+ MemoryUsagePrivateWorkingSetBytes: privateWorkingSet, | |
+ }, | |
+ Processor: &hcsschema.ProcessorStats{ | |
+ RuntimeKernel100ns: uint64(processorInfo.TotalKernelTime), | |
+ RuntimeUser100ns: uint64(processorInfo.TotalUserTime), | |
+ TotalRuntime100ns: uint64(processorInfo.TotalKernelTime + processorInfo.TotalUserTime), | |
+ }, | |
+ Storage: &hcsschema.StorageStats{ | |
+ ReadCountNormalized: uint64(storageInfo.ReadStats.IoCount), | |
+ ReadSizeBytes: storageInfo.ReadStats.TotalSize, | |
+ WriteCountNormalized: uint64(storageInfo.WriteStats.IoCount), | |
+ WriteSizeBytes: storageInfo.WriteStats.TotalSize, | |
+ }, | |
+ }, nil | |
+} | |
+ | |
+// hcsPropertiesV2Query is a helper to make a HcsGetComputeSystemProperties call using the V2 schema property types. | |
+func (computeSystem *System) hcsPropertiesV2Query(ctx context.Context, types []hcsschema.PropertyType) (*hcsschema.Properties, error) { | |
operation := "hcs::System::PropertiesV2" | |
queryBytes, err := json.Marshal(hcsschema.PropertyQuery{PropertyTypes: types}) | |
@@ -345,12 +461,66 @@ func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschem | |
if propertiesJSON == "" { | |
return nil, ErrUnexpectedValue | |
} | |
- properties := &hcsschema.Properties{} | |
- if err := json.Unmarshal([]byte(propertiesJSON), properties); err != nil { | |
+ props := &hcsschema.Properties{} | |
+ if err := json.Unmarshal([]byte(propertiesJSON), props); err != nil { | |
return nil, makeSystemError(computeSystem, operation, err, nil) | |
} | |
- return properties, nil | |
+ return props, nil | |
+} | |
+ | |
+// PropertiesV2 returns the requested compute systems properties targeting a V2 schema compute system. | |
+func (computeSystem *System) PropertiesV2(ctx context.Context, types ...hcsschema.PropertyType) (_ *hcsschema.Properties, err error) { | |
+ computeSystem.handleLock.RLock() | |
+ defer computeSystem.handleLock.RUnlock() | |
+ | |
+ // Let HCS tally up the total for VM based queries instead of querying ourselves. | |
+ if computeSystem.typ != "container" { | |
+ return computeSystem.hcsPropertiesV2Query(ctx, types) | |
+ } | |
+ | |
+ // Define a starter Properties struct with the default fields returned from every | |
+ // query. Owner is only returned from Statistics but it's harmless to include. | |
+ properties := &hcsschema.Properties{ | |
+ Id: computeSystem.id, | |
+ SystemType: computeSystem.typ, | |
+ RuntimeOsType: computeSystem.os, | |
+ Owner: computeSystem.owner, | |
+ } | |
+ | |
+ logEntry := log.G(ctx) | |
+ // First lets try and query ourselves without reaching to HCS. If any of the queries fail | |
+ // we'll take note and fallback to querying HCS for any of the failed types. | |
+ fallbackTypes, err := computeSystem.queryInProc(ctx, properties, types) | |
+ if err == nil && len(fallbackTypes) == 0 { | |
+ return properties, nil | |
+ } else if err != nil { | |
+ logEntry.WithError(fmt.Errorf("failed to query compute system properties in-proc: %w", err)) | |
+ fallbackTypes = types | |
+ } | |
+ | |
+ logEntry.WithFields(logrus.Fields{ | |
+ logfields.ContainerID: computeSystem.id, | |
+ "propertyTypes": fallbackTypes, | |
+ }).Info("falling back to HCS for property type queries") | |
+ | |
+ hcsProperties, err := computeSystem.hcsPropertiesV2Query(ctx, fallbackTypes) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ // Now add in anything that we might have successfully queried in process. | |
+ if properties.Statistics != nil { | |
+ hcsProperties.Statistics = properties.Statistics | |
+ hcsProperties.Owner = properties.Owner | |
+ } | |
+ | |
+ // For future support for querying processlist in-proc as well. | |
+ if properties.ProcessList != nil { | |
+ hcsProperties.ProcessList = properties.ProcessList | |
+ } | |
+ | |
+ return hcsProperties, nil | |
} | |
// Pause pauses the execution of the computeSystem. This feature is not enabled in TP5. | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go | |
index 591a2631..84b36821 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/hns/hnspolicy.go | |
@@ -21,10 +21,11 @@ const ( | |
) | |
type NatPolicy struct { | |
- Type PolicyType `json:"Type"` | |
- Protocol string `json:",omitempty"` | |
- InternalPort uint16 `json:",omitempty"` | |
- ExternalPort uint16 `json:",omitempty"` | |
+ Type PolicyType `json:"Type"` | |
+ Protocol string `json:",omitempty"` | |
+ InternalPort uint16 `json:",omitempty"` | |
+ ExternalPort uint16 `json:",omitempty"` | |
+ ExternalPortReserved bool `json:",omitempty"` | |
} | |
type QosPolicy struct { | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go | |
new file mode 100644 | |
index 00000000..3d640ac7 | |
--- /dev/null | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go | |
@@ -0,0 +1,111 @@ | |
+package jobobject | |
+ | |
+import ( | |
+ "context" | |
+ "fmt" | |
+ "sync" | |
+ "unsafe" | |
+ | |
+ "github.com/Microsoft/hcsshim/internal/log" | |
+ "github.com/Microsoft/hcsshim/internal/queue" | |
+ "github.com/Microsoft/hcsshim/internal/winapi" | |
+ "github.com/sirupsen/logrus" | |
+ "golang.org/x/sys/windows" | |
+) | |
+ | |
+var ( | |
+ ioInitOnce sync.Once | |
+ initIOErr error | |
+ // Global iocp handle that will be re-used for every job object | |
+ ioCompletionPort windows.Handle | |
+ // Mapping of job handle to queue to place notifications in. | |
+ jobMap sync.Map | |
+) | |
+ | |
+// MsgAllProcessesExited is a type representing a message that every process in a job has exited. | |
+type MsgAllProcessesExited struct{} | |
+ | |
+// MsgUnimplemented represents a message that we are aware of, but that isn't implemented currently. | |
+// This should not be treated as an error. | |
+type MsgUnimplemented struct{} | |
+ | |
+// pollIOCP polls the io completion port forever. | |
+func pollIOCP(ctx context.Context, iocpHandle windows.Handle) { | |
+ var ( | |
+ overlapped uintptr | |
+ code uint32 | |
+ key uintptr | |
+ ) | |
+ | |
+ for { | |
+ err := windows.GetQueuedCompletionStatus(iocpHandle, &code, &key, (**windows.Overlapped)(unsafe.Pointer(&overlapped)), windows.INFINITE) | |
+ if err != nil { | |
+ log.G(ctx).WithError(err).Error("failed to poll for job object message") | |
+ continue | |
+ } | |
+ if val, ok := jobMap.Load(key); ok { | |
+ msq, ok := val.(*queue.MessageQueue) | |
+ if !ok { | |
+ log.G(ctx).WithField("value", msq).Warn("encountered non queue type in job map") | |
+ continue | |
+ } | |
+ notification, err := parseMessage(code, overlapped) | |
+ if err != nil { | |
+ log.G(ctx).WithFields(logrus.Fields{ | |
+ "code": code, | |
+ "overlapped": overlapped, | |
+ }).Warn("failed to parse job object message") | |
+ continue | |
+ } | |
+ if err := msq.Write(notification); err == queue.ErrQueueClosed { | |
+ // Write will only return an error when the queue is closed. | |
+ // The only time a queue would ever be closed is when we call `Close` on | |
+ // the job it belongs to which also removes it from the jobMap, so something | |
+ // went wrong here. We can't return as this is reading messages for all jobs | |
+ // so just log it and move on. | |
+ log.G(ctx).WithFields(logrus.Fields{ | |
+ "code": code, | |
+ "overlapped": overlapped, | |
+ }).Warn("tried to write to a closed queue") | |
+ continue | |
+ } | |
+ } else { | |
+ log.G(ctx).Warn("received a message for a job not present in the mapping") | |
+ } | |
+ } | |
+} | |
+ | |
+func parseMessage(code uint32, overlapped uintptr) (interface{}, error) { | |
+ // Check code and parse out relevant information related to that notification | |
+ // that we care about. For now all we handle is the message that all processes | |
+ // in the job have exited. | |
+ switch code { | |
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO: | |
+ return MsgAllProcessesExited{}, nil | |
+ // Other messages for completeness and a check to make sure that if we fall | |
+ // into the default case that this is a code we don't know how to handle. | |
+ case winapi.JOB_OBJECT_MSG_END_OF_JOB_TIME: | |
+ case winapi.JOB_OBJECT_MSG_END_OF_PROCESS_TIME: | |
+ case winapi.JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT: | |
+ case winapi.JOB_OBJECT_MSG_NEW_PROCESS: | |
+ case winapi.JOB_OBJECT_MSG_EXIT_PROCESS: | |
+ case winapi.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS: | |
+ case winapi.JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT: | |
+ case winapi.JOB_OBJECT_MSG_JOB_MEMORY_LIMIT: | |
+ case winapi.JOB_OBJECT_MSG_NOTIFICATION_LIMIT: | |
+ default: | |
+ return nil, fmt.Errorf("unknown job notification type: %d", code) | |
+ } | |
+ return MsgUnimplemented{}, nil | |
+} | |
+ | |
+// Assigns an IO completion port to get notified of events for the registered job | |
+// object. | |
+func attachIOCP(job windows.Handle, iocp windows.Handle) error { | |
+ info := winapi.JOBOBJECT_ASSOCIATE_COMPLETION_PORT{ | |
+ CompletionKey: job, | |
+ CompletionPort: iocp, | |
+ } | |
+ _, err := windows.SetInformationJobObject(job, windows.JobObjectAssociateCompletionPortInformation, uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info))) | |
+ return err | |
+} | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go | |
new file mode 100644 | |
index 00000000..9c272641 | |
--- /dev/null | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go | |
@@ -0,0 +1,499 @@ | |
+package jobobject | |
+ | |
+import ( | |
+ "context" | |
+ "errors" | |
+ "fmt" | |
+ "sync" | |
+ "unsafe" | |
+ | |
+ "github.com/Microsoft/hcsshim/internal/queue" | |
+ "github.com/Microsoft/hcsshim/internal/winapi" | |
+ "golang.org/x/sys/windows" | |
+) | |
+ | |
+// This file provides higher level constructs for the win32 job object API. | |
+// Most of the core creation and management functions are already present in "golang.org/x/sys/windows" | |
+// (CreateJobObject, AssignProcessToJobObject, etc.) as well as most of the limit information | |
+// structs and associated limit flags. Whatever is not present from the job object API | |
+// in golang.org/x/sys/windows is located in /internal/winapi. | |
+// | |
+// https://docs.microsoft.com/en-us/windows/win32/procthread/job-objects | |
+ | |
+// JobObject is a high level wrapper around a Windows job object. Holds a handle to | |
+// the job, a queue to receive iocp notifications about the lifecycle | |
+// of the job and a mutex for synchronized handle access. | |
+type JobObject struct { | |
+ handle windows.Handle | |
+ mq *queue.MessageQueue | |
+ handleLock sync.RWMutex | |
+} | |
+ | |
+// JobLimits represents the resource constraints that can be applied to a job object. | |
+type JobLimits struct { | |
+ CPULimit uint32 | |
+ CPUWeight uint32 | |
+ MemoryLimitInBytes uint64 | |
+ MaxIOPS int64 | |
+ MaxBandwidth int64 | |
+} | |
+ | |
+type CPURateControlType uint32 | |
+ | |
+const ( | |
+ WeightBased CPURateControlType = iota | |
+ RateBased | |
+) | |
+ | |
+// Processor resource controls | |
+const ( | |
+ cpuLimitMin = 1 | |
+ cpuLimitMax = 10000 | |
+ cpuWeightMin = 1 | |
+ cpuWeightMax = 9 | |
+) | |
+ | |
+var ( | |
+ ErrAlreadyClosed = errors.New("the handle has already been closed") | |
+ ErrNotRegistered = errors.New("job is not registered to receive notifications") | |
+) | |
+ | |
+// Options represents the set of configurable options when making or opening a job object. | |
+type Options struct { | |
+ // `Name` specifies the name of the job object if a named job object is desired. | |
+ Name string | |
+ // `Notifications` specifies if the job will be registered to receive notifications. | |
+ // Defaults to false. | |
+ Notifications bool | |
+ // `UseNTVariant` specifies if we should use the `Nt` variant of Open/CreateJobObject. | |
+ // Defaults to false. | |
+ UseNTVariant bool | |
+} | |
+ | |
+// Create creates a job object. | |
+// | |
+// If options.Name is an empty string, the job will not be assigned a name. | |
+// | |
+// If options.Notifications are not enabled `PollNotifications` will return immediately with error `errNotRegistered`. | |
+// | |
+// If `options` is nil, use default option values. | |
+// | |
+// Returns a JobObject structure and an error if there is one. | |
+func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { | |
+ if options == nil { | |
+ options = &Options{} | |
+ } | |
+ | |
+ var jobName *winapi.UnicodeString | |
+ if options.Name != "" { | |
+ jobName, err = winapi.NewUnicodeString(options.Name) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ } | |
+ | |
+ var jobHandle windows.Handle | |
+ if options.UseNTVariant { | |
+ oa := winapi.ObjectAttributes{ | |
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}), | |
+ ObjectName: jobName, | |
+ Attributes: 0, | |
+ } | |
+ status := winapi.NtCreateJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) | |
+ if status != 0 { | |
+ return nil, winapi.RtlNtStatusToDosError(status) | |
+ } | |
+ } else { | |
+ var jobNameBuf *uint16 | |
+ if jobName != nil && jobName.Buffer != nil { | |
+ jobNameBuf = jobName.Buffer | |
+ } | |
+ jobHandle, err = windows.CreateJobObject(nil, jobNameBuf) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ } | |
+ | |
+ defer func() { | |
+ if err != nil { | |
+ windows.Close(jobHandle) | |
+ } | |
+ }() | |
+ | |
+ job := &JobObject{ | |
+ handle: jobHandle, | |
+ } | |
+ | |
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been | |
+ // created, create it and start polling. | |
+ if options.Notifications { | |
+ mq, err := setupNotifications(ctx, job) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ job.mq = mq | |
+ } | |
+ | |
+ return job, nil | |
+} | |
+ | |
+// Open opens an existing job object with name provided in `options`. If no name is provided | |
+// return an error since we need to know what job object to open. | |
+// | |
+// If options.Notifications is false `PollNotifications` will return immediately with error `errNotRegistered`. | |
+// | |
+// Returns a JobObject structure and an error if there is one. | |
+func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { | |
+ if options == nil || (options != nil && options.Name == "") { | |
+ return nil, errors.New("no job object name specified to open") | |
+ } | |
+ | |
+ unicodeJobName, err := winapi.NewUnicodeString(options.Name) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ | |
+ var jobHandle windows.Handle | |
+ if options != nil && options.UseNTVariant { | |
+ oa := winapi.ObjectAttributes{ | |
+ Length: unsafe.Sizeof(winapi.ObjectAttributes{}), | |
+ ObjectName: unicodeJobName, | |
+ Attributes: 0, | |
+ } | |
+ status := winapi.NtOpenJobObject(&jobHandle, winapi.JOB_OBJECT_ALL_ACCESS, &oa) | |
+ if status != 0 { | |
+ return nil, winapi.RtlNtStatusToDosError(status) | |
+ } | |
+ } else { | |
+ jobHandle, err = winapi.OpenJobObject(winapi.JOB_OBJECT_ALL_ACCESS, false, unicodeJobName.Buffer) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ } | |
+ | |
+ defer func() { | |
+ if err != nil { | |
+ windows.Close(jobHandle) | |
+ } | |
+ }() | |
+ | |
+ job := &JobObject{ | |
+ handle: jobHandle, | |
+ } | |
+ | |
+ // If the IOCP we'll be using to receive messages for all jobs hasn't been | |
+ // created, create it and start polling. | |
+ if options != nil && options.Notifications { | |
+ mq, err := setupNotifications(ctx, job) | |
+ if err != nil { | |
+ return nil, err | |
+ } | |
+ job.mq = mq | |
+ } | |
+ | |
+ return job, nil | |
+} | |
+ | |
+// helper function to setup notifications for creating/opening a job object | |
+func setupNotifications(ctx context.Context, job *JobObject) (*queue.MessageQueue, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ ioInitOnce.Do(func() { | |
+ h, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0xffffffff) | |
+ if err != nil { | |
+ initIOErr = err | |
+ return | |
+ } | |
+ ioCompletionPort = h | |
+ go pollIOCP(ctx, h) | |
+ }) | |
+ | |
+ if initIOErr != nil { | |
+ return nil, initIOErr | |
+ } | |
+ | |
+ mq := queue.NewMessageQueue() | |
+ jobMap.Store(uintptr(job.handle), mq) | |
+ if err := attachIOCP(job.handle, ioCompletionPort); err != nil { | |
+ jobMap.Delete(uintptr(job.handle)) | |
+ return nil, fmt.Errorf("failed to attach job to IO completion port: %w", err) | |
+ } | |
+ return mq, nil | |
+} | |
+ | |
+// PollNotification will poll for a job object notification. This call should only be called once | |
+// per job (ideally in a goroutine loop) and will block if there is not a notification ready. | |
+// This call will return immediately with error `ErrNotRegistered` if the job was not registered | |
+// to receive notifications during `Create`. Internally, messages will be queued and there | |
+// is no worry of messages being dropped. | |
+func (job *JobObject) PollNotification() (interface{}, error) { | |
+ if job.mq == nil { | |
+ return nil, ErrNotRegistered | |
+ } | |
+ return job.mq.ReadOrWait() | |
+} | |
+ | |
+// UpdateProcThreadAttribute updates the passed in ProcThreadAttributeList to contain what is necessary to | |
+// launch a process in a job at creation time. This can be used to avoid having to call Assign() after a process | |
+// has already started running. | |
+func (job *JobObject) UpdateProcThreadAttribute(attrList *windows.ProcThreadAttributeListContainer) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ | |
+ if err := attrList.Update( | |
+ winapi.PROC_THREAD_ATTRIBUTE_JOB_LIST, | |
+ unsafe.Pointer(&job.handle), | |
+ unsafe.Sizeof(job.handle), | |
+ ); err != nil { | |
+ return fmt.Errorf("failed to update proc thread attributes for job object: %w", err) | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+// Close closes the job object handle. | |
+func (job *JobObject) Close() error { | |
+ job.handleLock.Lock() | |
+ defer job.handleLock.Unlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ | |
+ if err := windows.Close(job.handle); err != nil { | |
+ return err | |
+ } | |
+ | |
+ if job.mq != nil { | |
+ job.mq.Close() | |
+ } | |
+ // Handles now invalid so if the map entry to receive notifications for this job still | |
+ // exists remove it so we can stop receiving notifications. | |
+ if _, ok := jobMap.Load(uintptr(job.handle)); ok { | |
+ jobMap.Delete(uintptr(job.handle)) | |
+ } | |
+ | |
+ job.handle = 0 | |
+ return nil | |
+} | |
+ | |
+// Assign assigns a process to the job object. | |
+func (job *JobObject) Assign(pid uint32) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ | |
+ if pid == 0 { | |
+ return errors.New("invalid pid: 0") | |
+ } | |
+ hProc, err := windows.OpenProcess(winapi.PROCESS_ALL_ACCESS, true, pid) | |
+ if err != nil { | |
+ return err | |
+ } | |
+ defer windows.Close(hProc) | |
+ return windows.AssignProcessToJobObject(job.handle, hProc) | |
+} | |
+ | |
+// Terminate terminates the job, essentially calls TerminateProcess on every process in the | |
+// job. | |
+func (job *JobObject) Terminate(exitCode uint32) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ return windows.TerminateJobObject(job.handle, exitCode) | |
+} | |
+ | |
+// Pids returns all of the process IDs in the job object. | |
+func (job *JobObject) Pids() ([]uint32, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST{} | |
+ err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ winapi.JobObjectBasicProcessIdList, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ) | |
+ | |
+ // This is either the case where there is only one process or no processes in | |
+ // the job. Any other case will result in ERROR_MORE_DATA. Check if info.NumberOfProcessIdsInList | |
+ // is 1 and just return this, otherwise return an empty slice. | |
+ if err == nil { | |
+ if info.NumberOfProcessIdsInList == 1 { | |
+ return []uint32{uint32(info.ProcessIdList[0])}, nil | |
+ } | |
+ // Return empty slice instead of nil to play well with the caller of this. | |
+ // Do not return an error if no processes are running inside the job | |
+ return []uint32{}, nil | |
+ } | |
+ | |
+ if err != winapi.ERROR_MORE_DATA { | |
+ return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err) | |
+ } | |
+ | |
+ jobBasicProcessIDListSize := unsafe.Sizeof(info) + (unsafe.Sizeof(info.ProcessIdList[0]) * uintptr(info.NumberOfAssignedProcesses-1)) | |
+ buf := make([]byte, jobBasicProcessIDListSize) | |
+ if err = winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ winapi.JobObjectBasicProcessIdList, | |
+ uintptr(unsafe.Pointer(&buf[0])), | |
+ uint32(len(buf)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("failed to query for PIDs in job object: %w", err) | |
+ } | |
+ | |
+ bufInfo := (*winapi.JOBOBJECT_BASIC_PROCESS_ID_LIST)(unsafe.Pointer(&buf[0])) | |
+ pids := make([]uint32, bufInfo.NumberOfProcessIdsInList) | |
+ for i, bufPid := range bufInfo.AllPids() { | |
+ pids[i] = uint32(bufPid) | |
+ } | |
+ return pids, nil | |
+} | |
+ | |
+// QueryMemoryStats gets the memory stats for the job object. | |
+func (job *JobObject) QueryMemoryStats() (*winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := winapi.JOBOBJECT_MEMORY_USAGE_INFORMATION{} | |
+ if err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ winapi.JobObjectMemoryUsageInformation, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("failed to query for job object memory stats: %w", err) | |
+ } | |
+ return &info, nil | |
+} | |
+ | |
+// QueryProcessorStats gets the processor stats for the job object. | |
+func (job *JobObject) QueryProcessorStats() (*winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := winapi.JOBOBJECT_BASIC_ACCOUNTING_INFORMATION{} | |
+ if err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ winapi.JobObjectBasicAccountingInformation, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("failed to query for job object process stats: %w", err) | |
+ } | |
+ return &info, nil | |
+} | |
+ | |
+// QueryStorageStats gets the storage (I/O) stats for the job object. | |
+func (job *JobObject) QueryStorageStats() (*winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := winapi.JOBOBJECT_IO_ATTRIBUTION_INFORMATION{ | |
+ ControlFlags: winapi.JOBOBJECT_IO_ATTRIBUTION_CONTROL_ENABLE, | |
+ } | |
+ if err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ winapi.JobObjectIoAttribution, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("failed to query for job object storage stats: %w", err) | |
+ } | |
+ return &info, nil | |
+} | |
+ | |
+// QueryPrivateWorkingSet returns the private working set size for the job. This is calculated by adding up the | |
+// private working set for every process running in the job. | |
+func (job *JobObject) QueryPrivateWorkingSet() (uint64, error) { | |
+ pids, err := job.Pids() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ | |
+ openAndQueryWorkingSet := func(pid uint32) (uint64, error) { | |
+ h, err := windows.OpenProcess(windows.PROCESS_QUERY_LIMITED_INFORMATION, false, pid) | |
+ if err != nil { | |
+ // Continue to the next if OpenProcess doesn't return a valid handle (fails). Handles a | |
+ // case where one of the pids in the job exited before we open. | |
+ return 0, nil | |
+ } | |
+ defer func() { | |
+ _ = windows.Close(h) | |
+ }() | |
+ // Check if the process is actually running in the job still. There's a small chance | |
+ // that the process could have exited and had its pid re-used between grabbing the pids | |
+ // in the job and opening the handle to it above. | |
+ var inJob int32 | |
+ if err := winapi.IsProcessInJob(h, job.handle, &inJob); err != nil { | |
+ // This shouldn't fail unless we have incorrect access rights which we control | |
+ // here so probably best to error out if this failed. | |
+ return 0, err | |
+ } | |
+ // Don't report stats for this process as it's not running in the job. This shouldn't be | |
+ // an error condition though. | |
+ if inJob == 0 { | |
+ return 0, nil | |
+ } | |
+ | |
+ var vmCounters winapi.VM_COUNTERS_EX2 | |
+ status := winapi.NtQueryInformationProcess( | |
+ h, | |
+ winapi.ProcessVmCounters, | |
+ uintptr(unsafe.Pointer(&vmCounters)), | |
+ uint32(unsafe.Sizeof(vmCounters)), | |
+ nil, | |
+ ) | |
+ if !winapi.NTSuccess(status) { | |
+ return 0, fmt.Errorf("failed to query information for process: %w", winapi.RtlNtStatusToDosError(status)) | |
+ } | |
+ return uint64(vmCounters.PrivateWorkingSetSize), nil | |
+ } | |
+ | |
+ var jobWorkingSetSize uint64 | |
+ for _, pid := range pids { | |
+ workingSet, err := openAndQueryWorkingSet(pid) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ jobWorkingSetSize += workingSet | |
+ } | |
+ | |
+ return jobWorkingSetSize, nil | |
+} | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go | |
new file mode 100644 | |
index 00000000..4be29778 | |
--- /dev/null | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go | |
@@ -0,0 +1,315 @@ | |
+package jobobject | |
+ | |
+import ( | |
+ "errors" | |
+ "fmt" | |
+ "unsafe" | |
+ | |
+ "github.com/Microsoft/hcsshim/internal/winapi" | |
+ "golang.org/x/sys/windows" | |
+) | |
+ | |
+const ( | |
+ memoryLimitMax uint64 = 0xffffffffffffffff | |
+) | |
+ | |
+func isFlagSet(flag, controlFlags uint32) bool { | |
+ return (flag & controlFlags) == flag | |
+} | |
+ | |
+// SetResourceLimits sets resource limits on the job object (cpu, memory, storage). | |
+func (job *JobObject) SetResourceLimits(limits *JobLimits) error { | |
+ // Go through and check what limits were specified and apply them to the job. | |
+ if limits.MemoryLimitInBytes != 0 { | |
+ if err := job.SetMemoryLimit(limits.MemoryLimitInBytes); err != nil { | |
+ return fmt.Errorf("failed to set job object memory limit: %w", err) | |
+ } | |
+ } | |
+ | |
+ if limits.CPULimit != 0 { | |
+ if err := job.SetCPULimit(RateBased, limits.CPULimit); err != nil { | |
+ return fmt.Errorf("failed to set job object cpu limit: %w", err) | |
+ } | |
+ } else if limits.CPUWeight != 0 { | |
+ if err := job.SetCPULimit(WeightBased, limits.CPUWeight); err != nil { | |
+ return fmt.Errorf("failed to set job object cpu limit: %w", err) | |
+ } | |
+ } | |
+ | |
+ if limits.MaxBandwidth != 0 || limits.MaxIOPS != 0 { | |
+ if err := job.SetIOLimit(limits.MaxBandwidth, limits.MaxIOPS); err != nil { | |
+ return fmt.Errorf("failed to set io limit on job object: %w", err) | |
+ } | |
+ } | |
+ return nil | |
+} | |
+ | |
+// SetTerminateOnLastHandleClose sets the job object flag that specifies that the job should terminate | |
+// all processes in the job on the last open handle being closed. | |
+func (job *JobObject) SetTerminateOnLastHandleClose() error { | |
+ info, err := job.getExtendedInformation() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE | |
+ return job.setExtendedInformation(info) | |
+} | |
+ | |
+// SetMemoryLimit sets the memory limit of the job object based on the given `memoryLimitInBytes`. | |
+func (job *JobObject) SetMemoryLimit(memoryLimitInBytes uint64) error { | |
+ if memoryLimitInBytes >= memoryLimitMax { | |
+ return errors.New("memory limit specified exceeds the max size") | |
+ } | |
+ | |
+ info, err := job.getExtendedInformation() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ | |
+ info.JobMemoryLimit = uintptr(memoryLimitInBytes) | |
+ info.BasicLimitInformation.LimitFlags |= windows.JOB_OBJECT_LIMIT_JOB_MEMORY | |
+ return job.setExtendedInformation(info) | |
+} | |
+ | |
+// GetMemoryLimit gets the memory limit in bytes of the job object. | |
+func (job *JobObject) GetMemoryLimit() (uint64, error) { | |
+ info, err := job.getExtendedInformation() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ return uint64(info.JobMemoryLimit), nil | |
+} | |
+ | |
+// SetCPULimit sets the CPU limit depending on the specified `CPURateControlType` to | |
+// `rateControlValue` for the job object. | |
+func (job *JobObject) SetCPULimit(rateControlType CPURateControlType, rateControlValue uint32) error { | |
+ cpuInfo, err := job.getCPURateControlInformation() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ switch rateControlType { | |
+ case WeightBased: | |
+ if rateControlValue < cpuWeightMin || rateControlValue > cpuWeightMax { | |
+ return fmt.Errorf("processor weight value of `%d` is invalid", rateControlValue) | |
+ } | |
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED | |
+ cpuInfo.Value = rateControlValue | |
+ case RateBased: | |
+ if rateControlValue < cpuLimitMin || rateControlValue > cpuLimitMax { | |
+ return fmt.Errorf("processor rate of `%d` is invalid", rateControlValue) | |
+ } | |
+ cpuInfo.ControlFlags |= winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE | winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP | |
+ cpuInfo.Value = rateControlValue | |
+ default: | |
+ return errors.New("invalid job object cpu rate control type") | |
+ } | |
+ return job.setCPURateControlInfo(cpuInfo) | |
+} | |
+ | |
+// GetCPULimit gets the cpu limits for the job object. | |
+// `rateControlType` is used to indicate what type of cpu limit to query for. | |
+func (job *JobObject) GetCPULimit(rateControlType CPURateControlType) (uint32, error) { | |
+ info, err := job.getCPURateControlInformation() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ | |
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_ENABLE, info.ControlFlags) { | |
+ return 0, errors.New("the job does not have cpu rate control enabled") | |
+ } | |
+ | |
+ switch rateControlType { | |
+ case WeightBased: | |
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_WEIGHT_BASED, info.ControlFlags) { | |
+ return 0, errors.New("cannot get cpu weight for job object without cpu weight option set") | |
+ } | |
+ case RateBased: | |
+ if !isFlagSet(winapi.JOB_OBJECT_CPU_RATE_CONTROL_HARD_CAP, info.ControlFlags) { | |
+ return 0, errors.New("cannot get cpu rate hard cap for job object without cpu rate hard cap option set") | |
+ } | |
+ default: | |
+ return 0, errors.New("invalid job object cpu rate control type") | |
+ } | |
+ return info.Value, nil | |
+} | |
+ | |
+// SetCPUAffinity sets the processor affinity for the job object. | |
+// The affinity is passed in as a bitmask. | |
+func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error { | |
+ info, err := job.getExtendedInformation() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY) | |
+ info.BasicLimitInformation.Affinity = uintptr(affinityBitMask) | |
+ return job.setExtendedInformation(info) | |
+} | |
+ | |
+// GetCPUAffinity gets the processor affinity for the job object. | |
+// The returned affinity is a bitmask. | |
+func (job *JobObject) GetCPUAffinity() (uint64, error) { | |
+ info, err := job.getExtendedInformation() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ return uint64(info.BasicLimitInformation.Affinity), nil | |
+} | |
+ | |
+// SetIOLimit sets the IO limits specified on the job object. | |
+func (job *JobObject) SetIOLimit(maxBandwidth, maxIOPS int64) error { | |
+ ioInfo, err := job.getIOLimit() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ ioInfo.ControlFlags |= winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE | |
+ if maxBandwidth != 0 { | |
+ ioInfo.MaxBandwidth = maxBandwidth | |
+ } | |
+ if maxIOPS != 0 { | |
+ ioInfo.MaxIops = maxIOPS | |
+ } | |
+ return job.setIORateControlInfo(ioInfo) | |
+} | |
+ | |
+// GetIOMaxBandwidthLimit gets the max bandwidth for the job object. | |
+func (job *JobObject) GetIOMaxBandwidthLimit() (int64, error) { | |
+ info, err := job.getIOLimit() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ return info.MaxBandwidth, nil | |
+} | |
+ | |
+// GetIOMaxIopsLimit gets the max iops for the job object. | |
+func (job *JobObject) GetIOMaxIopsLimit() (int64, error) { | |
+ info, err := job.getIOLimit() | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ return info.MaxIops, nil | |
+} | |
+ | |
+// Helper function for getting a job object's extended information. | |
+func (job *JobObject) getExtendedInformation() (*windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{} | |
+ if err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ windows.JobObjectExtendedLimitInformation, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("query %v returned error: %w", info, err) | |
+ } | |
+ return &info, nil | |
+} | |
+ | |
+// Helper function for getting a job object's CPU rate control information. | |
+func (job *JobObject) getCPURateControlInformation() (*winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ info := winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION{} | |
+ if err := winapi.QueryInformationJobObject( | |
+ job.handle, | |
+ windows.JobObjectCpuRateControlInformation, | |
+ uintptr(unsafe.Pointer(&info)), | |
+ uint32(unsafe.Sizeof(info)), | |
+ nil, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("query %v returned error: %w", info, err) | |
+ } | |
+ return &info, nil | |
+} | |
+ | |
+// Helper function for setting a job object's extended information. | |
+func (job *JobObject) setExtendedInformation(info *windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ | |
+ if _, err := windows.SetInformationJobObject( | |
+ job.handle, | |
+ windows.JobObjectExtendedLimitInformation, | |
+ uintptr(unsafe.Pointer(info)), | |
+ uint32(unsafe.Sizeof(*info)), | |
+ ); err != nil { | |
+ return fmt.Errorf("failed to set Extended info %v on job object: %w", info, err) | |
+ } | |
+ return nil | |
+} | |
+ | |
+// Helper function for querying job handle for IO limit information. | |
+func (job *JobObject) getIOLimit() (*winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION, error) { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return nil, ErrAlreadyClosed | |
+ } | |
+ | |
+ ioInfo := &winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION{} | |
+ var blockCount uint32 = 1 | |
+ | |
+ if _, err := winapi.QueryIoRateControlInformationJobObject( | |
+ job.handle, | |
+ nil, | |
+ &ioInfo, | |
+ &blockCount, | |
+ ); err != nil { | |
+ return nil, fmt.Errorf("query %v returned error: %w", ioInfo, err) | |
+ } | |
+ | |
+ if !isFlagSet(winapi.JOB_OBJECT_IO_RATE_CONTROL_ENABLE, ioInfo.ControlFlags) { | |
+ return nil, fmt.Errorf("query %v cannot get IO limits for job object without IO rate control option set", ioInfo) | |
+ } | |
+ return ioInfo, nil | |
+} | |
+ | |
+// Helper function for setting a job object's IO rate control information. | |
+func (job *JobObject) setIORateControlInfo(ioInfo *winapi.JOBOBJECT_IO_RATE_CONTROL_INFORMATION) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ | |
+ if _, err := winapi.SetIoRateControlInformationJobObject(job.handle, ioInfo); err != nil { | |
+ return fmt.Errorf("failed to set IO limit info %v on job object: %w", ioInfo, err) | |
+ } | |
+ return nil | |
+} | |
+ | |
+// Helper function for setting a job object's CPU rate control information. | |
+func (job *JobObject) setCPURateControlInfo(cpuInfo *winapi.JOBOBJECT_CPU_RATE_CONTROL_INFORMATION) error { | |
+ job.handleLock.RLock() | |
+ defer job.handleLock.RUnlock() | |
+ | |
+ if job.handle == 0 { | |
+ return ErrAlreadyClosed | |
+ } | |
+ if _, err := windows.SetInformationJobObject( | |
+ job.handle, | |
+ windows.JobObjectCpuRateControlInformation, | |
+ uintptr(unsafe.Pointer(cpuInfo)), | |
+ uint32(unsafe.Sizeof(cpuInfo)), | |
+ ); err != nil { | |
+ return fmt.Errorf("failed to set cpu limit info %v on job object: %w", cpuInfo, err) | |
+ } | |
+ return nil | |
+} | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go | |
new file mode 100644 | |
index 00000000..e177c9a6 | |
--- /dev/null | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/queue/mq.go | |
@@ -0,0 +1,111 @@ | |
+package queue | |
+ | |
+import ( | |
+ "errors" | |
+ "sync" | |
+) | |
+ | |
+var ( | |
+ ErrQueueClosed = errors.New("the queue is closed for reading and writing") | |
+ ErrQueueEmpty = errors.New("the queue is empty") | |
+) | |
+ | |
+// MessageQueue represents a threadsafe message queue to be used to retrieve or | |
+// write messages to. | |
+type MessageQueue struct { | |
+ m *sync.RWMutex | |
+ c *sync.Cond | |
+ messages []interface{} | |
+ closed bool | |
+} | |
+ | |
+// NewMessageQueue returns a new MessageQueue. | |
+func NewMessageQueue() *MessageQueue { | |
+ m := &sync.RWMutex{} | |
+ return &MessageQueue{ | |
+ m: m, | |
+ c: sync.NewCond(m), | |
+ messages: []interface{}{}, | |
+ } | |
+} | |
+ | |
+// Write writes `msg` to the queue. | |
+func (mq *MessageQueue) Write(msg interface{}) error { | |
+ mq.m.Lock() | |
+ defer mq.m.Unlock() | |
+ | |
+ if mq.closed { | |
+ return ErrQueueClosed | |
+ } | |
+ mq.messages = append(mq.messages, msg) | |
+ // Signal a waiter that there is now a value available in the queue. | |
+ mq.c.Signal() | |
+ return nil | |
+} | |
+ | |
+// Read will read a value from the queue if available, otherwise return an error. | |
+func (mq *MessageQueue) Read() (interface{}, error) { | |
+ mq.m.Lock() | |
+ defer mq.m.Unlock() | |
+ if mq.closed { | |
+ return nil, ErrQueueClosed | |
+ } | |
+ if mq.isEmpty() { | |
+ return nil, ErrQueueEmpty | |
+ } | |
+ val := mq.messages[0] | |
+ mq.messages[0] = nil | |
+ mq.messages = mq.messages[1:] | |
+ return val, nil | |
+} | |
+ | |
+// ReadOrWait will read a value from the queue if available, else it will wait for a | |
+// value to become available. This will block forever if nothing gets written or until | |
+// the queue gets closed. | |
+func (mq *MessageQueue) ReadOrWait() (interface{}, error) { | |
+ mq.m.Lock() | |
+ if mq.closed { | |
+ mq.m.Unlock() | |
+ return nil, ErrQueueClosed | |
+ } | |
+ if mq.isEmpty() { | |
+ for !mq.closed && mq.isEmpty() { | |
+ mq.c.Wait() | |
+ } | |
+ mq.m.Unlock() | |
+ return mq.Read() | |
+ } | |
+ val := mq.messages[0] | |
+ mq.messages[0] = nil | |
+ mq.messages = mq.messages[1:] | |
+ mq.m.Unlock() | |
+ return val, nil | |
+} | |
+ | |
+// IsEmpty returns if the queue is empty | |
+func (mq *MessageQueue) IsEmpty() bool { | |
+ mq.m.RLock() | |
+ defer mq.m.RUnlock() | |
+ return len(mq.messages) == 0 | |
+} | |
+ | |
+// Nonexported empty check that doesn't lock so we can call this in Read and Write. | |
+func (mq *MessageQueue) isEmpty() bool { | |
+ return len(mq.messages) == 0 | |
+} | |
+ | |
+// Close closes the queue for future writes or reads. Any attempts to read or write from the | |
+// queue after close will return ErrQueueClosed. This is safe to call multiple times. | |
+func (mq *MessageQueue) Close() { | |
+ mq.m.Lock() | |
+ defer mq.m.Unlock() | |
+ // Already closed | |
+ if mq.closed { | |
+ return | |
+ } | |
+ mq.messages = nil | |
+ mq.closed = true | |
+ // If there's anybody currently waiting on a value from ReadOrWait, we need to | |
+ // broadcast so the read(s) can return ErrQueueClosed. | |
+ mq.c.Broadcast() | |
+} | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go | |
deleted file mode 100644 | |
index 4e609cbf..00000000 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/iocp.go | |
+++ /dev/null | |
@@ -1,3 +0,0 @@ | |
-package winapi | |
- | |
-//sys GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go | |
index ba12b1ad..479649db 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/jobobject.go | |
@@ -24,7 +24,10 @@ const ( | |
// Access rights for creating or opening job objects. | |
// | |
// https://docs.microsoft.com/en-us/windows/win32/procthread/job-object-security-and-access-rights | |
-const JOB_OBJECT_ALL_ACCESS = 0x1F001F | |
+const ( | |
+ JOB_OBJECT_QUERY = 0x0004 | |
+ JOB_OBJECT_ALL_ACCESS = 0x1F001F | |
+) | |
// IO limit flags | |
// | |
@@ -93,7 +96,7 @@ type JOBOBJECT_BASIC_PROCESS_ID_LIST struct { | |
// AllPids returns all the process Ids in the job object. | |
func (p *JOBOBJECT_BASIC_PROCESS_ID_LIST) AllPids() []uintptr { | |
- return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList] | |
+ return (*[(1 << 27) - 1]uintptr)(unsafe.Pointer(&p.ProcessIdList[0]))[:p.NumberOfProcessIdsInList:p.NumberOfProcessIdsInList] | |
} | |
// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-jobobject_basic_accounting_information | |
@@ -162,7 +165,7 @@ type JOBOBJECT_ASSOCIATE_COMPLETION_PORT struct { | |
// PBOOL Result | |
// ); | |
// | |
-//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) = kernel32.IsProcessInJob | |
+//sys IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) = kernel32.IsProcessInJob | |
// BOOL QueryInformationJobObject( | |
// HANDLE hJob, | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go | |
index 37839435..5f9e03fd 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/process.go | |
@@ -6,3 +6,60 @@ const ( | |
PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE = 0x20016 | |
PROC_THREAD_ATTRIBUTE_JOB_LIST = 0x2000D | |
) | |
+ | |
+// ProcessVmCounters corresponds to the _VM_COUNTERS_EX and _VM_COUNTERS_EX2 structures. | |
+const ProcessVmCounters = 3 | |
+ | |
+// __kernel_entry NTSTATUS NtQueryInformationProcess( | |
+// [in] HANDLE ProcessHandle, | |
+// [in] PROCESSINFOCLASS ProcessInformationClass, | |
+// [out] PVOID ProcessInformation, | |
+// [in] ULONG ProcessInformationLength, | |
+// [out, optional] PULONG ReturnLength | |
+// ); | |
+// | |
+//sys NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) = ntdll.NtQueryInformationProcess | |
+ | |
+// typedef struct _VM_COUNTERS_EX | |
+// { | |
+// SIZE_T PeakVirtualSize; | |
+// SIZE_T VirtualSize; | |
+// ULONG PageFaultCount; | |
+// SIZE_T PeakWorkingSetSize; | |
+// SIZE_T WorkingSetSize; | |
+// SIZE_T QuotaPeakPagedPoolUsage; | |
+// SIZE_T QuotaPagedPoolUsage; | |
+// SIZE_T QuotaPeakNonPagedPoolUsage; | |
+// SIZE_T QuotaNonPagedPoolUsage; | |
+// SIZE_T PagefileUsage; | |
+// SIZE_T PeakPagefileUsage; | |
+// SIZE_T PrivateUsage; | |
+// } VM_COUNTERS_EX, *PVM_COUNTERS_EX; | |
+// | |
+type VM_COUNTERS_EX struct { | |
+ PeakVirtualSize uintptr | |
+ VirtualSize uintptr | |
+ PageFaultCount uint32 | |
+ PeakWorkingSetSize uintptr | |
+ WorkingSetSize uintptr | |
+ QuotaPeakPagedPoolUsage uintptr | |
+ QuotaPagedPoolUsage uintptr | |
+ QuotaPeakNonPagedPoolUsage uintptr | |
+ QuotaNonPagedPoolUsage uintptr | |
+ PagefileUsage uintptr | |
+ PeakPagefileUsage uintptr | |
+ PrivateUsage uintptr | |
+} | |
+ | |
+// typedef struct _VM_COUNTERS_EX2 | |
+// { | |
+// VM_COUNTERS_EX CountersEx; | |
+// SIZE_T PrivateWorkingSetSize; | |
+// SIZE_T SharedCommitUsage; | |
+// } VM_COUNTERS_EX2, *PVM_COUNTERS_EX2; | |
+// | |
+type VM_COUNTERS_EX2 struct { | |
+ CountersEx VM_COUNTERS_EX | |
+ PrivateWorkingSetSize uintptr | |
+ SharedCommitUsage uintptr | |
+} | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go | |
index 1d4ba3c4..d2cc9d9f 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/winapi.go | |
@@ -2,4 +2,4 @@ | |
// be thought of as an extension to golang.org/x/sys/windows. | |
package winapi | |
-//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go console.go system.go net.go path.go thread.go iocp.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go | |
+//go:generate go run ..\..\mksyscall_windows.go -output zsyscall_windows.go user.go console.go system.go net.go path.go thread.go jobobject.go logon.go memory.go process.go processor.go devices.go filesystem.go errors.go | |
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go | |
index 4eb64b4c..39fb3e1a 100644 | |
--- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go | |
+++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go | |
@@ -50,7 +50,6 @@ var ( | |
procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") | |
procSearchPathW = modkernel32.NewProc("SearchPathW") | |
procCreateRemoteThread = modkernel32.NewProc("CreateRemoteThread") | |
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") | |
procIsProcessInJob = modkernel32.NewProc("IsProcessInJob") | |
procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") | |
procOpenJobObjectW = modkernel32.NewProc("OpenJobObjectW") | |
@@ -61,6 +60,7 @@ var ( | |
procLogonUserW = modadvapi32.NewProc("LogonUserW") | |
procLocalAlloc = modkernel32.NewProc("LocalAlloc") | |
procLocalFree = modkernel32.NewProc("LocalFree") | |
+ procNtQueryInformationProcess = modntdll.NewProc("NtQueryInformationProcess") | |
procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") | |
procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") | |
procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") | |
@@ -140,19 +140,7 @@ func CreateRemoteThread(process windows.Handle, sa *windows.SecurityAttributes, | |
return | |
} | |
-func GetQueuedCompletionStatus(cphandle windows.Handle, qty *uint32, key *uintptr, overlapped **windows.Overlapped, timeout uint32) (err error) { | |
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) | |
- if r1 == 0 { | |
- if e1 != 0 { | |
- err = errnoErr(e1) | |
- } else { | |
- err = syscall.EINVAL | |
- } | |
- } | |
- return | |
-} | |
- | |
-func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *bool) (err error) { | |
+func IsProcessInJob(procHandle windows.Handle, jobHandle windows.Handle, result *int32) (err error) { | |
r1, _, e1 := syscall.Syscall(procIsProcessInJob.Addr(), 3, uintptr(procHandle), uintptr(jobHandle), uintptr(unsafe.Pointer(result))) | |
if r1 == 0 { | |
if e1 != 0 { | |
@@ -256,6 +244,12 @@ func LocalFree(ptr uintptr) { | |
return | |
} | |
+func NtQueryInformationProcess(processHandle windows.Handle, processInfoClass uint32, processInfo uintptr, processInfoLength uint32, returnLength *uint32) (status uint32) { | |
+ r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(processHandle), uintptr(processInfoClass), uintptr(processInfo), uintptr(processInfoLength), uintptr(unsafe.Pointer(returnLength)), 0) | |
+ status = uint32(r0) | |
+ return | |
+} | |
+ | |
func GetActiveProcessorCount(groupNumber uint16) (amount uint32) { | |
r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0) | |
amount = uint32(r0) | |
diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml | |
index fd3d3105..53b13cd3 100644 | |
--- a/vendor/github.com/containers/storage/.cirrus.yml | |
+++ b/vendor/github.com/containers/storage/.cirrus.yml | |
@@ -17,14 +17,14 @@ env: | |
#### | |
#### Cache-image names to test with (double-quotes around names are critical) | |
### | |
- FEDORA_NAME: "fedora-35" | |
- PRIOR_FEDORA_NAME: "fedora-34" | |
- UBUNTU_NAME: "ubuntu-2104" | |
+ FEDORA_NAME: "fedora-36" | |
+ PRIOR_FEDORA_NAME: "fedora-35" | |
+ UBUNTU_NAME: "ubuntu-2204" | |
# GCE project where images live | |
IMAGE_PROJECT: "libpod-218412" | |
# VM Image built in containers/automation_images | |
- IMAGE_SUFFIX: "c4512539143831552" | |
+ IMAGE_SUFFIX: "c5878804328480768" | |
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" | |
PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}" | |
UBUNTU_CACHE_IMAGE_NAME: "ubuntu-${IMAGE_SUFFIX}" | |
diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile | |
index 2c1e4a18..244576d5 100644 | |
--- a/vendor/github.com/containers/storage/Makefile | |
+++ b/vendor/github.com/containers/storage/Makefile | |
@@ -59,8 +59,8 @@ binary local-binary: containers-storage | |
local-gccgo: ## build using gccgo on the host | |
GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build $(MOD_VENDOR) -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage | |
-local-cross: ## cross build the binaries for arm, darwin, and\nfreebsd | |
- @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 ; do \ | |
+local-cross: ## cross build the binaries for arm, darwin, and freebsd | |
+ @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ | |
os=`echo $${target} | cut -f1 -d/` ; \ | |
arch=`echo $${target} | cut -f2 -d/` ; \ | |
suffix=$${os}.$${arch} ; \ | |
diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION | |
index 39d58cac..ce045fe2 100644 | |
--- a/vendor/github.com/containers/storage/VERSION | |
+++ b/vendor/github.com/containers/storage/VERSION | |
@@ -1 +1 @@ | |
-1.40.3 | |
+1.41.1-dev | |
diff --git a/vendor/github.com/containers/storage/drivers/chown_darwin.go b/vendor/github.com/containers/storage/drivers/chown_darwin.go | |
new file mode 100644 | |
index 00000000..cf608d47 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/drivers/chown_darwin.go | |
@@ -0,0 +1,109 @@ | |
+//go:build darwin | |
+// +build darwin | |
+ | |
+package graphdriver | |
+ | |
+import ( | |
+ "errors" | |
+ "fmt" | |
+ "os" | |
+ "sync" | |
+ "syscall" | |
+ | |
+ "github.com/containers/storage/pkg/idtools" | |
+ "github.com/containers/storage/pkg/system" | |
+) | |
+ | |
+type inode struct { | |
+ Dev uint64 | |
+ Ino uint64 | |
+} | |
+ | |
+type platformChowner struct { | |
+ mutex sync.Mutex | |
+ inodes map[inode]bool | |
+} | |
+ | |
+func newLChowner() *platformChowner { | |
+ return &platformChowner{ | |
+ inodes: make(map[inode]bool), | |
+ } | |
+} | |
+ | |
+func (c *platformChowner) LChown(path string, info os.FileInfo, toHost, toContainer *idtools.IDMappings) error { | |
+ st, ok := info.Sys().(*syscall.Stat_t) | |
+ if !ok { | |
+ return nil | |
+ } | |
+ | |
+ i := inode{ | |
+ Dev: uint64(st.Dev), | |
+ Ino: uint64(st.Ino), | |
+ } | |
+ c.mutex.Lock() | |
+ _, found := c.inodes[i] | |
+ if !found { | |
+ c.inodes[i] = true | |
+ } | |
+ c.mutex.Unlock() | |
+ | |
+ if found { | |
+ return nil | |
+ } | |
+ | |
+ // Map an on-disk UID/GID pair from host to container | |
+ // using the first map, then back to the host using the | |
+ // second map. Skip that first step if they're 0, to | |
+ // compensate for cases where a parent layer should | |
+ // have had a mapped value, but didn't. | |
+ uid, gid := int(st.Uid), int(st.Gid) | |
+ if toContainer != nil { | |
+ pair := idtools.IDPair{ | |
+ UID: uid, | |
+ GID: gid, | |
+ } | |
+ mappedUID, mappedGID, err := toContainer.ToContainer(pair) | |
+ if err != nil { | |
+ if (uid != 0) || (gid != 0) { | |
+ return fmt.Errorf("error mapping host ID pair %#v for %q to container: %v", pair, path, err) | |
+ } | |
+ mappedUID, mappedGID = uid, gid | |
+ } | |
+ uid, gid = mappedUID, mappedGID | |
+ } | |
+ if toHost != nil { | |
+ pair := idtools.IDPair{ | |
+ UID: uid, | |
+ GID: gid, | |
+ } | |
+ mappedPair, err := toHost.ToHostOverflow(pair) | |
+ if err != nil { | |
+ return fmt.Errorf("error mapping container ID pair %#v for %q to host: %v", pair, path, err) | |
+ } | |
+ uid, gid = mappedPair.UID, mappedPair.GID | |
+ } | |
+ if uid != int(st.Uid) || gid != int(st.Gid) { | |
+ cap, err := system.Lgetxattr(path, "security.capability") | |
+ if err != nil && !errors.Is(err, system.EOPNOTSUPP) && err != system.ErrNotSupportedPlatform { | |
+ return fmt.Errorf("%s: %v", os.Args[0], err) | |
+ } | |
+ | |
+ // Make the change. | |
+ if err := system.Lchown(path, uid, gid); err != nil { | |
+ return fmt.Errorf("%s: %v", os.Args[0], err) | |
+ } | |
+ // Restore the SUID and SGID bits if they were originally set. | |
+ if (info.Mode()&os.ModeSymlink == 0) && info.Mode()&(os.ModeSetuid|os.ModeSetgid) != 0 { | |
+ if err := system.Chmod(path, info.Mode()); err != nil { | |
+ return fmt.Errorf("%s: %v", os.Args[0], err) | |
+ } | |
+ } | |
+ if cap != nil { | |
+ if err := system.Lsetxattr(path, "security.capability", cap, 0); err != nil { | |
+ return fmt.Errorf("%s: %v", os.Args[0], err) | |
+ } | |
+ } | |
+ | |
+ } | |
+ return nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/drivers/chown_unix.go b/vendor/github.com/containers/storage/drivers/chown_unix.go | |
index c598b936..84c5b1bd 100644 | |
--- a/vendor/github.com/containers/storage/drivers/chown_unix.go | |
+++ b/vendor/github.com/containers/storage/drivers/chown_unix.go | |
@@ -1,5 +1,5 @@ | |
-//go:build !windows | |
-// +build !windows | |
+//go:build !windows && !darwin | |
+// +build !windows,!darwin | |
package graphdriver | |
diff --git a/vendor/github.com/containers/storage/drivers/driver_darwin.go b/vendor/github.com/containers/storage/drivers/driver_darwin.go | |
new file mode 100644 | |
index 00000000..35785154 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/drivers/driver_darwin.go | |
@@ -0,0 +1,14 @@ | |
+package graphdriver | |
+ | |
+var ( | |
+ // Slice of drivers that should be used in order | |
+ priority = []string{ | |
+ "vfs", | |
+ } | |
+) | |
+ | |
+// GetFSMagic returns the filesystem id given the path. | |
+func GetFSMagic(rootpath string) (FsMagic, error) { | |
+// Note it is OK to return FsMagicUnsupported on Darwin. | |
+ return FsMagicUnsupported, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/drivers/driver_linux.go b/vendor/github.com/containers/storage/drivers/driver_linux.go | |
index 7c527d27..0fe3eea7 100644 | |
--- a/vendor/github.com/containers/storage/drivers/driver_linux.go | |
+++ b/vendor/github.com/containers/storage/drivers/driver_linux.go | |
@@ -1,4 +1,3 @@ | |
-//go:build linux | |
// +build linux | |
package graphdriver | |
@@ -163,32 +162,11 @@ func (c *defaultChecker) IsMounted(path string) bool { | |
return m | |
} | |
-// isMountPoint checks that the given path is a mount point | |
-func isMountPoint(mountPath string) (bool, error) { | |
- // it is already the root | |
- if mountPath == "/" { | |
- return true, nil | |
- } | |
- | |
- var s1, s2 unix.Stat_t | |
- if err := unix.Stat(mountPath, &s1); err != nil { | |
- return true, err | |
- } | |
- if err := unix.Stat(filepath.Dir(mountPath), &s2); err != nil { | |
- return true, err | |
- } | |
- return s1.Dev != s2.Dev, nil | |
-} | |
- | |
// Mounted checks if the given path is mounted as the fs type | |
func Mounted(fsType FsMagic, mountPath string) (bool, error) { | |
var buf unix.Statfs_t | |
- | |
if err := unix.Statfs(mountPath, &buf); err != nil { | |
return false, err | |
} | |
- if FsMagic(buf.Type) != fsType { | |
- return false, nil | |
- } | |
- return isMountPoint(mountPath) | |
+ return FsMagic(buf.Type) == fsType, nil | |
} | |
diff --git a/vendor/github.com/containers/storage/drivers/driver_unsupported.go b/vendor/github.com/containers/storage/drivers/driver_unsupported.go | |
index 4a875608..3932c3ea 100644 | |
--- a/vendor/github.com/containers/storage/drivers/driver_unsupported.go | |
+++ b/vendor/github.com/containers/storage/drivers/driver_unsupported.go | |
@@ -1,4 +1,4 @@ | |
-// +build !linux,!windows,!freebsd,!solaris | |
+// +build !linux,!windows,!freebsd,!solaris,!darwin | |
package graphdriver | |
diff --git a/vendor/github.com/containers/storage/drivers/fsdiff.go b/vendor/github.com/containers/storage/drivers/fsdiff.go | |
index b7e681ac..b619317e 100644 | |
--- a/vendor/github.com/containers/storage/drivers/fsdiff.go | |
+++ b/vendor/github.com/containers/storage/drivers/fsdiff.go | |
@@ -2,6 +2,8 @@ package graphdriver | |
import ( | |
"io" | |
+ "os" | |
+ "runtime" | |
"time" | |
"github.com/containers/storage/pkg/archive" | |
@@ -170,9 +172,16 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, options ApplyDiffOpts) | |
} | |
defer driver.Put(id) | |
+ defaultForceMask := os.FileMode(0700) | |
+ var forceMask *os.FileMode = nil | |
+ if runtime.GOOS == "darwin" { | |
+ forceMask = &defaultForceMask | |
+ } | |
+ | |
tarOptions := &archive.TarOptions{ | |
InUserNS: userns.RunningInUserNS(), | |
IgnoreChownErrors: options.IgnoreChownErrors, | |
+ ForceMask: forceMask, | |
} | |
if options.Mappings != nil { | |
tarOptions.UIDMaps = options.Mappings.UIDs() | |
diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go | |
index 09d24ae8..8600ee68 100644 | |
--- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go | |
+++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go | |
@@ -207,14 +207,18 @@ func checkSupportVolatile(home, runhome string) (bool, error) { | |
// checkAndRecordIDMappedSupport checks and stores if the kernel supports mounting overlay on top of a | |
// idmapped lower layer. | |
func checkAndRecordIDMappedSupport(home, runhome string) (bool, error) { | |
+ if os.Geteuid() != 0 { | |
+ return false, nil | |
+ } | |
+ | |
feature := "idmapped-lower-dir" | |
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature) | |
if err == nil { | |
if overlayCacheResult { | |
- logrus.Debugf("Cached value indicated that overlay is supported") | |
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are supported") | |
return true, nil | |
} | |
- logrus.Debugf("Cached value indicated that overlay is not supported") | |
+ logrus.Debugf("Cached value indicated that idmapped mounts for overlay are not supported") | |
return false, errors.New(overlayCacheText) | |
} | |
supportsIDMappedMounts, err := supportsIdmappedLowerLayers(home) | |
diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go | |
index 1b58e2f6..b1073d55 100644 | |
--- a/vendor/github.com/containers/storage/drivers/vfs/driver.go | |
+++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go | |
@@ -5,6 +5,7 @@ import ( | |
"io" | |
"os" | |
"path/filepath" | |
+ "runtime" | |
"strconv" | |
"strings" | |
@@ -170,6 +171,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool | |
}() | |
rootPerms := defaultPerms | |
+ if runtime.GOOS == "darwin" { | |
+ rootPerms = os.FileMode(0700) | |
+ } | |
+ | |
if parent != "" { | |
st, err := system.Stat(d.dir(parent)) | |
if err != nil { | |
diff --git a/vendor/github.com/containers/storage/go.mod b/vendor/github.com/containers/storage/go.mod | |
index 5d8c5996..867d3dd8 100644 | |
--- a/vendor/github.com/containers/storage/go.mod | |
+++ b/vendor/github.com/containers/storage/go.mod | |
@@ -5,25 +5,25 @@ module github.com/containers/storage | |
require ( | |
github.com/BurntSushi/toml v1.1.0 | |
github.com/Microsoft/go-winio v0.5.2 | |
- github.com/Microsoft/hcsshim v0.9.2 | |
+ github.com/Microsoft/hcsshim v0.9.3 | |
github.com/containerd/stargz-snapshotter/estargz v0.11.4 | |
github.com/cyphar/filepath-securejoin v0.2.3 | |
github.com/docker/go-units v0.4.0 | |
github.com/google/go-intervals v0.0.2 | |
github.com/hashicorp/go-multierror v1.1.1 | |
github.com/json-iterator/go v1.1.12 | |
- github.com/klauspost/compress v1.15.2 | |
+ github.com/klauspost/compress v1.15.6 | |
github.com/klauspost/pgzip v1.2.5 | |
github.com/mattn/go-shellwords v1.0.12 | |
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible | |
github.com/moby/sys/mountinfo v0.6.1 | |
github.com/opencontainers/go-digest v1.0.0 | |
- github.com/opencontainers/runc v1.1.1 | |
+ github.com/opencontainers/runc v1.1.2 | |
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 | |
github.com/opencontainers/selinux v1.10.1 | |
github.com/pkg/errors v0.9.1 | |
github.com/sirupsen/logrus v1.8.1 | |
- github.com/stretchr/testify v1.7.1 | |
+ github.com/stretchr/testify v1.7.2 | |
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 | |
github.com/tchap/go-patricia v2.3.0+incompatible | |
github.com/ulikunitz/xz v0.5.10 | |
diff --git a/vendor/github.com/containers/storage/go.sum b/vendor/github.com/containers/storage/go.sum | |
index 97a0d167..03a7943a 100644 | |
--- a/vendor/github.com/containers/storage/go.sum | |
+++ b/vendor/github.com/containers/storage/go.sum | |
@@ -57,8 +57,8 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 | |
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= | |
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= | |
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= | |
-github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY= | |
-github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= | |
+github.com/Microsoft/hcsshim v0.9.3 h1:k371PzBuRrz2b+ebGuI2nVgVhgsVX60jMfSw80NECxo= | |
+github.com/Microsoft/hcsshim v0.9.3/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= | |
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= | |
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= | |
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= | |
@@ -425,8 +425,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o | |
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= | |
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= | |
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= | |
-github.com/klauspost/compress v1.15.2 h1:3WH+AG7s2+T8o3nrM/8u2rdqUEcQhmga7smjrT41nAw= | |
-github.com/klauspost/compress v1.15.2/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | |
+github.com/klauspost/compress v1.15.6 h1:6D9PcO8QWu0JyaQ2zUMmu16T1T+zjjEpP91guRsvDfY= | |
+github.com/klauspost/compress v1.15.6/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= | |
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= | |
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= | |
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= | |
@@ -521,8 +521,8 @@ github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h | |
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= | |
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= | |
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= | |
-github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU= | |
-github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= | |
+github.com/opencontainers/runc v1.1.2 h1:2VSZwLx5k/BfsBxMMipG/LYUnmqOD/BPkIVgQUcTlLw= | |
+github.com/opencontainers/runc v1.1.2/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= | |
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= | |
@@ -624,8 +624,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV | |
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= | |
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= | |
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |
-github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= | |
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | |
+github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= | |
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= | |
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= | |
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= | |
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= | |
@@ -1018,8 +1018,9 @@ gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | |
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= | |
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= | |
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= | |
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | |
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= | |
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= | |
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= | |
diff --git a/vendor/github.com/containers/storage/pkg/archive/archive.go b/vendor/github.com/containers/storage/pkg/archive/archive.go | |
index d4f129ee..570000e8 100644 | |
--- a/vendor/github.com/containers/storage/pkg/archive/archive.go | |
+++ b/vendor/github.com/containers/storage/pkg/archive/archive.go | |
@@ -12,6 +12,7 @@ import ( | |
"os" | |
"path/filepath" | |
"runtime" | |
+ "strconv" | |
"strings" | |
"sync" | |
"syscall" | |
@@ -72,10 +73,10 @@ type ( | |
) | |
const ( | |
- tarExt = "tar" | |
- solaris = "solaris" | |
- windows = "windows" | |
- containersOverrideXattr = "user.containers.override_stat" | |
+ tarExt = "tar" | |
+ solaris = "solaris" | |
+ windows = "windows" | |
+ darwin = "darwin" | |
) | |
var xattrsToIgnore = map[string]interface{}{ | |
@@ -698,9 +699,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L | |
return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) | |
} | |
- if forceMask != nil && hdr.Typeflag != tar.TypeSymlink { | |
+ if forceMask != nil && (hdr.Typeflag != tar.TypeSymlink || runtime.GOOS == "darwin") { | |
value := fmt.Sprintf("%d:%d:0%o", hdr.Uid, hdr.Gid, hdrInfo.Mode()&07777) | |
- if err := system.Lsetxattr(path, containersOverrideXattr, []byte(value), 0); err != nil { | |
+ if err := system.Lsetxattr(path, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { | |
return err | |
} | |
} | |
@@ -981,7 +982,7 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err | |
uid, gid, mode, err := GetFileOwner(dest) | |
if err == nil { | |
value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) | |
- if err := system.Lsetxattr(dest, containersOverrideXattr, []byte(value), 0); err != nil { | |
+ if err := system.Lsetxattr(dest, idtools.ContainersOverrideXattr, []byte(value), 0); err != nil { | |
return err | |
} | |
} | |
@@ -1313,6 +1314,21 @@ func remapIDs(readIDMappings, writeIDMappings *idtools.IDMappings, chownOpts *id | |
if err != nil { | |
return err | |
} | |
+ } else if runtime.GOOS == darwin { | |
+ uid, gid = hdr.Uid, hdr.Gid | |
+ if xstat, ok := hdr.Xattrs[idtools.ContainersOverrideXattr]; ok { | |
+ attrs := strings.Split(string(xstat), ":") | |
+ if len(attrs) == 3 { | |
+ val, err := strconv.ParseUint(attrs[0], 10, 32) | |
+ if err == nil { | |
+ uid = int(val) | |
+ } | |
+ val, err = strconv.ParseUint(attrs[1], 10, 32) | |
+ if err == nil { | |
+ gid = int(val) | |
+ } | |
+ } | |
+ } | |
} else { | |
uid, gid = hdr.Uid, hdr.Gid | |
} | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | |
index e874eb74..482e0366 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive.go | |
@@ -5,9 +5,7 @@ import ( | |
"fmt" | |
"io" | |
"io/ioutil" | |
- "net" | |
"os" | |
- "os/user" | |
"path/filepath" | |
"sync" | |
@@ -17,13 +15,6 @@ import ( | |
"github.com/pkg/errors" | |
) | |
-func init() { | |
- // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host | |
- // environment not in the chroot from untrusted files. | |
- _, _ = user.Lookup("storage") | |
- _, _ = net.LookupHost("localhost") | |
-} | |
- | |
// NewArchiver returns a new Archiver which uses chrootarchive.Untar | |
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { | |
archiver := archive.NewArchiver(idMappings) | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go | |
new file mode 100644 | |
index 00000000..d257cc8e | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_darwin.go | |
@@ -0,0 +1,21 @@ | |
+package chrootarchive | |
+ | |
+import ( | |
+ "io" | |
+ | |
+ "github.com/containers/storage/pkg/archive" | |
+) | |
+ | |
+func chroot(path string) error { | |
+ return nil | |
+} | |
+ | |
+func invokeUnpack(decompressedArchive io.ReadCloser, | |
+ dest string, | |
+ options *archive.TarOptions, root string) error { | |
+ return archive.Unpack(decompressedArchive, dest, options) | |
+} | |
+ | |
+func invokePack(srcPath string, options *archive.TarOptions, root string) (io.ReadCloser, error) { | |
+ return archive.TarWithOptions(srcPath, options) | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | |
index 9da10fe3..e4b45a45 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/archive_unix.go | |
@@ -1,4 +1,4 @@ | |
-// +build !windows | |
+// +build !windows,!darwin | |
package chrootarchive | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | |
index 76c94c6c..58729ec8 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_linux.go | |
@@ -3,7 +3,9 @@ package chrootarchive | |
import ( | |
"fmt" | |
"io/ioutil" | |
+ "net" | |
"os" | |
+ "os/user" | |
"path/filepath" | |
"github.com/containers/storage/pkg/mount" | |
@@ -23,6 +25,11 @@ func chroot(path string) (err error) { | |
return err | |
} | |
+ // initialize nss libraries in Glibc so that the dynamic libraries are loaded in the host | |
+ // environment not in the chroot from untrusted files. | |
+ _, _ = user.Lookup("storage") | |
+ _, _ = net.LookupHost("localhost") | |
+ | |
// if the process doesn't have CAP_SYS_ADMIN, but does have CAP_SYS_CHROOT, we need to use the actual chroot | |
if !caps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) && caps.Get(capability.EFFECTIVE, capability.CAP_SYS_CHROOT) { | |
return realChroot(path) | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go | |
index 83278ee5..d5aedd00 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/chroot_unix.go | |
@@ -1,4 +1,4 @@ | |
-// +build !windows,!linux | |
+// +build !windows,!linux,!darwin | |
package chrootarchive | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go | |
new file mode 100644 | |
index 00000000..d6326c80 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_darwin.go | |
@@ -0,0 +1,41 @@ | |
+package chrootarchive | |
+ | |
+import ( | |
+ "fmt" | |
+ "io" | |
+ "io/ioutil" | |
+ "os" | |
+ "path/filepath" | |
+ | |
+ "github.com/containers/storage/pkg/archive" | |
+) | |
+ | |
+// applyLayerHandler parses a diff in the standard layer format from `layer`, and | |
+// applies it to the directory `dest`. Returns the size in bytes of the | |
+// contents of the layer. | |
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { | |
+ dest = filepath.Clean(dest) | |
+ | |
+ if decompress { | |
+ decompressed, err := archive.DecompressStream(layer) | |
+ if err != nil { | |
+ return 0, err | |
+ } | |
+ defer decompressed.Close() | |
+ | |
+ layer = decompressed | |
+ } | |
+ | |
+ tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-storage-extract") | |
+ if err != nil { | |
+ return 0, fmt.Errorf("ApplyLayer failed to create temp-storage-extract under %s. %s", dest, err) | |
+ } | |
+ | |
+ s, err := archive.UnpackLayer(dest, layer, options) | |
+ os.RemoveAll(tmpDir) | |
+ if err != nil { | |
+ return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) | |
+ } | |
+ | |
+ return s, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | |
index 84253c6a..6dd5146c 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/diff_unix.go | |
@@ -1,4 +1,4 @@ | |
-//+build !windows | |
+//+build !windows,!darwin | |
package chrootarchive | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go | |
new file mode 100644 | |
index 00000000..fa17c9bf | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_darwin.go | |
@@ -0,0 +1,4 @@ | |
+package chrootarchive | |
+ | |
+func init() { | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go | |
index ea08135e..45caec97 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go | |
+++ b/vendor/github.com/containers/storage/pkg/chrootarchive/init_unix.go | |
@@ -1,4 +1,4 @@ | |
-// +build !windows | |
+// +build !windows,!darwin | |
package chrootarchive | |
diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | |
index 7de20fea..9434499d 100644 | |
--- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | |
+++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go | |
@@ -918,6 +918,9 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan | |
case p := <-streams: | |
part = p | |
case err := <-errs: | |
+ if err == nil { | |
+ return errors.New("not enough data returned from the server") | |
+ } | |
return err | |
} | |
if part == nil { | |
@@ -1081,12 +1084,18 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { | |
func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { | |
var chunksToRequest []ImageSourceChunk | |
- for _, c := range missingParts { | |
- if c.OriginFile == nil && !c.Hole { | |
- chunksToRequest = append(chunksToRequest, *c.SourceChunk) | |
+ | |
+ calculateChunksToRequest := func() { | |
+ chunksToRequest = []ImageSourceChunk{} | |
+ for _, c := range missingParts { | |
+ if c.OriginFile == nil && !c.Hole { | |
+ chunksToRequest = append(chunksToRequest, *c.SourceChunk) | |
+ } | |
} | |
} | |
+ calculateChunksToRequest() | |
+ | |
// There are some missing files. Prepare a multirange request for the missing chunks. | |
var streams chan io.ReadCloser | |
var err error | |
@@ -1106,6 +1115,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingPart | |
// Merge more chunks to request | |
missingParts = mergeMissingChunks(missingParts, requested/2) | |
+ calculateChunksToRequest() | |
continue | |
} | |
return err | |
@@ -1575,6 +1585,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra | |
wg.Wait() | |
for _, res := range copyResults[:filesToWaitFor] { | |
+ r := &mergedEntries[res.index] | |
+ | |
if res.err != nil { | |
return output, res.err | |
} | |
@@ -1584,8 +1596,6 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra | |
continue | |
} | |
- r := &mergedEntries[res.index] | |
- | |
missingPartsSize += r.Size | |
remainingSize := r.Size | |
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go | |
index 7a8fec0c..3ae2a1cd 100644 | |
--- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go | |
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go | |
@@ -6,6 +6,7 @@ import ( | |
"io/ioutil" | |
"os" | |
"os/user" | |
+ "runtime" | |
"sort" | |
"strconv" | |
"strings" | |
@@ -38,8 +39,9 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } | |
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } | |
const ( | |
- subuidFileName string = "/etc/subuid" | |
- subgidFileName string = "/etc/subgid" | |
+ subuidFileName string = "/etc/subuid" | |
+ subgidFileName string = "/etc/subgid" | |
+ ContainersOverrideXattr = "user.containers.override_stat" | |
) | |
// MkdirAllAs creates a directory (include any along the path) and then modifies | |
@@ -366,6 +368,25 @@ func checkChownErr(err error, name string, uid, gid int) error { | |
} | |
func SafeChown(name string, uid, gid int) error { | |
+ if runtime.GOOS == "darwin" { | |
+ var mode uint64 = 0o0700 | |
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) | |
+ if err == nil { | |
+ attrs := strings.Split(string(xstat), ":") | |
+ if len(attrs) == 3 { | |
+ val, err := strconv.ParseUint(attrs[2], 8, 32) | |
+ if err == nil { | |
+ mode = val | |
+ } | |
+ } | |
+ } | |
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) | |
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { | |
+ return err | |
+ } | |
+ uid = os.Getuid() | |
+ gid = os.Getgid() | |
+ } | |
if stat, statErr := system.Stat(name); statErr == nil { | |
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { | |
return nil | |
@@ -375,6 +396,25 @@ func SafeChown(name string, uid, gid int) error { | |
} | |
func SafeLchown(name string, uid, gid int) error { | |
+ if runtime.GOOS == "darwin" { | |
+ var mode uint64 = 0o0700 | |
+ xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) | |
+ if err == nil { | |
+ attrs := strings.Split(string(xstat), ":") | |
+ if len(attrs) == 3 { | |
+ val, err := strconv.ParseUint(attrs[2], 8, 32) | |
+ if err == nil { | |
+ mode = val | |
+ } | |
+ } | |
+ } | |
+ value := fmt.Sprintf("%d:%d:0%o", uid, gid, mode) | |
+ if err = system.Lsetxattr(name, ContainersOverrideXattr, []byte(value), 0); err != nil { | |
+ return err | |
+ } | |
+ uid = os.Getuid() | |
+ gid = os.Getgid() | |
+ } | |
if stat, statErr := system.Lstat(name); statErr == nil { | |
if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { | |
return nil | |
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | |
index 72ceec3d..2404e331 100644 | |
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | |
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go | |
@@ -1,3 +1,6 @@ | |
+//go:build freebsd && cgo | |
+// +build freebsd,cgo | |
+ | |
package mount | |
/* | |
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go | |
index 9d20cfbf..74fe6660 100644 | |
--- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go | |
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go | |
@@ -1,4 +1,6 @@ | |
-// +build !linux,!freebsd | |
+//go:build !linux && !(freebsd && cgo) | |
+// +build !linux | |
+// +build !freebsd !cgo | |
package mount | |
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go | |
new file mode 100644 | |
index 00000000..a0183885 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go | |
@@ -0,0 +1,84 @@ | |
+//go:build freebsd && cgo | |
+// +build freebsd,cgo | |
+ | |
+package system | |
+ | |
+import ( | |
+ "fmt" | |
+ "unsafe" | |
+ | |
+ "golang.org/x/sys/unix" | |
+) | |
+ | |
+// #include <unistd.h> | |
+// #include <sys/vmmeter.h> | |
+// #include <sys/sysctl.h> | |
+// #include <vm/vm_param.h> | |
+import "C" | |
+ | |
+func getMemInfo() (int64, int64, error) { | |
+ data, err := unix.SysctlRaw("vm.vmtotal") | |
+ if err != nil { | |
+ return -1, -1, fmt.Errorf("Can't get kernel info: %v", err) | |
+ } | |
+ if len(data) != C.sizeof_struct_vmtotal { | |
+ return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data)) | |
+ } | |
+ | |
+ total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0])) | |
+ | |
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE)) | |
+ npages := int64(C.sysconf(C._SC_PHYS_PAGES)) | |
+ return pagesize * npages, pagesize * int64(total.t_free), nil | |
+} | |
+ | |
+func getSwapInfo() (int64, int64, error) { | |
+ var ( | |
+ total int64 = 0 | |
+ used int64 = 0 | |
+ ) | |
+ swapCount, err := unix.SysctlUint32("vm.nswapdev") | |
+ if err != nil { | |
+ return -1, -1, fmt.Errorf("error reading vm.nswapdev: %v", err) | |
+ } | |
+ for i := 0; i < int(swapCount); i++ { | |
+ data, err := unix.SysctlRaw("vm.swap_info", i) | |
+ if err != nil { | |
+ return -1, -1, fmt.Errorf("error reading vm.swap_info.%d: %v", i, err) | |
+ } | |
+ if len(data) != C.sizeof_struct_xswdev { | |
+ return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data)) | |
+ } | |
+ xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0])) | |
+ total += int64(xsw.xsw_nblks) | |
+ used += int64(xsw.xsw_used) | |
+ } | |
+ pagesize := int64(C.sysconf(C._SC_PAGESIZE)) | |
+ return pagesize * total, pagesize * (total - used), nil | |
+} | |
+ | |
+// ReadMemInfo retrieves memory statistics of the host system and returns a | |
+// MemInfo type. | |
+func ReadMemInfo() (*MemInfo, error) { | |
+ MemTotal, MemFree, err := getMemInfo() | |
+ if err != nil { | |
+ return nil, fmt.Errorf("error getting memory totals %v\n", err) | |
+ } | |
+ SwapTotal, SwapFree, err := getSwapInfo() | |
+ if err != nil { | |
+ return nil, fmt.Errorf("error getting swap totals %v\n", err) | |
+ } | |
+ | |
+ if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { | |
+ return nil, fmt.Errorf("error getting system memory info %v\n", err) | |
+ } | |
+ | |
+ meminfo := &MemInfo{} | |
+ // Total memory is total physical memory less than memory locked by kernel | |
+ meminfo.MemTotal = MemTotal | |
+ meminfo.MemFree = MemFree | |
+ meminfo.SwapTotal = SwapTotal | |
+ meminfo.SwapFree = SwapFree | |
+ | |
+ return meminfo, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go | |
index 3ce019df..8d14fe9f 100644 | |
--- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go | |
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go | |
@@ -1,4 +1,5 @@ | |
-// +build !linux,!windows,!solaris | |
+//go:build !linux && !windows && !solaris && !freebsd | |
+// +build !linux,!windows,!solaris,!freebsd | |
package system | |
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go | |
new file mode 100644 | |
index 00000000..75275b96 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go | |
@@ -0,0 +1,84 @@ | |
+package system | |
+ | |
+import ( | |
+ "bytes" | |
+ "os" | |
+ | |
+ "golang.org/x/sys/unix" | |
+) | |
+ | |
+const ( | |
+ // Value is larger than the maximum size allowed | |
+ E2BIG unix.Errno = unix.E2BIG | |
+ | |
+ // Operation not supported | |
+ EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP | |
+) | |
+ | |
+// Lgetxattr retrieves the value of the extended attribute identified by attr | |
+// and associated with the given path in the file system. | |
+// Returns a []byte slice if the xattr is set and nil otherwise. | |
+func Lgetxattr(path string, attr string) ([]byte, error) { | |
+ // Start with a 128 length byte array | |
+ dest := make([]byte, 128) | |
+ sz, errno := unix.Lgetxattr(path, attr, dest) | |
+ | |
+ for errno == unix.ERANGE { | |
+ // Buffer too small, use zero-sized buffer to get the actual size | |
+ sz, errno = unix.Lgetxattr(path, attr, []byte{}) | |
+ if errno != nil { | |
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} | |
+ } | |
+ dest = make([]byte, sz) | |
+ sz, errno = unix.Lgetxattr(path, attr, dest) | |
+ } | |
+ | |
+ switch { | |
+ case errno == unix.ENOATTR: | |
+ return nil, nil | |
+ case errno != nil: | |
+ return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} | |
+ } | |
+ | |
+ return dest[:sz], nil | |
+} | |
+ | |
+// Lsetxattr sets the value of the extended attribute identified by attr | |
+// and associated with the given path in the file system. | |
+func Lsetxattr(path string, attr string, data []byte, flags int) error { | |
+ if err := unix.Lsetxattr(path, attr, data, flags); err != nil { | |
+ return &os.PathError{Op: "lsetxattr", Path: path, Err: err} | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+// Llistxattr lists extended attributes associated with the given path | |
+// in the file system. | |
+func Llistxattr(path string) ([]string, error) { | |
+ dest := make([]byte, 128) | |
+ sz, errno := unix.Llistxattr(path, dest) | |
+ | |
+ for errno == unix.ERANGE { | |
+ // Buffer too small, use zero-sized buffer to get the actual size | |
+ sz, errno = unix.Llistxattr(path, []byte{}) | |
+ if errno != nil { | |
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} | |
+ } | |
+ | |
+ dest = make([]byte, sz) | |
+ sz, errno = unix.Llistxattr(path, dest) | |
+ } | |
+ if errno != nil { | |
+ return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} | |
+ } | |
+ | |
+ var attrs []string | |
+ for _, token := range bytes.Split(dest[:sz], []byte{0}) { | |
+ if len(token) > 0 { | |
+ attrs = append(attrs, string(token)) | |
+ } | |
+ } | |
+ | |
+ return attrs, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go | |
index 3fc27f0b..221eb78b 100644 | |
--- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go | |
+++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go | |
@@ -1,4 +1,4 @@ | |
-// +build !linux | |
+// +build !linux,!darwin | |
package system | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c | |
index c0e359b2..f5a7c3a2 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c | |
@@ -1,4 +1,4 @@ | |
-#ifndef UNSHARE_NO_CODE_AT_ALL | |
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) | |
#define _GNU_SOURCE | |
#include <sys/types.h> | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go | |
index 53cfeb0e..221c7e08 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go | |
@@ -7,7 +7,7 @@ import ( | |
"sync" | |
"github.com/pkg/errors" | |
- "github.com/syndtr/gocapability/capability" | |
+ "github.com/sirupsen/logrus" | |
) | |
var ( | |
@@ -38,19 +38,13 @@ func HomeDir() (string, error) { | |
return homeDir, homeDirErr | |
} | |
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. | |
-func HasCapSysAdmin() (bool, error) { | |
- hasCapSysAdminOnce.Do(func() { | |
- currentCaps, err := capability.NewPid2(0) | |
- if err != nil { | |
- hasCapSysAdminErr = err | |
- return | |
- } | |
- if err = currentCaps.Load(); err != nil { | |
- hasCapSysAdminErr = err | |
- return | |
+func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname | |
+ if err != nil { | |
+ if format != "" { | |
+ logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) | |
+ } else { | |
+ logrus.Errorf("%v", err) | |
} | |
- hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) | |
- }) | |
- return hasCapSysAdminRet, hasCapSysAdminErr | |
+ os.Exit(1) | |
+ } | |
} | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go | |
index b3f8099f..6a6f21d9 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go | |
@@ -1,4 +1,5 @@ | |
-// +build linux,cgo,!gccgo | |
+//go:build (linux && cgo && !gccgo) || (freebsd && cgo) | |
+// +build linux,cgo,!gccgo freebsd,cgo | |
package unshare | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go | |
new file mode 100644 | |
index 00000000..01cf33bd | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go | |
@@ -0,0 +1,53 @@ | |
+// +build darwin | |
+ | |
+package unshare | |
+ | |
+import ( | |
+ "os" | |
+ | |
+ "github.com/containers/storage/pkg/idtools" | |
+ "github.com/opencontainers/runtime-spec/specs-go" | |
+) | |
+ | |
+const ( | |
+ // UsernsEnvName is the environment variable, if set indicates in rootless mode | |
+ UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" | |
+) | |
+ | |
+// IsRootless tells us if we are running in rootless mode | |
+func IsRootless() bool { | |
+ return true | |
+} | |
+ | |
+// GetRootlessUID returns the UID of the user in the parent userNS | |
+func GetRootlessUID() int { | |
+ return os.Getuid() | |
+} | |
+ | |
+// RootlessEnv returns the environment settings for the rootless containers | |
+func RootlessEnv() []string { | |
+ return append(os.Environ(), UsernsEnvName+"=") | |
+} | |
+ | |
+// MaybeReexecUsingUserNamespace re-exec the process in a new namespace | |
+func MaybeReexecUsingUserNamespace(evenForRoot bool) { | |
+} | |
+ | |
+// GetHostIDMappings reads mappings for the specified process (or the current | |
+// process if pid is "self" or an empty string) from the kernel. | |
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { | |
+ return nil, nil, nil | |
+} | |
+ | |
+// ParseIDMappings parses mapping triples. | |
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { | |
+ uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") | |
+ if err != nil { | |
+ return nil, nil, err | |
+ } | |
+ gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") | |
+ if err != nil { | |
+ return nil, nil, err | |
+ } | |
+ return uid, gid, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c | |
new file mode 100644 | |
index 00000000..0b2f1788 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c | |
@@ -0,0 +1,76 @@ | |
+#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__) | |
+ | |
+ | |
+#include <sys/types.h> | |
+#include <sys/ioctl.h> | |
+#include <stdlib.h> | |
+#include <stdio.h> | |
+#include <string.h> | |
+#include <unistd.h> | |
+ | |
+static int _containers_unshare_parse_envint(const char *envname) { | |
+ char *p, *q; | |
+ long l; | |
+ | |
+ p = getenv(envname); | |
+ if (p == NULL) { | |
+ return -1; | |
+ } | |
+ q = NULL; | |
+ l = strtol(p, &q, 10); | |
+ if ((q == NULL) || (*q != '\0')) { | |
+ fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); | |
+ _exit(1); | |
+ } | |
+ unsetenv(envname); | |
+ return l; | |
+} | |
+ | |
+void _containers_unshare(void) | |
+{ | |
+ int pidfd, continuefd, n, pgrp, sid, ctty; | |
+ char buf[2048]; | |
+ | |
+ pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); | |
+ if (pidfd != -1) { | |
+ snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); | |
+ size_t size = write(pidfd, buf, strlen(buf)); | |
+ if (size != strlen(buf)) { | |
+ fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); | |
+ _exit(1); | |
+ } | |
+ close(pidfd); | |
+ } | |
+ continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); | |
+ if (continuefd != -1) { | |
+ n = read(continuefd, buf, sizeof(buf)); | |
+ if (n > 0) { | |
+ fprintf(stderr, "Error: %.*s\n", n, buf); | |
+ _exit(1); | |
+ } | |
+ close(continuefd); | |
+ } | |
+ sid = _containers_unshare_parse_envint("_Containers-setsid"); | |
+ if (sid == 1) { | |
+ if (setsid() == -1) { | |
+ fprintf(stderr, "Error during setsid: %m\n"); | |
+ _exit(1); | |
+ } | |
+ } | |
+ pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); | |
+ if (pgrp == 1) { | |
+ if (setpgrp(0, 0) == -1) { | |
+ fprintf(stderr, "Error during setpgrp: %m\n"); | |
+ _exit(1); | |
+ } | |
+ } | |
+ ctty = _containers_unshare_parse_envint("_Containers-ctty"); | |
+ if (ctty != -1) { | |
+ if (ioctl(ctty, TIOCSCTTY, 0) == -1) { | |
+ fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); | |
+ _exit(1); | |
+ } | |
+ } | |
+} | |
+ | |
+#endif | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go | |
new file mode 100644 | |
index 00000000..aec41672 | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go | |
@@ -0,0 +1,179 @@ | |
+//go:build freebsd | |
+// +build freebsd | |
+ | |
+package unshare | |
+ | |
+import ( | |
+ "bytes" | |
+ "fmt" | |
+ "io" | |
+ "os" | |
+ "os/exec" | |
+ "runtime" | |
+ "strconv" | |
+ "syscall" | |
+ | |
+ "github.com/containers/storage/pkg/reexec" | |
+ "github.com/pkg/errors" | |
+ "github.com/sirupsen/logrus" | |
+) | |
+ | |
+// Cmd wraps an exec.Cmd created by the reexec package in unshare(), | |
+// and one day might handle setting ID maps and other related setting*s | |
+// by triggering initialization code in the child. | |
+type Cmd struct { | |
+ *exec.Cmd | |
+ Setsid bool | |
+ Setpgrp bool | |
+ Ctty *os.File | |
+ Hook func(pid int) error | |
+} | |
+ | |
+// Command creates a new Cmd which can be customized. | |
+func Command(args ...string) *Cmd { | |
+ cmd := reexec.Command(args...) | |
+ return &Cmd{ | |
+ Cmd: cmd, | |
+ } | |
+} | |
+ | |
+func (c *Cmd) Start() error { | |
+ runtime.LockOSThread() | |
+ defer runtime.UnlockOSThread() | |
+ | |
+ // Set environment variables to tell the child to synchronize its startup. | |
+ if c.Env == nil { | |
+ c.Env = os.Environ() | |
+ } | |
+ | |
+ // Create the pipe for reading the child's PID. | |
+ pidRead, pidWrite, err := os.Pipe() | |
+ if err != nil { | |
+ return errors.Wrapf(err, "error creating pid pipe") | |
+ } | |
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) | |
+ c.ExtraFiles = append(c.ExtraFiles, pidWrite) | |
+ | |
+ // Create the pipe for letting the child know to proceed. | |
+ continueRead, continueWrite, err := os.Pipe() | |
+ if err != nil { | |
+ pidRead.Close() | |
+ pidWrite.Close() | |
+ return errors.Wrapf(err, "error creating pid pipe") | |
+ } | |
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) | |
+ c.ExtraFiles = append(c.ExtraFiles, continueRead) | |
+ | |
+ // Pass along other instructions. | |
+ if c.Setsid { | |
+ c.Env = append(c.Env, "_Containers-setsid=1") | |
+ } | |
+ if c.Setpgrp { | |
+ c.Env = append(c.Env, "_Containers-setpgrp=1") | |
+ } | |
+ if c.Ctty != nil { | |
+ c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) | |
+ c.ExtraFiles = append(c.ExtraFiles, c.Ctty) | |
+ } | |
+ | |
+ // Make sure we clean up our pipes. | |
+ defer func() { | |
+ if pidRead != nil { | |
+ pidRead.Close() | |
+ } | |
+ if pidWrite != nil { | |
+ pidWrite.Close() | |
+ } | |
+ if continueRead != nil { | |
+ continueRead.Close() | |
+ } | |
+ if continueWrite != nil { | |
+ continueWrite.Close() | |
+ } | |
+ }() | |
+ | |
+ // Start the new process. | |
+ err = c.Cmd.Start() | |
+ if err != nil { | |
+ return err | |
+ } | |
+ | |
+ // Close the ends of the pipes that the parent doesn't need. | |
+ continueRead.Close() | |
+ continueRead = nil | |
+ pidWrite.Close() | |
+ pidWrite = nil | |
+ | |
+ // Read the child's PID from the pipe. | |
+ pidString := "" | |
+ b := new(bytes.Buffer) | |
+ if _, err := io.Copy(b, pidRead); err != nil { | |
+ return errors.Wrapf(err, "Reading child PID") | |
+ } | |
+ pidString = b.String() | |
+ pid, err := strconv.Atoi(pidString) | |
+ if err != nil { | |
+ fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) | |
+ return errors.Wrapf(err, "error parsing PID %q", pidString) | |
+ } | |
+ | |
+ // Run any additional setup that we want to do before the child starts running proper. | |
+ if c.Hook != nil { | |
+ if err = c.Hook(pid); err != nil { | |
+ fmt.Fprintf(continueWrite, "hook error: %v", err) | |
+ return err | |
+ } | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
+func (c *Cmd) Run() error { | |
+ if err := c.Start(); err != nil { | |
+ return err | |
+ } | |
+ return c.Wait() | |
+} | |
+ | |
+func (c *Cmd) CombinedOutput() ([]byte, error) { | |
+ return nil, errors.New("unshare: CombinedOutput() not implemented") | |
+} | |
+ | |
+func (c *Cmd) Output() ([]byte, error) { | |
+ return nil, errors.New("unshare: Output() not implemented") | |
+} | |
+ | |
+type Runnable interface { | |
+ Run() error | |
+} | |
+ | |
+// ExecRunnable runs the specified unshare command, captures its exit status, | |
+// and exits with the same status. | |
+func ExecRunnable(cmd Runnable, cleanup func()) { | |
+ exit := func(status int) { | |
+ if cleanup != nil { | |
+ cleanup() | |
+ } | |
+ os.Exit(status) | |
+ } | |
+ if err := cmd.Run(); err != nil { | |
+ if exitError, ok := errors.Cause(err).(*exec.ExitError); ok { | |
+ if exitError.ProcessState.Exited() { | |
+ if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { | |
+ if waitStatus.Exited() { | |
+ logrus.Debugf("%v", exitError) | |
+ exit(waitStatus.ExitStatus()) | |
+ } | |
+ if waitStatus.Signaled() { | |
+ logrus.Debugf("%v", exitError) | |
+ exit(int(waitStatus.Signal()) + 128) | |
+ } | |
+ } | |
+ } | |
+ } | |
+ logrus.Errorf("%v", err) | |
+ logrus.Errorf("(Unable to determine exit status)") | |
+ exit(1) | |
+ } | |
+ exit(0) | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | |
index baeb8f1a..16d14d2a 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go | |
@@ -78,7 +78,7 @@ func getRootlessGID() int { | |
} | |
// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the | |
-// matching file capabilitiy | |
+// matching file capability | |
func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { | |
info, err := os.Stat(path) | |
if err != nil { | |
@@ -414,17 +414,6 @@ type Runnable interface { | |
Run() error | |
} | |
-func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname | |
- if err != nil { | |
- if format != "" { | |
- logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) | |
- } else { | |
- logrus.Errorf("%v", err) | |
- } | |
- os.Exit(1) | |
- } | |
-} | |
- | |
// MaybeReexecUsingUserNamespace re-exec the process in a new namespace | |
func MaybeReexecUsingUserNamespace(evenForRoot bool) { | |
// If we've already been through this once, no need to try again. | |
@@ -674,3 +663,20 @@ func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, | |
} | |
return uid, gid, nil | |
} | |
+ | |
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. | |
+func HasCapSysAdmin() (bool, error) { | |
+ hasCapSysAdminOnce.Do(func() { | |
+ currentCaps, err := capability.NewPid2(0) | |
+ if err != nil { | |
+ hasCapSysAdminErr = err | |
+ return | |
+ } | |
+ if err = currentCaps.Load(); err != nil { | |
+ hasCapSysAdminErr = err | |
+ return | |
+ } | |
+ hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) | |
+ }) | |
+ return hasCapSysAdminRet, hasCapSysAdminErr | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go | |
index bf4d567b..66dd5459 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go | |
@@ -1,4 +1,5 @@ | |
-// +build !linux | |
+//go:build !linux && !darwin | |
+// +build !linux,!darwin | |
package unshare | |
@@ -43,3 +44,8 @@ func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMappi | |
func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { | |
return nil, nil, nil | |
} | |
+ | |
+// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN. | |
+func HasCapSysAdmin() (bool, error) { | |
+ return os.Geteuid() == 0, nil | |
+} | |
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go | |
index d5f2d22a..a6b38eda 100644 | |
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go | |
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go | |
@@ -1,4 +1,5 @@ | |
-// +build !linux,cgo | |
+//go:build cgo && !(linux || freebsd) | |
+// +build cgo,!linux,!freebsd | |
package unshare | |
diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf | |
index c17dd6d3..500e14a3 100644 | |
--- a/vendor/github.com/containers/storage/storage.conf | |
+++ b/vendor/github.com/containers/storage/storage.conf | |
@@ -40,6 +40,32 @@ graphroot = "/var/lib/containers/storage" | |
additionalimagestores = [ | |
] | |
+# Allows specification of how storage is populated when pulling images. This | |
+# option can speed the pulling process of images compressed with format | |
+# zstd:chunked. Containers/storage looks for files within images that are being | |
+# pulled from a container registry that were previously pulled to the host. It | |
+# can copy or create a hard link to the existing file when it finds them, | |
+# eliminating the need to pull them from the container registry. These options | |
+# can deduplicate pulling of content, disk storage of content and can allow the | |
+# kernel to use less memory when running containers. | |
+ | |
+# containers/storage supports four keys | |
+# * enable_partial_images="true" | "false" | |
+# Tells containers/storage to look for files previously pulled in storage | |
+# rather then always pulling them from the container registry. | |
+# * use_hard_links = "false" | "true" | |
+# Tells containers/storage to use hard links rather then create new files in | |
+# the image, if an identical file already existed in storage. | |
+# * enable_host_deduplication = "false" | "true" | |
+# Tells containers/storage to search for files under `/usr` in addition to | |
+# files in other images when attempting to avoid pulling files from the | |
+# container registry. | |
+# * ostree_repos = "" | |
+# Tells containers/storage where an ostree repository exists that might have | |
+# previously pulled content which can be used when attempting to avoid | |
+# pulling content from the container registry | |
+pull_options = {enable_partial_images = "false", enable_host_deduplication = "false", use_hard_links = "false", ostree_repos=""} | |
+ | |
# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of | |
# a container, to the UIDs/GIDs as they should appear outside of the container, | |
# and the length of the range of UIDs/GIDs. Additional mapped sets can be | |
diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd | |
index cc655c62..34d80152 100644 | |
--- a/vendor/github.com/containers/storage/storage.conf-freebsd | |
+++ b/vendor/github.com/containers/storage/storage.conf-freebsd | |
@@ -5,8 +5,8 @@ | |
# files. | |
# | |
# Note: The storage.conf file overrides other storage.conf files based on this precedence: | |
-# /usr/containers/storage.conf | |
-# /etc/containers/storage.conf | |
+# /usr/local/share/containers/storage.conf | |
+# /usr/local/etc/containers/storage.conf | |
# $HOME/.config/containers/storage.conf | |
# $XDG_CONFIG_HOME/containers/storage.conf (If XDG_CONFIG_HOME is set) | |
# See man 5 containers-storage.conf for more information | |
diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go | |
index 30d3e871..6bc104f1 100644 | |
--- a/vendor/github.com/containers/storage/store.go | |
+++ b/vendor/github.com/containers/storage/store.go | |
@@ -173,6 +173,7 @@ type Store interface { | |
GraphRoot() string | |
GraphDriverName() string | |
GraphOptions() []string | |
+ PullOptions() map[string]string | |
UIDMap() []idtools.IDMap | |
GIDMap() []idtools.IDMap | |
@@ -607,6 +608,7 @@ type store struct { | |
graphRoot string | |
graphDriverName string | |
graphOptions []string | |
+ pullOptions map[string]string | |
uidMap []idtools.IDMap | |
gidMap []idtools.IDMap | |
autoUsernsUser string | |
@@ -726,6 +728,7 @@ func GetStore(options types.StoreOptions) (Store, error) { | |
additionalGIDs: nil, | |
usernsLock: usernsLock, | |
disableVolatile: options.DisableVolatile, | |
+ pullOptions: options.PullOptions, | |
} | |
if err := s.load(); err != nil { | |
return nil, err | |
@@ -776,6 +779,14 @@ func (s *store) GraphOptions() []string { | |
return s.graphOptions | |
} | |
+func (s *store) PullOptions() map[string]string { | |
+ cp := make(map[string]string, len(s.pullOptions)) | |
+ for k, v := range s.pullOptions { | |
+ cp[k] = v | |
+ } | |
+ return cp | |
+} | |
+ | |
func (s *store) UIDMap() []idtools.IDMap { | |
return copyIDMap(s.uidMap) | |
} | |
@@ -1195,6 +1206,11 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore ROImageStore, crea | |
if layer == nil { | |
layer = cLayer | |
parentLayer = cParentLayer | |
+ if store != rlstore { | |
+ // The layer is in another store, so we cannot | |
+ // create a mapped version of it to the image. | |
+ createMappedLayer = false | |
+ } | |
} | |
} | |
} | |
diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go | |
index a71c6d2e..a55bf62c 100644 | |
--- a/vendor/github.com/containers/storage/types/options.go | |
+++ b/vendor/github.com/containers/storage/types/options.go | |
@@ -26,31 +26,24 @@ type TomlConfig struct { | |
} | |
const ( | |
- // these are default path for run and graph root for rootful users | |
- // for rootless path is constructed via getRootlessStorageOpts | |
- defaultRunRoot string = "/run/containers/storage" | |
- defaultGraphRoot string = "/var/lib/containers/storage" | |
+ overlayDriver = "overlay" | |
+ overlay2 = "overlay2" | |
+ storageConfEnv = "CONTAINERS_STORAGE_CONF" | |
) | |
-// defaultConfigFile path to the system wide storage.conf file | |
var ( | |
- defaultConfigFile = "/usr/share/containers/storage.conf" | |
- defaultOverrideConfigFile = "/etc/containers/storage.conf" | |
- defaultConfigFileSet = false | |
- // DefaultStoreOptions is a reasonable default set of options. | |
- defaultStoreOptions StoreOptions | |
+ defaultStoreOptionsOnce sync.Once | |
) | |
-const ( | |
- overlayDriver = "overlay" | |
- overlay2 = "overlay2" | |
-) | |
- | |
-func init() { | |
+func loaddefaultStoreOptions() { | |
defaultStoreOptions.RunRoot = defaultRunRoot | |
defaultStoreOptions.GraphRoot = defaultGraphRoot | |
defaultStoreOptions.GraphDriverName = "" | |
+ if path, ok := os.LookupEnv(storageConfEnv); ok { | |
+ defaultOverrideConfigFile = path | |
+ } | |
+ | |
if _, err := os.Stat(defaultOverrideConfigFile); err == nil { | |
// The DefaultConfigFile(rootless) function returns the path | |
// of the used storage.conf file, by returning defaultConfigFile | |
@@ -80,6 +73,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str | |
defaultRootlessGraphRoot string | |
err error | |
) | |
+ defaultStoreOptionsOnce.Do(loaddefaultStoreOptions) | |
storageOpts := defaultStoreOptions | |
if rootless && rootlessUID != 0 { | |
storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts) | |
@@ -203,6 +197,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti | |
return opts, err | |
} | |
opts.RunRoot = rootlessRuntime | |
+ opts.PullOptions = systemOpts.PullOptions | |
if systemOpts.RootlessStoragePath != "" { | |
opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID) | |
if err != nil { | |
@@ -219,7 +214,7 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOpti | |
opts.GraphDriverName = driver | |
} | |
if opts.GraphDriverName == overlay2 { | |
- logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") | |
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") | |
opts.GraphDriverName = overlayDriver | |
} | |
@@ -296,7 +291,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { | |
if err == nil { | |
keys := meta.Undecoded() | |
if len(keys) > 0 { | |
- logrus.Warningf("Failed to decode the keys %q from %q.", keys, configFile) | |
+ logrus.Warningf("Failed to decode the keys %q from %q", keys, configFile) | |
} | |
} else { | |
if !os.IsNotExist(err) { | |
@@ -315,11 +310,11 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { | |
storeOptions.GraphDriverName = config.Storage.Driver | |
} | |
if storeOptions.GraphDriverName == overlay2 { | |
- logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver.") | |
+ logrus.Warnf("Switching default driver from overlay2 to the equivalent overlay driver") | |
storeOptions.GraphDriverName = overlayDriver | |
} | |
if storeOptions.GraphDriverName == "" { | |
- logrus.Errorf("The storage 'driver' option must be set in %s, guarantee proper operation.", configFile) | |
+ logrus.Errorf("The storage 'driver' option must be set in %s to guarantee proper operation", configFile) | |
} | |
if config.Storage.RunRoot != "" { | |
storeOptions.RunRoot = config.Storage.RunRoot | |
@@ -406,6 +401,7 @@ func ReloadConfigurationFile(configFile string, storeOptions *StoreOptions) { | |
} | |
func Options() StoreOptions { | |
+ defaultStoreOptionsOnce.Do(loaddefaultStoreOptions) | |
return defaultStoreOptions | |
} | |
diff --git a/vendor/github.com/containers/storage/types/options_darwin.go b/vendor/github.com/containers/storage/types/options_darwin.go | |
new file mode 100644 | |
index 00000000..d5ad50bc | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/types/options_darwin.go | |
@@ -0,0 +1,17 @@ | |
+package types | |
+ | |
+const ( | |
+ // these are default path for run and graph root for rootful users | |
+ // for rootless path is constructed via getRootlessStorageOpts | |
+ defaultRunRoot string = "/run/containers/storage" | |
+ defaultGraphRoot string = "/var/lib/containers/storage" | |
+) | |
+ | |
+// defaultConfigFile path to the system wide storage.conf file | |
+var ( | |
+ defaultConfigFile = "/usr/share/containers/storage.conf" | |
+ defaultOverrideConfigFile = "/etc/containers/storage.conf" | |
+ defaultConfigFileSet = false | |
+ // DefaultStoreOptions is a reasonable default set of options. | |
+ defaultStoreOptions StoreOptions | |
+) | |
diff --git a/vendor/github.com/containers/storage/types/options_freebsd.go b/vendor/github.com/containers/storage/types/options_freebsd.go | |
new file mode 100644 | |
index 00000000..d5976b6d | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/types/options_freebsd.go | |
@@ -0,0 +1,17 @@ | |
+package types | |
+ | |
+const ( | |
+ // these are default path for run and graph root for rootful users | |
+ // for rootless path is constructed via getRootlessStorageOpts | |
+ defaultRunRoot string = "/var/run/containers/storage" | |
+ defaultGraphRoot string = "/var/db/containers/storage" | |
+) | |
+ | |
+// defaultConfigFile path to the system wide storage.conf file | |
+var ( | |
+ defaultConfigFile = "/usr/local/share/containers/storage.conf" | |
+ defaultOverrideConfigFile = "/usr/local/etc/containers/storage.conf" | |
+ defaultConfigFileSet = false | |
+ // DefaultStoreOptions is a reasonable default set of options. | |
+ defaultStoreOptions StoreOptions | |
+) | |
diff --git a/vendor/github.com/containers/storage/types/options_linux.go b/vendor/github.com/containers/storage/types/options_linux.go | |
new file mode 100644 | |
index 00000000..d5ad50bc | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/types/options_linux.go | |
@@ -0,0 +1,17 @@ | |
+package types | |
+ | |
+const ( | |
+ // these are default path for run and graph root for rootful users | |
+ // for rootless path is constructed via getRootlessStorageOpts | |
+ defaultRunRoot string = "/run/containers/storage" | |
+ defaultGraphRoot string = "/var/lib/containers/storage" | |
+) | |
+ | |
+// defaultConfigFile path to the system wide storage.conf file | |
+var ( | |
+ defaultConfigFile = "/usr/share/containers/storage.conf" | |
+ defaultOverrideConfigFile = "/etc/containers/storage.conf" | |
+ defaultConfigFileSet = false | |
+ // DefaultStoreOptions is a reasonable default set of options. | |
+ defaultStoreOptions StoreOptions | |
+) | |
diff --git a/vendor/github.com/containers/storage/types/options_windows.go b/vendor/github.com/containers/storage/types/options_windows.go | |
new file mode 100644 | |
index 00000000..d5ad50bc | |
--- /dev/null | |
+++ b/vendor/github.com/containers/storage/types/options_windows.go | |
@@ -0,0 +1,17 @@ | |
+package types | |
+ | |
+const ( | |
+ // these are default path for run and graph root for rootful users | |
+ // for rootless path is constructed via getRootlessStorageOpts | |
+ defaultRunRoot string = "/run/containers/storage" | |
+ defaultGraphRoot string = "/var/lib/containers/storage" | |
+) | |
+ | |
+// defaultConfigFile path to the system wide storage.conf file | |
+var ( | |
+ defaultConfigFile = "/usr/share/containers/storage.conf" | |
+ defaultOverrideConfigFile = "/etc/containers/storage.conf" | |
+ defaultConfigFileSet = false | |
+ // DefaultStoreOptions is a reasonable default set of options. | |
+ defaultStoreOptions StoreOptions | |
+) | |
diff --git a/vendor/github.com/containers/storage/types/utils.go b/vendor/github.com/containers/storage/types/utils.go | |
index 4dd1a786..c7f0d0fa 100644 | |
--- a/vendor/github.com/containers/storage/types/utils.go | |
+++ b/vendor/github.com/containers/storage/types/utils.go | |
@@ -170,7 +170,7 @@ func DefaultConfigFile(rootless bool) (string, error) { | |
return defaultConfigFile, nil | |
} | |
- if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok { | |
+ if path, ok := os.LookupEnv(storageConfEnv); ok { | |
return path, nil | |
} | |
if !rootless { | |
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md | |
index 5b7cf781..5c3c2a25 100644 | |
--- a/vendor/github.com/klauspost/compress/README.md | |
+++ b/vendor/github.com/klauspost/compress/README.md | |
@@ -17,6 +17,34 @@ This package provides various compression algorithms. | |
# changelog | |
+* May 25, 2022 (v1.15.5) | |
+ * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 | |
+ * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 | |
+ * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 | |
+ * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 | |
+ * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 | |
+ * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 | |
+ * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 | |
+ * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 | |
+ * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 | |
+ | |
+ | |
+* May 11, 2022 (v1.15.4) | |
+ * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) | |
+ * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) | |
+ * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) | |
+ * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) | |
+ | |
+* May 5, 2022 (v1.15.3) | |
+ * zstd: Allow to ignore checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) | |
+ * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) | |
+ | |
+* Apr 26, 2022 (v1.15.2) | |
+ * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) | |
+ * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) | |
+ * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) | |
+ * Minimum version is Go 1.16, added CI test on 1.18. | |
+ | |
* Mar 11, 2022 (v1.15.1) | |
* huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) | |
* zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) | |
@@ -67,6 +95,9 @@ While the release has been extensively tested, it is recommended to testing when | |
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) | |
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) | |
+<details> | |
+ <summary>See changes to v1.13.x</summary> | |
+ | |
* Aug 30, 2021 (v1.13.5) | |
* gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) | |
* s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) | |
@@ -95,6 +126,8 @@ While the release has been extensively tested, it is recommended to testing when | |
* Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. | |
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) | |
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) | |
+</details> | |
+ | |
<details> | |
<summary>See changes to v1.12.x</summary> | |
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go | |
index bffa2f33..f8435998 100644 | |
--- a/vendor/github.com/klauspost/compress/flate/deflate.go | |
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go | |
@@ -84,24 +84,23 @@ type advancedState struct { | |
length int | |
offset int | |
maxInsertIndex int | |
+ chainHead int | |
+ hashOffset int | |
- // Input hash chains | |
- // hashHead[hashValue] contains the largest inputIndex with the specified hash value | |
- // If hashHead[hashValue] is within the current window, then | |
- // hashPrev[hashHead[hashValue] & windowMask] contains the previous index | |
- // with the same hash value. | |
- chainHead int | |
- hashHead [hashSize]uint32 | |
- hashPrev [windowSize]uint32 | |
- hashOffset int | |
+ ii uint16 // position of last match, intended to overflow to reset. | |
// input window: unprocessed data is window[index:windowEnd] | |
index int | |
estBitsPerByte int | |
hashMatch [maxMatchLength + minMatchLength]uint32 | |
- hash uint32 | |
- ii uint16 // position of last match, intended to overflow to reset. | |
+ // Input hash chains | |
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value | |
+ // If hashHead[hashValue] is within the current window, then | |
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index | |
+ // with the same hash value. | |
+ hashHead [hashSize]uint32 | |
+ hashPrev [windowSize]uint32 | |
} | |
type compressor struct { | |
@@ -259,7 +258,6 @@ func (d *compressor) fillWindow(b []byte) { | |
// Set the head of the hash chain to us. | |
s.hashHead[newH] = uint32(di + s.hashOffset) | |
} | |
- s.hash = newH | |
} | |
// Update window information. | |
d.windowEnd += n | |
@@ -403,7 +401,6 @@ func (d *compressor) initDeflate() { | |
s.hashOffset = 1 | |
s.length = minMatchLength - 1 | |
s.offset = 0 | |
- s.hash = 0 | |
s.chainHead = -1 | |
} | |
@@ -432,9 +429,6 @@ func (d *compressor) deflateLazy() { | |
} | |
s.maxInsertIndex = d.windowEnd - (minMatchLength - 1) | |
- if s.index < s.maxInsertIndex { | |
- s.hash = hash4(d.window[s.index:]) | |
- } | |
for { | |
if sanity && s.index > d.windowEnd { | |
@@ -466,11 +460,11 @@ func (d *compressor) deflateLazy() { | |
} | |
if s.index < s.maxInsertIndex { | |
// Update the hash | |
- s.hash = hash4(d.window[s.index:]) | |
- ch := s.hashHead[s.hash&hashMask] | |
+ hash := hash4(d.window[s.index:]) | |
+ ch := s.hashHead[hash] | |
s.chainHead = int(ch) | |
s.hashPrev[s.index&windowMask] = ch | |
- s.hashHead[s.hash&hashMask] = uint32(s.index + s.hashOffset) | |
+ s.hashHead[hash] = uint32(s.index + s.hashOffset) | |
} | |
prevLength := s.length | |
prevOffset := s.offset | |
@@ -503,7 +497,7 @@ func (d *compressor) deflateLazy() { | |
end += prevIndex | |
idx := prevIndex + prevLength - (4 - checkOff) | |
h := hash4(d.window[idx:]) | |
- ch2 := int(s.hashHead[h&hashMask]) - s.hashOffset - prevLength + (4 - checkOff) | |
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength + (4 - checkOff) | |
if ch2 > minIndex { | |
length := matchLen(d.window[prevIndex:end], d.window[ch2:]) | |
// It seems like a pure length metric is best. | |
@@ -547,7 +541,6 @@ func (d *compressor) deflateLazy() { | |
// Set the head of the hash chain to us. | |
s.hashHead[newH] = uint32(di + s.hashOffset) | |
} | |
- s.hash = newH | |
} | |
s.index = newIndex | |
@@ -793,7 +786,6 @@ func (d *compressor) reset(w io.Writer) { | |
d.tokens.Reset() | |
s.length = minMatchLength - 1 | |
s.offset = 0 | |
- s.hash = 0 | |
s.ii = 0 | |
s.maxInsertIndex = 0 | |
} | |
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go | |
index d55ea2a7..f781aaa6 100644 | |
--- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go | |
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go | |
@@ -117,7 +117,7 @@ func (e *fastGen) addBlock(src []byte) int32 { | |
// hash4 returns the hash of u to fit in a hash table with h bits. | |
// Preferably h should be a constant and should always be <32. | |
func hash4u(u uint32, h uint8) uint32 { | |
- return (u * prime4bytes) >> ((32 - h) & reg8SizeMask32) | |
+ return (u * prime4bytes) >> (32 - h) | |
} | |
type tableEntryPrev struct { | |
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go | |
index 8d632cea..61342b6b 100644 | |
--- a/vendor/github.com/klauspost/compress/flate/inflate_gen.go | |
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go | |
@@ -24,7 +24,7 @@ func (f *decompressor) huffmanBytesBuffer() { | |
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, | |
// but is smart enough to keep local variables in registers, so use nb and b, | |
// inline call to moreBits and reassign b,nb back to f on return. | |
- fnb, fb := f.nb, f.b | |
+ fnb, fb, dict := f.nb, f.b, &f.dict | |
switch f.stepState { | |
case stateInit: | |
@@ -82,9 +82,9 @@ readLiteral: | |
var length int | |
switch { | |
case v < 256: | |
- f.dict.writeByte(byte(v)) | |
- if f.dict.availWrite() == 0 { | |
- f.toRead = f.dict.readFlush() | |
+ dict.writeByte(byte(v)) | |
+ if dict.availWrite() == 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBytesBuffer | |
f.stepState = stateInit | |
f.b, f.nb = fb, fnb | |
@@ -227,10 +227,10 @@ readLiteral: | |
} | |
// No check on length; encoding can be prescient. | |
- if dist > uint32(f.dict.histSize()) { | |
+ if dist > uint32(dict.histSize()) { | |
f.b, f.nb = fb, fnb | |
if debugDecode { | |
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | |
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize()) | |
} | |
f.err = CorruptInputError(f.roffset) | |
return | |
@@ -243,14 +243,14 @@ readLiteral: | |
copyHistory: | |
// Perform a backwards copy according to RFC section 3.2.3. | |
{ | |
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | |
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) | |
if cnt == 0 { | |
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | |
+ cnt = dict.writeCopy(f.copyDist, f.copyLen) | |
} | |
f.copyLen -= cnt | |
- if f.dict.availWrite() == 0 || f.copyLen > 0 { | |
- f.toRead = f.dict.readFlush() | |
+ if dict.availWrite() == 0 || f.copyLen > 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBytesBuffer // We need to continue this work | |
f.stepState = stateDict | |
f.b, f.nb = fb, fnb | |
@@ -275,7 +275,7 @@ func (f *decompressor) huffmanBytesReader() { | |
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, | |
// but is smart enough to keep local variables in registers, so use nb and b, | |
// inline call to moreBits and reassign b,nb back to f on return. | |
- fnb, fb := f.nb, f.b | |
+ fnb, fb, dict := f.nb, f.b, &f.dict | |
switch f.stepState { | |
case stateInit: | |
@@ -333,9 +333,9 @@ readLiteral: | |
var length int | |
switch { | |
case v < 256: | |
- f.dict.writeByte(byte(v)) | |
- if f.dict.availWrite() == 0 { | |
- f.toRead = f.dict.readFlush() | |
+ dict.writeByte(byte(v)) | |
+ if dict.availWrite() == 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBytesReader | |
f.stepState = stateInit | |
f.b, f.nb = fb, fnb | |
@@ -478,10 +478,10 @@ readLiteral: | |
} | |
// No check on length; encoding can be prescient. | |
- if dist > uint32(f.dict.histSize()) { | |
+ if dist > uint32(dict.histSize()) { | |
f.b, f.nb = fb, fnb | |
if debugDecode { | |
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | |
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize()) | |
} | |
f.err = CorruptInputError(f.roffset) | |
return | |
@@ -494,14 +494,14 @@ readLiteral: | |
copyHistory: | |
// Perform a backwards copy according to RFC section 3.2.3. | |
{ | |
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | |
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) | |
if cnt == 0 { | |
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | |
+ cnt = dict.writeCopy(f.copyDist, f.copyLen) | |
} | |
f.copyLen -= cnt | |
- if f.dict.availWrite() == 0 || f.copyLen > 0 { | |
- f.toRead = f.dict.readFlush() | |
+ if dict.availWrite() == 0 || f.copyLen > 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBytesReader // We need to continue this work | |
f.stepState = stateDict | |
f.b, f.nb = fb, fnb | |
@@ -526,7 +526,7 @@ func (f *decompressor) huffmanBufioReader() { | |
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, | |
// but is smart enough to keep local variables in registers, so use nb and b, | |
// inline call to moreBits and reassign b,nb back to f on return. | |
- fnb, fb := f.nb, f.b | |
+ fnb, fb, dict := f.nb, f.b, &f.dict | |
switch f.stepState { | |
case stateInit: | |
@@ -584,9 +584,9 @@ readLiteral: | |
var length int | |
switch { | |
case v < 256: | |
- f.dict.writeByte(byte(v)) | |
- if f.dict.availWrite() == 0 { | |
- f.toRead = f.dict.readFlush() | |
+ dict.writeByte(byte(v)) | |
+ if dict.availWrite() == 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBufioReader | |
f.stepState = stateInit | |
f.b, f.nb = fb, fnb | |
@@ -729,10 +729,10 @@ readLiteral: | |
} | |
// No check on length; encoding can be prescient. | |
- if dist > uint32(f.dict.histSize()) { | |
+ if dist > uint32(dict.histSize()) { | |
f.b, f.nb = fb, fnb | |
if debugDecode { | |
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | |
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize()) | |
} | |
f.err = CorruptInputError(f.roffset) | |
return | |
@@ -745,14 +745,14 @@ readLiteral: | |
copyHistory: | |
// Perform a backwards copy according to RFC section 3.2.3. | |
{ | |
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | |
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) | |
if cnt == 0 { | |
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | |
+ cnt = dict.writeCopy(f.copyDist, f.copyLen) | |
} | |
f.copyLen -= cnt | |
- if f.dict.availWrite() == 0 || f.copyLen > 0 { | |
- f.toRead = f.dict.readFlush() | |
+ if dict.availWrite() == 0 || f.copyLen > 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanBufioReader // We need to continue this work | |
f.stepState = stateDict | |
f.b, f.nb = fb, fnb | |
@@ -777,7 +777,7 @@ func (f *decompressor) huffmanStringsReader() { | |
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, | |
// but is smart enough to keep local variables in registers, so use nb and b, | |
// inline call to moreBits and reassign b,nb back to f on return. | |
- fnb, fb := f.nb, f.b | |
+ fnb, fb, dict := f.nb, f.b, &f.dict | |
switch f.stepState { | |
case stateInit: | |
@@ -835,9 +835,9 @@ readLiteral: | |
var length int | |
switch { | |
case v < 256: | |
- f.dict.writeByte(byte(v)) | |
- if f.dict.availWrite() == 0 { | |
- f.toRead = f.dict.readFlush() | |
+ dict.writeByte(byte(v)) | |
+ if dict.availWrite() == 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanStringsReader | |
f.stepState = stateInit | |
f.b, f.nb = fb, fnb | |
@@ -980,10 +980,10 @@ readLiteral: | |
} | |
// No check on length; encoding can be prescient. | |
- if dist > uint32(f.dict.histSize()) { | |
+ if dist > uint32(dict.histSize()) { | |
f.b, f.nb = fb, fnb | |
if debugDecode { | |
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | |
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize()) | |
} | |
f.err = CorruptInputError(f.roffset) | |
return | |
@@ -996,14 +996,14 @@ readLiteral: | |
copyHistory: | |
// Perform a backwards copy according to RFC section 3.2.3. | |
{ | |
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | |
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) | |
if cnt == 0 { | |
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | |
+ cnt = dict.writeCopy(f.copyDist, f.copyLen) | |
} | |
f.copyLen -= cnt | |
- if f.dict.availWrite() == 0 || f.copyLen > 0 { | |
- f.toRead = f.dict.readFlush() | |
+ if dict.availWrite() == 0 || f.copyLen > 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanStringsReader // We need to continue this work | |
f.stepState = stateDict | |
f.b, f.nb = fb, fnb | |
@@ -1028,7 +1028,7 @@ func (f *decompressor) huffmanGenericReader() { | |
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers, | |
// but is smart enough to keep local variables in registers, so use nb and b, | |
// inline call to moreBits and reassign b,nb back to f on return. | |
- fnb, fb := f.nb, f.b | |
+ fnb, fb, dict := f.nb, f.b, &f.dict | |
switch f.stepState { | |
case stateInit: | |
@@ -1086,9 +1086,9 @@ readLiteral: | |
var length int | |
switch { | |
case v < 256: | |
- f.dict.writeByte(byte(v)) | |
- if f.dict.availWrite() == 0 { | |
- f.toRead = f.dict.readFlush() | |
+ dict.writeByte(byte(v)) | |
+ if dict.availWrite() == 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanGenericReader | |
f.stepState = stateInit | |
f.b, f.nb = fb, fnb | |
@@ -1231,10 +1231,10 @@ readLiteral: | |
} | |
// No check on length; encoding can be prescient. | |
- if dist > uint32(f.dict.histSize()) { | |
+ if dist > uint32(dict.histSize()) { | |
f.b, f.nb = fb, fnb | |
if debugDecode { | |
- fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize()) | |
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize()) | |
} | |
f.err = CorruptInputError(f.roffset) | |
return | |
@@ -1247,14 +1247,14 @@ readLiteral: | |
copyHistory: | |
// Perform a backwards copy according to RFC section 3.2.3. | |
{ | |
- cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) | |
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen) | |
if cnt == 0 { | |
- cnt = f.dict.writeCopy(f.copyDist, f.copyLen) | |
+ cnt = dict.writeCopy(f.copyDist, f.copyLen) | |
} | |
f.copyLen -= cnt | |
- if f.dict.availWrite() == 0 || f.copyLen > 0 { | |
- f.toRead = f.dict.readFlush() | |
+ if dict.availWrite() == 0 || f.copyLen > 0 { | |
+ f.toRead = dict.readFlush() | |
f.step = (*decompressor).huffmanGenericReader // We need to continue this work | |
f.stepState = stateDict | |
f.b, f.nb = fb, fnb | |
diff --git a/vendor/github.com/klauspost/compress/huff0/autogen.go b/vendor/github.com/klauspost/compress/huff0/autogen.go | |
deleted file mode 100644 | |
index ff2c69d6..00000000 | |
--- a/vendor/github.com/klauspost/compress/huff0/autogen.go | |
+++ /dev/null | |
@@ -1,5 +0,0 @@ | |
-package huff0 | |
- | |
-//go:generate go run generate.go | |
-//go:generate asmfmt -w decompress_amd64.s | |
-//go:generate asmfmt -w decompress_8b_amd64.s | |
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go | |
index 451160ed..504a7be9 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go | |
@@ -165,11 +165,6 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { | |
return uint16(b.value >> ((64 - n) & 63)) | |
} | |
-// peekTopBits(n) is equvialent to peekBitFast(64 - n) | |
-func (b *bitReaderShifted) peekTopBits(n uint8) uint16 { | |
- return uint16(b.value >> n) | |
-} | |
- | |
func (b *bitReaderShifted) advance(n uint8) { | |
b.bitsRead += n | |
b.value <<= n & 63 | |
@@ -220,11 +215,6 @@ func (b *bitReaderShifted) fill() { | |
} | |
} | |
-// finished returns true if all bits have been read from the bit stream. | |
-func (b *bitReaderShifted) finished() bool { | |
- return b.off == 0 && b.bitsRead >= 64 | |
-} | |
- | |
func (b *bitReaderShifted) remaining() uint { | |
return b.off*8 + uint(64-b.bitsRead) | |
} | |
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go | |
index 6bce4e87..ec71f7a3 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go | |
@@ -5,8 +5,6 @@ | |
package huff0 | |
-import "fmt" | |
- | |
// bitWriter will write bits. | |
// First bit will be LSB of the first byte of output. | |
type bitWriter struct { | |
@@ -23,14 +21,6 @@ var bitMask16 = [32]uint16{ | |
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, | |
0xFFFF, 0xFFFF} /* up to 16 bits */ | |
-// addBits16NC will add up to 16 bits. | |
-// It will not check if there is space for them, | |
-// so the caller must ensure that it has flushed recently. | |
-func (b *bitWriter) addBits16NC(value uint16, bits uint8) { | |
- b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) | |
- b.nBits += bits | |
-} | |
- | |
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. | |
// It will not check if there is space for them, so the caller must ensure that it has flushed recently. | |
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { | |
@@ -70,104 +60,6 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { | |
b.nBits += encA.nBits + encB.nBits | |
} | |
-// addBits16ZeroNC will add up to 16 bits. | |
-// It will not check if there is space for them, | |
-// so the caller must ensure that it has flushed recently. | |
-// This is fastest if bits can be zero. | |
-func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { | |
- if bits == 0 { | |
- return | |
- } | |
- value <<= (16 - bits) & 15 | |
- value >>= (16 - bits) & 15 | |
- b.bitContainer |= uint64(value) << (b.nBits & 63) | |
- b.nBits += bits | |
-} | |
- | |
-// flush will flush all pending full bytes. | |
-// There will be at least 56 bits available for writing when this has been called. | |
-// Using flush32 is faster, but leaves less space for writing. | |
-func (b *bitWriter) flush() { | |
- v := b.nBits >> 3 | |
- switch v { | |
- case 0: | |
- return | |
- case 1: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- ) | |
- b.bitContainer >>= 1 << 3 | |
- case 2: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- ) | |
- b.bitContainer >>= 2 << 3 | |
- case 3: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- ) | |
- b.bitContainer >>= 3 << 3 | |
- case 4: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- ) | |
- b.bitContainer >>= 4 << 3 | |
- case 5: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- ) | |
- b.bitContainer >>= 5 << 3 | |
- case 6: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- ) | |
- b.bitContainer >>= 6 << 3 | |
- case 7: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- byte(b.bitContainer>>48), | |
- ) | |
- b.bitContainer >>= 7 << 3 | |
- case 8: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- byte(b.bitContainer>>48), | |
- byte(b.bitContainer>>56), | |
- ) | |
- b.bitContainer = 0 | |
- b.nBits = 0 | |
- return | |
- default: | |
- panic(fmt.Errorf("bits (%d) > 64", b.nBits)) | |
- } | |
- b.nBits &= 7 | |
-} | |
- | |
// flush32 will flush out, so there are at least 32 bits available for writing. | |
func (b *bitWriter) flush32() { | |
if b.nBits < 32 { | |
@@ -201,10 +93,3 @@ func (b *bitWriter) close() error { | |
b.flushAlign() | |
return nil | |
} | |
- | |
-// reset and continue writing by appending to out. | |
-func (b *bitWriter) reset(out []byte) { | |
- b.bitContainer = 0 | |
- b.nBits = 0 | |
- b.out = out | |
-} | |
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go | |
index 50bcdf6e..4dcab8d2 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/bytereader.go | |
@@ -20,11 +20,6 @@ func (b *byteReader) init(in []byte) { | |
b.off = 0 | |
} | |
-// advance the stream b n bytes. | |
-func (b *byteReader) advance(n uint) { | |
- b.off += int(n) | |
-} | |
- | |
// Int32 returns a little endian int32 starting at current offset. | |
func (b byteReader) Int32() int32 { | |
v3 := int32(b.b[b.off+3]) | |
@@ -43,11 +38,6 @@ func (b byteReader) Uint32() uint32 { | |
return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0 | |
} | |
-// unread returns the unread portion of the input. | |
-func (b byteReader) unread() []byte { | |
- return b.b[b.off:] | |
-} | |
- | |
// remain will return the number of bytes remaining. | |
func (b byteReader) remain() int { | |
return len(b.b) - b.off | |
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go | |
index bc95ac62..4d14542f 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/compress.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go | |
@@ -404,6 +404,7 @@ func (s *Scratch) canUseTable(c cTable) bool { | |
return true | |
} | |
+//lint:ignore U1000 used for debugging | |
func (s *Scratch) validateTable(c cTable) bool { | |
if len(c) < int(s.symbolLen) { | |
return false | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go | |
index 04f65299..c0c48bd7 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go | |
@@ -11,7 +11,6 @@ import ( | |
type dTable struct { | |
single []dEntrySingle | |
- double []dEntryDouble | |
} | |
// single-symbols decoding | |
@@ -19,13 +18,6 @@ type dEntrySingle struct { | |
entry uint16 | |
} | |
-// double-symbols decoding | |
-type dEntryDouble struct { | |
- seq [4]byte | |
- nBits uint8 | |
- len uint8 | |
-} | |
- | |
// Uses special code for all tables that are < 8 bits. | |
const use8BitTables = true | |
@@ -35,7 +27,7 @@ const use8BitTables = true | |
// If no Scratch is provided a new one is allocated. | |
// The returned Scratch can be used for encoding or decoding input using this table. | |
func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { | |
- s, err = s.prepare(in) | |
+ s, err = s.prepare(nil) | |
if err != nil { | |
return s, nil, err | |
} | |
@@ -236,108 +228,6 @@ func (d *Decoder) buffer() *[4][256]byte { | |
return &[4][256]byte{} | |
} | |
-// Decompress1X will decompress a 1X encoded stream. | |
-// The cap of the output buffer will be the maximum decompressed size. | |
-// The length of the supplied input must match the end of a block exactly. | |
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { | |
- if len(d.dt.single) == 0 { | |
- return nil, errors.New("no table loaded") | |
- } | |
- if use8BitTables && d.actualTableLog <= 8 { | |
- return d.decompress1X8Bit(dst, src) | |
- } | |
- var br bitReaderShifted | |
- err := br.init(src) | |
- if err != nil { | |
- return dst, err | |
- } | |
- maxDecodedSize := cap(dst) | |
- dst = dst[:0] | |
- | |
- // Avoid bounds check by always having full sized table. | |
- const tlSize = 1 << tableLogMax | |
- const tlMask = tlSize - 1 | |
- dt := d.dt.single[:tlSize] | |
- | |
- // Use temp table to avoid bound checks/append penalty. | |
- bufs := d.buffer() | |
- buf := &bufs[0] | |
- var off uint8 | |
- | |
- for br.off >= 8 { | |
- br.fillFast() | |
- v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
- br.advance(uint8(v.entry)) | |
- buf[off+0] = uint8(v.entry >> 8) | |
- | |
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
- br.advance(uint8(v.entry)) | |
- buf[off+1] = uint8(v.entry >> 8) | |
- | |
- // Refill | |
- br.fillFast() | |
- | |
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
- br.advance(uint8(v.entry)) | |
- buf[off+2] = uint8(v.entry >> 8) | |
- | |
- v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
- br.advance(uint8(v.entry)) | |
- buf[off+3] = uint8(v.entry >> 8) | |
- | |
- off += 4 | |
- if off == 0 { | |
- if len(dst)+256 > maxDecodedSize { | |
- br.close() | |
- d.bufs.Put(bufs) | |
- return nil, ErrMaxDecodedSizeExceeded | |
- } | |
- dst = append(dst, buf[:]...) | |
- } | |
- } | |
- | |
- if len(dst)+int(off) > maxDecodedSize { | |
- d.bufs.Put(bufs) | |
- br.close() | |
- return nil, ErrMaxDecodedSizeExceeded | |
- } | |
- dst = append(dst, buf[:off]...) | |
- | |
- // br < 8, so uint8 is fine | |
- bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead | |
- for bitsLeft > 0 { | |
- br.fill() | |
- if false && br.bitsRead >= 32 { | |
- if br.off >= 4 { | |
- v := br.in[br.off-4:] | |
- v = v[:4] | |
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- br.value = (br.value << 32) | uint64(low) | |
- br.bitsRead -= 32 | |
- br.off -= 4 | |
- } else { | |
- for br.off > 0 { | |
- br.value = (br.value << 8) | uint64(br.in[br.off-1]) | |
- br.bitsRead -= 8 | |
- br.off-- | |
- } | |
- } | |
- } | |
- if len(dst) >= maxDecodedSize { | |
- d.bufs.Put(bufs) | |
- br.close() | |
- return nil, ErrMaxDecodedSizeExceeded | |
- } | |
- v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] | |
- nBits := uint8(v.entry) | |
- br.advance(nBits) | |
- bitsLeft -= nBits | |
- dst = append(dst, uint8(v.entry>>8)) | |
- } | |
- d.bufs.Put(bufs) | |
- return dst, br.close() | |
-} | |
- | |
// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. | |
// The cap of the output buffer will be the maximum decompressed size. | |
// The length of the supplied input must match the end of a block exactly. | |
@@ -995,7 +885,6 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { | |
const shift = 56 | |
const tlSize = 1 << 8 | |
- const tlMask = tlSize - 1 | |
single := d.dt.single[:tlSize] | |
// Use temp table to avoid bound checks/append penalty. | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s | |
deleted file mode 100644 | |
index 0d6cb1a9..00000000 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s | |
+++ /dev/null | |
@@ -1,488 +0,0 @@ | |
-// +build !appengine | |
-// +build gc | |
-// +build !noasm | |
- | |
-#include "textflag.h" | |
-#include "funcdata.h" | |
-#include "go_asm.h" | |
- | |
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table | |
- | |
-// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) | |
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 | |
-#define off R8 | |
-#define buffer DI | |
-#define table SI | |
- | |
-#define br_bits_read R9 | |
-#define br_value R10 | |
-#define br_offset R11 | |
-#define peek_bits R12 | |
-#define exhausted DX | |
- | |
-#define br0 R13 | |
-#define br1 R14 | |
-#define br2 R15 | |
-#define br3 BP | |
- | |
- MOVQ BP, 0(SP) | |
- | |
- XORQ exhausted, exhausted // exhausted = false | |
- XORQ off, off // off = 0 | |
- | |
- MOVBQZX peekBits+32(FP), peek_bits | |
- MOVQ buf+40(FP), buffer | |
- MOVQ tbl+48(FP), table | |
- | |
- MOVQ pbr0+0(FP), br0 | |
- MOVQ pbr1+8(FP), br1 | |
- MOVQ pbr2+16(FP), br2 | |
- MOVQ pbr3+24(FP), br3 | |
- | |
-main_loop: | |
- | |
- // const stream = 0 | |
- // br0.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read | |
- MOVQ bitReaderShifted_value(br0), br_value | |
- MOVQ bitReaderShifted_off(br0), br_offset | |
- | |
- // if b.bitsRead >= 32 { | |
- CMPQ br_bits_read, $32 | |
- JB skip_fill0 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br0), AX | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br0.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- | |
- // } | |
-skip_fill0: | |
- | |
- // val0 := br0.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br0.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val1 := br0.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br0.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 0(buffer)(off*1) | |
- | |
- // SECOND PART: | |
- // val2 := br0.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v2 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br0.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val3 := br0.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v3 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br0.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off+2] = uint8(v2.entry >> 8) | |
- // buf[stream][off+3] = uint8(v3.entry >> 8) | |
- MOVW BX, 0+2(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br0) | |
- MOVQ br_value, bitReaderShifted_value(br0) | |
- MOVQ br_offset, bitReaderShifted_off(br0) | |
- | |
- // const stream = 1 | |
- // br1.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read | |
- MOVQ bitReaderShifted_value(br1), br_value | |
- MOVQ bitReaderShifted_off(br1), br_offset | |
- | |
- // if b.bitsRead >= 32 { | |
- CMPQ br_bits_read, $32 | |
- JB skip_fill1 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br1), AX | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br1.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- | |
- // } | |
-skip_fill1: | |
- | |
- // val0 := br1.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br1.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val1 := br1.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br1.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 256(buffer)(off*1) | |
- | |
- // SECOND PART: | |
- // val2 := br1.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v2 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br1.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val3 := br1.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v3 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br1.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off+2] = uint8(v2.entry >> 8) | |
- // buf[stream][off+3] = uint8(v3.entry >> 8) | |
- MOVW BX, 256+2(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br1) | |
- MOVQ br_value, bitReaderShifted_value(br1) | |
- MOVQ br_offset, bitReaderShifted_off(br1) | |
- | |
- // const stream = 2 | |
- // br2.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read | |
- MOVQ bitReaderShifted_value(br2), br_value | |
- MOVQ bitReaderShifted_off(br2), br_offset | |
- | |
- // if b.bitsRead >= 32 { | |
- CMPQ br_bits_read, $32 | |
- JB skip_fill2 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br2), AX | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br2.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- | |
- // } | |
-skip_fill2: | |
- | |
- // val0 := br2.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br2.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val1 := br2.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br2.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 512(buffer)(off*1) | |
- | |
- // SECOND PART: | |
- // val2 := br2.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v2 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br2.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val3 := br2.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v3 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br2.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off+2] = uint8(v2.entry >> 8) | |
- // buf[stream][off+3] = uint8(v3.entry >> 8) | |
- MOVW BX, 512+2(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br2) | |
- MOVQ br_value, bitReaderShifted_value(br2) | |
- MOVQ br_offset, bitReaderShifted_off(br2) | |
- | |
- // const stream = 3 | |
- // br3.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read | |
- MOVQ bitReaderShifted_value(br3), br_value | |
- MOVQ bitReaderShifted_off(br3), br_offset | |
- | |
- // if b.bitsRead >= 32 { | |
- CMPQ br_bits_read, $32 | |
- JB skip_fill3 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br3), AX | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br3.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- | |
- // } | |
-skip_fill3: | |
- | |
- // val0 := br3.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br3.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val1 := br3.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br3.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 768(buffer)(off*1) | |
- | |
- // SECOND PART: | |
- // val2 := br3.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v2 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br3.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val3 := br3.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v3 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br3.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off+2] = uint8(v2.entry >> 8) | |
- // buf[stream][off+3] = uint8(v3.entry >> 8) | |
- MOVW BX, 768+2(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br3) | |
- MOVQ br_value, bitReaderShifted_value(br3) | |
- MOVQ br_offset, bitReaderShifted_off(br3) | |
- | |
- ADDQ $4, off // off += 2 | |
- | |
- TESTB DH, DH // any br[i].ofs < 4? | |
- JNZ end | |
- | |
- CMPQ off, $bufoff | |
- JL main_loop | |
- | |
-end: | |
- MOVQ 0(SP), BP | |
- | |
- MOVB off, ret+56(FP) | |
- RET | |
- | |
-#undef off | |
-#undef buffer | |
-#undef table | |
- | |
-#undef br_bits_read | |
-#undef br_value | |
-#undef br_offset | |
-#undef peek_bits | |
-#undef exhausted | |
- | |
-#undef br0 | |
-#undef br1 | |
-#undef br2 | |
-#undef br3 | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in | |
deleted file mode 100644 | |
index 6d477a2c..00000000 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in | |
+++ /dev/null | |
@@ -1,197 +0,0 @@ | |
-// +build !appengine | |
-// +build gc | |
-// +build !noasm | |
- | |
-#include "textflag.h" | |
-#include "funcdata.h" | |
-#include "go_asm.h" | |
- | |
- | |
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table | |
- | |
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) | |
-TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8 | |
-#define off R8 | |
-#define buffer DI | |
-#define table SI | |
- | |
-#define br_bits_read R9 | |
-#define br_value R10 | |
-#define br_offset R11 | |
-#define peek_bits R12 | |
-#define exhausted DX | |
- | |
-#define br0 R13 | |
-#define br1 R14 | |
-#define br2 R15 | |
-#define br3 BP | |
- | |
- MOVQ BP, 0(SP) | |
- | |
- XORQ exhausted, exhausted // exhausted = false | |
- XORQ off, off // off = 0 | |
- | |
- MOVBQZX peekBits+32(FP), peek_bits | |
- MOVQ buf+40(FP), buffer | |
- MOVQ tbl+48(FP), table | |
- | |
- MOVQ pbr0+0(FP), br0 | |
- MOVQ pbr1+8(FP), br1 | |
- MOVQ pbr2+16(FP), br2 | |
- MOVQ pbr3+24(FP), br3 | |
- | |
-main_loop: | |
-{{ define "decode_2_values_x86" }} | |
- // const stream = {{ var "id" }} | |
- // br{{ var "id"}}.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read | |
- MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value | |
- MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset | |
- | |
- // if b.bitsRead >= 32 { | |
- CMPQ br_bits_read, $32 | |
- JB skip_fill{{ var "id" }} | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br{{ var "id" }}), AX | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br{{ var "id"}}.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- // } | |
-skip_fill{{ var "id" }}: | |
- | |
- // val0 := br{{ var "id"}}.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br{{ var "id"}}.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val1 := br{{ var "id"}}.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br{{ var "id"}}.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, {{ var "bufofs" }}(buffer)(off*1) | |
- | |
- // SECOND PART: | |
- // val2 := br{{ var "id"}}.peekTopBits(peekBits) | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v2 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br{{ var "id"}}.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- // val3 := br{{ var "id"}}.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
- // v3 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br{{ var "id"}}.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- MOVBQZX AL, CX | |
- SHLQ CX, br_value // value <<= n | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off+2] = uint8(v2.entry >> 8) | |
- // buf[stream][off+3] = uint8(v3.entry >> 8) | |
- MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) | |
- MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) | |
- MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) | |
-{{ end }} | |
- | |
- {{ set "id" "0" }} | |
- {{ set "ofs" "0" }} | |
- {{ set "bufofs" "0" }} {{/* id * bufoff */}} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "1" }} | |
- {{ set "ofs" "8" }} | |
- {{ set "bufofs" "256" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "2" }} | |
- {{ set "ofs" "16" }} | |
- {{ set "bufofs" "512" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "3" }} | |
- {{ set "ofs" "24" }} | |
- {{ set "bufofs" "768" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- ADDQ $4, off // off += 2 | |
- | |
- TESTB DH, DH // any br[i].ofs < 4? | |
- JNZ end | |
- | |
- CMPQ off, $bufoff | |
- JL main_loop | |
-end: | |
- MOVQ 0(SP), BP | |
- | |
- MOVB off, ret+56(FP) | |
- RET | |
-#undef off | |
-#undef buffer | |
-#undef table | |
- | |
-#undef br_bits_read | |
-#undef br_value | |
-#undef br_offset | |
-#undef peek_bits | |
-#undef exhausted | |
- | |
-#undef br0 | |
-#undef br1 | |
-#undef br2 | |
-#undef br3 | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | |
index ce8e93bc..671e630a 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go | |
@@ -2,30 +2,43 @@ | |
// +build amd64,!appengine,!noasm,gc | |
// This file contains the specialisation of Decoder.Decompress4X | |
-// that uses an asm implementation of its main loop. | |
+// and Decoder.Decompress1X that use an asm implementation of thir main loops. | |
package huff0 | |
import ( | |
"errors" | |
"fmt" | |
+ | |
+ "github.com/klauspost/compress/internal/cpuinfo" | |
) | |
// decompress4x_main_loop_x86 is an x86 assembler implementation | |
// of Decompress4X when tablelog > 8. | |
//go:noescape | |
-func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
- peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 | |
+func decompress4x_main_loop_amd64(ctx *decompress4xContext) | |
// decompress4x_8b_loop_x86 is an x86 assembler implementation | |
// of Decompress4X when tablelog <= 8 which decodes 4 entries | |
// per loop. | |
//go:noescape | |
-func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
- peekBits uint8, buf *byte, tbl *dEntrySingle) uint8 | |
+func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) | |
// fallback8BitSize is the size where using Go version is faster. | |
const fallback8BitSize = 800 | |
+type decompress4xContext struct { | |
+ pbr0 *bitReaderShifted | |
+ pbr1 *bitReaderShifted | |
+ pbr2 *bitReaderShifted | |
+ pbr3 *bitReaderShifted | |
+ peekBits uint8 | |
+ out *byte | |
+ dstEvery int | |
+ tbl *dEntrySingle | |
+ decoded int | |
+ limit *byte | |
+} | |
+ | |
// Decompress4X will decompress a 4X encoded stream. | |
// The length of the supplied input must match the end of a block exactly. | |
// The *capacity* of the dst slice must match the destination size of | |
@@ -42,6 +55,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
if cap(dst) < fallback8BitSize && use8BitTables { | |
return d.decompress4X8bit(dst, src) | |
} | |
+ | |
var br [4]bitReaderShifted | |
// Decode "jump table" | |
start := 6 | |
@@ -71,70 +85,28 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
const tlMask = tlSize - 1 | |
single := d.dt.single[:tlSize] | |
- // Use temp table to avoid bound checks/append penalty. | |
- buf := d.buffer() | |
- var off uint8 | |
var decoded int | |
- const debug = false | |
- | |
- // see: bitReaderShifted.peekBitsFast() | |
- peekBits := uint8((64 - d.actualTableLog) & 63) | |
- | |
- // Decode 2 values from each decoder/loop. | |
- const bufoff = 256 | |
- for { | |
- if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { | |
- break | |
+ if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { | |
+ ctx := decompress4xContext{ | |
+ pbr0: &br[0], | |
+ pbr1: &br[1], | |
+ pbr2: &br[2], | |
+ pbr3: &br[3], | |
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() | |
+ out: &out[0], | |
+ dstEvery: dstEvery, | |
+ tbl: &single[0], | |
+ limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. | |
} | |
- | |
if use8BitTables { | |
- off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) | |
+ decompress4x_8b_main_loop_amd64(&ctx) | |
} else { | |
- off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0]) | |
- } | |
- if debug { | |
- fmt.Print("DEBUG: ") | |
- fmt.Printf("off=%d,", off) | |
- for i := 0; i < 4; i++ { | |
- fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}", | |
- i, br[i].bitsRead, br[i].value, br[i].off) | |
- } | |
- fmt.Println("") | |
- } | |
- | |
- if off != 0 { | |
- break | |
+ decompress4x_main_loop_amd64(&ctx) | |
} | |
- if bufoff > dstEvery { | |
- d.bufs.Put(buf) | |
- return nil, errors.New("corruption detected: stream overrun 1") | |
- } | |
- copy(out, buf[0][:]) | |
- copy(out[dstEvery:], buf[1][:]) | |
- copy(out[dstEvery*2:], buf[2][:]) | |
- copy(out[dstEvery*3:], buf[3][:]) | |
- out = out[bufoff:] | |
- decoded += bufoff * 4 | |
- // There must at least be 3 buffers left. | |
- if len(out) < dstEvery*3 { | |
- d.bufs.Put(buf) | |
- return nil, errors.New("corruption detected: stream overrun 2") | |
- } | |
- } | |
- if off > 0 { | |
- ioff := int(off) | |
- if len(out) < dstEvery*3+ioff { | |
- d.bufs.Put(buf) | |
- return nil, errors.New("corruption detected: stream overrun 3") | |
- } | |
- copy(out, buf[0][:off]) | |
- copy(out[dstEvery:], buf[1][:off]) | |
- copy(out[dstEvery*2:], buf[2][:off]) | |
- copy(out[dstEvery*3:], buf[3][:off]) | |
- decoded += int(off) * 4 | |
- out = out[off:] | |
+ decoded = ctx.decoded | |
+ out = out[decoded/4:] | |
} | |
// Decode remaining. | |
@@ -150,7 +122,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
for bitsLeft > 0 { | |
br.fill() | |
if offset >= endsAt { | |
- d.bufs.Put(buf) | |
return nil, errors.New("corruption detected: stream overrun 4") | |
} | |
@@ -164,7 +135,6 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
offset++ | |
} | |
if offset != endsAt { | |
- d.bufs.Put(buf) | |
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) | |
} | |
decoded += offset - dstEvery*i | |
@@ -173,9 +143,86 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
return nil, err | |
} | |
} | |
- d.bufs.Put(buf) | |
if dstSize != decoded { | |
return nil, errors.New("corruption detected: short output block") | |
} | |
return dst, nil | |
} | |
+ | |
+// decompress4x_main_loop_x86 is an x86 assembler implementation | |
+// of Decompress1X when tablelog > 8. | |
+//go:noescape | |
+func decompress1x_main_loop_amd64(ctx *decompress1xContext) | |
+ | |
+// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation | |
+// of Decompress1X when tablelog > 8. | |
+//go:noescape | |
+func decompress1x_main_loop_bmi2(ctx *decompress1xContext) | |
+ | |
+type decompress1xContext struct { | |
+ pbr *bitReaderShifted | |
+ peekBits uint8 | |
+ out *byte | |
+ outCap int | |
+ tbl *dEntrySingle | |
+ decoded int | |
+} | |
+ | |
+// Error reported by asm implementations | |
+const error_max_decoded_size_exeeded = -1 | |
+ | |
+// Decompress1X will decompress a 1X encoded stream. | |
+// The cap of the output buffer will be the maximum decompressed size. | |
+// The length of the supplied input must match the end of a block exactly. | |
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { | |
+ if len(d.dt.single) == 0 { | |
+ return nil, errors.New("no table loaded") | |
+ } | |
+ var br bitReaderShifted | |
+ err := br.init(src) | |
+ if err != nil { | |
+ return dst, err | |
+ } | |
+ maxDecodedSize := cap(dst) | |
+ dst = dst[:maxDecodedSize] | |
+ | |
+ const tlSize = 1 << tableLogMax | |
+ const tlMask = tlSize - 1 | |
+ | |
+ if maxDecodedSize >= 4 { | |
+ ctx := decompress1xContext{ | |
+ pbr: &br, | |
+ out: &dst[0], | |
+ outCap: maxDecodedSize, | |
+ peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() | |
+ tbl: &d.dt.single[0], | |
+ } | |
+ | |
+ if cpuinfo.HasBMI2() { | |
+ decompress1x_main_loop_bmi2(&ctx) | |
+ } else { | |
+ decompress1x_main_loop_amd64(&ctx) | |
+ } | |
+ if ctx.decoded == error_max_decoded_size_exeeded { | |
+ return nil, ErrMaxDecodedSizeExceeded | |
+ } | |
+ | |
+ dst = dst[:ctx.decoded] | |
+ } | |
+ | |
+ // br < 8, so uint8 is fine | |
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead | |
+ for bitsLeft > 0 { | |
+ br.fill() | |
+ if len(dst) >= maxDecodedSize { | |
+ br.close() | |
+ return nil, ErrMaxDecodedSizeExceeded | |
+ } | |
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ nBits := uint8(v.entry) | |
+ br.advance(nBits) | |
+ bitsLeft -= nBits | |
+ dst = append(dst, uint8(v.entry>>8)) | |
+ } | |
+ return dst, br.close() | |
+} | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | |
index 2edad3ea..6c65c6e2 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | |
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s | |
@@ -1,506 +1,865 @@ | |
-// +build !appengine | |
-// +build gc | |
-// +build !noasm | |
- | |
-#include "textflag.h" | |
-#include "funcdata.h" | |
-#include "go_asm.h" | |
- | |
-#ifdef GOAMD64_v4 | |
-#ifndef GOAMD64_v3 | |
-#define GOAMD64_v3 | |
-#endif | |
-#endif | |
- | |
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table | |
- | |
-// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) | |
-TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 | |
-#define off R8 | |
-#define buffer DI | |
-#define table SI | |
- | |
-#define br_bits_read R9 | |
-#define br_value R10 | |
-#define br_offset R11 | |
-#define peek_bits R12 | |
-#define exhausted DX | |
- | |
-#define br0 R13 | |
-#define br1 R14 | |
-#define br2 R15 | |
-#define br3 BP | |
- | |
- MOVQ BP, 0(SP) | |
- | |
- XORQ exhausted, exhausted // exhausted = false | |
- XORQ off, off // off = 0 | |
- | |
- MOVBQZX peekBits+32(FP), peek_bits | |
- MOVQ buf+40(FP), buffer | |
- MOVQ tbl+48(FP), table | |
- | |
- MOVQ pbr0+0(FP), br0 | |
- MOVQ pbr1+8(FP), br1 | |
- MOVQ pbr2+16(FP), br2 | |
- MOVQ pbr3+24(FP), br3 | |
- | |
+// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. | |
+ | |
+//go:build amd64 && !appengine && !noasm && gc | |
+// +build amd64,!appengine,!noasm,gc | |
+ | |
+// func decompress4x_main_loop_amd64(ctx *decompress4xContext) | |
+TEXT ·decompress4x_main_loop_amd64(SB), $8-8 | |
+ XORQ DX, DX | |
+ | |
+ // Preload values | |
+ MOVQ ctx+0(FP), AX | |
+ MOVBQZX 32(AX), SI | |
+ MOVQ 40(AX), DI | |
+ MOVQ DI, BX | |
+ MOVQ 72(AX), CX | |
+ MOVQ CX, (SP) | |
+ MOVQ 48(AX), R8 | |
+ MOVQ 56(AX), R9 | |
+ MOVQ (AX), R10 | |
+ MOVQ 8(AX), R11 | |
+ MOVQ 16(AX), R12 | |
+ MOVQ 24(AX), R13 | |
+ | |
+ // Main loop | |
main_loop: | |
- | |
- // const stream = 0 | |
- // br0.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read | |
- MOVQ bitReaderShifted_value(br0), br_value | |
- MOVQ bitReaderShifted_off(br0), br_offset | |
- | |
- // We must have at least 2 * max tablelog left | |
- CMPQ br_bits_read, $64-22 | |
- JBE skip_fill0 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br0), AX | |
+ MOVQ BX, DI | |
+ CMPQ DI, (SP) | |
+ SETGE DL | |
+ | |
+ // br0.fillFast32() | |
+ MOVQ 32(R10), R14 | |
+ MOVBQZX 40(R10), R15 | |
+ CMPQ R15, $0x20 | |
+ JBE skip_fill0 | |
+ MOVQ 24(R10), AX | |
+ SUBQ $0x20, R15 | |
+ SUBQ $0x04, AX | |
+ MOVQ (R10), BP | |
// b.value |= uint64(low) << (b.bitsRead & 63) | |
-#ifdef GOAMD64_v3 | |
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) | |
- | |
-#else | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- | |
-#endif | |
- | |
- ORQ AX, br_value | |
+ MOVL (AX)(BP*1), BP | |
+ MOVQ R15, CX | |
+ SHLQ CL, BP | |
+ MOVQ AX, 24(R10) | |
+ ORQ BP, R14 | |
// exhausted = exhausted || (br0.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
+ CMPQ AX, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
- // } | |
skip_fill0: | |
- | |
// val0 := br0.peekTopBits(peekBits) | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ R14, BP | |
+ MOVQ SI, CX | |
+ SHRQ CL, BP | |
// v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br0.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
+ MOVW (R9)(BP*2), CX | |
-#endif | |
+ // br0.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
// val1 := br0.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ SI, CX | |
+ MOVQ R14, BP | |
+ SHRQ CL, BP | |
// v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
+ MOVW (R9)(BP*2), CX | |
// br0.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- | |
-#endif | |
- | |
- ADDQ CX, br_bits_read // bits_read += n | |
+ MOVB CH, AH | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
// these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 0(buffer)(off*1) | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ MOVW AX, (DI) | |
// update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br0) | |
- MOVQ br_value, bitReaderShifted_value(br0) | |
- MOVQ br_offset, bitReaderShifted_off(br0) | |
- | |
- // const stream = 1 | |
- // br1.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read | |
- MOVQ bitReaderShifted_value(br1), br_value | |
- MOVQ bitReaderShifted_off(br1), br_offset | |
- | |
- // We must have at least 2 * max tablelog left | |
- CMPQ br_bits_read, $64-22 | |
- JBE skip_fill1 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br1), AX | |
+ MOVQ R14, 32(R10) | |
+ MOVB R15, 40(R10) | |
+ ADDQ R8, DI | |
+ | |
+ // br1.fillFast32() | |
+ MOVQ 32(R11), R14 | |
+ MOVBQZX 40(R11), R15 | |
+ CMPQ R15, $0x20 | |
+ JBE skip_fill1 | |
+ MOVQ 24(R11), AX | |
+ SUBQ $0x20, R15 | |
+ SUBQ $0x04, AX | |
+ MOVQ (R11), BP | |
// b.value |= uint64(low) << (b.bitsRead & 63) | |
-#ifdef GOAMD64_v3 | |
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) | |
- | |
-#else | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- | |
-#endif | |
- | |
- ORQ AX, br_value | |
+ MOVL (AX)(BP*1), BP | |
+ MOVQ R15, CX | |
+ SHLQ CL, BP | |
+ MOVQ AX, 24(R11) | |
+ ORQ BP, R14 | |
// exhausted = exhausted || (br1.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
+ CMPQ AX, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
- // } | |
skip_fill1: | |
- | |
// val0 := br1.peekTopBits(peekBits) | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ R14, BP | |
+ MOVQ SI, CX | |
+ SHRQ CL, BP | |
// v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br1.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
+ MOVW (R9)(BP*2), CX | |
-#endif | |
+ // br1.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
// val1 := br1.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ SI, CX | |
+ MOVQ R14, BP | |
+ SHRQ CL, BP | |
// v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
+ MOVW (R9)(BP*2), CX | |
// br1.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- | |
-#endif | |
- | |
- ADDQ CX, br_bits_read // bits_read += n | |
+ MOVB CH, AH | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
// these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 256(buffer)(off*1) | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ MOVW AX, (DI) | |
// update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br1) | |
- MOVQ br_value, bitReaderShifted_value(br1) | |
- MOVQ br_offset, bitReaderShifted_off(br1) | |
- | |
- // const stream = 2 | |
- // br2.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read | |
- MOVQ bitReaderShifted_value(br2), br_value | |
- MOVQ bitReaderShifted_off(br2), br_offset | |
- | |
- // We must have at least 2 * max tablelog left | |
- CMPQ br_bits_read, $64-22 | |
- JBE skip_fill2 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br2), AX | |
+ MOVQ R14, 32(R11) | |
+ MOVB R15, 40(R11) | |
+ ADDQ R8, DI | |
+ | |
+ // br2.fillFast32() | |
+ MOVQ 32(R12), R14 | |
+ MOVBQZX 40(R12), R15 | |
+ CMPQ R15, $0x20 | |
+ JBE skip_fill2 | |
+ MOVQ 24(R12), AX | |
+ SUBQ $0x20, R15 | |
+ SUBQ $0x04, AX | |
+ MOVQ (R12), BP | |
// b.value |= uint64(low) << (b.bitsRead & 63) | |
-#ifdef GOAMD64_v3 | |
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) | |
- | |
-#else | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- | |
-#endif | |
- | |
- ORQ AX, br_value | |
+ MOVL (AX)(BP*1), BP | |
+ MOVQ R15, CX | |
+ SHLQ CL, BP | |
+ MOVQ AX, 24(R12) | |
+ ORQ BP, R14 | |
// exhausted = exhausted || (br2.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
+ CMPQ AX, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
- // } | |
skip_fill2: | |
- | |
// val0 := br2.peekTopBits(peekBits) | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ R14, BP | |
+ MOVQ SI, CX | |
+ SHRQ CL, BP | |
// v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br2.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
+ MOVW (R9)(BP*2), CX | |
-#endif | |
+ // br2.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
- | |
-#else | |
// val1 := br2.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
- | |
-#endif | |
+ MOVQ SI, CX | |
+ MOVQ R14, BP | |
+ SHRQ CL, BP | |
// v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
+ MOVW (R9)(BP*2), CX | |
// br2.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
- | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
- | |
-#endif | |
- | |
- ADDQ CX, br_bits_read // bits_read += n | |
+ MOVB CH, AH | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
// these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 512(buffer)(off*1) | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ MOVW AX, (DI) | |
// update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br2) | |
- MOVQ br_value, bitReaderShifted_value(br2) | |
- MOVQ br_offset, bitReaderShifted_off(br2) | |
- | |
- // const stream = 3 | |
- // br3.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read | |
- MOVQ bitReaderShifted_value(br3), br_value | |
- MOVQ bitReaderShifted_off(br3), br_offset | |
- | |
- // We must have at least 2 * max tablelog left | |
- CMPQ br_bits_read, $64-22 | |
- JBE skip_fill3 | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br3), AX | |
+ MOVQ R14, 32(R12) | |
+ MOVB R15, 40(R12) | |
+ ADDQ R8, DI | |
+ | |
+ // br3.fillFast32() | |
+ MOVQ 32(R13), R14 | |
+ MOVBQZX 40(R13), R15 | |
+ CMPQ R15, $0x20 | |
+ JBE skip_fill3 | |
+ MOVQ 24(R13), AX | |
+ SUBQ $0x20, R15 | |
+ SUBQ $0x04, AX | |
+ MOVQ (R13), BP | |
// b.value |= uint64(low) << (b.bitsRead & 63) | |
-#ifdef GOAMD64_v3 | |
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) | |
- | |
-#else | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
- | |
-#endif | |
- | |
- ORQ AX, br_value | |
+ MOVL (AX)(BP*1), BP | |
+ MOVQ R15, CX | |
+ SHLQ CL, BP | |
+ MOVQ AX, 24(R13) | |
+ ORQ BP, R14 | |
// exhausted = exhausted || (br3.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
+ CMPQ AX, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
- // } | |
skip_fill3: | |
- | |
// val0 := br3.peekTopBits(peekBits) | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
+ MOVQ R14, BP | |
+ MOVQ SI, CX | |
+ SHRQ CL, BP | |
-#else | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
+ // v0 := table[val0&mask] | |
+ MOVW (R9)(BP*2), CX | |
-#endif | |
+ // br3.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
+ // val1 := br3.peekTopBits(peekBits) | |
+ MOVQ SI, CX | |
+ MOVQ R14, BP | |
+ SHRQ CL, BP | |
- // br3.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
+ // v1 := table[val1&mask] | |
+ MOVW (R9)(BP*2), CX | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
+ // br3.advance(uint8(v1.entry)) | |
+ MOVB CH, AH | |
+ SHLQ CL, R14 | |
+ ADDB CL, R15 | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
+ // these two writes get coalesced | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ MOVW AX, (DI) | |
-#endif | |
+ // update the bitrader reader structure | |
+ MOVQ R14, 32(R13) | |
+ MOVB R15, 40(R13) | |
+ ADDQ $0x02, BX | |
+ TESTB DL, DL | |
+ JZ main_loop | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ 40(AX), CX | |
+ MOVQ BX, DX | |
+ SUBQ CX, DX | |
+ SHLQ $0x02, DX | |
+ MOVQ DX, 64(AX) | |
+ RET | |
- ADDQ CX, br_bits_read // bits_read += n | |
+// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) | |
+TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8 | |
+ XORQ DX, DX | |
+ | |
+ // Preload values | |
+ MOVQ ctx+0(FP), CX | |
+ MOVBQZX 32(CX), BX | |
+ MOVQ 40(CX), SI | |
+ MOVQ SI, (SP) | |
+ MOVQ 72(CX), DX | |
+ MOVQ DX, 8(SP) | |
+ MOVQ 48(CX), DI | |
+ MOVQ 56(CX), R8 | |
+ MOVQ (CX), R9 | |
+ MOVQ 8(CX), R10 | |
+ MOVQ 16(CX), R11 | |
+ MOVQ 24(CX), R12 | |
+ | |
+ // Main loop | |
+main_loop: | |
+ MOVQ (SP), SI | |
+ CMPQ SI, 8(SP) | |
+ SETGE DL | |
+ | |
+ // br1000.fillFast32() | |
+ MOVQ 32(R9), R13 | |
+ MOVBQZX 40(R9), R14 | |
+ CMPQ R14, $0x20 | |
+ JBE skip_fill1000 | |
+ MOVQ 24(R9), R15 | |
+ SUBQ $0x20, R14 | |
+ SUBQ $0x04, R15 | |
+ MOVQ (R9), BP | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
+ // b.value |= uint64(low) << (b.bitsRead & 63) | |
+ MOVL (R15)(BP*1), BP | |
+ MOVQ R14, CX | |
+ SHLQ CL, BP | |
+ MOVQ R15, 24(R9) | |
+ ORQ BP, R13 | |
+ | |
+ // exhausted = exhausted || (br1000.off < 4) | |
+ CMPQ R15, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
+ | |
+skip_fill1000: | |
+ // val0 := br0.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
-#else | |
- // val1 := br3.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
+ // v0 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
-#endif | |
+ // br0.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
+ // val1 := br0.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v1 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br0.advance(uint8(v1.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // val2 := br0.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v2 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br0.advance(uint8(v2.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ | |
+ // val3 := br0.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v3 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br0.advance(uint8(v3.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // these four writes get coalesced | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8) | |
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8) | |
+ MOVL AX, (SI) | |
+ | |
+ // update the bitreader reader structure | |
+ MOVQ R13, 32(R9) | |
+ MOVB R14, 40(R9) | |
+ ADDQ DI, SI | |
+ | |
+ // br1001.fillFast32() | |
+ MOVQ 32(R10), R13 | |
+ MOVBQZX 40(R10), R14 | |
+ CMPQ R14, $0x20 | |
+ JBE skip_fill1001 | |
+ MOVQ 24(R10), R15 | |
+ SUBQ $0x20, R14 | |
+ SUBQ $0x04, R15 | |
+ MOVQ (R10), BP | |
- // br3.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
+ // b.value |= uint64(low) << (b.bitsRead & 63) | |
+ MOVL (R15)(BP*1), BP | |
+ MOVQ R14, CX | |
+ SHLQ CL, BP | |
+ MOVQ R15, 24(R10) | |
+ ORQ BP, R13 | |
+ | |
+ // exhausted = exhausted || (br1001.off < 4) | |
+ CMPQ R15, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
+ | |
+skip_fill1001: | |
+ // val0 := br1.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
+ // v0 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
+ // br1.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
-#endif | |
+ // val1 := br1.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v1 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br1.advance(uint8(v1.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // val2 := br1.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v2 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br1.advance(uint8(v2.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ | |
+ // val3 := br1.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v3 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br1.advance(uint8(v3.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // these four writes get coalesced | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8) | |
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8) | |
+ MOVL AX, (SI) | |
+ | |
+ // update the bitreader reader structure | |
+ MOVQ R13, 32(R10) | |
+ MOVB R14, 40(R10) | |
+ ADDQ DI, SI | |
+ | |
+ // br1002.fillFast32() | |
+ MOVQ 32(R11), R13 | |
+ MOVBQZX 40(R11), R14 | |
+ CMPQ R14, $0x20 | |
+ JBE skip_fill1002 | |
+ MOVQ 24(R11), R15 | |
+ SUBQ $0x20, R14 | |
+ SUBQ $0x04, R15 | |
+ MOVQ (R11), BP | |
- ADDQ CX, br_bits_read // bits_read += n | |
+ // b.value |= uint64(low) << (b.bitsRead & 63) | |
+ MOVL (R15)(BP*1), BP | |
+ MOVQ R14, CX | |
+ SHLQ CL, BP | |
+ MOVQ R15, 24(R11) | |
+ ORQ BP, R13 | |
+ | |
+ // exhausted = exhausted || (br1002.off < 4) | |
+ CMPQ R15, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
+ | |
+skip_fill1002: | |
+ // val0 := br2.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, 768(buffer)(off*1) | |
+ // v0 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br3) | |
- MOVQ br_value, bitReaderShifted_value(br3) | |
- MOVQ br_offset, bitReaderShifted_off(br3) | |
+ // br2.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
- ADDQ $2, off // off += 2 | |
+ // val1 := br2.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v1 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br2.advance(uint8(v1.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // val2 := br2.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v2 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br2.advance(uint8(v2.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ | |
+ // val3 := br2.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+ // v3 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br2.advance(uint8(v3.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // these four writes get coalesced | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+ // out[id * dstEvery + 3] = uint8(v2.entry >> 8) | |
+ // out[id * dstEvery + 4] = uint8(v3.entry >> 8) | |
+ MOVL AX, (SI) | |
+ | |
+ // update the bitreader reader structure | |
+ MOVQ R13, 32(R11) | |
+ MOVB R14, 40(R11) | |
+ ADDQ DI, SI | |
+ | |
+ // br1003.fillFast32() | |
+ MOVQ 32(R12), R13 | |
+ MOVBQZX 40(R12), R14 | |
+ CMPQ R14, $0x20 | |
+ JBE skip_fill1003 | |
+ MOVQ 24(R12), R15 | |
+ SUBQ $0x20, R14 | |
+ SUBQ $0x04, R15 | |
+ MOVQ (R12), BP | |
- TESTB DH, DH // any br[i].ofs < 4? | |
- JNZ end | |
+ // b.value |= uint64(low) << (b.bitsRead & 63) | |
+ MOVL (R15)(BP*1), BP | |
+ MOVQ R14, CX | |
+ SHLQ CL, BP | |
+ MOVQ R15, 24(R12) | |
+ ORQ BP, R13 | |
+ | |
+ // exhausted = exhausted || (br1003.off < 4) | |
+ CMPQ R15, $0x04 | |
+ SETLT AL | |
+ ORB AL, DL | |
+ | |
+skip_fill1003: | |
+ // val0 := br3.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
- CMPQ off, $bufoff | |
- JL main_loop | |
+ // v0 := table[val0&mask] | |
+ MOVW (R8)(R15*2), CX | |
-end: | |
- MOVQ 0(SP), BP | |
+ // br3.advance(uint8(v0.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
- MOVB off, ret+56(FP) | |
+ // val1 := br3.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+	// v1 := table[val1&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br3.advance(uint8(v1.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // val2 := br3.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+	// v2 := table[val2&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br3.advance(uint8(v2.entry) | |
+ MOVB CH, AH | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ | |
+ // val3 := br3.peekTopBits(peekBits) | |
+ MOVQ R13, R15 | |
+ MOVQ BX, CX | |
+ SHRQ CL, R15 | |
+ | |
+	// v3 := table[val3&mask] | |
+ MOVW (R8)(R15*2), CX | |
+ | |
+ // br3.advance(uint8(v3.entry) | |
+ MOVB CH, AL | |
+ SHLQ CL, R13 | |
+ ADDB CL, R14 | |
+ BSWAPL AX | |
+ | |
+ // these four writes get coalesced | |
+ // out[id * dstEvery + 0] = uint8(v0.entry >> 8) | |
+ // out[id * dstEvery + 1] = uint8(v1.entry >> 8) | |
+	// out[id * dstEvery + 2] = uint8(v2.entry >> 8) | |
+	// out[id * dstEvery + 3] = uint8(v3.entry >> 8) | |
+ MOVL AX, (SI) | |
+ | |
+	// update the bitreader structure | |
+ MOVQ R13, 32(R12) | |
+ MOVB R14, 40(R12) | |
+ ADDQ $0x04, (SP) | |
+ TESTB DL, DL | |
+ JZ main_loop | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ 40(AX), CX | |
+ MOVQ (SP), DX | |
+ SUBQ CX, DX | |
+ SHLQ $0x02, DX | |
+ MOVQ DX, 64(AX) | |
RET | |
-#undef off | |
-#undef buffer | |
-#undef table | |
+// func decompress1x_main_loop_amd64(ctx *decompress1xContext) | |
+TEXT ·decompress1x_main_loop_amd64(SB), $0-8 | |
+ MOVQ ctx+0(FP), CX | |
+ MOVQ 16(CX), DX | |
+ MOVQ 24(CX), BX | |
+ CMPQ BX, $0x04 | |
+ JB error_max_decoded_size_exeeded | |
+ LEAQ (DX)(BX*1), BX | |
+ MOVQ (CX), SI | |
+ MOVQ (SI), R8 | |
+ MOVQ 24(SI), R9 | |
+ MOVQ 32(SI), R10 | |
+ MOVBQZX 40(SI), R11 | |
+ MOVQ 32(CX), SI | |
+ MOVBQZX 8(CX), DI | |
+ JMP loop_condition | |
-#undef br_bits_read | |
-#undef br_value | |
-#undef br_offset | |
-#undef peek_bits | |
-#undef exhausted | |
+main_loop: | |
+ // Check if we have room for 4 bytes in the output buffer | |
+ LEAQ 4(DX), CX | |
+ CMPQ CX, BX | |
+ JGE error_max_decoded_size_exeeded | |
+ | |
+ // Decode 4 values | |
+ CMPQ R11, $0x20 | |
+ JL bitReader_fillFast_1_end | |
+ SUBQ $0x20, R11 | |
+ SUBQ $0x04, R9 | |
+ MOVL (R8)(R9*1), R12 | |
+ MOVQ R11, CX | |
+ SHLQ CL, R12 | |
+ ORQ R12, R10 | |
+ | |
+bitReader_fillFast_1_end: | |
+ MOVQ DI, CX | |
+ MOVQ R10, R12 | |
+ SHRQ CL, R12 | |
+ MOVW (SI)(R12*2), CX | |
+ MOVB CH, AL | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLQ CL, R10 | |
+ MOVQ DI, CX | |
+ MOVQ R10, R12 | |
+ SHRQ CL, R12 | |
+ MOVW (SI)(R12*2), CX | |
+ MOVB CH, AH | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLQ CL, R10 | |
+ BSWAPL AX | |
+ CMPQ R11, $0x20 | |
+ JL bitReader_fillFast_2_end | |
+ SUBQ $0x20, R11 | |
+ SUBQ $0x04, R9 | |
+ MOVL (R8)(R9*1), R12 | |
+ MOVQ R11, CX | |
+ SHLQ CL, R12 | |
+ ORQ R12, R10 | |
+ | |
+bitReader_fillFast_2_end: | |
+ MOVQ DI, CX | |
+ MOVQ R10, R12 | |
+ SHRQ CL, R12 | |
+ MOVW (SI)(R12*2), CX | |
+ MOVB CH, AH | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLQ CL, R10 | |
+ MOVQ DI, CX | |
+ MOVQ R10, R12 | |
+ SHRQ CL, R12 | |
+ MOVW (SI)(R12*2), CX | |
+ MOVB CH, AL | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLQ CL, R10 | |
+ BSWAPL AX | |
+ | |
+ // Store the decoded values | |
+ MOVL AX, (DX) | |
+ ADDQ $0x04, DX | |
+ | |
+loop_condition: | |
+ CMPQ R9, $0x08 | |
+ JGE main_loop | |
+ | |
+ // Update ctx structure | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ DX, CX | |
+ MOVQ 16(AX), DX | |
+ SUBQ DX, CX | |
+ MOVQ CX, 40(AX) | |
+ MOVQ (AX), AX | |
+ MOVQ R9, 24(AX) | |
+ MOVQ R10, 32(AX) | |
+ MOVB R11, 40(AX) | |
+ RET | |
-#undef br0 | |
-#undef br1 | |
-#undef br2 | |
-#undef br3 | |
+ // Report error | |
+error_max_decoded_size_exeeded: | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ $-1, CX | |
+ MOVQ CX, 40(AX) | |
+ RET | |
+ | |
+// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) | |
+// Requires: BMI2 | |
+TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 | |
+ MOVQ ctx+0(FP), CX | |
+ MOVQ 16(CX), DX | |
+ MOVQ 24(CX), BX | |
+ CMPQ BX, $0x04 | |
+ JB error_max_decoded_size_exeeded | |
+ LEAQ (DX)(BX*1), BX | |
+ MOVQ (CX), SI | |
+ MOVQ (SI), R8 | |
+ MOVQ 24(SI), R9 | |
+ MOVQ 32(SI), R10 | |
+ MOVBQZX 40(SI), R11 | |
+ MOVQ 32(CX), SI | |
+ MOVBQZX 8(CX), DI | |
+ JMP loop_condition | |
+ | |
+main_loop: | |
+ // Check if we have room for 4 bytes in the output buffer | |
+ LEAQ 4(DX), CX | |
+ CMPQ CX, BX | |
+ JGE error_max_decoded_size_exeeded | |
+ | |
+ // Decode 4 values | |
+ CMPQ R11, $0x20 | |
+ JL bitReader_fillFast_1_end | |
+ SUBQ $0x20, R11 | |
+ SUBQ $0x04, R9 | |
+ MOVL (R8)(R9*1), CX | |
+ SHLXQ R11, CX, CX | |
+ ORQ CX, R10 | |
+ | |
+bitReader_fillFast_1_end: | |
+ SHRXQ DI, R10, CX | |
+ MOVW (SI)(CX*2), CX | |
+ MOVB CH, AL | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLXQ CX, R10, R10 | |
+ SHRXQ DI, R10, CX | |
+ MOVW (SI)(CX*2), CX | |
+ MOVB CH, AH | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLXQ CX, R10, R10 | |
+ BSWAPL AX | |
+ CMPQ R11, $0x20 | |
+ JL bitReader_fillFast_2_end | |
+ SUBQ $0x20, R11 | |
+ SUBQ $0x04, R9 | |
+ MOVL (R8)(R9*1), CX | |
+ SHLXQ R11, CX, CX | |
+ ORQ CX, R10 | |
+ | |
+bitReader_fillFast_2_end: | |
+ SHRXQ DI, R10, CX | |
+ MOVW (SI)(CX*2), CX | |
+ MOVB CH, AH | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLXQ CX, R10, R10 | |
+ SHRXQ DI, R10, CX | |
+ MOVW (SI)(CX*2), CX | |
+ MOVB CH, AL | |
+ MOVBQZX CL, CX | |
+ ADDQ CX, R11 | |
+ SHLXQ CX, R10, R10 | |
+ BSWAPL AX | |
+ | |
+ // Store the decoded values | |
+ MOVL AX, (DX) | |
+ ADDQ $0x04, DX | |
+ | |
+loop_condition: | |
+ CMPQ R9, $0x08 | |
+ JGE main_loop | |
+ | |
+ // Update ctx structure | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ DX, CX | |
+ MOVQ 16(AX), DX | |
+ SUBQ DX, CX | |
+ MOVQ CX, 40(AX) | |
+ MOVQ (AX), AX | |
+ MOVQ R9, 24(AX) | |
+ MOVQ R10, 32(AX) | |
+ MOVB R11, 40(AX) | |
+ RET | |
+ | |
+ // Report error | |
+error_max_decoded_size_exeeded: | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ $-1, CX | |
+ MOVQ CX, 40(AX) | |
+ RET | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in | |
deleted file mode 100644 | |
index 330d86ae..00000000 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in | |
+++ /dev/null | |
@@ -1,195 +0,0 @@ | |
-// +build !appengine | |
-// +build gc | |
-// +build !noasm | |
- | |
-#include "textflag.h" | |
-#include "funcdata.h" | |
-#include "go_asm.h" | |
- | |
-#ifdef GOAMD64_v4 | |
-#ifndef GOAMD64_v3 | |
-#define GOAMD64_v3 | |
-#endif | |
-#endif | |
- | |
-#define bufoff 256 // see decompress.go, we're using [4][256]byte table | |
- | |
-//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted, | |
-// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool) | |
-TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8 | |
-#define off R8 | |
-#define buffer DI | |
-#define table SI | |
- | |
-#define br_bits_read R9 | |
-#define br_value R10 | |
-#define br_offset R11 | |
-#define peek_bits R12 | |
-#define exhausted DX | |
- | |
-#define br0 R13 | |
-#define br1 R14 | |
-#define br2 R15 | |
-#define br3 BP | |
- | |
- MOVQ BP, 0(SP) | |
- | |
- XORQ exhausted, exhausted // exhausted = false | |
- XORQ off, off // off = 0 | |
- | |
- MOVBQZX peekBits+32(FP), peek_bits | |
- MOVQ buf+40(FP), buffer | |
- MOVQ tbl+48(FP), table | |
- | |
- MOVQ pbr0+0(FP), br0 | |
- MOVQ pbr1+8(FP), br1 | |
- MOVQ pbr2+16(FP), br2 | |
- MOVQ pbr3+24(FP), br3 | |
- | |
-main_loop: | |
-{{ define "decode_2_values_x86" }} | |
- // const stream = {{ var "id" }} | |
- // br{{ var "id"}}.fillFast() | |
- MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read | |
- MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value | |
- MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset | |
- | |
- // We must have at least 2 * max tablelog left | |
- CMPQ br_bits_read, $64-22 | |
- JBE skip_fill{{ var "id" }} | |
- | |
- SUBQ $32, br_bits_read // b.bitsRead -= 32 | |
- SUBQ $4, br_offset // b.off -= 4 | |
- | |
- // v := b.in[b.off-4 : b.off] | |
- // v = v[:4] | |
- // low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
- MOVQ bitReaderShifted_in(br{{ var "id" }}), AX | |
- | |
- // b.value |= uint64(low) << (b.bitsRead & 63) | |
-#ifdef GOAMD64_v3 | |
- SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63) | |
-#else | |
- MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) | |
- MOVQ br_bits_read, CX | |
- SHLQ CL, AX | |
-#endif | |
- | |
- ORQ AX, br_value | |
- | |
- // exhausted = exhausted || (br{{ var "id"}}.off < 4) | |
- CMPQ br_offset, $4 | |
- SETLT DL | |
- ORB DL, DH | |
- // } | |
-skip_fill{{ var "id" }}: | |
- | |
- // val0 := br{{ var "id"}}.peekTopBits(peekBits) | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
-#else | |
- MOVQ br_value, AX | |
- MOVQ peek_bits, CX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
-#endif | |
- | |
- // v0 := table[val0&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v0 | |
- | |
- // br{{ var "id"}}.advance(uint8(v0.entry)) | |
- MOVB AH, BL // BL = uint8(v0.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
-#endif | |
- | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- | |
-#ifdef GOAMD64_v3 | |
- SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask | |
-#else | |
- // val1 := br{{ var "id"}}.peekTopBits(peekBits) | |
- MOVQ peek_bits, CX | |
- MOVQ br_value, AX | |
- SHRQ CL, AX // AX = (value >> peek_bits) & mask | |
-#endif | |
- | |
- // v1 := table[val1&mask] | |
- MOVW 0(table)(AX*2), AX // AX - v1 | |
- | |
- // br{{ var "id"}}.advance(uint8(v1.entry)) | |
- MOVB AH, BH // BH = uint8(v1.entry >> 8) | |
- | |
-#ifdef GOAMD64_v3 | |
- MOVBQZX AL, CX | |
- SHLXQ AX, br_value, br_value // value <<= n | |
-#else | |
- MOVBQZX AL, CX | |
- SHLQ CL, br_value // value <<= n | |
-#endif | |
- | |
- ADDQ CX, br_bits_read // bits_read += n | |
- | |
- | |
- // these two writes get coalesced | |
- // buf[stream][off] = uint8(v0.entry >> 8) | |
- // buf[stream][off+1] = uint8(v1.entry >> 8) | |
- MOVW BX, {{ var "bufofs" }}(buffer)(off*1) | |
- | |
- // update the bitrader reader structure | |
- MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }}) | |
- MOVQ br_value, bitReaderShifted_value(br{{ var "id" }}) | |
- MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }}) | |
-{{ end }} | |
- | |
- {{ set "id" "0" }} | |
- {{ set "ofs" "0" }} | |
- {{ set "bufofs" "0" }} {{/* id * bufoff */}} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "1" }} | |
- {{ set "ofs" "8" }} | |
- {{ set "bufofs" "256" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "2" }} | |
- {{ set "ofs" "16" }} | |
- {{ set "bufofs" "512" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- {{ set "id" "3" }} | |
- {{ set "ofs" "24" }} | |
- {{ set "bufofs" "768" }} | |
- {{ template "decode_2_values_x86" . }} | |
- | |
- ADDQ $2, off // off += 2 | |
- | |
- TESTB DH, DH // any br[i].ofs < 4? | |
- JNZ end | |
- | |
- CMPQ off, $bufoff | |
- JL main_loop | |
-end: | |
- MOVQ 0(SP), BP | |
- | |
- MOVB off, ret+56(FP) | |
- RET | |
-#undef off | |
-#undef buffer | |
-#undef table | |
- | |
-#undef br_bits_read | |
-#undef br_value | |
-#undef br_offset | |
-#undef peek_bits | |
-#undef exhausted | |
- | |
-#undef br0 | |
-#undef br1 | |
-#undef br2 | |
-#undef br3 | |
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go | |
index 126b4d68..4f6f37cb 100644 | |
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go | |
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go | |
@@ -191,3 +191,105 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { | |
} | |
return dst, nil | |
} | |
+ | |
+// Decompress1X will decompress a 1X encoded stream. | |
+// The cap of the output buffer will be the maximum decompressed size. | |
+// The length of the supplied input must match the end of a block exactly. | |
+func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { | |
+ if len(d.dt.single) == 0 { | |
+ return nil, errors.New("no table loaded") | |
+ } | |
+ if use8BitTables && d.actualTableLog <= 8 { | |
+ return d.decompress1X8Bit(dst, src) | |
+ } | |
+ var br bitReaderShifted | |
+ err := br.init(src) | |
+ if err != nil { | |
+ return dst, err | |
+ } | |
+ maxDecodedSize := cap(dst) | |
+ dst = dst[:0] | |
+ | |
+ // Avoid bounds check by always having full sized table. | |
+ const tlSize = 1 << tableLogMax | |
+ const tlMask = tlSize - 1 | |
+ dt := d.dt.single[:tlSize] | |
+ | |
+ // Use temp table to avoid bound checks/append penalty. | |
+ bufs := d.buffer() | |
+ buf := &bufs[0] | |
+ var off uint8 | |
+ | |
+ for br.off >= 8 { | |
+ br.fillFast() | |
+ v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ br.advance(uint8(v.entry)) | |
+ buf[off+0] = uint8(v.entry >> 8) | |
+ | |
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ br.advance(uint8(v.entry)) | |
+ buf[off+1] = uint8(v.entry >> 8) | |
+ | |
+ // Refill | |
+ br.fillFast() | |
+ | |
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ br.advance(uint8(v.entry)) | |
+ buf[off+2] = uint8(v.entry >> 8) | |
+ | |
+ v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ br.advance(uint8(v.entry)) | |
+ buf[off+3] = uint8(v.entry >> 8) | |
+ | |
+ off += 4 | |
+ if off == 0 { | |
+ if len(dst)+256 > maxDecodedSize { | |
+ br.close() | |
+ d.bufs.Put(bufs) | |
+ return nil, ErrMaxDecodedSizeExceeded | |
+ } | |
+ dst = append(dst, buf[:]...) | |
+ } | |
+ } | |
+ | |
+ if len(dst)+int(off) > maxDecodedSize { | |
+ d.bufs.Put(bufs) | |
+ br.close() | |
+ return nil, ErrMaxDecodedSizeExceeded | |
+ } | |
+ dst = append(dst, buf[:off]...) | |
+ | |
+ // br < 8, so uint8 is fine | |
+ bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead | |
+ for bitsLeft > 0 { | |
+ br.fill() | |
+ if false && br.bitsRead >= 32 { | |
+ if br.off >= 4 { | |
+ v := br.in[br.off-4:] | |
+ v = v[:4] | |
+ low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) | |
+ br.value = (br.value << 32) | uint64(low) | |
+ br.bitsRead -= 32 | |
+ br.off -= 4 | |
+ } else { | |
+ for br.off > 0 { | |
+ br.value = (br.value << 8) | uint64(br.in[br.off-1]) | |
+ br.bitsRead -= 8 | |
+ br.off-- | |
+ } | |
+ } | |
+ } | |
+ if len(dst) >= maxDecodedSize { | |
+ d.bufs.Put(bufs) | |
+ br.close() | |
+ return nil, ErrMaxDecodedSizeExceeded | |
+ } | |
+ v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] | |
+ nBits := uint8(v.entry) | |
+ br.advance(nBits) | |
+ bitsLeft -= nBits | |
+ dst = append(dst, uint8(v.entry>>8)) | |
+ } | |
+ d.bufs.Put(bufs) | |
+ return dst, br.close() | |
+} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go | |
index d7cd15ba..97299d49 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go | |
@@ -63,13 +63,6 @@ func (b *bitReader) get32BitsFast(n uint8) uint32 { | |
return v | |
} | |
-func (b *bitReader) get16BitsFast(n uint8) uint16 { | |
- const regMask = 64 - 1 | |
- v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) | |
- b.bitsRead += n | |
- return v | |
-} | |
- | |
// fillFast() will make sure at least 32 bits are available. | |
// There must be at least 4 bytes available. | |
func (b *bitReader) fillFast() { | |
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go | |
index b3661828..78b3c61b 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go | |
@@ -5,8 +5,6 @@ | |
package zstd | |
-import "fmt" | |
- | |
// bitWriter will write bits. | |
// First bit will be LSB of the first byte of output. | |
type bitWriter struct { | |
@@ -73,80 +71,6 @@ func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { | |
b.nBits += bits | |
} | |
-// flush will flush all pending full bytes. | |
-// There will be at least 56 bits available for writing when this has been called. | |
-// Using flush32 is faster, but leaves less space for writing. | |
-func (b *bitWriter) flush() { | |
- v := b.nBits >> 3 | |
- switch v { | |
- case 0: | |
- case 1: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- ) | |
- case 2: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- ) | |
- case 3: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- ) | |
- case 4: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- ) | |
- case 5: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- ) | |
- case 6: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- ) | |
- case 7: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- byte(b.bitContainer>>48), | |
- ) | |
- case 8: | |
- b.out = append(b.out, | |
- byte(b.bitContainer), | |
- byte(b.bitContainer>>8), | |
- byte(b.bitContainer>>16), | |
- byte(b.bitContainer>>24), | |
- byte(b.bitContainer>>32), | |
- byte(b.bitContainer>>40), | |
- byte(b.bitContainer>>48), | |
- byte(b.bitContainer>>56), | |
- ) | |
- default: | |
- panic(fmt.Errorf("bits (%d) > 64", b.nBits)) | |
- } | |
- b.bitContainer >>= v << 3 | |
- b.nBits &= 7 | |
-} | |
- | |
// flush32 will flush out, so there are at least 32 bits available for writing. | |
func (b *bitWriter) flush32() { | |
if b.nBits < 32 { | |
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go | |
index b2bca330..7eed729b 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go | |
@@ -49,11 +49,8 @@ const ( | |
// Maximum possible block size (all Raw+Uncompressed). | |
maxBlockSize = (1 << 21) - 1 | |
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#literals_section_header | |
- maxCompressedLiteralSize = 1 << 18 | |
- maxRLELiteralSize = 1 << 20 | |
- maxMatchLen = 131074 | |
- maxSequences = 0x7f00 + 0xffff | |
+ maxMatchLen = 131074 | |
+ maxSequences = 0x7f00 + 0xffff | |
// We support slightly less than the reference decoder to be able to | |
// use ints on 32 bit archs. | |
@@ -105,7 +102,6 @@ type blockDec struct { | |
// Block is RLE, this is the size. | |
RLESize uint32 | |
- tmp [4]byte | |
Type blockType | |
@@ -368,14 +364,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err | |
} | |
if cap(b.literalBuf) < litRegenSize { | |
if b.lowMem { | |
- b.literalBuf = make([]byte, litRegenSize) | |
+ b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) | |
} else { | |
- if litRegenSize > maxCompressedLiteralSize { | |
- // Exceptional | |
- b.literalBuf = make([]byte, litRegenSize) | |
- } else { | |
- b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) | |
- } | |
+ b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) | |
} | |
} | |
literals = b.literalBuf[:litRegenSize] | |
@@ -405,14 +396,14 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err | |
// Ensure we have space to store it. | |
if cap(b.literalBuf) < litRegenSize { | |
if b.lowMem { | |
- b.literalBuf = make([]byte, 0, litRegenSize) | |
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) | |
} else { | |
- b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) | |
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) | |
} | |
} | |
var err error | |
// Use our out buffer. | |
- huff.MaxDecodedSize = maxCompressedBlockSize | |
+ huff.MaxDecodedSize = litRegenSize | |
if fourStreams { | |
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) | |
} else { | |
@@ -437,9 +428,9 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err | |
// Ensure we have space to store it. | |
if cap(b.literalBuf) < litRegenSize { | |
if b.lowMem { | |
- b.literalBuf = make([]byte, 0, litRegenSize) | |
+ b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) | |
} else { | |
- b.literalBuf = make([]byte, 0, maxCompressedBlockSize) | |
+ b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) | |
} | |
} | |
huff := hist.huffTree | |
@@ -456,7 +447,7 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err | |
return in, err | |
} | |
hist.huffTree = huff | |
- huff.MaxDecodedSize = maxCompressedBlockSize | |
+ huff.MaxDecodedSize = litRegenSize | |
// Use our out buffer. | |
if fourStreams { | |
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) | |
@@ -471,6 +462,8 @@ func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err | |
if len(literals) != litRegenSize { | |
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) | |
} | |
+ // Re-cap to get extra size. | |
+ literals = b.literalBuf[:len(literals)] | |
if debugDecoder { | |
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) | |
} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go | |
index b80191e4..4493baa7 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go | |
@@ -52,10 +52,6 @@ func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { | |
return r, nil | |
} | |
-func (b *byteBuf) remain() []byte { | |
- return *b | |
-} | |
- | |
func (b *byteBuf) readByte() (byte, error) { | |
bb := *b | |
if len(bb) < 1 { | |
diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go | |
index 2c4fca17..0e59a242 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/bytereader.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/bytereader.go | |
@@ -13,12 +13,6 @@ type byteReader struct { | |
off int | |
} | |
-// init will initialize the reader and set the input. | |
-func (b *byteReader) init(in []byte) { | |
- b.b = in | |
- b.off = 0 | |
-} | |
- | |
// advance the stream b n bytes. | |
func (b *byteReader) advance(n uint) { | |
b.off += int(n) | |
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go | |
index c65ea979..286c8f9d 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go | |
@@ -439,7 +439,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { | |
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) | |
} | |
- if len(next.b) > 0 { | |
+ if !d.o.ignoreChecksum && len(next.b) > 0 { | |
n, err := d.current.crc.Write(next.b) | |
if err == nil { | |
if n != len(next.b) { | |
@@ -451,7 +451,7 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { | |
got := d.current.crc.Sum64() | |
var tmp [4]byte | |
binary.LittleEndian.PutUint32(tmp[:], uint32(got)) | |
- if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { | |
+ if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) { | |
if debugDecoder { | |
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") | |
} | |
@@ -535,9 +535,15 @@ func (d *Decoder) nextBlockSync() (ok bool) { | |
// Update/Check CRC | |
if d.frame.HasCheckSum { | |
- d.frame.crc.Write(d.current.b) | |
+ if !d.o.ignoreChecksum { | |
+ d.frame.crc.Write(d.current.b) | |
+ } | |
if d.current.d.Last { | |
- d.current.err = d.frame.checkCRC() | |
+ if !d.o.ignoreChecksum { | |
+ d.current.err = d.frame.checkCRC() | |
+ } else { | |
+ d.current.err = d.frame.consumeCRC() | |
+ } | |
if d.current.err != nil { | |
println("CRC error:", d.current.err) | |
return false | |
@@ -631,60 +637,18 @@ func (d *Decoder) startSyncDecoder(r io.Reader) error { | |
// Create Decoder: | |
// ASYNC: | |
-// Spawn 4 go routines. | |
-// 0: Read frames and decode blocks. | |
-// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. | |
-// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. | |
-// 3: Wait for stream history, execute sequences, send stream history. | |
+// Spawn 3 go routines. | |
+// 0: Read frames and decode block literals. | |
+// 1: Decode sequences. | |
+// 2: Execute sequences, send to output. | |
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { | |
defer d.streamWg.Done() | |
br := readerWrapper{r: r} | |
- var seqPrepare = make(chan *blockDec, d.o.concurrent) | |
var seqDecode = make(chan *blockDec, d.o.concurrent) | |
var seqExecute = make(chan *blockDec, d.o.concurrent) | |
- // Async 1: Prepare blocks... | |
- go func() { | |
- var hist history | |
- var hasErr bool | |
- for block := range seqPrepare { | |
- if hasErr { | |
- if block != nil { | |
- seqDecode <- block | |
- } | |
- continue | |
- } | |
- if block.async.newHist != nil { | |
- if debugDecoder { | |
- println("Async 1: new history") | |
- } | |
- hist.reset() | |
- if block.async.newHist.dict != nil { | |
- hist.setDict(block.async.newHist.dict) | |
- } | |
- } | |
- if block.err != nil || block.Type != blockTypeCompressed { | |
- hasErr = block.err != nil | |
- seqDecode <- block | |
- continue | |
- } | |
- | |
- remain, err := block.decodeLiterals(block.data, &hist) | |
- block.err = err | |
- hasErr = block.err != nil | |
- if err == nil { | |
- block.async.literals = hist.decoders.literals | |
- block.async.seqData = remain | |
- } else if debugDecoder { | |
- println("decodeLiterals error:", err) | |
- } | |
- seqDecode <- block | |
- } | |
- close(seqDecode) | |
- }() | |
- | |
- // Async 2: Decode sequences... | |
+ // Async 1: Decode sequences... | |
go func() { | |
var hist history | |
var hasErr bool | |
@@ -698,7 +662,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch | |
} | |
if block.async.newHist != nil { | |
if debugDecoder { | |
- println("Async 2: new history, recent:", block.async.newHist.recentOffsets) | |
+ println("Async 1: new history, recent:", block.async.newHist.recentOffsets) | |
} | |
hist.decoders = block.async.newHist.decoders | |
hist.recentOffsets = block.async.newHist.recentOffsets | |
@@ -752,7 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch | |
} | |
if block.async.newHist != nil { | |
if debugDecoder { | |
- println("Async 3: new history") | |
+ println("Async 2: new history") | |
} | |
hist.windowSize = block.async.newHist.windowSize | |
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer | |
@@ -839,6 +803,33 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch | |
decodeStream: | |
for { | |
+ var hist history | |
+ var hasErr bool | |
+ | |
+ decodeBlock := func(block *blockDec) { | |
+ if hasErr { | |
+ if block != nil { | |
+ seqDecode <- block | |
+ } | |
+ return | |
+ } | |
+ if block.err != nil || block.Type != blockTypeCompressed { | |
+ hasErr = block.err != nil | |
+ seqDecode <- block | |
+ return | |
+ } | |
+ | |
+ remain, err := block.decodeLiterals(block.data, &hist) | |
+ block.err = err | |
+ hasErr = block.err != nil | |
+ if err == nil { | |
+ block.async.literals = hist.decoders.literals | |
+ block.async.seqData = remain | |
+ } else if debugDecoder { | |
+ println("decodeLiterals error:", err) | |
+ } | |
+ seqDecode <- block | |
+ } | |
frame := d.frame | |
if debugDecoder { | |
println("New frame...") | |
@@ -865,7 +856,7 @@ decodeStream: | |
case <-ctx.Done(): | |
case dec := <-d.decoders: | |
dec.sendErr(err) | |
- seqPrepare <- dec | |
+ decodeBlock(dec) | |
} | |
break decodeStream | |
} | |
@@ -885,6 +876,10 @@ decodeStream: | |
if debugDecoder { | |
println("Alloc History:", h.allocFrameBuffer) | |
} | |
+ hist.reset() | |
+ if h.dict != nil { | |
+ hist.setDict(h.dict) | |
+ } | |
dec.async.newHist = &h | |
dec.async.fcs = frame.FrameContentSize | |
historySent = true | |
@@ -911,7 +906,7 @@ decodeStream: | |
} | |
err = dec.err | |
last := dec.Last | |
- seqPrepare <- dec | |
+ decodeBlock(dec) | |
if err != nil { | |
break decodeStream | |
} | |
@@ -920,7 +915,7 @@ decodeStream: | |
} | |
} | |
} | |
- close(seqPrepare) | |
+ close(seqDecode) | |
wg.Wait() | |
d.frame.history.b = frameHistCache | |
} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go | |
index fc52ebc4..c70e6fa0 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go | |
@@ -19,6 +19,7 @@ type decoderOptions struct { | |
maxDecodedSize uint64 | |
maxWindowSize uint64 | |
dicts []dict | |
+ ignoreChecksum bool | |
} | |
func (o *decoderOptions) setDefault() { | |
@@ -112,3 +113,11 @@ func WithDecoderMaxWindow(size uint64) DOption { | |
return nil | |
} | |
} | |
+ | |
+// IgnoreChecksum allows to forcibly ignore checksum checking. | |
+func IgnoreChecksum(b bool) DOption { | |
+ return func(o *decoderOptions) error { | |
+ o.ignoreChecksum = b | |
+ return nil | |
+ } | |
+} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go | |
index 602c05ee..c769f694 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go | |
@@ -156,8 +156,8 @@ encodeLoop: | |
panic("offset0 was 0") | |
} | |
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) | |
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
candidateL := e.longTable[nextHashL] | |
candidateS := e.table[nextHashS] | |
@@ -518,8 +518,8 @@ encodeLoop: | |
} | |
// Store this, since we have it. | |
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) | |
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
// We have at least 4 byte match. | |
// No need to check backwards. We come straight from a match | |
@@ -674,8 +674,8 @@ encodeLoop: | |
panic("offset0 was 0") | |
} | |
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) | |
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
candidateL := e.longTable[nextHashL] | |
candidateS := e.table[nextHashS] | |
@@ -1047,8 +1047,8 @@ encodeLoop: | |
} | |
// Store this, since we have it. | |
- nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) | |
+ nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) | |
// We have at least 4 byte match. | |
// No need to check backwards. We come straight from a match | |
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go | |
index d6b31042..7ff0c64f 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go | |
@@ -127,8 +127,8 @@ encodeLoop: | |
panic("offset0 was 0") | |
} | |
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) | |
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
candidateL := e.longTable[nextHashL] | |
candidateS := e.table[nextHashS] | |
@@ -439,8 +439,8 @@ encodeLoop: | |
var t int32 | |
for { | |
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) | |
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
candidateL := e.longTable[nextHashL] | |
candidateS := e.table[nextHashS] | |
@@ -785,8 +785,8 @@ encodeLoop: | |
panic("offset0 was 0") | |
} | |
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) | |
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
candidateL := e.longTable[nextHashL] | |
candidateS := e.table[nextHashS] | |
@@ -969,7 +969,7 @@ encodeLoop: | |
te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} | |
te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} | |
longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) | |
- longHash2 := hashLen(cv0, dFastLongTableBits, dFastLongLen) | |
+ longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) | |
e.longTable[longHash1] = te0 | |
e.longTable[longHash2] = te1 | |
e.markLongShardDirty(longHash1) | |
@@ -1002,8 +1002,8 @@ encodeLoop: | |
} | |
// Store this, since we have it. | |
- nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) | |
+ nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) | |
// We have at least 4 byte match. | |
// No need to check backwards. We come straight from a match | |
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go | |
index dcc987a7..e6b1d01c 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go | |
@@ -551,7 +551,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { | |
} | |
// If we can do everything in one block, prefer that. | |
- if len(src) <= maxCompressedBlockSize { | |
+ if len(src) <= e.o.blockSize { | |
enc.Reset(e.o.dict, true) | |
// Slightly faster with no history and everything in one block. | |
if e.o.crc { | |
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go | |
index 509d5cec..fa0a633f 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go | |
@@ -253,10 +253,11 @@ func (d *frameDec) reset(br byteBuffer) error { | |
return ErrWindowSizeTooSmall | |
} | |
d.history.windowSize = int(d.WindowSize) | |
- if d.o.lowMem && d.history.windowSize < maxBlockSize { | |
+ if !d.o.lowMem || d.history.windowSize < maxBlockSize { | |
+ // Alloc 2x window size if not low-mem, or very small window size. | |
d.history.allocFrameBuffer = d.history.windowSize * 2 | |
- // TODO: Maybe use FrameContent size | |
} else { | |
+ // Alloc with one additional block | |
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize | |
} | |
@@ -290,13 +291,6 @@ func (d *frameDec) checkCRC() error { | |
if !d.HasCheckSum { | |
return nil | |
} | |
- var tmp [4]byte | |
- got := d.crc.Sum64() | |
- // Flip to match file order. | |
- tmp[0] = byte(got >> 0) | |
- tmp[1] = byte(got >> 8) | |
- tmp[2] = byte(got >> 16) | |
- tmp[3] = byte(got >> 24) | |
// We can overwrite upper tmp now | |
want, err := d.rawInput.readSmall(4) | |
@@ -305,7 +299,19 @@ func (d *frameDec) checkCRC() error { | |
return err | |
} | |
- if !bytes.Equal(tmp[:], want) && !ignoreCRC { | |
+ if d.o.ignoreChecksum { | |
+ return nil | |
+ } | |
+ | |
+ var tmp [4]byte | |
+ got := d.crc.Sum64() | |
+ // Flip to match file order. | |
+ tmp[0] = byte(got >> 0) | |
+ tmp[1] = byte(got >> 8) | |
+ tmp[2] = byte(got >> 16) | |
+ tmp[3] = byte(got >> 24) | |
+ | |
+ if !bytes.Equal(tmp[:], want) { | |
if debugDecoder { | |
println("CRC Check Failed:", tmp[:], "!=", want) | |
} | |
@@ -317,6 +323,19 @@ func (d *frameDec) checkCRC() error { | |
return nil | |
} | |
+// consumeCRC reads the checksum data if the frame has one. | |
+func (d *frameDec) consumeCRC() error { | |
+ if d.HasCheckSum { | |
+ _, err := d.rawInput.readSmall(4) | |
+ if err != nil { | |
+ println("CRC missing?", err) | |
+ return err | |
+ } | |
+ } | |
+ | |
+ return nil | |
+} | |
+ | |
// runDecoder will create a sync decoder that will decode a block of data. | |
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { | |
saved := d.history.b | |
@@ -373,13 +392,17 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { | |
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { | |
err = ErrFrameSizeMismatch | |
} else if d.HasCheckSum { | |
- var n int | |
- n, err = d.crc.Write(dst[crcStart:]) | |
- if err == nil { | |
- if n != len(dst)-crcStart { | |
- err = io.ErrShortWrite | |
- } else { | |
- err = d.checkCRC() | |
+ if d.o.ignoreChecksum { | |
+ err = d.consumeCRC() | |
+ } else { | |
+ var n int | |
+ n, err = d.crc.Write(dst[crcStart:]) | |
+ if err == nil { | |
+ if n != len(dst)-crcStart { | |
+ err = io.ErrShortWrite | |
+ } else { | |
+ err = d.checkCRC() | |
+ } | |
} | |
} | |
} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go | |
index fde4e6b6..23333b96 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go | |
@@ -229,18 +229,10 @@ func (d decSymbol) newState() uint16 { | |
return uint16(d >> 16) | |
} | |
-func (d decSymbol) baseline() uint32 { | |
- return uint32(d >> 32) | |
-} | |
- | |
func (d decSymbol) baselineInt() int { | |
return int(d >> 32) | |
} | |
-func (d *decSymbol) set(nbits, addBits uint8, newState uint16, baseline uint32) { | |
- *d = decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32) | |
-} | |
- | |
func (d *decSymbol) setNBits(nBits uint8) { | |
const mask = 0xffffffffffffff00 | |
*d = (*d & mask) | decSymbol(nBits) | |
@@ -256,11 +248,6 @@ func (d *decSymbol) setNewState(state uint16) { | |
*d = (*d & mask) | decSymbol(state)<<16 | |
} | |
-func (d *decSymbol) setBaseline(baseline uint32) { | |
- const mask = 0xffffffff | |
- *d = (*d & mask) | decSymbol(baseline)<<32 | |
-} | |
- | |
func (d *decSymbol) setExt(addBits uint8, baseline uint32) { | |
const mask = 0xffff00ff | |
*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32) | |
@@ -377,34 +364,7 @@ func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { | |
s.state = dt[br.getBits(tableLog)] | |
} | |
-// next returns the current symbol and sets the next state. | |
-// At least tablelog bits must be available in the bit reader. | |
-func (s *fseState) next(br *bitReader) { | |
- lowBits := uint16(br.getBits(s.state.nbBits())) | |
- s.state = s.dt[s.state.newState()+lowBits] | |
-} | |
- | |
-// finished returns true if all bits have been read from the bitstream | |
-// and the next state would require reading bits from the input. | |
-func (s *fseState) finished(br *bitReader) bool { | |
- return br.finished() && s.state.nbBits() > 0 | |
-} | |
- | |
-// final returns the current state symbol without decoding the next. | |
-func (s *fseState) final() (int, uint8) { | |
- return s.state.baselineInt(), s.state.addBits() | |
-} | |
- | |
// final returns the current state symbol without decoding the next. | |
func (s decSymbol) final() (int, uint8) { | |
return s.baselineInt(), s.addBits() | |
} | |
- | |
-// nextFast returns the next symbol and sets the next state. | |
-// This can only be used if no symbols are 0 bits. | |
-// At least tablelog bits must be available in the bit reader. | |
-func (s *fseState) nextFast(br *bitReader) (uint32, uint8) { | |
- lowBits := br.get16BitsFast(s.state.nbBits()) | |
- s.state = s.dt[s.state.newState()+lowBits] | |
- return s.state.baseline(), s.state.addBits() | |
-} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go | |
index 5442061b..ab26326a 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go | |
@@ -76,21 +76,6 @@ func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { | |
s.clearCount = maxCount != 0 | |
} | |
-// prepare will prepare and allocate scratch tables used for both compression and decompression. | |
-func (s *fseEncoder) prepare() (*fseEncoder, error) { | |
- if s == nil { | |
- s = &fseEncoder{} | |
- } | |
- s.useRLE = false | |
- if s.clearCount && s.maxCount == 0 { | |
- for i := range s.count { | |
- s.count[i] = 0 | |
- } | |
- s.clearCount = false | |
- } | |
- return s, nil | |
-} | |
- | |
// allocCtable will allocate tables needed for compression. | |
// If existing tables a re big enough, they are simply re-used. | |
func (s *fseEncoder) allocCtable() { | |
@@ -709,14 +694,6 @@ func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { | |
c.state = c.stateTable[lu] | |
} | |
-// encode the output symbol provided and write it to the bitstream. | |
-func (c *cState) encode(symbolTT symbolTransform) { | |
- nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 | |
- dstState := int32(c.state>>(nbBitsOut&15)) + int32(symbolTT.deltaFindState) | |
- c.bw.addBits16NC(c.state, uint8(nbBitsOut)) | |
- c.state = c.stateTable[dstState] | |
-} | |
- | |
// flush will write the tablelog to the output and flush the remaining full bytes. | |
func (c *cState) flush(tableLog uint8) { | |
c.bw.flush32() | |
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go | |
deleted file mode 100644 | |
index 7f2210e0..00000000 | |
--- a/vendor/github.com/klauspost/compress/zstd/fuzz.go | |
+++ /dev/null | |
@@ -1,11 +0,0 @@ | |
-//go:build ignorecrc | |
-// +build ignorecrc | |
- | |
-// Copyright 2019+ Klaus Post. All rights reserved. | |
-// License information can be found in the LICENSE file. | |
-// Based on work by Yann Collet, released under BSD License. | |
- | |
-package zstd | |
- | |
-// ignoreCRC can be used for fuzz testing to ignore CRC values... | |
-const ignoreCRC = true | |
diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go | |
deleted file mode 100644 | |
index 6811c68a..00000000 | |
--- a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go | |
+++ /dev/null | |
@@ -1,11 +0,0 @@ | |
-//go:build !ignorecrc | |
-// +build !ignorecrc | |
- | |
-// Copyright 2019+ Klaus Post. All rights reserved. | |
-// License information can be found in the LICENSE file. | |
-// Based on work by Yann Collet, released under BSD License. | |
- | |
-package zstd | |
- | |
-// ignoreCRC can be used for fuzz testing to ignore CRC values... | |
-const ignoreCRC = false | |
diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go | |
index cf33f29a..5d73c21e 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/hash.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/hash.go | |
@@ -33,9 +33,3 @@ func hashLen(u uint64, length, mls uint8) uint32 { | |
return (uint32(u) * prime4bytes) >> (32 - length) | |
} | |
} | |
- | |
-// hash3 returns the hash of the lower 3 bytes of u to fit in a hash table with h bits. | |
-// Preferably h should be a constant and should always be <32. | |
-func hash3(u uint32, h uint8) uint32 { | |
- return ((u << (32 - 24)) * prime3bytes) >> ((32 - h) & 31) | |
-} | |
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go | |
index e80139dd..df044720 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go | |
@@ -188,6 +188,7 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { | |
} | |
} | |
} | |
+ | |
// Add final literals | |
copy(out[t:], s.literals) | |
if debugDecoder { | |
@@ -203,12 +204,11 @@ func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { | |
// decode sequences from the stream with the provided history. | |
func (s *sequenceDecs) decodeSync(hist []byte) error { | |
- if true { | |
- supported, err := s.decodeSyncSimple(hist) | |
- if supported { | |
- return err | |
- } | |
+ supported, err := s.decodeSyncSimple(hist) | |
+ if supported { | |
+ return err | |
} | |
+ | |
br := s.br | |
seqs := s.nSeqs | |
startSize := len(s.out) | |
@@ -396,6 +396,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { | |
ofState = ofTable[ofState.newState()&maxTableMask] | |
} else { | |
bits := br.get32BitsFast(nBits) | |
+ | |
lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) | |
llState = llTable[(llState.newState()+lowBits)&maxTableMask] | |
@@ -418,16 +419,6 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { | |
return br.close() | |
} | |
-// update states, at least 27 bits must be available. | |
-func (s *sequenceDecs) update(br *bitReader) { | |
- // Max 8 bits | |
- s.litLengths.state.next(br) | |
- // Max 9 bits | |
- s.matchLengths.state.next(br) | |
- // Max 8 bits | |
- s.offsets.state.next(br) | |
-} | |
- | |
var bitMask [16]uint16 | |
func init() { | |
@@ -436,87 +427,6 @@ func init() { | |
} | |
} | |
-// update states, at least 27 bits must be available. | |
-func (s *sequenceDecs) updateAlt(br *bitReader) { | |
- // Update all 3 states at once. Approx 20% faster. | |
- a, b, c := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state | |
- | |
- nBits := a.nbBits() + b.nbBits() + c.nbBits() | |
- if nBits == 0 { | |
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()] | |
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()] | |
- s.offsets.state.state = s.offsets.state.dt[c.newState()] | |
- return | |
- } | |
- bits := br.get32BitsFast(nBits) | |
- lowBits := uint16(bits >> ((c.nbBits() + b.nbBits()) & 31)) | |
- s.litLengths.state.state = s.litLengths.state.dt[a.newState()+lowBits] | |
- | |
- lowBits = uint16(bits >> (c.nbBits() & 31)) | |
- lowBits &= bitMask[b.nbBits()&15] | |
- s.matchLengths.state.state = s.matchLengths.state.dt[b.newState()+lowBits] | |
- | |
- lowBits = uint16(bits) & bitMask[c.nbBits()&15] | |
- s.offsets.state.state = s.offsets.state.dt[c.newState()+lowBits] | |
-} | |
- | |
-// nextFast will return new states when there are at least 4 unused bytes left on the stream when done. | |
-func (s *sequenceDecs) nextFast(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { | |
- // Final will not read from stream. | |
- ll, llB := llState.final() | |
- ml, mlB := mlState.final() | |
- mo, moB := ofState.final() | |
- | |
- // extra bits are stored in reverse order. | |
- br.fillFast() | |
- mo += br.getBits(moB) | |
- if s.maxBits > 32 { | |
- br.fillFast() | |
- } | |
- ml += br.getBits(mlB) | |
- ll += br.getBits(llB) | |
- | |
- if moB > 1 { | |
- s.prevOffset[2] = s.prevOffset[1] | |
- s.prevOffset[1] = s.prevOffset[0] | |
- s.prevOffset[0] = mo | |
- return | |
- } | |
- // mo = s.adjustOffset(mo, ll, moB) | |
- // Inlined for rather big speedup | |
- if ll == 0 { | |
- // There is an exception though, when current sequence's literals_length = 0. | |
- // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, | |
- // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. | |
- mo++ | |
- } | |
- | |
- if mo == 0 { | |
- mo = s.prevOffset[0] | |
- return | |
- } | |
- var temp int | |
- if mo == 3 { | |
- temp = s.prevOffset[0] - 1 | |
- } else { | |
- temp = s.prevOffset[mo] | |
- } | |
- | |
- if temp == 0 { | |
- // 0 is not valid; input is corrupted; force offset to 1 | |
- println("temp was 0") | |
- temp = 1 | |
- } | |
- | |
- if mo != 1 { | |
- s.prevOffset[2] = s.prevOffset[1] | |
- } | |
- s.prevOffset[1] = s.prevOffset[0] | |
- s.prevOffset[0] = temp | |
- mo = temp | |
- return | |
-} | |
- | |
func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) { | |
// Final will not read from stream. | |
ll, llB := llState.final() | |
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | |
index 4676b09c..847b322a 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go | |
@@ -62,6 +62,10 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { | |
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { | |
useSafe = true | |
} | |
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { | |
+ useSafe = true | |
+ } | |
+ | |
br := s.br | |
maxBlockSize := maxCompressedBlockSize | |
@@ -301,6 +305,10 @@ type executeAsmContext struct { | |
//go:noescape | |
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool | |
+// Same as above, but with safe memcopies | |
+//go:noescape | |
+func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool | |
+ | |
// executeSimple handles cases when dictionary is not used. | |
func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { | |
// Ensure we have enough output size... | |
@@ -327,8 +335,12 @@ func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { | |
literals: s.literals, | |
windowSize: s.windowSize, | |
} | |
- | |
- ok := sequenceDecs_executeSimple_amd64(&ctx) | |
+ var ok bool | |
+ if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { | |
+ ok = sequenceDecs_executeSimple_safe_amd64(&ctx) | |
+ } else { | |
+ ok = sequenceDecs_executeSimple_amd64(&ctx) | |
+ } | |
if !ok { | |
return fmt.Errorf("match offset (%d) bigger than current history (%d)", | |
seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) | |
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | |
index 01cc23fa..212c6cac 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | |
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s | |
@@ -705,60 +705,55 @@ sequenceDecs_decode_bmi2_fill_2_end: | |
MOVQ CX, (R9) | |
// Fill bitreader for state updates | |
- MOVQ R13, (SP) | |
- MOVQ $0x00000808, CX | |
- BEXTRQ CX, R8, R13 | |
- MOVQ ctx+16(FP), CX | |
- CMPQ 96(CX), $0x00 | |
- JZ sequenceDecs_decode_bmi2_skip_update | |
- | |
- // Update Literal Length State | |
- MOVBQZX SI, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, SI, SI | |
+ MOVQ R13, (SP) | |
+ MOVQ $0x00000808, CX | |
+ BEXTRQ CX, R8, R13 | |
+ MOVQ ctx+16(FP), CX | |
+ CMPQ 96(CX), $0x00 | |
+ JZ sequenceDecs_decode_bmi2_skip_update | |
+ LEAQ (SI)(DI*1), R14 | |
+ ADDQ R8, R14 | |
+ MOVBQZX R14, R14 | |
LEAQ (DX)(R14*1), CX | |
MOVQ AX, R15 | |
MOVQ CX, DX | |
ROLQ CL, R15 | |
BZHIQ R14, R15, R15 | |
- ADDQ R15, SI | |
- // Load ctx.llTable | |
+ // Update Offset State | |
+ BZHIQ R8, R15, CX | |
+ SHRXQ R8, R15, R15 | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, R8, R8 | |
+ ADDQ CX, R8 | |
+ | |
+ // Load ctx.ofTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ (CX), CX | |
- MOVQ (CX)(SI*8), SI | |
+ MOVQ 48(CX), CX | |
+ MOVQ (CX)(R8*8), R8 | |
// Update Match Length State | |
- MOVBQZX DI, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, DI, DI | |
- LEAQ (DX)(R14*1), CX | |
- MOVQ AX, R15 | |
- MOVQ CX, DX | |
- ROLQ CL, R15 | |
- BZHIQ R14, R15, R15 | |
- ADDQ R15, DI | |
+ BZHIQ DI, R15, CX | |
+ SHRXQ DI, R15, R15 | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, DI, DI | |
+ ADDQ CX, DI | |
// Load ctx.mlTable | |
MOVQ ctx+16(FP), CX | |
MOVQ 24(CX), CX | |
MOVQ (CX)(DI*8), DI | |
- // Update Offset State | |
- MOVBQZX R8, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, R8, R8 | |
- LEAQ (DX)(R14*1), CX | |
- MOVQ AX, R15 | |
- MOVQ CX, DX | |
- ROLQ CL, R15 | |
- BZHIQ R14, R15, R15 | |
- ADDQ R15, R8 | |
+ // Update Literal Length State | |
+ BZHIQ SI, R15, CX | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, SI, SI | |
+ ADDQ CX, SI | |
- // Load ctx.ofTable | |
+ // Load ctx.llTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ 48(CX), CX | |
- MOVQ (CX)(R8*8), R8 | |
+ MOVQ (CX), CX | |
+ MOVQ (CX)(SI*8), SI | |
sequenceDecs_decode_bmi2_skip_update: | |
// Adjust offset | |
@@ -965,60 +960,55 @@ sequenceDecs_decode_56_bmi2_fill_end: | |
MOVQ CX, (R9) | |
// Fill bitreader for state updates | |
- MOVQ R13, (SP) | |
- MOVQ $0x00000808, CX | |
- BEXTRQ CX, R8, R13 | |
- MOVQ ctx+16(FP), CX | |
- CMPQ 96(CX), $0x00 | |
- JZ sequenceDecs_decode_56_bmi2_skip_update | |
- | |
- // Update Literal Length State | |
- MOVBQZX SI, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, SI, SI | |
+ MOVQ R13, (SP) | |
+ MOVQ $0x00000808, CX | |
+ BEXTRQ CX, R8, R13 | |
+ MOVQ ctx+16(FP), CX | |
+ CMPQ 96(CX), $0x00 | |
+ JZ sequenceDecs_decode_56_bmi2_skip_update | |
+ LEAQ (SI)(DI*1), R14 | |
+ ADDQ R8, R14 | |
+ MOVBQZX R14, R14 | |
LEAQ (DX)(R14*1), CX | |
MOVQ AX, R15 | |
MOVQ CX, DX | |
ROLQ CL, R15 | |
BZHIQ R14, R15, R15 | |
- ADDQ R15, SI | |
- // Load ctx.llTable | |
+ // Update Offset State | |
+ BZHIQ R8, R15, CX | |
+ SHRXQ R8, R15, R15 | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, R8, R8 | |
+ ADDQ CX, R8 | |
+ | |
+ // Load ctx.ofTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ (CX), CX | |
- MOVQ (CX)(SI*8), SI | |
+ MOVQ 48(CX), CX | |
+ MOVQ (CX)(R8*8), R8 | |
// Update Match Length State | |
- MOVBQZX DI, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, DI, DI | |
- LEAQ (DX)(R14*1), CX | |
- MOVQ AX, R15 | |
- MOVQ CX, DX | |
- ROLQ CL, R15 | |
- BZHIQ R14, R15, R15 | |
- ADDQ R15, DI | |
+ BZHIQ DI, R15, CX | |
+ SHRXQ DI, R15, R15 | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, DI, DI | |
+ ADDQ CX, DI | |
// Load ctx.mlTable | |
MOVQ ctx+16(FP), CX | |
MOVQ 24(CX), CX | |
MOVQ (CX)(DI*8), DI | |
- // Update Offset State | |
- MOVBQZX R8, R14 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, R8, R8 | |
- LEAQ (DX)(R14*1), CX | |
- MOVQ AX, R15 | |
- MOVQ CX, DX | |
- ROLQ CL, R15 | |
- BZHIQ R14, R15, R15 | |
- ADDQ R15, R8 | |
+ // Update Literal Length State | |
+ BZHIQ SI, R15, CX | |
+ MOVQ $0x00001010, R14 | |
+ BEXTRQ R14, SI, SI | |
+ ADDQ CX, SI | |
- // Load ctx.ofTable | |
+ // Load ctx.llTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ 48(CX), CX | |
- MOVQ (CX)(R8*8), R8 | |
+ MOVQ (CX), CX | |
+ MOVQ (CX)(SI*8), SI | |
sequenceDecs_decode_56_bmi2_skip_update: | |
// Adjust offset | |
@@ -1162,6 +1152,228 @@ TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 | |
// outBase += outPosition | |
ADDQ DI, BX | |
+main_loop: | |
+ MOVQ (AX), R11 | |
+ MOVQ 16(AX), R12 | |
+ MOVQ 8(AX), R13 | |
+ | |
+ // Copy literals | |
+ TESTQ R11, R11 | |
+ JZ check_offset | |
+ XORQ R14, R14 | |
+ | |
+copy_1: | |
+ MOVUPS (SI)(R14*1), X0 | |
+ MOVUPS X0, (BX)(R14*1) | |
+ ADDQ $0x10, R14 | |
+ CMPQ R14, R11 | |
+ JB copy_1 | |
+ ADDQ R11, SI | |
+ ADDQ R11, BX | |
+ ADDQ R11, DI | |
+ | |
+ // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) | |
+check_offset: | |
+ LEAQ (DI)(R10*1), R11 | |
+ CMPQ R12, R11 | |
+ JG error_match_off_too_big | |
+ CMPQ R12, R8 | |
+ JG error_match_off_too_big | |
+ | |
+ // Copy match from history | |
+ MOVQ R12, R11 | |
+ SUBQ DI, R11 | |
+ JLS copy_match | |
+ MOVQ R9, R14 | |
+ SUBQ R11, R14 | |
+ CMPQ R13, R11 | |
+ JGE copy_all_from_history | |
+ XORQ R11, R11 | |
+ TESTQ $0x00000001, R13 | |
+ JZ copy_4_word | |
+ MOVB (R14)(R11*1), R12 | |
+ MOVB R12, (BX)(R11*1) | |
+ ADDQ $0x01, R11 | |
+ | |
+copy_4_word: | |
+ TESTQ $0x00000002, R13 | |
+ JZ copy_4_dword | |
+ MOVW (R14)(R11*1), R12 | |
+ MOVW R12, (BX)(R11*1) | |
+ ADDQ $0x02, R11 | |
+ | |
+copy_4_dword: | |
+ TESTQ $0x00000004, R13 | |
+ JZ copy_4_qword | |
+ MOVL (R14)(R11*1), R12 | |
+ MOVL R12, (BX)(R11*1) | |
+ ADDQ $0x04, R11 | |
+ | |
+copy_4_qword: | |
+ TESTQ $0x00000008, R13 | |
+ JZ copy_4_test | |
+ MOVQ (R14)(R11*1), R12 | |
+ MOVQ R12, (BX)(R11*1) | |
+ ADDQ $0x08, R11 | |
+ JMP copy_4_test | |
+ | |
+copy_4: | |
+ MOVUPS (R14)(R11*1), X0 | |
+ MOVUPS X0, (BX)(R11*1) | |
+ ADDQ $0x10, R11 | |
+ | |
+copy_4_test: | |
+ CMPQ R11, R13 | |
+ JB copy_4 | |
+ ADDQ R13, DI | |
+ ADDQ R13, BX | |
+ ADDQ $0x18, AX | |
+ INCQ DX | |
+ CMPQ DX, CX | |
+ JB main_loop | |
+ JMP loop_finished | |
+ | |
+copy_all_from_history: | |
+ XORQ R15, R15 | |
+ TESTQ $0x00000001, R11 | |
+ JZ copy_5_word | |
+ MOVB (R14)(R15*1), BP | |
+ MOVB BP, (BX)(R15*1) | |
+ ADDQ $0x01, R15 | |
+ | |
+copy_5_word: | |
+ TESTQ $0x00000002, R11 | |
+ JZ copy_5_dword | |
+ MOVW (R14)(R15*1), BP | |
+ MOVW BP, (BX)(R15*1) | |
+ ADDQ $0x02, R15 | |
+ | |
+copy_5_dword: | |
+ TESTQ $0x00000004, R11 | |
+ JZ copy_5_qword | |
+ MOVL (R14)(R15*1), BP | |
+ MOVL BP, (BX)(R15*1) | |
+ ADDQ $0x04, R15 | |
+ | |
+copy_5_qword: | |
+ TESTQ $0x00000008, R11 | |
+ JZ copy_5_test | |
+ MOVQ (R14)(R15*1), BP | |
+ MOVQ BP, (BX)(R15*1) | |
+ ADDQ $0x08, R15 | |
+ JMP copy_5_test | |
+ | |
+copy_5: | |
+ MOVUPS (R14)(R15*1), X0 | |
+ MOVUPS X0, (BX)(R15*1) | |
+ ADDQ $0x10, R15 | |
+ | |
+copy_5_test: | |
+ CMPQ R15, R11 | |
+ JB copy_5 | |
+ ADDQ R11, BX | |
+ ADDQ R11, DI | |
+ SUBQ R11, R13 | |
+ | |
+ // Copy match from the current buffer | |
+copy_match: | |
+ TESTQ R13, R13 | |
+ JZ handle_loop | |
+ MOVQ BX, R11 | |
+ SUBQ R12, R11 | |
+ | |
+ // ml <= mo | |
+ CMPQ R13, R12 | |
+ JA copy_overlapping_match | |
+ | |
+ // Copy non-overlapping match | |
+ ADDQ R13, DI | |
+ MOVQ BX, R12 | |
+ ADDQ R13, BX | |
+ | |
+copy_2: | |
+ MOVUPS (R11), X0 | |
+ MOVUPS X0, (R12) | |
+ ADDQ $0x10, R11 | |
+ ADDQ $0x10, R12 | |
+ SUBQ $0x10, R13 | |
+ JHI copy_2 | |
+ JMP handle_loop | |
+ | |
+ // Copy overlapping match | |
+copy_overlapping_match: | |
+ ADDQ R13, DI | |
+ | |
+copy_slow_3: | |
+ MOVB (R11), R12 | |
+ MOVB R12, (BX) | |
+ INCQ R11 | |
+ INCQ BX | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
+ | |
+handle_loop: | |
+ ADDQ $0x18, AX | |
+ INCQ DX | |
+ CMPQ DX, CX | |
+ JB main_loop | |
+ | |
+loop_finished: | |
+ // Return value | |
+ MOVB $0x01, ret+8(FP) | |
+ | |
+ // Update the context | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ DX, 24(AX) | |
+ MOVQ DI, 104(AX) | |
+ MOVQ 80(AX), CX | |
+ SUBQ CX, SI | |
+ MOVQ SI, 112(AX) | |
+ RET | |
+ | |
+error_match_off_too_big: | |
+ // Return value | |
+ MOVB $0x00, ret+8(FP) | |
+ | |
+ // Update the context | |
+ MOVQ ctx+0(FP), AX | |
+ MOVQ DX, 24(AX) | |
+ MOVQ DI, 104(AX) | |
+ MOVQ 80(AX), CX | |
+ SUBQ CX, SI | |
+ MOVQ SI, 112(AX) | |
+ RET | |
+ | |
+empty_seqs: | |
+ // Return value | |
+ MOVB $0x01, ret+8(FP) | |
+ RET | |
+ | |
+// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool | |
+// Requires: SSE | |
+TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 | |
+ MOVQ ctx+0(FP), R10 | |
+ MOVQ 8(R10), CX | |
+ TESTQ CX, CX | |
+ JZ empty_seqs | |
+ MOVQ (R10), AX | |
+ MOVQ 24(R10), DX | |
+ MOVQ 32(R10), BX | |
+ MOVQ 80(R10), SI | |
+ MOVQ 104(R10), DI | |
+ MOVQ 120(R10), R8 | |
+ MOVQ 56(R10), R9 | |
+ MOVQ 64(R10), R10 | |
+ ADDQ R10, R9 | |
+ | |
+ // seqsBase += 24 * seqIndex | |
+ LEAQ (DX)(DX*2), R11 | |
+ SHLQ $0x03, R11 | |
+ ADDQ R11, AX | |
+ | |
+ // outBase += outPosition | |
+ ADDQ DI, BX | |
+ | |
main_loop: | |
MOVQ (AX), R11 | |
MOVQ 16(AX), R12 | |
@@ -1326,30 +1538,58 @@ copy_match: | |
JA copy_overlapping_match | |
// Copy non-overlapping match | |
- XORQ R12, R12 | |
+ ADDQ R13, DI | |
+ XORQ R12, R12 | |
+ TESTQ $0x00000001, R13 | |
+ JZ copy_2_word | |
+ MOVB (R11)(R12*1), R14 | |
+ MOVB R14, (BX)(R12*1) | |
+ ADDQ $0x01, R12 | |
+ | |
+copy_2_word: | |
+ TESTQ $0x00000002, R13 | |
+ JZ copy_2_dword | |
+ MOVW (R11)(R12*1), R14 | |
+ MOVW R14, (BX)(R12*1) | |
+ ADDQ $0x02, R12 | |
+ | |
+copy_2_dword: | |
+ TESTQ $0x00000004, R13 | |
+ JZ copy_2_qword | |
+ MOVL (R11)(R12*1), R14 | |
+ MOVL R14, (BX)(R12*1) | |
+ ADDQ $0x04, R12 | |
+ | |
+copy_2_qword: | |
+ TESTQ $0x00000008, R13 | |
+ JZ copy_2_test | |
+ MOVQ (R11)(R12*1), R14 | |
+ MOVQ R14, (BX)(R12*1) | |
+ ADDQ $0x08, R12 | |
+ JMP copy_2_test | |
copy_2: | |
MOVUPS (R11)(R12*1), X0 | |
MOVUPS X0, (BX)(R12*1) | |
ADDQ $0x10, R12 | |
- CMPQ R12, R13 | |
- JB copy_2 | |
- ADDQ R13, BX | |
- ADDQ R13, DI | |
- JMP handle_loop | |
+ | |
+copy_2_test: | |
+ CMPQ R12, R13 | |
+ JB copy_2 | |
+ ADDQ R13, BX | |
+ JMP handle_loop | |
// Copy overlapping match | |
copy_overlapping_match: | |
- XORQ R12, R12 | |
+ ADDQ R13, DI | |
copy_slow_3: | |
- MOVB (R11)(R12*1), R14 | |
- MOVB R14, (BX)(R12*1) | |
- INCQ R12 | |
- CMPQ R12, R13 | |
- JB copy_slow_3 | |
- ADDQ R13, BX | |
- ADDQ R13, DI | |
+ MOVB (R11), R12 | |
+ MOVB R12, (BX) | |
+ INCQ R11 | |
+ INCQ BX | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
handle_loop: | |
ADDQ $0x18, AX | |
@@ -1673,45 +1913,16 @@ sequenceDecs_decodeSync_amd64_match_len_ofs_ok: | |
TESTQ AX, AX | |
JZ check_offset | |
XORQ R14, R14 | |
- TESTQ $0x00000001, AX | |
- JZ copy_1_word | |
- MOVB (R11)(R14*1), R15 | |
- MOVB R15, (R10)(R14*1) | |
- ADDQ $0x01, R14 | |
- | |
-copy_1_word: | |
- TESTQ $0x00000002, AX | |
- JZ copy_1_dword | |
- MOVW (R11)(R14*1), R15 | |
- MOVW R15, (R10)(R14*1) | |
- ADDQ $0x02, R14 | |
- | |
-copy_1_dword: | |
- TESTQ $0x00000004, AX | |
- JZ copy_1_qword | |
- MOVL (R11)(R14*1), R15 | |
- MOVL R15, (R10)(R14*1) | |
- ADDQ $0x04, R14 | |
- | |
-copy_1_qword: | |
- TESTQ $0x00000008, AX | |
- JZ copy_1_test | |
- MOVQ (R11)(R14*1), R15 | |
- MOVQ R15, (R10)(R14*1) | |
- ADDQ $0x08, R14 | |
- JMP copy_1_test | |
copy_1: | |
MOVUPS (R11)(R14*1), X0 | |
MOVUPS X0, (R10)(R14*1) | |
ADDQ $0x10, R14 | |
- | |
-copy_1_test: | |
- CMPQ R14, AX | |
- JB copy_1 | |
- ADDQ AX, R11 | |
- ADDQ AX, R10 | |
- ADDQ AX, R12 | |
+ CMPQ R14, AX | |
+ JB copy_1 | |
+ ADDQ AX, R11 | |
+ ADDQ AX, R10 | |
+ ADDQ AX, R12 | |
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) | |
check_offset: | |
@@ -1826,30 +2037,30 @@ copy_match: | |
JA copy_overlapping_match | |
// Copy non-overlapping match | |
- XORQ CX, CX | |
+ ADDQ R13, R12 | |
+ MOVQ R10, CX | |
+ ADDQ R13, R10 | |
copy_2: | |
- MOVUPS (AX)(CX*1), X0 | |
- MOVUPS X0, (R10)(CX*1) | |
+ MOVUPS (AX), X0 | |
+ MOVUPS X0, (CX) | |
+ ADDQ $0x10, AX | |
ADDQ $0x10, CX | |
- CMPQ CX, R13 | |
- JB copy_2 | |
- ADDQ R13, R10 | |
- ADDQ R13, R12 | |
+ SUBQ $0x10, R13 | |
+ JHI copy_2 | |
JMP handle_loop | |
// Copy overlapping match | |
copy_overlapping_match: | |
- XORQ CX, CX | |
+ ADDQ R13, R12 | |
copy_slow_3: | |
- MOVB (AX)(CX*1), R14 | |
- MOVB R14, (R10)(CX*1) | |
- INCQ CX | |
- CMPQ CX, R13 | |
- JB copy_slow_3 | |
- ADDQ R13, R10 | |
- ADDQ R13, R12 | |
+ MOVB (AX), CL | |
+ MOVB CL, (R10) | |
+ INCQ AX | |
+ INCQ R10 | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
handle_loop: | |
MOVQ ctx+16(FP), AX | |
@@ -2044,60 +2255,55 @@ sequenceDecs_decodeSync_bmi2_fill_2_end: | |
MOVQ CX, 24(SP) | |
// Fill bitreader for state updates | |
- MOVQ R12, (SP) | |
- MOVQ $0x00000808, CX | |
- BEXTRQ CX, R8, R12 | |
- MOVQ ctx+16(FP), CX | |
- CMPQ 96(CX), $0x00 | |
- JZ sequenceDecs_decodeSync_bmi2_skip_update | |
- | |
- // Update Literal Length State | |
- MOVBQZX SI, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, SI, SI | |
+ MOVQ R12, (SP) | |
+ MOVQ $0x00000808, CX | |
+ BEXTRQ CX, R8, R12 | |
+ MOVQ ctx+16(FP), CX | |
+ CMPQ 96(CX), $0x00 | |
+ JZ sequenceDecs_decodeSync_bmi2_skip_update | |
+ LEAQ (SI)(DI*1), R13 | |
+ ADDQ R8, R13 | |
+ MOVBQZX R13, R13 | |
LEAQ (DX)(R13*1), CX | |
MOVQ AX, R14 | |
MOVQ CX, DX | |
ROLQ CL, R14 | |
BZHIQ R13, R14, R14 | |
- ADDQ R14, SI | |
- // Load ctx.llTable | |
+ // Update Offset State | |
+ BZHIQ R8, R14, CX | |
+ SHRXQ R8, R14, R14 | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, R8, R8 | |
+ ADDQ CX, R8 | |
+ | |
+ // Load ctx.ofTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ (CX), CX | |
- MOVQ (CX)(SI*8), SI | |
+ MOVQ 48(CX), CX | |
+ MOVQ (CX)(R8*8), R8 | |
// Update Match Length State | |
- MOVBQZX DI, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, DI, DI | |
- LEAQ (DX)(R13*1), CX | |
- MOVQ AX, R14 | |
- MOVQ CX, DX | |
- ROLQ CL, R14 | |
- BZHIQ R13, R14, R14 | |
- ADDQ R14, DI | |
+ BZHIQ DI, R14, CX | |
+ SHRXQ DI, R14, R14 | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, DI, DI | |
+ ADDQ CX, DI | |
// Load ctx.mlTable | |
MOVQ ctx+16(FP), CX | |
MOVQ 24(CX), CX | |
MOVQ (CX)(DI*8), DI | |
- // Update Offset State | |
- MOVBQZX R8, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, R8, R8 | |
- LEAQ (DX)(R13*1), CX | |
- MOVQ AX, R14 | |
- MOVQ CX, DX | |
- ROLQ CL, R14 | |
- BZHIQ R13, R14, R14 | |
- ADDQ R14, R8 | |
+ // Update Literal Length State | |
+ BZHIQ SI, R14, CX | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, SI, SI | |
+ ADDQ CX, SI | |
- // Load ctx.ofTable | |
+ // Load ctx.llTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ 48(CX), CX | |
- MOVQ (CX)(R8*8), R8 | |
+ MOVQ (CX), CX | |
+ MOVQ (CX)(SI*8), SI | |
sequenceDecs_decodeSync_bmi2_skip_update: | |
// Adjust offset | |
@@ -2180,45 +2386,16 @@ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: | |
TESTQ CX, CX | |
JZ check_offset | |
XORQ R14, R14 | |
- TESTQ $0x00000001, CX | |
- JZ copy_1_word | |
- MOVB (R10)(R14*1), R15 | |
- MOVB R15, (R9)(R14*1) | |
- ADDQ $0x01, R14 | |
- | |
-copy_1_word: | |
- TESTQ $0x00000002, CX | |
- JZ copy_1_dword | |
- MOVW (R10)(R14*1), R15 | |
- MOVW R15, (R9)(R14*1) | |
- ADDQ $0x02, R14 | |
- | |
-copy_1_dword: | |
- TESTQ $0x00000004, CX | |
- JZ copy_1_qword | |
- MOVL (R10)(R14*1), R15 | |
- MOVL R15, (R9)(R14*1) | |
- ADDQ $0x04, R14 | |
- | |
-copy_1_qword: | |
- TESTQ $0x00000008, CX | |
- JZ copy_1_test | |
- MOVQ (R10)(R14*1), R15 | |
- MOVQ R15, (R9)(R14*1) | |
- ADDQ $0x08, R14 | |
- JMP copy_1_test | |
copy_1: | |
MOVUPS (R10)(R14*1), X0 | |
MOVUPS X0, (R9)(R14*1) | |
ADDQ $0x10, R14 | |
- | |
-copy_1_test: | |
- CMPQ R14, CX | |
- JB copy_1 | |
- ADDQ CX, R10 | |
- ADDQ CX, R9 | |
- ADDQ CX, R11 | |
+ CMPQ R14, CX | |
+ JB copy_1 | |
+ ADDQ CX, R10 | |
+ ADDQ CX, R9 | |
+ ADDQ CX, R11 | |
// Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) | |
check_offset: | |
@@ -2333,30 +2510,30 @@ copy_match: | |
JA copy_overlapping_match | |
// Copy non-overlapping match | |
- XORQ R12, R12 | |
+ ADDQ R13, R11 | |
+ MOVQ R9, R12 | |
+ ADDQ R13, R9 | |
copy_2: | |
- MOVUPS (CX)(R12*1), X0 | |
- MOVUPS X0, (R9)(R12*1) | |
+ MOVUPS (CX), X0 | |
+ MOVUPS X0, (R12) | |
+ ADDQ $0x10, CX | |
ADDQ $0x10, R12 | |
- CMPQ R12, R13 | |
- JB copy_2 | |
- ADDQ R13, R9 | |
- ADDQ R13, R11 | |
+ SUBQ $0x10, R13 | |
+ JHI copy_2 | |
JMP handle_loop | |
// Copy overlapping match | |
copy_overlapping_match: | |
- XORQ R12, R12 | |
+ ADDQ R13, R11 | |
copy_slow_3: | |
- MOVB (CX)(R12*1), R14 | |
- MOVB R14, (R9)(R12*1) | |
- INCQ R12 | |
- CMPQ R12, R13 | |
- JB copy_slow_3 | |
- ADDQ R13, R9 | |
- ADDQ R13, R11 | |
+ MOVB (CX), R12 | |
+ MOVB R12, (R9) | |
+ INCQ CX | |
+ INCQ R9 | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
handle_loop: | |
MOVQ ctx+16(FP), CX | |
@@ -2862,6 +3039,7 @@ copy_match: | |
JA copy_overlapping_match | |
// Copy non-overlapping match | |
+ ADDQ R13, R12 | |
XORQ CX, CX | |
TESTQ $0x00000001, R13 | |
JZ copy_2_word | |
@@ -2900,21 +3078,19 @@ copy_2_test: | |
CMPQ CX, R13 | |
JB copy_2 | |
ADDQ R13, R10 | |
- ADDQ R13, R12 | |
JMP handle_loop | |
// Copy overlapping match | |
copy_overlapping_match: | |
- XORQ CX, CX | |
+ ADDQ R13, R12 | |
copy_slow_3: | |
- MOVB (AX)(CX*1), R14 | |
- MOVB R14, (R10)(CX*1) | |
- INCQ CX | |
- CMPQ CX, R13 | |
- JB copy_slow_3 | |
- ADDQ R13, R10 | |
- ADDQ R13, R12 | |
+ MOVB (AX), CL | |
+ MOVB CL, (R10) | |
+ INCQ AX | |
+ INCQ R10 | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
handle_loop: | |
MOVQ ctx+16(FP), AX | |
@@ -3109,60 +3285,55 @@ sequenceDecs_decodeSync_safe_bmi2_fill_2_end: | |
MOVQ CX, 24(SP) | |
// Fill bitreader for state updates | |
- MOVQ R12, (SP) | |
- MOVQ $0x00000808, CX | |
- BEXTRQ CX, R8, R12 | |
- MOVQ ctx+16(FP), CX | |
- CMPQ 96(CX), $0x00 | |
- JZ sequenceDecs_decodeSync_safe_bmi2_skip_update | |
- | |
- // Update Literal Length State | |
- MOVBQZX SI, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, SI, SI | |
+ MOVQ R12, (SP) | |
+ MOVQ $0x00000808, CX | |
+ BEXTRQ CX, R8, R12 | |
+ MOVQ ctx+16(FP), CX | |
+ CMPQ 96(CX), $0x00 | |
+ JZ sequenceDecs_decodeSync_safe_bmi2_skip_update | |
+ LEAQ (SI)(DI*1), R13 | |
+ ADDQ R8, R13 | |
+ MOVBQZX R13, R13 | |
LEAQ (DX)(R13*1), CX | |
MOVQ AX, R14 | |
MOVQ CX, DX | |
ROLQ CL, R14 | |
BZHIQ R13, R14, R14 | |
- ADDQ R14, SI | |
- // Load ctx.llTable | |
+ // Update Offset State | |
+ BZHIQ R8, R14, CX | |
+ SHRXQ R8, R14, R14 | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, R8, R8 | |
+ ADDQ CX, R8 | |
+ | |
+ // Load ctx.ofTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ (CX), CX | |
- MOVQ (CX)(SI*8), SI | |
+ MOVQ 48(CX), CX | |
+ MOVQ (CX)(R8*8), R8 | |
// Update Match Length State | |
- MOVBQZX DI, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, DI, DI | |
- LEAQ (DX)(R13*1), CX | |
- MOVQ AX, R14 | |
- MOVQ CX, DX | |
- ROLQ CL, R14 | |
- BZHIQ R13, R14, R14 | |
- ADDQ R14, DI | |
+ BZHIQ DI, R14, CX | |
+ SHRXQ DI, R14, R14 | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, DI, DI | |
+ ADDQ CX, DI | |
// Load ctx.mlTable | |
MOVQ ctx+16(FP), CX | |
MOVQ 24(CX), CX | |
MOVQ (CX)(DI*8), DI | |
- // Update Offset State | |
- MOVBQZX R8, R13 | |
- MOVQ $0x00001010, CX | |
- BEXTRQ CX, R8, R8 | |
- LEAQ (DX)(R13*1), CX | |
- MOVQ AX, R14 | |
- MOVQ CX, DX | |
- ROLQ CL, R14 | |
- BZHIQ R13, R14, R14 | |
- ADDQ R14, R8 | |
+ // Update Literal Length State | |
+ BZHIQ SI, R14, CX | |
+ MOVQ $0x00001010, R13 | |
+ BEXTRQ R13, SI, SI | |
+ ADDQ CX, SI | |
- // Load ctx.ofTable | |
+ // Load ctx.llTable | |
MOVQ ctx+16(FP), CX | |
- MOVQ 48(CX), CX | |
- MOVQ (CX)(R8*8), R8 | |
+ MOVQ (CX), CX | |
+ MOVQ (CX)(SI*8), SI | |
sequenceDecs_decodeSync_safe_bmi2_skip_update: | |
// Adjust offset | |
@@ -3398,6 +3569,7 @@ copy_match: | |
JA copy_overlapping_match | |
// Copy non-overlapping match | |
+ ADDQ R13, R11 | |
XORQ R12, R12 | |
TESTQ $0x00000001, R13 | |
JZ copy_2_word | |
@@ -3436,21 +3608,19 @@ copy_2_test: | |
CMPQ R12, R13 | |
JB copy_2 | |
ADDQ R13, R9 | |
- ADDQ R13, R11 | |
JMP handle_loop | |
// Copy overlapping match | |
copy_overlapping_match: | |
- XORQ R12, R12 | |
+ ADDQ R13, R11 | |
copy_slow_3: | |
- MOVB (CX)(R12*1), R14 | |
- MOVB R14, (R9)(R12*1) | |
- INCQ R12 | |
- CMPQ R12, R13 | |
- JB copy_slow_3 | |
- ADDQ R13, R9 | |
- ADDQ R13, R11 | |
+ MOVB (CX), R12 | |
+ MOVB R12, (R9) | |
+ INCQ CX | |
+ INCQ R9 | |
+ DECQ R13 | |
+ JNZ copy_slow_3 | |
handle_loop: | |
MOVQ ctx+16(FP), CX | |
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go | |
index b53f606a..29c15c8c 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/zip.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go | |
@@ -18,7 +18,14 @@ const ZipMethodWinZip = 93 | |
// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT | |
const ZipMethodPKWare = 20 | |
-var zipReaderPool sync.Pool | |
+// zipReaderPool is the default reader pool. | |
+var zipReaderPool = sync.Pool{New: func() interface{} { | |
+ z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) | |
+ if err != nil { | |
+ panic(err) | |
+ } | |
+ return z | |
+}} | |
// newZipReader creates a pooled zip decompressor. | |
func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { | |
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go | |
index c1c90b4a..3eb3f1c8 100644 | |
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go | |
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go | |
@@ -110,17 +110,6 @@ func printf(format string, a ...interface{}) { | |
} | |
} | |
-// matchLenFast does matching, but will not match the last up to 7 bytes. | |
-func matchLenFast(a, b []byte) int { | |
- endI := len(a) & (math.MaxInt32 - 7) | |
- for i := 0; i < endI; i += 8 { | |
- if diff := load64(a, i) ^ load64(b, i); diff != 0 { | |
- return i + bits.TrailingZeros64(diff)>>3 | |
- } | |
- } | |
- return endI | |
-} | |
- | |
// matchLen returns the maximum length. | |
// a must be the shortest of the two. | |
// The function also returns whether all bytes matched. | |
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go | |
index df22c47f..da867903 100644 | |
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go | |
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go | |
@@ -9,7 +9,7 @@ package assert | |
import "reflect" | |
-// Wrapper around reflect.Value.CanConvert, for compatability | |
+// Wrapper around reflect.Value.CanConvert, for compatibility | |
// reasons. | |
func canConvert(value reflect.Value, to reflect.Type) bool { | |
return value.CanConvert(to) | |
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go | |
index df36e3a3..0173b698 100644 | |
--- a/vendor/gopkg.in/yaml.v3/decode.go | |
+++ b/vendor/gopkg.in/yaml.v3/decode.go | |
@@ -100,7 +100,10 @@ func (p *parser) peek() yaml_event_type_t { | |
if p.event.typ != yaml_NO_EVENT { | |
return p.event.typ | |
} | |
- if !yaml_parser_parse(&p.parser, &p.event) { | |
+ // It's curious choice from the underlying API to generally return a | |
+ // positive result on success, but on this case return true in an error | |
+ // scenario. This was the source of bugs in the past (issue #666). | |
+ if !yaml_parser_parse(&p.parser, &p.event) || p.parser.error != yaml_NO_ERROR { | |
p.fail() | |
} | |
return p.event.typ | |
@@ -320,6 +323,8 @@ type decoder struct { | |
decodeCount int | |
aliasCount int | |
aliasDepth int | |
+ | |
+ mergedFields map[interface{}]bool | |
} | |
var ( | |
@@ -808,6 +813,11 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { | |
} | |
} | |
+ mergedFields := d.mergedFields | |
+ d.mergedFields = nil | |
+ | |
+ var mergeNode *Node | |
+ | |
mapIsNew := false | |
if out.IsNil() { | |
out.Set(reflect.MakeMap(outt)) | |
@@ -815,11 +825,18 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { | |
} | |
for i := 0; i < l; i += 2 { | |
if isMerge(n.Content[i]) { | |
- d.merge(n.Content[i+1], out) | |
+ mergeNode = n.Content[i+1] | |
continue | |
} | |
k := reflect.New(kt).Elem() | |
if d.unmarshal(n.Content[i], k) { | |
+ if mergedFields != nil { | |
+ ki := k.Interface() | |
+ if mergedFields[ki] { | |
+ continue | |
+ } | |
+ mergedFields[ki] = true | |
+ } | |
kkind := k.Kind() | |
if kkind == reflect.Interface { | |
kkind = k.Elem().Kind() | |
@@ -833,6 +850,12 @@ func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { | |
} | |
} | |
} | |
+ | |
+ d.mergedFields = mergedFields | |
+ if mergeNode != nil { | |
+ d.merge(n, mergeNode, out) | |
+ } | |
+ | |
d.stringMapType = stringMapType | |
d.generalMapType = generalMapType | |
return true | |
@@ -844,7 +867,8 @@ func isStringMap(n *Node) bool { | |
} | |
l := len(n.Content) | |
for i := 0; i < l; i += 2 { | |
- if n.Content[i].ShortTag() != strTag { | |
+ shortTag := n.Content[i].ShortTag() | |
+ if shortTag != strTag && shortTag != mergeTag { | |
return false | |
} | |
} | |
@@ -861,7 +885,6 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { | |
var elemType reflect.Type | |
if sinfo.InlineMap != -1 { | |
inlineMap = out.Field(sinfo.InlineMap) | |
- inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) | |
elemType = inlineMap.Type().Elem() | |
} | |
@@ -870,6 +893,9 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { | |
d.prepare(n, field) | |
} | |
+ mergedFields := d.mergedFields | |
+ d.mergedFields = nil | |
+ var mergeNode *Node | |
var doneFields []bool | |
if d.uniqueKeys { | |
doneFields = make([]bool, len(sinfo.FieldsList)) | |
@@ -879,13 +905,20 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { | |
for i := 0; i < l; i += 2 { | |
ni := n.Content[i] | |
if isMerge(ni) { | |
- d.merge(n.Content[i+1], out) | |
+ mergeNode = n.Content[i+1] | |
continue | |
} | |
if !d.unmarshal(ni, name) { | |
continue | |
} | |
- if info, ok := sinfo.FieldsMap[name.String()]; ok { | |
+ sname := name.String() | |
+ if mergedFields != nil { | |
+ if mergedFields[sname] { | |
+ continue | |
+ } | |
+ mergedFields[sname] = true | |
+ } | |
+ if info, ok := sinfo.FieldsMap[sname]; ok { | |
if d.uniqueKeys { | |
if doneFields[info.Id] { | |
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) | |
@@ -911,6 +944,11 @@ func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { | |
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) | |
} | |
} | |
+ | |
+ d.mergedFields = mergedFields | |
+ if mergeNode != nil { | |
+ d.merge(n, mergeNode, out) | |
+ } | |
return true | |
} | |
@@ -918,19 +956,29 @@ func failWantMap() { | |
failf("map merge requires map or sequence of maps as the value") | |
} | |
-func (d *decoder) merge(n *Node, out reflect.Value) { | |
- switch n.Kind { | |
+func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { | |
+ mergedFields := d.mergedFields | |
+ if mergedFields == nil { | |
+ d.mergedFields = make(map[interface{}]bool) | |
+ for i := 0; i < len(parent.Content); i += 2 { | |
+ k := reflect.New(ifaceType).Elem() | |
+ if d.unmarshal(parent.Content[i], k) { | |
+ d.mergedFields[k.Interface()] = true | |
+ } | |
+ } | |
+ } | |
+ | |
+ switch merge.Kind { | |
case MappingNode: | |
- d.unmarshal(n, out) | |
+ d.unmarshal(merge, out) | |
case AliasNode: | |
- if n.Alias != nil && n.Alias.Kind != MappingNode { | |
+ if merge.Alias != nil && merge.Alias.Kind != MappingNode { | |
failWantMap() | |
} | |
- d.unmarshal(n, out) | |
+ d.unmarshal(merge, out) | |
case SequenceNode: | |
- // Step backwards as earlier nodes take precedence. | |
- for i := len(n.Content) - 1; i >= 0; i-- { | |
- ni := n.Content[i] | |
+ for i := 0; i < len(merge.Content); i++ { | |
+ ni := merge.Content[i] | |
if ni.Kind == AliasNode { | |
if ni.Alias != nil && ni.Alias.Kind != MappingNode { | |
failWantMap() | |
@@ -943,6 +991,8 @@ func (d *decoder) merge(n *Node, out reflect.Value) { | |
default: | |
failWantMap() | |
} | |
+ | |
+ d.mergedFields = mergedFields | |
} | |
func isMerge(n *Node) bool { | |
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go | |
index ac66fccc..268558a0 100644 | |
--- a/vendor/gopkg.in/yaml.v3/parserc.go | |
+++ b/vendor/gopkg.in/yaml.v3/parserc.go | |
@@ -687,6 +687,9 @@ func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, i | |
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { | |
if first { | |
token := peek_token(parser) | |
+ if token == nil { | |
+ return false | |
+ } | |
parser.marks = append(parser.marks, token.start_mark) | |
skip_token(parser) | |
} | |
@@ -786,7 +789,7 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { | |
} | |
token := peek_token(parser) | |
- if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { | |
+ if token == nil || token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { | |
return | |
} | |
@@ -813,6 +816,9 @@ func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { | |
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { | |
if first { | |
token := peek_token(parser) | |
+ if token == nil { | |
+ return false | |
+ } | |
parser.marks = append(parser.marks, token.start_mark) | |
skip_token(parser) | |
} | |
@@ -922,6 +928,9 @@ func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_ev | |
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { | |
if first { | |
token := peek_token(parser) | |
+ if token == nil { | |
+ return false | |
+ } | |
parser.marks = append(parser.marks, token.start_mark) | |
skip_token(parser) | |
} | |
diff --git a/vendor/modules.txt b/vendor/modules.txt | |
index 471d1ce3..a62113ac 100644 | |
--- a/vendor/modules.txt | |
+++ b/vendor/modules.txt | |
@@ -10,7 +10,7 @@ github.com/Microsoft/go-winio/backuptar | |
github.com/Microsoft/go-winio/pkg/guid | |
github.com/Microsoft/go-winio/pkg/security | |
github.com/Microsoft/go-winio/vhd | |
-# github.com/Microsoft/hcsshim v0.9.2 | |
+# github.com/Microsoft/hcsshim v0.9.3 | |
github.com/Microsoft/hcsshim | |
github.com/Microsoft/hcsshim/computestorage | |
github.com/Microsoft/hcsshim/internal/cow | |
@@ -20,11 +20,13 @@ github.com/Microsoft/hcsshim/internal/hcs/schema2 | |
github.com/Microsoft/hcsshim/internal/hcserror | |
github.com/Microsoft/hcsshim/internal/hns | |
github.com/Microsoft/hcsshim/internal/interop | |
+github.com/Microsoft/hcsshim/internal/jobobject | |
github.com/Microsoft/hcsshim/internal/log | |
github.com/Microsoft/hcsshim/internal/logfields | |
github.com/Microsoft/hcsshim/internal/longpath | |
github.com/Microsoft/hcsshim/internal/mergemaps | |
github.com/Microsoft/hcsshim/internal/oc | |
+github.com/Microsoft/hcsshim/internal/queue | |
github.com/Microsoft/hcsshim/internal/safefile | |
github.com/Microsoft/hcsshim/internal/timeout | |
github.com/Microsoft/hcsshim/internal/vmcompute | |
@@ -173,7 +175,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 | |
github.com/containers/ocicrypt/spec | |
github.com/containers/ocicrypt/utils | |
github.com/containers/ocicrypt/utils/keyprovider | |
-# github.com/containers/storage v1.40.3 | |
+# github.com/containers/storage v1.41.1-0.20220606145428-2b2fb9fa246d | |
## explicit | |
github.com/containers/storage | |
github.com/containers/storage/drivers | |
@@ -327,7 +329,7 @@ github.com/ishidawataru/sctp | |
github.com/jinzhu/copier | |
# github.com/json-iterator/go v1.1.12 | |
github.com/json-iterator/go | |
-# github.com/klauspost/compress v1.15.2 | |
+# github.com/klauspost/compress v1.15.6 | |
github.com/klauspost/compress | |
github.com/klauspost/compress/flate | |
github.com/klauspost/compress/fse | |
@@ -418,7 +420,7 @@ github.com/opencontainers/go-digest | |
## explicit | |
github.com/opencontainers/image-spec/specs-go | |
github.com/opencontainers/image-spec/specs-go/v1 | |
-# github.com/opencontainers/runc v1.1.1 | |
+# github.com/opencontainers/runc v1.1.2 | |
## explicit | |
github.com/opencontainers/runc/libcontainer/apparmor | |
github.com/opencontainers/runc/libcontainer/devices | |
@@ -491,7 +493,7 @@ github.com/spf13/cobra | |
github.com/spf13/pflag | |
# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 | |
github.com/stefanberger/go-pkcs11uri | |
-# github.com/stretchr/testify v1.7.1 | |
+# github.com/stretchr/testify v1.7.2 | |
## explicit | |
github.com/stretchr/testify/assert | |
github.com/stretchr/testify/require | |
@@ -693,7 +695,7 @@ gopkg.in/square/go-jose.v2/json | |
gopkg.in/tomb.v1 | |
# gopkg.in/yaml.v2 v2.4.0 | |
gopkg.in/yaml.v2 | |
-# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b | |
+# gopkg.in/yaml.v3 v3.0.1 | |
gopkg.in/yaml.v3 | |
# k8s.io/klog v1.0.0 | |
k8s.io/klog | |
-- | |
2.35.1 |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment