chore(deps): bump github.com/go-git/go-git/v5 from 5.4.2 to 5.5.2

Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.4.2 to 5.5.2.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.4.2...v5.5.2)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
dependabot[bot] 2023-01-06 22:02:34 +00:00 committed by GitHub
parent 0d9c92c8c0
commit 3e88bda9be
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
116 changed files with 6737 additions and 431 deletions

14
go.mod
View File

@ -6,7 +6,7 @@ require (
github.com/alecthomas/chroma v0.10.0
github.com/davecgh/go-spew v1.1.1
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
github.com/go-git/go-git/v5 v5.4.2
github.com/go-git/go-git/v5 v5.5.2
github.com/mattn/go-isatty v0.0.16
github.com/mitchellh/go-homedir v1.1.0
gopkg.in/yaml.v2 v2.4.0
@ -20,16 +20,18 @@ require (
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-git/go-billy/v5 v5.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/pjbgf/sha1cd v0.2.3 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/xanzy/ssh-agent v0.3.2 // indirect
golang.org/x/crypto v0.1.0 // indirect
github.com/skeema/knownhosts v1.1.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/net v0.1.0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/tools v0.2.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
)

93
go.sum
View File

@ -1,17 +1,14 @@
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
@ -28,33 +25,28 @@ github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE=
github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
github.com/go-git/go-git/v5 v5.5.2 h1:v8lgZa5k9ylUw+OR/roJHTxR4QItsNFI5nKtAXFuynw=
github.com/go-git/go-git/v5 v5.5.2/go.mod h1:BE5hUJ5yaV2YMxhmaP4l6RBQ08kMxKSPD4BlxtH7OjI=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -69,7 +61,8 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pjbgf/sha1cd v0.2.3 h1:uKQP/7QOzNtKYH7UTohZLcjF5/55EnTw0jO/Ru4jZwI=
github.com/pjbgf/sha1cd v0.2.3/go.mod h1:HOK9QrgzdHpbc2Kzip0Q1yi3M2MFGPADtR6HjG65m5M=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -77,58 +70,73 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/xanzy/ssh-agent v0.3.2 h1:eKj4SX2Fe7mui28ZgnFW5fmTz1EIr7ugo5s6wDxdHBM=
github.com/xanzy/ssh-agent v0.3.2/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -138,7 +146,6 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

11
vendor/github.com/go-git/go-billy/v5/Makefile generated vendored Normal file
View File

@ -0,0 +1,11 @@
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test

# Run the full test suite with the race detector enabled.
.PHONY: test
test:
	$(GOTEST) -race ./...

# Produce a combined coverage profile for all packages.
# NOTE(review): COVERAGE_REPORT and COVERAGE_MODE are not defined in this
# fragment — presumably supplied by the invoking environment; confirm.
test-coverage:
	echo "" > $(COVERAGE_REPORT); \
	$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...

View File

@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
"sync"
)
type storage struct {
@ -174,6 +175,8 @@ func clean(path string) string {
type content struct {
name string
bytes []byte
m sync.RWMutex
}
func (c *content) WriteAt(p []byte, off int64) (int, error) {
@ -185,6 +188,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
}
}
c.m.Lock()
prev := len(c.bytes)
diff := int(off) - prev
@ -196,6 +200,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
if len(c.bytes) < prev {
c.bytes = c.bytes[:prev]
}
c.m.Unlock()
return len(p), nil
}
@ -209,8 +214,10 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
}
c.m.RLock()
size := int64(len(c.bytes))
if off >= size {
c.m.RUnlock()
return 0, io.EOF
}
@ -220,10 +227,12 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
btr := c.bytes[off : off+l]
n = copy(b, btr)
if len(btr) < len(b) {
err = io.EOF
}
n = copy(b, btr)
c.m.RUnlock()
return
}

72
vendor/github.com/go-git/go-billy/v5/util/walk.go generated vendored Normal file
View File

@ -0,0 +1,72 @@
package util
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// walk recursively descends path, calling walkFn for each entry, mirroring
// the standard library so filepath.SkipDir semantics match filepath.Walk.
// Adapted from https://golang.org/src/path/filepath/path.go
func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
	// A non-directory is reported once and never descended into.
	if !info.IsDir() {
		return walkFn(path, info, nil)
	}

	// Read the directory listing first, then report the directory itself,
	// forwarding any listing error so walkFn can decide how to react.
	names, err := readdirnames(fs, path)
	err1 := walkFn(path, info, err)
	// If err != nil, walk can't walk into this directory.
	// err1 != nil means walkFn wants walk to skip this directory or stop walking.
	// Therefore, if one of err and err1 isn't nil, walk will return.
	if err != nil || err1 != nil {
		// The caller's behavior is controlled by the return value, which is decided
		// by walkFn. walkFn may ignore err and return nil.
		// If walkFn returns SkipDir, it will be handled by the caller.
		// So walk should return whatever walkFn returns.
		return err1
	}

	for _, name := range names {
		filename := filepath.Join(path, name)
		fileInfo, err := fs.Lstat(filename)
		if err != nil {
			// Lstat failed: report the failure to walkFn; only nil or
			// SkipDir lets the walk continue with the remaining entries.
			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
				return err
			}
		} else {
			err = walk(fs, filename, fileInfo, walkFn)
			if err != nil {
				// SkipDir returned for a directory has already been honored
				// by the recursive call and is swallowed here; any other
				// error (or SkipDir reported for a plain file) propagates.
				if !fileInfo.IsDir() || err != filepath.SkipDir {
					return err
				}
			}
		}
	}
	return nil
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn: see the filepath.WalkFunc
// documentation for details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
	rootInfo, statErr := fs.Lstat(root)

	var walkErr error
	if statErr != nil {
		// The root itself could not be stat'ed: report that to walkFn.
		walkErr = walkFn(root, nil, statErr)
	} else {
		walkErr = walk(fs, root, rootInfo, walkFn)
	}

	// A SkipDir that reaches the top level simply ends the walk cleanly.
	if walkErr == filepath.SkipDir {
		return nil
	}
	return walkErr
}

View File

@ -2,3 +2,4 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/

View File

@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// hack to trigger conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
//
// The current Encoder implementation falls back to Go %q formatting when a
// value contains a backslash character, which is not consistent with the
// reference git implementation: git stores a newline as the two-character
// sequence \n, while the Encoder would print it directly. Until the value
// quoting is fixed, the description is escaped here by encoding every
// newline character as a literal `\n`.
func quoteDescription(desc string) string {
	lines := strings.Split(desc, "\n")
	return strings.Join(lines, `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// hack to enable conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
// (see quoteDescription for the full rationale): stored `\n` escape
// sequences are decoded back into real newline characters.
func unquoteDescription(desc string) string {
	decoder := strings.NewReplacer(`\n`, "\n")
	return decoder.Replace(desc)
}

View File

@ -15,7 +15,6 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@ -150,7 +149,7 @@ func ReadConfig(r io.Reader) (*Config, error) {
// config file to the given scope, an empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
}
files, err := Paths(scope)
@ -185,7 +184,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -247,6 +246,7 @@ const (
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
// DefaultPackWindow holds the number of previous objects used to

View File

@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit

View File

@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("Getting object %s failed: %v", hash, err)
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}

View File

@ -91,6 +91,8 @@ func (o *CloneOptions) Validate() error {
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
@ -147,7 +149,9 @@ const (
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
RefSpecs []config.RefSpec
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
@ -192,8 +196,16 @@ func (o *FetchOptions) Validate() error {
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RefSpecs specify what destination ref to update with what source
// object. A refspec with empty src can be used to delete a reference.
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
@ -206,13 +218,35 @@ type PushOptions struct {
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic sets option to be an atomic push
Atomic bool
}
// ForceWithLease sets fields on the lease.
// If neither RefName nor Hash are set, ForceWithLease protects
// all refs in the refspec by ensuring the ref of the remote in the local
// repository matches the one in the ref advertisement.
type ForceWithLease struct {
	// RefName, when set, will protect the ref by ensuring it matches the
	// hash in the ref advertisement.
	RefName plumbing.ReferenceName
	// Hash is the expected object id of RefName. The push will be rejected unless this
	// matches the corresponding object id of RefName in the refs advertisement.
	Hash plumbing.Hash
}
// Validate validates the fields and sets the default values.
@ -274,6 +308,8 @@ type CheckoutOptions struct {
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
// SparseCheckoutDirectories
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
@ -366,7 +402,7 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// this field is kept for compatility, it can be replaced with PathFilter
// this field is kept for compatibility, it can be replaced with PathFilter
FileName *string
// Filter commits based on the path of files that are updated
@ -422,6 +458,10 @@ type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enable empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is empty the
// Name and Email is read from the config, and time.Now it's used as When.
Author *object.Signature
@ -571,7 +611,7 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte

View File

@ -11,6 +11,10 @@ type Encoder struct {
w io.Writer
}
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error {
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
return err
}
@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
return err
}
}

View File

@ -103,7 +103,7 @@ func (s *Section) RemoveSubsection(name string) *Section {
return s
}
// Option return the value for the specified key. Empty string is returned if
// Option returns the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)

View File

@ -9,7 +9,7 @@ import (
type Operation int
const (
// Equal item represents a equals diff.
// Equal item represents an equals diff.
Equal Operation = iota
// Add item represents an insert diff.
Add
@ -26,15 +26,15 @@ type Patch interface {
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
// FilePatch represents the necessary steps to transform one file into another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata to
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// Chunks returns a slice of ordered changes to transform "from" File into
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
@ -49,7 +49,7 @@ type File interface {
Path() string
}
// Chunk represents a portion of a file transformation to another.
// Chunk represents a portion of a file transformation into another.
type Chunk interface {
// Content contains the portion of the file.
Content() string

View File

@ -13,13 +13,14 @@ import (
)
const (
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
infoExcludeFile = gitDir + "/info/exclude"
)
// readIgnoreFile reads a specific git ignore file.
@ -42,10 +43,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
// recursively traversing through the directory structure. The result is in
// the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, _ := readIgnoreFile(fs, path, gitignoreFile)
ps = append(ps, subps...)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))

View File

@ -12,9 +12,9 @@ import (
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("Unsupported version")
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
ErrMalformedIdxFile = errors.New("malformed IDX file")
)
const (

View File

@ -1,10 +1,10 @@
package idxfile
import (
"crypto/sha1"
"hash"
"crypto"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -16,7 +16,7 @@ type Encoder struct {
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}

View File

@ -3,15 +3,15 @@ package index
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -49,7 +49,7 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
return &Decoder{
r: io.TeeReader(r, h),
hash: h,

View File

@ -2,19 +2,19 @@ package index
import (
"bytes"
"crypto/sha1"
"crypto"
"errors"
"hash"
"io"
"sort"
"time"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// EncodeVersionSupported is the range of supported index versions
EncodeVersionSupported uint32 = 2
EncodeVersionSupported uint32 = 3
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
// negative timestamp values
@ -29,16 +29,16 @@ type Encoder struct {
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
if idx.Version > EncodeVersionSupported {
return ErrUnsupportedVersion
}
@ -68,8 +68,12 @@ func (e *Encoder) encodeEntries(idx *Index) error {
if err := e.encodeEntry(entry); err != nil {
return err
}
entryLength := entryHeaderLength
if entry.IntentToAdd || entry.SkipWorktree {
entryLength += 2
}
wrote := entryHeaderLength + len(entry.Name)
wrote := entryLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
@ -79,10 +83,6 @@ func (e *Encoder) encodeEntries(idx *Index) error {
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
@ -110,9 +110,25 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
flagsFlow := []interface{}{flags}
if entry.IntentToAdd || entry.SkipWorktree {
var extendedFlags uint16
if entry.IntentToAdd {
extendedFlags |= intentToAddMask
}
if entry.SkipWorktree {
extendedFlags |= skipWorkTreeMask
}
flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
}
flow = append(flow, flagsFlow...)
if err := binary.Write(e.w, flow...); err != nil {
return err
}

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing"
@ -211,3 +212,20 @@ type EndOfIndexEntry struct {
// their contents).
Hash plumbing.Hash
}
// SkipUnless marks every index entry whose name does not begin with one of
// the given path prefixes (patterns in the form A, A/B, A/B/C) as
// skip-worktree, preventing those files from being checked out.
func (i *Index) SkipUnless(patterns []string) {
	for _, e := range i.Entries {
		keep := false
		for _, p := range patterns {
			if strings.HasPrefix(e.Name, p) {
				keep = true
				break
			}
		}
		if keep {
			continue
		}
		e.SkipWorktree = true
	}
}

View File

@ -1,13 +1,13 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -20,20 +20,22 @@ var (
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
multi io.Reader
zlib io.Reader
zlibref sync.ZLibReader
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
zlib, err := sync.GetZlibReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
zlib: zlib.Reader,
zlibref: zlib,
}, nil
}
@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash {
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
sync.PutZlibReader(r.zlibref)
return nil
}

View File

@ -7,6 +7,7 @@ import (
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib *zlib.Writer
closed bool
pending int64 // number of unwritten bytes
@ -31,9 +32,10 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := sync.GetZlibWriter(w)
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}
@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer sync.PutZlibWriter(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}

View File

@ -1,10 +1,7 @@
package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -61,18 +58,3 @@ func WritePackfileToObjectStorage(
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}

View File

@ -5,6 +5,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
@ -43,18 +44,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin
defer ioutil.CheckClose(tr, &err)
bb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(bb)
bb.Reset()
bb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(tb)
tb.Reset()
tb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
@ -80,9 +79,8 @@ func DiffDelta(src, tgt []byte) []byte {
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@ -90,9 +88,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
ibuf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(ibuf)
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)

View File

@ -2,11 +2,12 @@ package packfile
import (
"compress/zlib"
"crypto/sha1"
"crypto"
"fmt"
"io"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -28,7 +29,7 @@ type Encoder struct {
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
Hash: hash.New(crypto.SHA1),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)

View File

@ -7,19 +7,20 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
hash plumbing.Hash
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
largeObjectThreshold int64
}
// NewFSObject creates a new filesystem object.
@ -32,16 +33,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType {
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"fmt"
"io"
"os"
@ -11,6 +12,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
// Get retrieves the encoded object in the packfile with the given hash.
@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
// asyncReader returns a zlib-decompressing ReadCloser positioned at the
// scanner's current offset in the packfile. Closing the returned reader
// only returns the pooled zlib reader to the pool; it does not close the
// underlying packfile.
func asyncReader(p *Packfile) (io.ReadCloser, error) {
	reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
	zr, err := sync.GetZlibReader(reader)
	if err != nil {
		return nil, fmt.Errorf("zlib reset error: %s", err)
	}
	return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
		sync.PutZlibReader(zr)
		return nil
	}), nil
}
// getReaderDirect returns a streaming reader for the content of the object
// described by h, without buffering the whole object in memory. Deltified
// objects are resolved by layering a delta reader over their base object.
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
	switch h.Type {
	case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
		// Non-delta objects stream straight from the zlib stream.
		return asyncReader(p)
	case plumbing.REFDeltaObject:
		deltaRc, err := asyncReader(p)
		if err != nil {
			return nil, err
		}
		r, err := p.readREFDeltaObjectContent(h, deltaRc)
		if err != nil {
			return nil, err
		}
		return r, nil
	case plumbing.OFSDeltaObject:
		deltaRc, err := asyncReader(p)
		if err != nil {
			return nil, err
		}
		r, err := p.readOFSDeltaObjectContent(h, deltaRc)
		if err != nil {
			return nil, err
		}
		return r, nil
	default:
		return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
	}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
// readREFDeltaObjectContent resolves the base object referenced by
// h.Reference (from the delta-base cache when possible, otherwise via a
// full Get) and returns a reader that applies the delta stream on top of it.
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
	var err error
	base, ok := p.cacheGet(h.Reference)
	if !ok {
		base, err = p.Get(h.Reference)
		if err != nil {
			return nil, err
		}
	}
	return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@ -353,9 +415,9 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
// readOFSDeltaObjectContent locates the delta base by its absolute offset
// within the packfile (h.OffsetReference) and returns a reader that applies
// the delta stream on top of it.
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
	hash, err := p.FindHash(h.OffsetReference)
	if err != nil {
		return nil, err
	}
	base, err := p.objectAtOffset(h.OffsetReference, hash)
	if err != nil {
		return nil, err
	}
	return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {

View File

@ -10,6 +10,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -46,7 +47,6 @@ type Parser struct {
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
@ -176,7 +176,8 @@ func (p *Parser) init() error {
}
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
@ -220,6 +221,7 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
buf.Grow(int(oh.Length))
_, crc, err := p.scanner.NextObject(buf)
if err != nil {
return err
@ -265,7 +267,9 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
buf := &bytes.Buffer{}
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for _, obj := range p.oi {
buf.Reset()
err := p.get(obj, buf)
@ -287,6 +291,7 @@ func (p *Parser) resolveDeltas() error {
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
return err
}
p.resolveExternalRef(child)
}
// Remove the delta from the cache.
@ -299,6 +304,16 @@ func (p *Parser) resolveDeltas() error {
return nil
}
// resolveExternalRef replaces a placeholder entry (an external reference,
// i.e. a delta base not present in this packfile) for o.SHA1 with the now
// fully resolved object, re-parenting any children that were waiting on it.
func (p *Parser) resolveExternalRef(o *objectInfo) {
	if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
		p.oiByHash[o.SHA1] = o
		o.Children = ref.Children
		for _, c := range o.Children {
			c.Parent = o
		}
	}
}
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok := p.cache.Get(o.Offset)
@ -336,9 +351,8 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
if o.DiskType.IsDelta() {
b := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(b)
b.Reset()
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
err := p.get(o.Parent, b)
if err != nil {
return err
@ -372,9 +386,8 @@ func (p *Parser) resolveObject(
if !o.DiskType.IsDelta() {
return nil
}
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
err := p.readData(buf, o)
if err != nil {
return err

View File

@ -1,12 +1,15 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"io"
"math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
@ -32,18 +35,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
defer ioutil.CheckClose(w, &err)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, err = buf.ReadFrom(r)
if err != nil {
return err
}
src := buf.Bytes()
dst := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(dst)
dst.Reset()
dst := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(dst)
err = patchDelta(dst, src, delta)
if err != nil {
return err
@ -51,9 +52,9 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
b := sync.GetByteSlice()
_, err = io.CopyBuffer(w, dst, *b)
sync.PutByteSlice(b)
return err
}
@ -73,6 +74,131 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
// ReaderFromDelta returns an io.ReadCloser that streams the result of
// applying the git delta stream deltaRC on top of base. The two LEB128
// sizes at the head of the delta (source size, target size) are read and
// validated up front; the actual reconstruction then runs in a goroutine
// feeding an io.Pipe, so the caller consumes the target object lazily
// without it ever being fully materialized in memory.
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
	deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
	// First header field: expected size of the source (base) object.
	srcSz, err := decodeLEB128ByteReader(deltaBuf)
	if err != nil {
		if err == io.EOF {
			return nil, ErrInvalidDelta
		}
		return nil, err
	}
	if srcSz != uint(base.Size()) {
		return nil, ErrInvalidDelta
	}
	// Second header field: size of the target (reconstructed) object.
	targetSz, err := decodeLEB128ByteReader(deltaBuf)
	if err != nil {
		if err == io.EOF {
			return nil, ErrInvalidDelta
		}
		return nil, err
	}
	// Counts down as target bytes are produced; reaching 0 ends the stream.
	remainingTargetSz := targetSz
	dstRd, dstWr := io.Pipe()
	go func() {
		baseRd, err := base.Reader()
		if err != nil {
			_ = dstWr.CloseWithError(ErrInvalidDelta)
			return
		}
		defer baseRd.Close()
		baseBuf := bufio.NewReader(baseRd)
		// basePos tracks how far into the base object we have read.
		basePos := uint(0)
		for {
			cmd, err := deltaBuf.ReadByte()
			if err == io.EOF {
				// Running out of commands before the target is complete
				// means the delta is truncated.
				_ = dstWr.CloseWithError(ErrInvalidDelta)
				return
			}
			if err != nil {
				_ = dstWr.CloseWithError(err)
				return
			}
			if isCopyFromSrc(cmd) {
				// Copy command: take sz bytes from the base at offset.
				offset, err := decodeOffsetByteReader(cmd, deltaBuf)
				if err != nil {
					_ = dstWr.CloseWithError(err)
					return
				}
				sz, err := decodeSizeByteReader(cmd, deltaBuf)
				if err != nil {
					_ = dstWr.CloseWithError(err)
					return
				}
				if invalidSize(sz, targetSz) ||
					invalidOffsetSize(offset, sz, srcSz) {
					_ = dstWr.Close()
					return
				}
				// The base is only readable forward; to copy from before
				// the current position, reopen it and skip from the start.
				discard := offset - basePos
				if basePos > offset {
					_ = baseRd.Close()
					baseRd, err = base.Reader()
					if err != nil {
						_ = dstWr.CloseWithError(ErrInvalidDelta)
						return
					}
					baseBuf.Reset(baseRd)
					discard = offset
				}
				// bufio.Reader.Discard takes an int, so very large skips
				// are performed in MaxInt32-sized chunks first.
				for discard > math.MaxInt32 {
					n, err := baseBuf.Discard(math.MaxInt32)
					if err != nil {
						_ = dstWr.CloseWithError(err)
						return
					}
					basePos += uint(n)
					discard -= uint(n)
				}
				for discard > 0 {
					n, err := baseBuf.Discard(int(discard))
					if err != nil {
						_ = dstWr.CloseWithError(err)
						return
					}
					basePos += uint(n)
					discard -= uint(n)
				}
				if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
					_ = dstWr.CloseWithError(err)
					return
				}
				remainingTargetSz -= sz
				basePos += sz
			} else if isCopyFromDelta(cmd) {
				// Insert command: the low bits of cmd are the literal size.
				sz := uint(cmd) // cmd is the size itself
				if invalidSize(sz, targetSz) {
					_ = dstWr.CloseWithError(ErrInvalidDelta)
					return
				}
				if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
					_ = dstWr.CloseWithError(err)
					return
				}
				remainingTargetSz -= sz
			} else {
				// cmd == 0 (or otherwise unrecognized) is reserved/invalid.
				_ = dstWr.CloseWithError(ErrDeltaCmd)
				return
			}
			// NOTE(review): remainingTargetSz is unsigned, so "<= 0" only
			// fires on exactly 0; an overshooting copy would wrap around —
			// presumably the invalidSize checks above are relied on to
			// prevent that. Confirm against the upstream delta format spec.
			if remainingTargetSz <= 0 {
				_ = dstWr.Close()
				return
			}
		}
	}()
	return dstRd, nil
}
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
return ErrInvalidDelta
@ -161,6 +287,25 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
@ -174,6 +319,40 @@ func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
if (cmd & 0x01) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset = uint(next)
}
if (cmd & 0x02) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 8
}
if (cmd & 0x04) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 16
}
if (cmd & 0x08) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 24
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
@ -208,6 +387,36 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
if (cmd & 0x10) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz = uint(next)
}
if (cmd & 0x20) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 8
}
if (cmd & 0x40) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 16
}
if sz == 0 {
sz = 0x10000
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {

View File

@ -3,17 +3,16 @@ package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -114,7 +113,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
return
}
// readSignature reads an returns the signature field in the packfile.
// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
@ -320,29 +319,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr, err := sync.GetZlibReader(s.r)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
// copyObject reads a non-deltified object from its zlib stream within an
// object entry in the packfile and writes it to w.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
zr, err := sync.GetZlibReader(s.r)
defer sync.PutZlibReader(zr)
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
if err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
defer ioutil.CheckClose(zr.Reader, &err)
buf := sync.GetByteSlice()
n, err = io.CopyBuffer(w, zr.Reader, *buf)
sync.PutByteSlice(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
@ -372,9 +380,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
buf := sync.GetByteSlice()
_, err := io.CopyBuffer(stdioutil.Discard, s.r, *buf)
sync.PutByteSlice(buf)
return err
}
@ -384,13 +393,13 @@ func (s *Scanner) Flush() error {
}
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penalty for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer

View File

@ -2,11 +2,12 @@ package plumbing
import (
"bytes"
"crypto/sha1"
"crypto"
"encoding/hex"
"hash"
"sort"
"strconv"
"github.com/go-git/go-git/v5/plumbing/hash"
)
// Hash SHA1 hashed content
@ -46,7 +47,7 @@ type Hasher struct {
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h := Hasher{hash.New(crypto.SHA1)}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))

View File

@ -0,0 +1,59 @@
// Package hash provides a way for managing the
// underlying hash implementations used across go-git.
package hash
import (
"crypto"
"fmt"
"hash"
"github.com/pjbgf/sha1cd/cgo"
)
// algos maps each supported hash function to a constructor
// for its concrete implementation.
var algos = map[crypto.Hash]func() hash.Hash{}

// init installs the default algorithm set at package load time.
func init() {
	reset()
}

// reset resets the default algos value. Can be used after running tests
// that register new algorithms, to avoid side effects.
func reset() {
	// For performance reasons the cgo version of the collision
	// detection algorithm is being used.
	algos[crypto.SHA1] = cgo.New
}
// RegisterHash allows for the hash algorithm used to be overridden.
// This ensures the hash selection for go-git must be explicit, when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
	// A nil constructor can never produce a hash; reject it outright.
	if f == nil {
		return fmt.Errorf("cannot register hash: f is nil")
	}

	// Only SHA1 may currently be overridden.
	if h != crypto.SHA1 {
		return fmt.Errorf("unsupported hash function: %v", h)
	}

	algos[h] = f
	return nil
}
// Hash is the same as hash.Hash. This allows consumers
// to avoid having to import this package alongside "hash".
type Hash interface {
	hash.Hash
}
// New returns a new Hash for the given hash function.
// It panics if the hash function is not registered.
func New(h crypto.Hash) Hash {
	newFunc, registered := algos[h]
	if !registered {
		panic(fmt.Sprintf("hash algorithm not registered: %v", h))
	}
	return newFunc()
}

View File

@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash {
return o.h
}
// Type return the ObjectType
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size return the size of the object
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize set the object size, a content of the given size should be written

View File

@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) {
return merkletrie.Modify, nil
}
// Files return the files before and after a change.
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()

View File

@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) {
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
return nil, fmt.Errorf("from field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
return nil, fmt.Errorf("to field: %s", err)
}
return ret, nil

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@ -180,9 +180,8 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
var message bool
var pgpsig bool

View File

@ -1,12 +0,0 @@
package object
import (
"bufio"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}

View File

@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return

View File

@ -403,10 +403,16 @@ func min(a, b int) int {
return b
}
const maxMatrixSize = 10000
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
// Allocate for the worst-case scenario where every pair has a score
// that we need to consider. We might not need that many.
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
matrixSize := len(srcs) * len(dsts)
if matrixSize > maxMatrixSize {
matrixSize = maxMatrixSize
}
matrix := make(similarityMatrix, 0, matrixSize)
srcSizes := make([]int64, len(srcs))
dstSizes := make([]int64, len(dsts))
dstTooLarge := make(map[int]bool)

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
@ -13,6 +12,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// Tag represents an annotated tag object. It points to a single git object of
@ -93,9 +93,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
var line []byte
line, err = r.ReadBytes('\n')

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@ -230,9 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
str, err := r.ReadString(' ')
if err != nil {

View File

@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder {
}
}
// Skip reports whether this noder should be skipped while walking;
// tree noders are never skipped.
func (t *treeNoder) Skip() bool {
	return false
}
// isRoot reports whether this noder is the root of the tree,
// identified by an empty name.
func (t *treeNoder) isRoot() bool {
	return t.name == ""
}

View File

@ -1,6 +1,11 @@
// Package capability defines the server and client capabilities.
package capability
import (
"fmt"
"os"
)
// Capability describes a server or client capability.
type Capability string
@ -238,7 +243,15 @@ const (
Filter Capability = "filter"
)
const DefaultAgent = "go-git/4.x"
const userAgent = "go-git/5.x"
// DefaultAgent provides the user agent string.
func DefaultAgent() string {
if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
return fmt.Sprintf("%s %s", userAgent, envUserAgent)
}
return userAgent
}
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,

View File

@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
delete(l.m, capability)
if _, ok := l.m[capability]; ok {
l.m[capability].Values = l.m[capability].Values[:0]
}
return l.Add(capability, values...)
}

View File

@ -19,7 +19,6 @@ var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")

View File

@ -21,11 +21,6 @@ type ServerResponse struct {
// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
}
}
return s.Err()
// isMultiACK is true when the remote server advertises the related
// capabilities when they are not in transport.UnsupportedCapabilities.
//
// Users may decide to remove multi_ack and multi_ack_detailed from the
// unsupported capabilities list, which allows them to do initial clones
// from Azure DevOps.
//
// Follow-up fetches may error, therefore errors are wrapped with additional
// information highlighting that these capabilities are not supported by go-git.
//
// TODO: Implement support for multi_ack or multi_ack_detailed responses.
err := s.Err()
if err != nil && isMultiACK {
return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
}
return err
}
// stopReading detects when a valid command such as ACK or NAK is found to be
@ -113,8 +124,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
if len(r.ACKs) > 1 && !isMultiACK {
// For further information, refer to comments in the Decode func above.
return errors.New("multi_ack and multi_ack_detailed are not supported")
}

View File

@ -95,7 +95,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
return r

View File

@ -19,6 +19,7 @@ var (
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Options []*Option
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
if adv.Supports(capability.ReportStatus) {
@ -86,9 +87,9 @@ type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
Update Action = "update"
Delete Action = "delete"
Invalid Action = "invalid"
)
type Command struct {
@ -120,3 +121,8 @@ func (c *Command) validate() error {
return nil
}
// Option is a single push-option key/value pair; options are encoded
// into the request when the server advertises the push-options
// capability (see ReferenceUpdateRequest.Options).
type Option struct {
	Key   string
	Value string
}

View File

@ -9,10 +9,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
return err
}
if req.Capabilities.Supports(capability.PushOptions) {
if err := req.encodeOptions(e, req.Options); err != nil {
return err
}
}
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string {
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
// encodeOptions writes each push option as a "key=value" pkt-line and
// terminates the option list with a flush-pkt.
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
	opts []*Option) error {
	for i := range opts {
		if err := e.Encodef("%s=%s", opts[i].Key, opts[i].Value); err != nil {
			return err
		}
	}

	return e.Flush()
}

View File

@ -24,7 +24,6 @@ type UploadPackResponse struct {
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
}
}
if err := r.ServerResponse.Encode(w); err != nil {
if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
return err
}

View File

@ -168,22 +168,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference {
}
}
// Type return the type of a reference
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name return the name of a reference
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash return the hash of a hash reference
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target return the target of a symbolic reference
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
@ -204,6 +204,21 @@ func (r *Reference) Strings() [2]string {
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}
name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}

View File

@ -52,8 +52,8 @@ type DeltaObjectStorer interface {
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is a optional method for ObjectStorer, it enable transaction
// base write and read operations in the storage
// Transactioner is a optional method for ObjectStorer, it enables transactional read and write
// operations.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
@ -87,8 +87,8 @@ type PackedObjectStorer interface {
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is a optional method for ObjectStorer, it enable direct write
// of packfile to the storage
// PackfileWriter is an optional method for ObjectStorer, it enables directly writing
// a packfile to storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//

View File

@ -112,7 +112,7 @@ type Endpoint struct {
Port int
// Path is the repository path.
Path string
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CaBundle specify additional ca bundle with system cert pool
CaBundle []byte

View File

@ -77,14 +77,14 @@ func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent
// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
// call to the Close function from the connection, a command execution in git
// protocol can't be closed or killed
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe return the underlying connection as Reader
// StdoutPipe returns the underlying connection as Reader
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}

View File

@ -428,11 +428,6 @@ func isRepoNotFoundError(s string) bool {
return false
}
var (
nak = []byte("NAK")
eol = []byte("\n")
)
// uploadPack implements the git-upload-pack protocol.
func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
// TODO support multi_ack mode

View File

@ -189,7 +189,7 @@ func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Ha
}
func (*upSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}
@ -355,7 +355,7 @@ func (s *rpSession) reportStatus() *packp.ReportStatus {
}
func (*rpSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}

View File

@ -10,10 +10,9 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/mitchellh/go-homedir"
"github.com/skeema/knownhosts"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)
const DefaultUsername = "git"
@ -44,7 +43,6 @@ const (
type KeyboardInteractive struct {
User string
Challenge ssh.KeyboardInteractiveChallenge
HostKeyCallbackHelper
}
func (a *KeyboardInteractive) Name() string {
@ -56,19 +54,18 @@ func (a *KeyboardInteractive) String() string {
}
func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{
a.Challenge,
},
})
}, nil
}
// Password implements AuthMethod by using the given password.
type Password struct {
User string
Password string
HostKeyCallbackHelper
}
func (a *Password) Name() string {
@ -80,10 +77,10 @@ func (a *Password) String() string {
}
func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.Password(a.Password)},
})
}, nil
}
// PasswordCallback implements AuthMethod by using a callback
@ -91,7 +88,6 @@ func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
type PasswordCallback struct {
User string
Callback func() (pass string, err error)
HostKeyCallbackHelper
}
func (a *PasswordCallback) Name() string {
@ -103,17 +99,16 @@ func (a *PasswordCallback) String() string {
}
func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)},
})
}, nil
}
// PublicKeys implements AuthMethod by using the given key pairs.
type PublicKeys struct {
User string
Signer ssh.Signer
HostKeyCallbackHelper
}
// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An
@ -152,10 +147,10 @@ func (a *PublicKeys) String() string {
}
func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)},
})
}, nil
}
func username() (string, error) {
@ -178,7 +173,6 @@ func username() (string, error) {
type PublicKeysCallback struct {
User string
Callback func() (signers []ssh.Signer, err error)
HostKeyCallbackHelper
}
// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens
@ -213,10 +207,10 @@ func (a *PublicKeysCallback) String() string {
}
func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)},
})
}, nil
}
// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a
@ -224,12 +218,19 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
//
// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
// environment variable, example:
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
//
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
kh, err := newKnownHosts(files...)
return ssh.HostKeyCallback(kh), err
}
func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) {
var err error
if len(files) == 0 {
@ -251,7 +252,7 @@ func getDefaultKnownHostsFiles() ([]string, error) {
return files, nil
}
homeDirPath, err := homedir.Dir()
homeDirPath, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -285,6 +286,9 @@ func filterKnownHostsFiles(files ...string) ([]string, error) {
// HostKeyCallbackHelper is a helper that provides common functionality to
// configure HostKeyCallback into a ssh.ClientConfig.
// Deprecated: use SetConfigHostKeyFields (see common.go) instead, which also
// provides a mechanism for setting ClientConfig.HostKeyAlgorithms for a
// specific host.
type HostKeyCallbackHelper struct {
// HostKeyCallback is the function type used for verifying server keys.
// If nil default callback will be create using NewKnownHostsCallback

View File

@ -121,10 +121,15 @@ func (c *command) connect() error {
if err != nil {
return err
}
hostWithPort := c.getHostWithPort()
config, err = SetConfigHostKeyFields(config, hostWithPort)
if err != nil {
return err
}
overrideConfig(c.config, config)
c.client, err = dial("tcp", c.getHostWithPort(), config)
c.client, err = dial("tcp", hostWithPort, config)
if err != nil {
return err
}
@ -162,6 +167,23 @@ func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
return ssh.NewClient(c, chans, reqs), nil
}
// SetConfigHostKeyFields sets cfg.HostKeyCallback and cfg.HostKeyAlgorithms
// based on OpenSSH known_hosts. cfg is modified in-place. hostWithPort must be
// supplied, since the algorithms will be set based on the known host keys for
// that specific host. Otherwise, golang.org/x/crypto/ssh can return an error
// upon connecting to a host whose *first* key is not known, even though other
// keys (of different types) are known and match properly.
// For background see https://github.com/go-git/go-git/issues/411 as well as
// https://github.com/golang/go/issues/29286 for root cause.
func SetConfigHostKeyFields(cfg *ssh.ClientConfig, hostWithPort string) (*ssh.ClientConfig, error) {
	kh, err := newKnownHosts()
	if err != nil {
		// cfg is returned unmodified alongside the error, matching the
		// "fields are only set on success" contract.
		return cfg, err
	}

	cfg.HostKeyCallback = kh.HostKeyCallback()
	cfg.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort)
	return cfg, nil
}
func (c *command) getHostWithPort() string {
if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
return addr

View File

@ -17,7 +17,7 @@ type PruneOptions struct {
Handler PruneHandler
}
var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported")
var ErrLooseObjectsNotSupported = errors.New("loose objects not supported")
// DeleteObject deletes an object from a repository.
// The type conveniently matches PruneHandler.

View File

@ -5,10 +5,12 @@ import (
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
@ -103,7 +105,11 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
}
s, err := newSendPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle)
if err != nil {
return err
}
@ -183,12 +189,12 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
var hashesToPush []plumbing.Hash
// Avoid the expensive revlist operation if we're only doing deletes.
if !allDelete {
if r.c.IsFirstURLLocal() {
if url.IsLocalEndpoint(o.RemoteURL) {
// If we're are pushing to a local repo, it might be much
// faster to use a local storage layer to get the commits
// to ignore, when calculating the object revlist.
localStorer := filesystem.NewStorage(
osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
osfs.New(o.RemoteURL), cache.NewObjectLRUDefault())
hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
r.s, localStorer, objects, haves)
} else {
@ -225,6 +231,74 @@ func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
return !ar.Capabilities.Supports(capability.OFSDelta)
}
// addReachableTags appends to req.Commands every local annotated tag that is
// not already on the remote and whose tagged commit is an ancestor of a
// commit already being pushed. It is invoked when PushOptions.FollowTags is
// set (see newReferenceUpdateRequest).
func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
	// Candidate set: tags present locally, keyed by the full reference.
	tags := make(map[plumbing.Reference]struct{})
	// get a list of all tags locally
	for _, ref := range localRefs {
		if strings.HasPrefix(string(ref.Name()), "refs/tags") {
			tags[*ref] = struct{}{}
		}
	}

	remoteRefIter, err := remoteRefs.IterReferences()
	if err != nil {
		return err
	}

	// remove any that are already on the remote
	if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error {
		delete(tags, *reference)
		return nil
	}); err != nil {
		return err
	}

	for tag := range tags {
		tagObject, err := object.GetObject(r.s, tag.Hash())
		var tagCommit *object.Commit
		if err != nil {
			return fmt.Errorf("get tag object: %w", err)
		}

		// Lightweight tags (pointing directly at a commit) are skipped;
		// only annotated tag objects are followed.
		if tagObject.Type() != plumbing.TagObject {
			continue
		}

		annotatedTag, ok := tagObject.(*object.Tag)
		if !ok {
			return errors.New("could not get annotated tag object")
		}

		tagCommit, err = object.GetCommit(r.s, annotatedTag.Target)
		if err != nil {
			return fmt.Errorf("get annotated tag commit: %w", err)
		}

		// only include tags that are reachable from one of the refs
		// already being pushed
		// NOTE: range evaluates the slice header once, so commands
		// appended inside this loop are not themselves revisited.
		for _, cmd := range req.Commands {
			if tag.Name() == cmd.Name {
				continue
			}

			// Don't chase reachability through other tag refs.
			if strings.HasPrefix(cmd.Name.String(), "refs/tags") {
				continue
			}

			c, err := object.GetCommit(r.s, cmd.New)
			if err != nil {
				return fmt.Errorf("get commit %v: %w", cmd.Name, err)
			}

			if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor {
				req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()})
			}
		}
	}

	return nil
}
func (r *Remote) newReferenceUpdateRequest(
o *PushOptions,
localRefs []*plumbing.Reference,
@ -242,10 +316,28 @@ func (r *Remote) newReferenceUpdateRequest(
}
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
if ar.Capabilities.Supports(capability.PushOptions) {
_ = req.Capabilities.Set(capability.PushOptions)
for k, v := range o.Options {
req.Options = append(req.Options, &packp.Option{Key: k, Value: v})
}
}
if o.Atomic && ar.Capabilities.Supports(capability.Atomic) {
_ = req.Capabilities.Set(capability.Atomic)
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil {
return nil, err
}
if o.FollowTags {
if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil {
return nil, err
}
}
return req, nil
}
@ -314,7 +406,11 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
o.RefSpecs = r.c.Fetch
}
s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle)
if err != nil {
return nil, err
}
@ -474,6 +570,7 @@ func (r *Remote) addReferencesToUpdate(
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
prune bool,
forceWithLease *ForceWithLease,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@ -487,7 +584,7 @@ func (r *Remote) addReferencesToUpdate(
return err
}
} else {
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease)
if err != nil {
return err
}
@ -509,20 +606,25 @@ func (r *Remote) addOrUpdateReferences(
refsDict map[string]*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
forceWithLease *ForceWithLease,
) error {
// If it is not a wilcard refspec we can directly search for the reference
// in the references dictionary.
if !rs.IsWildcard() {
ref, ok := refsDict[rs.Src()]
if !ok {
commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src()))
if err == nil {
return r.addCommit(rs, remoteRefs, commit.Hash, req)
}
return nil
}
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
}
for _, ref := range localRefs {
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
if err != nil {
return err
}
@ -569,9 +671,46 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
})
}
// addCommit queues an update of the refspec destination to localCommit, used
// when the refspec source is a raw commit hash rather than a local reference
// (see addOrUpdateReferences). Wildcard refspecs cannot be combined with a
// hash source, since there is no pattern to expand.
func (r *Remote) addCommit(rs config.RefSpec,
	remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash,
	req *packp.ReferenceUpdateRequest) error {

	if rs.IsWildcard() {
		return errors.New("can't use wildcard together with hash refspecs")
	}

	cmd := &packp.Command{
		Name: rs.Dst(""),
		Old:  plumbing.ZeroHash,
		New:  localCommit,
	}

	// If the destination already exists on the remote, record its current
	// hash as the expected old value.
	remoteRef, err := remoteRefs.Reference(cmd.Name)
	if err == nil {
		if remoteRef.Type() != plumbing.HashReference {
			// TODO: check actual git behavior here
			return nil
		}

		cmd.Old = remoteRef.Hash()
	} else if err != plumbing.ErrReferenceNotFound {
		return err
	}
	// Nothing to do when the remote is already at the desired commit.
	if cmd.Old == cmd.New {
		return nil
	}
	// Non-forced pushes must be fast-forwards.
	if !rs.IsForceUpdate() {
		if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
			return err
		}
	}

	req.Commands = append(req.Commands, cmd)
	return nil
}
func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
req *packp.ReferenceUpdateRequest) error {
req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error {
if localRef.Type() != plumbing.HashReference {
return nil
@ -603,7 +742,11 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
if !rs.IsForceUpdate() {
if forceWithLease != nil {
if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil {
return err
}
} else if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
@ -613,6 +756,31 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
// checkForceWithLease verifies that a push guarded by --force-with-lease is
// still safe: the remote-tracking reference that mirrors localRef must point
// at the commit we believe the remote is on (cmd.Old), or at the explicit
// hash given in the lease. Otherwise the update is rejected as a
// non-fast-forward.
func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error {
	remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name)

	// Map e.g. refs/heads/main -> refs/remotes/origin/main.
	// TrimPrefix (rather than a global Replace) so that only a leading
	// "refs/heads/" is stripped; a branch name that happens to contain
	// that substring elsewhere is left intact.
	ref, err := storer.ResolveReference(
		r.s,
		plumbing.ReferenceName(remotePrefix+strings.TrimPrefix(localRef.Name().String(), "refs/heads/")))
	if err != nil {
		return err
	}

	// An empty RefName applies the lease to every pushed ref; otherwise it
	// must match the command being checked.
	if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) {
		expectedOID := ref.Hash()

		// An explicit lease hash overrides the remote-tracking ref.
		if !forceWithLease.Hash.IsZero() {
			expectedOID = forceWithLease.Hash
		}

		if cmd.Old != expectedOID {
			return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
		}
	}

	return nil
}
func (r *Remote) references() ([]*plumbing.Reference, error) {
var localRefs []*plumbing.Reference

View File

@ -56,7 +56,7 @@ var (
ErrWorktreeNotProvided = errors.New("worktree should be provided")
ErrIsBareRepository = errors.New("worktree not available in a bare repository")
ErrUnableToResolveCommit = errors.New("unable to resolve commit")
ErrPackedObjectsNotSupported = errors.New("Packed objects not supported")
ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
)
// Repository represents a git repository
@ -280,6 +280,9 @@ func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem,
pathinfo, err := fs.Stat("/")
if !os.IsNotExist(err) {
if pathinfo == nil {
return nil, nil, err
}
if !pathinfo.IsDir() && detect {
fs = osfs.New(filepath.Dir(path))
}
@ -1547,7 +1550,7 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
}
if c == nil {
return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String())
return &plumbing.ZeroHash, fmt.Errorf("no commit message match regexp: %q", re.String())
}
commit = c

View File

@ -0,0 +1,79 @@
package dotgit
import (
"fmt"
"io"
"os"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/objfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// Compile-time check that EncodedObject implements plumbing.EncodedObject.
var _ (plumbing.EncodedObject) = &EncodedObject{}

// EncodedObject is a lazy reference to a loose object: it records the
// hash, type and size of an object stored in a DotGit directory and only
// reads the content on demand via Reader.
type EncodedObject struct {
	dir *DotGit             // loose-object storage the object lives in
	h   plumbing.Hash       // object hash
	t   plumbing.ObjectType // expected type, verified against the objfile header
	sz  int64               // expected size, verified against the objfile header
}
// Hash returns the hash the object was created with.
func (e *EncodedObject) Hash() plumbing.Hash {
	return e.h
}
// Reader returns an io.ReadCloser streaming the object's content from its
// loose-object file, after validating the objfile header against the
// expected type and size. Closing the returned reader closes both the
// objfile reader and the underlying file.
func (e *EncodedObject) Reader() (io.ReadCloser, error) {
	f, err := e.dir.Object(e.h)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, plumbing.ErrObjectNotFound
		}

		return nil, err
	}

	r, err := objfile.NewReader(f)
	if err != nil {
		// objfile.Reader does not own f; close it here so the file
		// descriptor is not leaked on the error path.
		_ = f.Close()
		return nil, err
	}

	t, size, err := r.Header()
	if err != nil {
		_ = r.Close()
		_ = f.Close()
		return nil, err
	}

	// The header must match the metadata this object was constructed with.
	if t != e.t || size != e.sz {
		_ = r.Close()
		_ = f.Close()
		return nil, objfile.ErrHeader
	}

	return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
// SetType is a no-op: the type is fixed at construction time. It exists only
// to satisfy the plumbing.EncodedObject interface.
func (e *EncodedObject) SetType(plumbing.ObjectType) {}

// Type returns the object type the object was created with.
func (e *EncodedObject) Type() plumbing.ObjectType {
	return e.t
}

// Size returns the object size the object was created with.
func (e *EncodedObject) Size() int64 {
	return e.sz
}

// SetSize is a no-op: the size is fixed at construction time.
func (e *EncodedObject) SetSize(int64) {}
// Writer is not supported: this EncodedObject is a read-only view over an
// existing loose-object file.
func (e *EncodedObject) Writer() (io.WriteCloser, error) {
	return nil, fmt.Errorf("not supported")
}
// NewEncodedObject returns a read-only EncodedObject for the loose object
// with hash h in dir. The given type and size are recorded as-is and later
// validated against the objfile header when Reader is called.
func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject {
	return &EncodedObject{
		dir: dir,
		h:   h,
		t:   t,
		sz:  size,
	}
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"io"
"os"
"sync"
"time"
"github.com/go-git/go-git/v5/plumbing"
@ -204,9 +205,9 @@ func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfi
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
}
return p, s.storePackfileInCache(pack, p)
@ -389,7 +390,6 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return cacheObj, nil
}
obj = s.NewEncodedObject()
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
@ -402,6 +402,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return nil, err
}
if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold {
obj = dotgit.NewEncodedObject(s.dir, h, t, size)
return obj, nil
}
obj = s.NewEncodedObject()
obj.SetType(t)
obj.SetSize(size)
w, err := obj.Writer()
@ -413,10 +420,21 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
s.objectCache.Put(obj)
_, err = io.Copy(w, r)
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(w, r, buf)
copyBufferPool.Put(bufp)
return obj, err
}
var copyBufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}
// Get returns the object with the given hash, by searching for it in
// the packfile.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
@ -595,6 +613,7 @@ func (s *ObjectStorage) buildPackfileIters(
return newPackfileIter(
s.dir.Fs(), pack, t, seen, s.index[h],
s.objectCache, s.options.KeepDescriptors,
s.options.LargeObjectThreshold,
)
},
}, nil
@ -684,6 +703,7 @@ func NewPackfileIter(
idxFile billy.File,
t plumbing.ObjectType,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
idx := idxfile.NewMemoryIndex()
if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@ -695,7 +715,7 @@ func NewPackfileIter(
}
seen := make(map[plumbing.Hash]struct{})
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold)
}
func newPackfileIter(
@ -706,12 +726,13 @@ func newPackfileIter(
index idxfile.Index,
cache cache.Object,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
var p *packfile.Packfile
if cache != nil {
p = packfile.NewPackfileWithCache(index, fs, f, cache)
p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold)
} else {
p = packfile.NewPackfile(index, fs, f)
p = packfile.NewPackfile(index, fs, f, largeObjectThreshold)
}
iter, err := p.GetByType(t)

View File

@ -34,7 +34,7 @@ func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
return err
}
// Shallow return the shallow commits reading from shallo file from .git
// Shallow returns the shallow commits reading from shallo file from .git
func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
f, err := s.dir.Shallow()
if f == nil || err != nil {

View File

@ -34,6 +34,9 @@ type Options struct {
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
// LargeObjectThreshold maximum object size (in bytes) that will be read in to memory.
// If left unset or set to 0 there is no limit
LargeObjectThreshold int64
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.

View File

@ -193,7 +193,7 @@ func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) er
return nil
}
var errNotSupported = fmt.Errorf("Not supported")
var errNotSupported = fmt.Errorf("not supported")
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
return time.Time{}, errNotSupported

View File

@ -55,6 +55,28 @@ func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
return &readCloser{Reader: r, closer: c}
}
type readCloserCloser struct {
io.ReadCloser
closer func() error
}
func (r *readCloserCloser) Close() (err error) {
defer func() {
if err == nil {
err = r.closer()
return
}
_ = r.closer()
}()
return r.ReadCloser.Close()
}
// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReaderCloser` and
// `io.Closer` that ensures that the closer is closed on close
func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser {
return &readCloserCloser{ReadCloser: r, closer: c}
}
type writeCloser struct {
io.Writer
closer io.Closer
@ -82,6 +104,24 @@ func WriteNopCloser(w io.Writer) io.WriteCloser {
return writeNopCloser{w}
}
type readerAtAsReader struct {
io.ReaderAt
offset int64
}
func (r *readerAtAsReader) Read(bs []byte) (int, error) {
n, err := r.ReaderAt.ReadAt(bs, r.offset)
r.offset += int64(n)
return n, err
}
func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader {
return &readerAtAsReader{
ReaderAt: r,
offset: offset,
}
}
// CheckClose calls Close on the given io.Closer. If the given *error points to
// nil, it will be assigned the error returned by Close. Otherwise, any error
// returned by Close will be ignored. CheckClose is usually called with defer.

View File

@ -304,13 +304,38 @@ func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
return nil, err
}
case onlyToRemains:
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
} else {
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
}
}
if err = ii.nextTo(); err != nil {
return nil, err
}
case bothHaveNodes:
if from.Skip() {
if err = ret.AddRecursiveDelete(from); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if err = diffNodes(&ret, ii); err != nil {
return nil, err
}

View File

@ -61,6 +61,10 @@ func (n *node) IsDir() bool {
return n.isDir
}
// Skip always reports false: filesystem nodes are never skipped during
// merkletrie traversal.
func (n *node) Skip() bool {
	return false
}
func (n *node) Children() ([]noder.Noder, error) {
if err := n.calculateChildren(); err != nil {
return nil, err

View File

@ -19,6 +19,7 @@ type node struct {
entry *index.Entry
children []noder.Noder
isDir bool
skip bool
}
// NewRootNode returns the root node of a computed tree from a index.Index,
@ -39,7 +40,7 @@ func NewRootNode(idx *index.Index) noder.Noder {
continue
}
n := &node{path: fullpath}
n := &node{path: fullpath, skip: e.SkipWorktree}
if fullpath == e.Name {
n.entry = e
} else {
@ -58,6 +59,10 @@ func (n *node) String() string {
return n.path
}
// Skip reports whether this node's index entry was flagged with
// skip-worktree (set in NewRootNode), so diffs can ignore it.
func (n *node) Skip() bool {
	return n.skip
}
// Hash the hash of a filesystem is a 24-byte slice, is the result of
// concatenating the computed plumbing.Hash of the file as a Blob and its
// plumbing.FileMode; that way the difftree algorithm will detect changes in the

View File

@ -53,6 +53,7 @@ type Noder interface {
// implement NumChildren in O(1) while Children is usually more
// complex.
NumChildren() (int, error)
Skip() bool
}
// NoChildren represents the children of a noder without children.

View File

@ -15,6 +15,14 @@ import (
// not be used.
type Path []Noder
// Skip reports whether the final noder of the path should be skipped.
// An empty path is never skipped.
func (p Path) Skip() bool {
	if len(p) > 0 {
		return p.Last().Skip()
	}

	return false
}
// String returns the full path of the final noder as a string, using
// "/" as the separator.
func (p Path) String() string {

29
vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package sync
import (
"bufio"
"io"
"sync"
)
var bufioReader = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}
// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool.
// Returns a bufio.Reader that is resetted with reader and ready for use.
//
// After use, the *bufio.Reader should be put back into the sync.Pool
// by calling PutBufioReader.
func GetBufioReader(reader io.Reader) *bufio.Reader {
r := bufioReader.Get().(*bufio.Reader)
r.Reset(reader)
return r
}
// PutBufioReader puts reader back into its sync.Pool.
func PutBufioReader(reader *bufio.Reader) {
bufioReader.Put(reader)
}

51
vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
package sync
import (
"bytes"
"sync"
)
var (
	// byteSlice pools 16 KiB scratch buffers, handed out as *[]byte so the
	// slice header itself does not escape to the heap on every Get/Put.
	byteSlice = sync.Pool{
		New: func() interface{} {
			b := make([]byte, 16*1024)
			return &b
		},
	}
	// bytesBuffer pools *bytes.Buffer values.
	bytesBuffer = sync.Pool{
		New: func() interface{} {
			return bytes.NewBuffer(nil)
		},
	}
)

// GetByteSlice returns a *[]byte that is managed by a sync.Pool.
// The initial slice length will be 16384 (16kb).
//
// After use, the *[]byte should be put back into the sync.Pool
// by calling PutByteSlice.
func GetByteSlice() *[]byte {
	return byteSlice.Get().(*[]byte)
}

// PutByteSlice puts buf back into its sync.Pool.
func PutByteSlice(buf *[]byte) {
	byteSlice.Put(buf)
}

// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool.
// The buffer comes back reset and ready for use.
//
// After use, the *bytes.Buffer should be put back into the sync.Pool
// by calling PutBytesBuffer.
func GetBytesBuffer() *bytes.Buffer {
	b := bytesBuffer.Get().(*bytes.Buffer)
	b.Reset()
	return b
}

// PutBytesBuffer puts buf back into its sync.Pool.
func PutBytesBuffer(buf *bytes.Buffer) {
	bytesBuffer.Put(buf)
}

74
vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package sync
import (
"bytes"
"compress/zlib"
"io"
"sync"
)
// zlibReadCloser is the interface actually implemented by readers returned
// from zlib.NewReader: an io.ReadCloser that can also be Reset.
type zlibReadCloser interface {
	io.ReadCloser
	zlib.Resetter
}

// ZLibReader bundles a pooled zlib reader with the pooled dictionary slice
// it was reset with, so both can be returned to their pools together.
type ZLibReader struct {
	dict   *[]byte
	Reader zlibReadCloser
}

var (
	// zlibInitBytes is a minimal valid zlib stream used to seed pooled
	// readers, since zlib.NewReader needs a readable stream up front.
	zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
	zlibReader    = sync.Pool{
		New: func() interface{} {
			r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
			return ZLibReader{
				Reader: r.(zlibReadCloser),
			}
		},
	}
	zlibWriter = sync.Pool{
		New: func() interface{} {
			return zlib.NewWriter(nil)
		},
	}
)

// GetZlibReader returns a ZLibReader that is managed by a sync.Pool.
// The reader is reset with a dictionary that is also managed by a
// sync.Pool.
//
// After use, the ZLibReader should be put back into the sync.Pool
// by calling PutZlibReader.
func GetZlibReader(r io.Reader) (ZLibReader, error) {
	z := zlibReader.Get().(ZLibReader)
	z.dict = GetByteSlice()
	err := z.Reader.Reset(r, *z.dict)
	return z, err
}

// PutZlibReader puts z back into its sync.Pool, first closing the reader.
// The byte slice dictionary is also put back into its sync.Pool.
func PutZlibReader(z ZLibReader) {
	z.Reader.Close()
	PutByteSlice(z.dict)
	zlibReader.Put(z)
}

// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool.
// The writer is reset to emit to w and is ready for use.
//
// After use, the *zlib.Writer should be put back into the sync.Pool
// by calling PutZlibWriter.
func GetZlibWriter(w io.Writer) *zlib.Writer {
	z := zlibWriter.Get().(*zlib.Writer)
	z.Reset(w)
	return z
}

// PutZlibWriter puts w back into its sync.Pool.
func PutZlibWriter(w *zlib.Writer) {
	zlibWriter.Put(w)
}

View File

@ -9,8 +9,9 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
@ -20,9 +21,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/merkletrie"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -73,6 +72,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
fetchHead, err := remote.fetch(ctx, &FetchOptions{
RemoteName: o.RemoteName,
RemoteURL: o.RemoteURL,
Depth: o.Depth,
Auth: o.Auth,
Progress: o.Progress,
@ -182,6 +182,10 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
return err
}
if len(opts.SparseCheckoutDirectories) > 0 {
return w.ResetSparsely(ro, opts.SparseCheckoutDirectories)
}
return w.Reset(ro)
}
func (w *Worktree) createBranch(opts *CheckoutOptions) error {
@ -262,8 +266,7 @@ func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbin
return w.r.Storer.SetReference(head)
}
// Reset the worktree to a specified state.
func (w *Worktree) Reset(opts *ResetOptions) error {
func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error {
if err := opts.Validate(w.r); err != nil {
return err
}
@ -293,7 +296,7 @@ func (w *Worktree) Reset(opts *ResetOptions) error {
}
if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset {
if err := w.resetIndex(t); err != nil {
if err := w.resetIndex(t, dirs); err != nil {
return err
}
}
@ -307,8 +310,17 @@ func (w *Worktree) Reset(opts *ResetOptions) error {
return nil
}
func (w *Worktree) resetIndex(t *object.Tree) error {
// Reset the worktree to a specified state.
func (w *Worktree) Reset(opts *ResetOptions) error {
return w.ResetSparsely(opts, nil)
}
func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
idx, err := w.r.Storer.Index()
if len(dirs) > 0 {
idx.SkipUnless(dirs)
}
if err != nil {
return err
}
@ -520,12 +532,6 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
return nil
}
var copyBufferPool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
func (w *Worktree) checkoutFile(f *object.File) (err error) {
mode, err := f.Mode.ToOSFileMode()
if err != nil {
@ -549,9 +555,9 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}
defer ioutil.CheckClose(to, &err)
buf := copyBufferPool.Get().([]byte)
_, err = io.CopyBuffer(to, from, buf)
copyBufferPool.Put(buf)
buf := sync.GetByteSlice()
_, err = io.CopyBuffer(to, from, *buf)
sync.PutByteSlice(buf)
return
}

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec))
e.CreatedAt = time.Unix(os.Atimespec.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

View File

@ -2,6 +2,7 @@ package git
import (
"bytes"
"errors"
"path"
"sort"
"strings"
@ -16,6 +17,12 @@ import (
"github.com/go-git/go-billy/v5"
)
var (
// ErrEmptyCommit occurs when a commit is attempted using a clean
// working tree, with no changes to be committed.
ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree")
)
// Commit stores the current contents of the index in a new commit along with
// a log message from the user describing the changes.
func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) {
@ -39,7 +46,7 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error
s: w.r.Storer,
}
tree, err := h.BuildTree(idx)
tree, err := h.BuildTree(idx, opts)
if err != nil {
return plumbing.ZeroHash, err
}
@ -145,7 +152,11 @@ type buildTreeHelper struct {
// BuildTree builds the tree objects and push its to the storer, the hash
// of the root tree is returned.
func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) {
func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) {
if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) {
return plumbing.ZeroHash, ErrEmptyCommit
}
const rootNode = ""
h.trees = map[string]*object.Tree{rootNode: {}}
h.entries = map[string]*object.TreeEntry{}

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec))
e.CreatedAt = time.Unix(os.Ctim.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

View File

@ -270,10 +270,6 @@ func (w *Worktree) Add(path string) (plumbing.Hash, error) {
}
func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
files, err := w.Filesystem.ReadDir(directory)
if err != nil {
return false, err
}
if len(ignorePattern) > 0 {
m := gitignore.NewMatcher(ignorePattern)
matchPath := strings.Split(directory, string(os.PathSeparator))
@ -283,20 +279,13 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string,
}
}
for _, file := range files {
name := path.Join(directory, file.Name())
var a bool
if file.IsDir() {
if file.Name() == GitDirName {
// ignore special git directory
continue
}
a, err = w.doAddDirectory(idx, s, name, ignorePattern)
} else {
a, _, err = w.doAddFile(idx, s, name, ignorePattern)
for name := range s {
if !isPathInDirectory(name, filepath.ToSlash(filepath.Clean(directory))) {
continue
}
var a bool
a, _, err = w.doAddFile(idx, s, name, ignorePattern)
if err != nil {
return
}
@ -309,6 +298,26 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string,
return
}
// isPathInDirectory reports whether the slash-separated path lies inside
// directory (component-wise prefix match). The special directory "."
// matches every path.
func isPathInDirectory(path, directory string) bool {
	dirParts := strings.Split(directory, "/")
	if len(dirParts) == 1 && dirParts[0] == "." {
		return true
	}

	pathParts := strings.Split(path, "/")
	if len(pathParts) < len(dirParts) {
		// A path shorter than the directory cannot be inside it.
		return false
	}

	for i, d := range dirParts {
		if pathParts[i] != d {
			return false
		}
	}

	return true
}
// AddWithOptions file contents to the index, updates the index using the
// current content found in the working tree, to prepare the content staged for
// the next commit.

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec))
e.CreatedAt = time.Unix(os.Atim.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

23
vendor/github.com/pjbgf/sha1cd/Dockerfile.arm generated vendored Normal file
View File

@ -0,0 +1,23 @@
# Cross-compilation image: builds sha1cd for linux/arm (armhf) with cgo enabled.
FROM golang:1.19@sha256:dc76ef03e54c34a00dcdca81e55c242d24b34d231637776c4bb5c1a8e8514253

ENV GOOS=linux
ENV GOARCH=arm
ENV CGO_ENABLED=1
ENV CC=arm-linux-gnueabihf-gcc
ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig

# install build & runtime dependencies (armhf cross toolchain)
RUN dpkg --add-architecture armhf \
	&& apt update \
	&& apt install -y --no-install-recommends \
		upx \
		gcc-arm-linux-gnueabihf \
		libc6-dev-armhf-cross \
		pkg-config \
	&& rm -rf /var/lib/apt/lists/*

COPY . /src/workdir
WORKDIR /src/workdir

RUN go build ./...

23
vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 generated vendored Normal file
View File

@ -0,0 +1,23 @@
# Cross-compilation image: builds sha1cd for linux/arm64 with cgo enabled.
FROM golang:1.19@sha256:dc76ef03e54c34a00dcdca81e55c242d24b34d231637776c4bb5c1a8e8514253

ENV GOOS=linux
ENV GOARCH=arm64
ENV CGO_ENABLED=1
ENV CC=aarch64-linux-gnu-gcc
ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
ENV PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig

# install build & runtime dependencies
RUN dpkg --add-architecture arm64 \
	&& apt update \
	&& apt install -y --no-install-recommends \
		gcc-aarch64-linux-gnu \
		libc6-dev-arm64-cross \
		pkg-config \
	&& rm -rf /var/lib/apt/lists/*

COPY . /src/workdir
WORKDIR /src/workdir

RUN go build ./...

32
vendor/github.com/pjbgf/sha1cd/Makefile generated vendored Normal file
View File

@ -0,0 +1,32 @@
# FUZZ_TIME controls how long `make fuzz` runs (passed to go test -fuzztime).
FUZZ_TIME ?= 1m

export CGO_ENABLED := 1

.PHONY: test
test:
	go test ./...

.PHONY: bench
bench:
	go test -benchmem -run=^$$ -bench ^Benchmark ./...

.PHONY: fuzz
fuzz:
	go test -tags gofuzz -fuzz=. -fuzztime=$(FUZZ_TIME) ./test/

# Cross build project in arm/v7.
.PHONY: build-arm
build-arm:
	docker build -t sha1cd-arm -f Dockerfile.arm .
	docker run --rm sha1cd-arm

# Cross build project in arm64.
.PHONY: build-arm64
build-arm64:
	docker build -t sha1cd-arm64 -f Dockerfile.arm64 .
	docker run --rm sha1cd-arm64

# Build with cgo disabled.
.PHONY: build-nocgo
build-nocgo:
	CGO_ENABLED=0 go build ./cgo

# Run cross-compilation to assure supported architectures.
.PHONY: cross-build
cross-build: build-arm build-arm64 build-nocgo

58
vendor/github.com/pjbgf/sha1cd/README.md generated vendored Normal file
View File

@ -0,0 +1,58 @@
# sha1cd
A Go implementation of SHA1 with counter-cryptanalysis, which detects
collision attacks.
The `cgo/lib` code is a carbon copy of the [original code], based on
the award winning [white paper] by Marc Stevens.
The Go implementation is largely based off Go's generic sha1.
At present no SIMD optimisations have been implemented.
## Usage
`sha1cd` can be used as a drop-in replacement for `crypto/sha1`:
```golang
import "github.com/pjbgf/sha1cd"
func test(){
data := []byte("data to be sha1 hashed")
h := sha1cd.Sum(data)
fmt.Printf("hash: %q\n", hex.EncodeToString(h))
}
```
To obtain information as to whether a collision was found, use the
func `CollisionResistantSum`.
```golang
import "github.com/pjbgf/sha1cd"
func test(){
data := []byte("data to be sha1 hashed")
h, col := sha1cd.CollisionResistantSum(data)
if col {
fmt.Println("collision found!")
}
fmt.Printf("hash: %q", hex.EncodeToString(h))
}
```
Note that the algorithm will automatically avoid collision, by
extending the SHA1 to 240-steps, instead of 80 when a collision
attempt is detected. Therefore, inputs that contains the unavoidable
bit conditions will yield a different hash from `sha1cd`, when compared
with results using `crypto/sha1`. Valid inputs will have matching outputs.
## References
- https://shattered.io/
- https://github.com/cr-marcstevens/sha1collisiondetection
- https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program/Secure-Hashing#shavs
## Use of the Original Implementation
- https://github.com/git/git/commit/28dc98e343ca4eb370a29ceec4c19beac9b5c01e
- https://github.com/libgit2/libgit2/pull/4136
[original code]: https://github.com/cr-marcstevens/sha1collisiondetection
[white paper]: https://marc-stevens.nl/research/papers/C13-S.pdf

102
vendor/github.com/pjbgf/sha1cd/cgo/README.md generated vendored Normal file
View File

@ -0,0 +1,102 @@
# sha1collisiondetection
Library and command line tool to detect SHA-1 collisions in files
Copyright 2017 Marc Stevens <marc@marc-stevens.nl>
Distributed under the MIT Software License.
See accompanying file LICENSE.txt or copy at https://opensource.org/licenses/MIT.
## Developers
- Marc Stevens, CWI Amsterdam (https://marc-stevens.nl)
- Dan Shumow, Microsoft Research (https://www.microsoft.com/en-us/research/people/danshu/)
## About
This library and command line tool were designed as near drop-in replacements for common SHA-1 libraries and sha1sum.
They will compute the SHA-1 hash of any given file and additionally will detect cryptanalytic collision attacks against SHA-1 present in each file. It is very fast and takes less than twice the amount of time as regular SHA-1.
More specifically they will detect any cryptanalytic collision attack against SHA-1 using any of the top 32 SHA-1 disturbance vectors with probability 1:
```
I(43,0), I(44,0), I(45,0), I(46,0), I(47,0), I(48,0), I(49,0), I(50,0), I(51,0), I(52,0),
I(46,2), I(47,2), I(48,2), I(49,2), I(50,2), I(51,2),
II(45,0), II(46,0), II(47,0), II(48,0), II(49,0), II(50,0), II(51,0), II(52,0), II(53,0), II(54,0), II(55,0), II(56,0),
II(46,2), II(49,2), II(50,2), II(51,2)
```
The possibility of false positives can be neglected as the probability is smaller than 2^-90.
The library supports both an indicator flag that applications can check and act on, as well as a special _safe-hash_ mode that returns the real SHA-1 hash when no collision was detected and a different _safe_ hash when a collision was detected.
Colliding files will have the same SHA-1 hash, but will have different unpredictable safe-hashes.
This essentially enables protection of applications against SHA-1 collisions with no further changes in the application, e.g., digital signature forgeries based on SHA-1 collisions automatically become invalid.
For the theoretical explanation of collision detection see the award-winning paper on _Counter-Cryptanalysis_:
Counter-cryptanalysis, Marc Stevens, CRYPTO 2013, Lecture Notes in Computer Science, vol. 8042, Springer, 2013, pp. 129-146,
https://marc-stevens.nl/research/papers/C13-S.pdf
## Inclusion in other programs
In order to make it easier to include these sources in other project
there are several preprocessor macros that the code uses. Rather than
copy/pasting and customizing or specializing the code, first see if
setting any of these defines appropriately will allow you to avoid
modifying the code yourself.
- SHA1DC_NO_STANDARD_INCLUDES
Skips including standard headers. Use this if your project for
whatever reason wishes to do its own header includes.
- SHA1DC_CUSTOM_INCLUDE_SHA1_C
Includes a custom header at the top of sha1.c. Usually this would be
set in conjunction with SHA1DC_NO_STANDARD_INCLUDES to point to a
header file which includes various standard headers.
- SHA1DC_INIT_SAFE_HASH_DEFAULT
Sets the default for safe_hash in SHA1DCInit(). Valid values are 0
and 1. If unset 1 is the default.
- SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C
Includes a custom trailer in sha1.c. Useful for any extra utility
functions that make use of the functions already defined in sha1.c.
- SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
Includes a custom trailer in sha1.h. Useful for defining the
prototypes of the functions or code included by
SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_C.
- SHA1DC_CUSTOM_INCLUDE_UBC_CHECK_C
Includes a custom header at the top of ubc_check.c.
- SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_C
Includes a custom trailer in ubc_check.c.
- SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
Includes a custom trailer in ubc_check.H.
This code will try to auto-detect certain things based on
CPU/platform. Unless you're running on some really obscure CPU or
porting to a new platform you should not need to tweak this. If you do
please open an issue at
https://github.com/cr-marcstevens/sha1collisiondetection
- SHA1DC_FORCE_LITTLEENDIAN / SHA1DC_FORCE_BIGENDIAN
    Override the check for processor endianness and force either
Little-Endian or Big-Endian.
- SHA1DC_FORCE_UNALIGNED_ACCESS
Permit unaligned access. This will fail on e.g. SPARC processors, so
it's only permitted on a whitelist of processors. If your CPU isn't
detected as allowing this, and allows unaligned access, setting this
may improve performance (or make it worse, if the kernel has to
catch and emulate such access on its own).

32
vendor/github.com/pjbgf/sha1cd/cgo/fallback_no_cgo.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
//go:build !cgo
// +build !cgo
package cgo
import (
"hash"
"github.com/pjbgf/sha1cd"
"github.com/pjbgf/sha1cd/ubc"
)
// CalculateDvMask falls back to the github.com/pjbgf/sha1cd/ubc implementation
// due to CGO being disabled at compilation time.
func CalculateDvMask(W []uint32) (uint32, error) {
	return ubc.CalculateDvMask(W)
}

// New falls back to the github.com/pjbgf/sha1cd implementation
// due to CGO being disabled at compilation time.
func New() hash.Hash {
	return sha1cd.New()
}

// Sum falls back to the github.com/pjbgf/sha1cd implementation
// due to CGO being disabled at compilation time.
func Sum(data []byte) ([]byte, bool) {
	d := sha1cd.New().(sha1cd.CollisionResistantHash)
	d.Write(data)
	return d.CollisionResistantSum(nil)
}

2144
vendor/github.com/pjbgf/sha1cd/cgo/sha1.c generated vendored Normal file

File diff suppressed because it is too large Load Diff

78
vendor/github.com/pjbgf/sha1cd/cgo/sha1.go generated vendored Normal file
View File

@ -0,0 +1,78 @@
package cgo
// #include <sha1.h>
// #include <ubc_check.h>
import "C"
import (
"crypto"
"hash"
"unsafe"
)
const (
	// Size is the size of a SHA-1 checksum in bytes.
	Size = 20
	// BlockSize is the block size of SHA-1 in bytes.
	BlockSize = 64
)

// init registers this collision-detecting implementation as the provider
// for crypto.SHA1.
func init() {
	crypto.RegisterHash(crypto.SHA1, New)
}

// New returns a hash.Hash computing SHA-1 with collision detection,
// backed by the C sha1collisiondetection library via cgo.
func New() hash.Hash {
	d := new(digest)
	d.Reset()
	return d
}

// digest wraps the C SHA1_CTX state.
type digest struct {
	ctx C.SHA1_CTX
}

// sum finalizes the context and returns the Size-byte digest plus a flag
// that is true when a collision attack was detected (SHA1DCFinal != 0).
// Finalizing mutates the context, so callers operate on a copy of d.
func (d *digest) sum() ([]byte, bool) {
	b := make([]byte, Size)
	c := C.SHA1DCFinal((*C.uchar)(unsafe.Pointer(&b[0])), &d.ctx)
	if c != 0 {
		return b, true
	}
	return b, false
}

// Sum appends the current hash to in and returns the resulting slice,
// discarding the collision flag.
func (d *digest) Sum(in []byte) []byte {
	d0 := *d // use a copy of d to avoid race conditions.
	h, _ := d0.sum()
	return append(in, h...)
}

// CollisionResistantSum is like Sum but also reports whether a collision
// attack was detected while hashing.
func (d *digest) CollisionResistantSum(in []byte) ([]byte, bool) {
	d0 := *d // use a copy of d to avoid race conditions.
	h, c := d0.sum()
	return append(in, h...), c
}

// Reset re-initializes the underlying C context.
func (d *digest) Reset() {
	C.SHA1DCInit(&d.ctx)
}

func (d *digest) Size() int { return Size }

func (d *digest) BlockSize() int { return BlockSize }

// Sum returns the collision-resistant SHA-1 of data and whether a
// collision attack was detected.
func Sum(data []byte) ([]byte, bool) {
	d := New().(*digest)
	d.Write(data)
	return d.sum()
}

// Write adds more data to the running hash. It never returns an error.
func (d *digest) Write(p []byte) (nn int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	data := (*C.char)(unsafe.Pointer(&p[0]))
	C.SHA1DCUpdate(&d.ctx, data, (C.size_t)(len(p)))
	return len(p), nil
}

114
vendor/github.com/pjbgf/sha1cd/cgo/sha1.h generated vendored Normal file
View File

@ -0,0 +1,114 @@
/***
* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
* Distributed under the MIT Software License.
* See accompanying file LICENSE.txt or copy at
* https://opensource.org/licenses/MIT
***/
// Originally from: https://github.com/cr-marcstevens/sha1collisiondetection
#ifndef SHA1DC_SHA1_H
#define SHA1DC_SHA1_H

#if defined(__cplusplus)
extern "C"
{
#endif

#ifndef SHA1DC_NO_STANDARD_INCLUDES
#include <stdint.h>
#endif

/* sha-1 compression function that takes an already expanded message, and additionally store intermediate states */
/* only stores states ii (the state between step ii-1 and step ii) when DOSTORESTATEii is defined in ubc_check.h */
void sha1_compression_states(uint32_t[5], const uint32_t[16], uint32_t[80], uint32_t[80][5]);

/*
// Function type for sha1_recompression_step_T (uint32_t ihvin[5], uint32_t ihvout[5], const uint32_t me2[80], const uint32_t state[5]).
// Where 0 <= T < 80
// me2 is an expanded message (the expansion of an original message block XOR'ed with a disturbance vector's message block difference.)
// state is the internal state (a,b,c,d,e) before step T of the SHA-1 compression function while processing the original message block.
// The function will return:
// ihvin: The reconstructed input chaining value.
// ihvout: The reconstructed output chaining value.
*/
typedef void (*sha1_recompression_type)(uint32_t *, uint32_t *, const uint32_t *, const uint32_t *);

/* A callback function type that can be set to be called when a collision block has been found: */
/* void collision_block_callback(uint64_t byteoffset, const uint32_t ihvin1[5], const uint32_t ihvin2[5], const uint32_t m1[80], const uint32_t m2[80]) */
typedef void (*collision_block_callback)(uint64_t, const uint32_t *, const uint32_t *, const uint32_t *, const uint32_t *);

/* The SHA-1 context. */
typedef struct
{
uint64_t total;                    /* total number of message bytes processed so far */
uint32_t ihv[5];                   /* current intermediate hash value */
unsigned char buffer[64];          /* buffered partial input block */
int found_collision;               /* non-zero once a collision block has been detected */
int safe_hash;                     /* enables safe hashing (see SHA1DCSetSafeHash below) */
int detect_coll;                   /* enables collision detection (see SHA1DCSetUseDetectColl) */
int ubc_check;                     /* enables the unavoidable-bitcondition speed-up (see SHA1DCSetUseUBC) */
int reduced_round_coll;            /* enables detection of reduced-round collisions (see SHA1DCSetDetectReducedRoundCollision) */
collision_block_callback callback; /* optional callback invoked on a detected collision block (see SHA1DCSetCallback) */

uint32_t ihv1[5];                  /* scratch chaining value used by the recompression check */
uint32_t ihv2[5];                  /* scratch chaining value used by the recompression check */
uint32_t m1[80];                   /* expanded message block of the message being hashed */
uint32_t m2[80];                   /* expanded message XORed with a DV message difference */
uint32_t states[80][5];            /* stored per-step compression states used for recompression */
} SHA1_CTX;

/* Initialize SHA-1 context. */
void SHA1DCInit(SHA1_CTX *);

/*
Function to enable safe SHA-1 hashing:
Collision attacks are thwarted by hashing a detected near-collision block 3 times.
Think of it as extending SHA-1 from 80-steps to 240-steps for such blocks:
The best collision attacks against SHA-1 have complexity about 2^60,
thus for 240-steps an immediate lower-bound for the best cryptanalytic attacks would be 2^180.
An attacker would be better off using a generic birthday search of complexity 2^80.
Enabling safe SHA-1 hashing will result in the correct SHA-1 hash for messages where no collision attack was detected,
but it will result in a different SHA-1 hash for messages where a collision attack was detected.
This will automatically invalidate SHA-1 based digital signature forgeries.
Enabled by default.
*/
void SHA1DCSetSafeHash(SHA1_CTX *, int);

/*
Function to disable or enable the use of Unavoidable Bitconditions (provides a significant speed up).
Enabled by default
*/
void SHA1DCSetUseUBC(SHA1_CTX *, int);

/*
Function to disable or enable the use of Collision Detection.
Enabled by default.
*/
void SHA1DCSetUseDetectColl(SHA1_CTX *, int);

/* function to disable or enable the detection of reduced-round SHA-1 collisions */
/* disabled by default */
void SHA1DCSetDetectReducedRoundCollision(SHA1_CTX *, int);

/* function to set a callback function, pass NULL to disable */
/* by default no callback set */
void SHA1DCSetCallback(SHA1_CTX *, collision_block_callback);

/* update SHA-1 context with buffer contents */
/* NOTE(review): size_t is used but <stddef.h> is not included here; presumably
   the including translation unit provides it -- confirm against upstream. */
void SHA1DCUpdate(SHA1_CTX *, const char *, size_t);

/* obtain SHA-1 hash from SHA-1 context */
/* returns: 0 = no collision detected, otherwise = collision found => warn user for active attack */
int SHA1DCFinal(unsigned char[20], SHA1_CTX *);

#if defined(__cplusplus)
}
#endif

#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
#include SHA1DC_CUSTOM_TRAILING_INCLUDE_SHA1_H
#endif

#endif

297
vendor/github.com/pjbgf/sha1cd/cgo/ubc_check.c generated vendored Normal file

File diff suppressed because one or more lines are too long

28
vendor/github.com/pjbgf/sha1cd/cgo/ubc_check.go generated vendored Normal file
View File

@ -0,0 +1,28 @@
// Package cgo provides a cgo-backed implementation of the unavoidable
// bitcondition (ubc) check from the sha1collisiondetection project.
package cgo

// #include <ubc_check.h>
// #include <stdlib.h>
//
// uint32_t check(const uint32_t W[80])
// {
//   uint32_t ubc_dv_mask[DVMASKSIZE] = {(uint32_t)(0xFFFFFFFF)};
//   ubc_check(W, ubc_dv_mask);
//   return ubc_dv_mask[0];
// }
import "C"

import (
	"fmt"
	"unsafe"
)

// CalculateDvMask takes as input an expanded message block and verifies the unavoidable
// bitconditions for all listed DVs. It returns a dvmask where each bit belonging to a DV
// is set if all unavoidable bitconditions for that DV have been met.
// Thus, one needs to do the recompression check for each DV that has its bit set.
func CalculateDvMask(W []uint32) (uint32, error) {
	// NOTE(review): the check accepts any len(W) >= 80 (only the first 80
	// words are read by the C side), though the message says "must be 80".
	if len(W) < 80 {
		return 0, fmt.Errorf("invalid input: len(W) must be 80, was %d", len(W))
	}

	// Pass a pointer to the slice's backing array; C reads exactly 80 words.
	return uint32(C.check((*C.uint32_t)(unsafe.Pointer(&W[0])))), nil
}

64
vendor/github.com/pjbgf/sha1cd/cgo/ubc_check.h generated vendored Normal file
View File

@ -0,0 +1,64 @@
/***
* Copyright 2017 Marc Stevens <marc@marc-stevens.nl>, Dan Shumow <danshu@microsoft.com>
* Distributed under the MIT Software License.
* See accompanying file LICENSE.txt or copy at
* https://opensource.org/licenses/MIT
***/
// Originally from: https://github.com/cr-marcstevens/sha1collisiondetection
/*
// this file was generated by the 'parse_bitrel' program in the tools section
// using the data files from directory 'tools/data/3565'
//
// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) to check
// dvType, dvK and dvB define the DV: I(K,B) or II(K,B) (see the paper)
// dm[80] is the expanded message block XOR-difference defined by the DV
// testt is the step to do the recompression from for collision detection
// maski and maskb define the bit to check for each DV in the dvmask returned by ubc_check
//
// ubc_check takes as input an expanded message block and verifies the unavoidable bitconditions for all listed DVs
// it returns a dvmask where each bit belonging to a DV is set if all unavoidable bitconditions for that DV have been met
// thus one needs to do the recompression check for each DV that has its bit set
*/
#ifndef SHA1DC_UBC_CHECK_H
#define SHA1DC_UBC_CHECK_H

#if defined(__cplusplus)
extern "C"
{
#endif

#ifndef SHA1DC_NO_STANDARD_INCLUDES
#include <stdint.h>
#endif

/* number of 32-bit words in the dvmask returned by ubc_check */
#define DVMASKSIZE 1

/* Description of one disturbance vector; see the generated prose above. */
typedef struct
{
int dvType;     /* DV class: I(K,B) or II(K,B) */
int dvK;        /* DV parameter K */
int dvB;        /* DV parameter B */
int testt;      /* step to recompress from for collision detection */
int maski;      /* word index of this DV's bit in the dvmask */
int maskb;      /* bit index of this DV's bit in the dvmask */
uint32_t dm[80]; /* expanded message block XOR-difference for this DV */
} dv_info_t;

/* table of DVs to check, defined in the generated ubc_check.c */
extern dv_info_t sha1_dvs[];

void ubc_check(const uint32_t W[80], uint32_t dvmask[DVMASKSIZE]);

/* intermediate states to store during compression (the testt values above) */
#define DOSTORESTATE58
#define DOSTORESTATE65

/* non-zero when at least one DV passed its unavoidable bitconditions */
#define CHECK_DVMASK(_DVMASK) (0 != _DVMASK[0])

#if defined(__cplusplus)
}
#endif

#ifdef SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
#include SHA1DC_CUSTOM_TRAILING_INCLUDE_UBC_CHECK_H
#endif

#endif

11
vendor/github.com/pjbgf/sha1cd/detection.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package sha1cd

import "hash"

// CollisionResistantHash is a hash.Hash that can additionally report whether
// a collision attack against the computed digest was detected while hashing.
type CollisionResistantHash interface {
	// CollisionResistantSum extends on Sum by returning an additional boolean
	// which indicates whether a collision was found during the hashing process.
	CollisionResistantSum(b []byte) ([]byte, bool)

	hash.Hash
}

273
vendor/github.com/pjbgf/sha1cd/sha1block.go generated vendored Normal file
View File

@ -0,0 +1,273 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Originally from: https://github.com/go/blob/master/src/crypto/sha1/sha1block.go
package sha1cd
import (
"math/bits"
"github.com/pjbgf/sha1cd/ubc"
)
const (
	// msize is the number of 32-bit words in a fully expanded SHA-1 message block.
	msize = 80

	// SHA-1 round constants, one per 20-step round.
	_K0 = 0x5A827999
	_K1 = 0x6ED9EBA1
	_K2 = 0x8F1BBCDC
	_K3 = 0xCA62C1D6
)
// TODO: Implement SIMD support.

// block feeds full chunks of p through the (collision-detecting) SHA-1
// compression function. Currently always dispatches to the generic Go path.
func block(dig *digest, p []byte) {
	blockGeneric(dig, p)
}
// blockGeneric is a portable, pure Go version of the SHA-1 block step.
// It's used by sha1block_generic.go and tests.
//
// Besides the standard compression, it records per chunk the expanded
// message (m1) and the per-step compression states (dig.cs), which feed the
// disturbance-vector recompression check after each chunk.
func blockGeneric(dig *digest, p []byte) {
	var w [16]uint32

	h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
	for len(p) >= chunk {
		// m1 collects the full 80-word expanded message for this chunk.
		m1 := make([]uint32, msize)
		// bcol marks whether this particular chunk triggered a collision.
		bcol := false

		// Can interlace the computation of w with the
		// rounds below if needed for speed.
		for i := 0; i < 16; i++ {
			j := i * 4
			w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
		}

		a, b, c, d, e := h0, h1, h2, h3, h4

		// Each of the four 20-iteration rounds
		// differs only in the computation of f and
		// the choice of K (_K0, _K1, etc).
		i := 0
		for ; i < 16; i++ {
			// Store pre-step compression state for the collision detection.
			dig.cs[i] = [5]uint32{a, b, c, d, e}

			f := b&c | (^b)&d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K0
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d

			// Store compression state for the collision detection.
			m1[i] = w[i&0xf]
		}
		for ; i < 20; i++ {
			// Store pre-step compression state for the collision detection.
			dig.cs[i] = [5]uint32{a, b, c, d, e}

			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = tmp<<1 | tmp>>(32-1)

			f := b&c | (^b)&d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K0
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d

			// Store compression state for the collision detection.
			m1[i] = w[i&0xf]
		}
		for ; i < 40; i++ {
			// Store pre-step compression state for the collision detection.
			dig.cs[i] = [5]uint32{a, b, c, d, e}

			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = tmp<<1 | tmp>>(32-1)

			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K1
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d

			// Store compression state for the collision detection.
			m1[i] = w[i&0xf]
		}
		for ; i < 60; i++ {
			// Store pre-step compression state for the collision detection.
			dig.cs[i] = [5]uint32{a, b, c, d, e}

			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = tmp<<1 | tmp>>(32-1)

			f := ((b | c) & d) | (b & c)
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K2
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d

			// Store compression state for the collision detection.
			m1[i] = w[i&0xf]
		}
		for ; i < 80; i++ {
			// Store pre-step compression state for the collision detection.
			dig.cs[i] = [5]uint32{a, b, c, d, e}

			tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
			w[i&0xf] = tmp<<1 | tmp>>(32-1)

			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + _K3
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d

			// Store compression state for the collision detection.
			m1[i] = w[i&0xf]
		}

		h0 += a
		h1 += b
		h2 += c
		h3 += d
		h4 += e

		// Only DVs whose unavoidable bitconditions hold for this message
		// need the (expensive) recompression check.
		if mask, err := ubc.CalculateDvMask(m1); err == nil && mask != 0 {
			dvs := ubc.SHA1_dvs()

			for i := 0; dvs[i].DvType != 0; i++ {
				if (mask & ((uint32)(1) << uint32(dvs[i].MaskB))) != 0 {
					// Build the second message by applying the DV's message
					// difference, then recompress from the DV's test step.
					for j := 0; j < msize; j++ {
						dig.m2[j] = m1[j] ^ dvs[i].Dm[j]
					}

					recompressionStep(dvs[i].TestT, &dig.ihv2, &dig.ihvtmp, dig.m2, dig.cs[dvs[i].TestT])

					// A collision is flagged when the recompressed IHV equals
					// the IHV produced by the original compression.
					if 0 == ((dig.ihvtmp[0] ^ h0) | (dig.ihvtmp[1] ^ h1) |
						(dig.ihvtmp[2] ^ h2) | (dig.ihvtmp[3] ^ h3) | (dig.ihvtmp[4] ^ h4)) {
						dig.col = true
						bcol = true
					}
				}
			}
		}

		// Collision attacks are thwarted by hashing a detected near-collision block 3 times.
		// Think of it as extending SHA-1 from 80-steps to 240-steps for such blocks:
		// The best collision attacks against SHA-1 have complexity about 2^60,
		// thus for 240-steps an immediate lower-bound for the best cryptanalytic attacks would be 2^180.
		// An attacker would be better off using a generic birthday search of complexity 2^80.
		if bcol {
			for j := 0; j < 2; j++ {
				a, b, c, d, e := h0, h1, h2, h3, h4

				i := 0
				for ; i < 20; i++ {
					f := b&c | (^b)&d
					t := bits.RotateLeft32(a, 5) + f + e + m1[i] + _K0
					a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
				}
				for ; i < 40; i++ {
					f := b ^ c ^ d
					t := bits.RotateLeft32(a, 5) + f + e + m1[i] + _K1
					a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
				}
				for ; i < 60; i++ {
					f := ((b | c) & d) | (b & c)
					t := bits.RotateLeft32(a, 5) + f + e + m1[i] + _K2
					a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
				}
				for ; i < 80; i++ {
					f := b ^ c ^ d
					t := bits.RotateLeft32(a, 5) + f + e + m1[i] + _K3
					a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
				}

				h0 += a
				h1 += b
				h2 += c
				h3 += d
				h4 += e
			}
		}

		p = p[chunk:]
	}

	dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
}
// recompressionStep reconstructs the chaining values that the alternate
// message m2 would have produced, given the known compression state at the
// given step. It first walks the rounds backwards from step 79 down to 0
// (undoing only steps >= step) to recover the input chaining value (ihvin),
// then recompresses forwards from step to 79 to obtain the output chaining
// value (ihvout).
func recompressionStep(step int, ihvin, ihvout *[5]uint32, m2 [msize]uint32, state [5]uint32) {
	a, b, c, d, e := state[0], state[1], state[2], state[3], state[4]

	// Walk backwards from current step to undo previous compression.
	// Each backward round inverts the corresponding forward round:
	// un-rotate b and subtract the step's f/K/message contribution from e.
	for i := 79; i >= 60; i-- {
		a, b, c, d, e = b, c, d, e, a
		if step > i {
			b = bits.RotateLeft32(b, -30)
			f := b ^ c ^ d
			e -= bits.RotateLeft32(a, 5) + f + _K3 + m2[i]
		}
	}
	for i := 59; i >= 40; i-- {
		a, b, c, d, e = b, c, d, e, a
		if step > i {
			b = bits.RotateLeft32(b, -30)
			f := ((b | c) & d) | (b & c)
			e -= bits.RotateLeft32(a, 5) + f + _K2 + m2[i]
		}
	}
	for i := 39; i >= 20; i-- {
		a, b, c, d, e = b, c, d, e, a
		if step > i {
			b = bits.RotateLeft32(b, -30)
			f := b ^ c ^ d
			e -= bits.RotateLeft32(a, 5) + f + _K1 + m2[i]
		}
	}
	for i := 19; i >= 0; i-- {
		a, b, c, d, e = b, c, d, e, a
		if step > i {
			b = bits.RotateLeft32(b, -30)
			f := b&c | (^b)&d
			e -= bits.RotateLeft32(a, 5) + f + _K0 + m2[i]
		}
	}

	// Recovered input chaining value.
	ihvin[0] = a
	ihvin[1] = b
	ihvin[2] = c
	ihvin[3] = d
	ihvin[4] = e

	// Restart from the known state and recompress forwards from step.
	a = state[0]
	b = state[1]
	c = state[2]
	d = state[3]
	e = state[4]

	// Recompress blocks based on the current step.
	for i := 0; i < 20; i++ {
		if step <= i {
			f := b&c | (^b)&d
			t := bits.RotateLeft32(a, 5) + f + e + _K0 + m2[i]
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
	}
	for i := 20; i < 40; i++ {
		if step <= i {
			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + _K1 + m2[i]
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
	}
	for i := 40; i < 60; i++ {
		if step <= i {
			f := ((b | c) & d) | (b & c)
			t := bits.RotateLeft32(a, 5) + f + e + _K2 + m2[i]
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
	}
	for i := 60; i < 80; i++ {
		if step <= i {
			f := b ^ c ^ d
			t := bits.RotateLeft32(a, 5) + f + e + _K3 + m2[i]
			a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
		}
	}

	// Output chaining value: Davies-Meyer feed-forward of ihvin.
	ihvout[0] = ihvin[0] + a
	ihvout[1] = ihvin[1] + b
	ihvout[2] = ihvin[2] + c
	ihvout[3] = ihvin[3] + d
	ihvout[4] = ihvin[4] + e
}

328
vendor/github.com/pjbgf/sha1cd/sha1cd.go generated vendored Normal file
View File

@ -0,0 +1,328 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha1cd implements collision detection based on the whitepaper
// Counter-cryptanalysis from Marc Stevens. The original ubc implementation
// was done by Marc Stevens and Dan Shumow, and can be found at:
// https://github.com/cr-marcstevens/sha1collisiondetection
package sha1cd
// This SHA1 implementation is based on Go's generic SHA1.
// Original: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1.go
import (
"crypto"
"encoding/binary"
"errors"
"hash"
)
// init registers New as the constructor for crypto.SHA1, so code resolving
// the hash via crypto.SHA1.New() can pick up this collision-detecting
// implementation.
func init() {
	crypto.RegisterHash(crypto.SHA1, New)
}
// The size of a SHA-1 checksum in bytes.
const Size = 20

// The blocksize of SHA-1 in bytes.
const BlockSize = 64

const (
	// chunk is the number of input bytes consumed per compression call.
	chunk = 64

	// SHA-1 initial hash values.
	init0 = 0x67452301
	init1 = 0xEFCDAB89
	init2 = 0x98BADCFE
	init3 = 0x10325476
	init4 = 0xC3D2E1F0
)

// digest represents the partial evaluation of a checksum.
type digest struct {
	// h is the current intermediate hash value.
	h [5]uint32
	// x buffers input bytes until a full chunk is available.
	x [chunk]byte
	// nx is the number of buffered bytes in x.
	nx int
	// len is the total number of bytes written so far.
	len uint64

	// col defines whether a collision has been found.
	col bool
	// cs stores the compression state for each of the SHA1's 80 steps.
	cs map[int][5]uint32
	// m2 is a secondary message created XORing with ubc's DM prior to the SHA recompression step.
	m2 [msize]uint32
	// ihv2 is an Intermediary Hash Value created during the SHA recompression step.
	ihv2 [5]uint32
	// ihvtmp is an Intermediary Hash Value created during the SHA recompression step.
	ihvtmp [5]uint32
}

const (
	// magic identifies a marshaled sha1cd state (see MarshalBinary).
	magic = "shacd\x01"
	// marshaledSize is magic + 5 hash words + buffered chunk + 8-byte length.
	// NOTE(review): the collision-detection fields (col, cs, m2, ihv2,
	// ihvtmp) are not part of the marshaled state.
	marshaledSize = len(magic) + 5*4 + chunk + 8
)
// MarshalBinary encodes the hash state as: magic | h[0..4] (big endian) |
// buffered input (zero-padded to chunk bytes) | total length in bytes.
// Collision-detection bookkeeping is not serialized.
func (d *digest) MarshalBinary() ([]byte, error) {
	out := make([]byte, 0, marshaledSize)
	out = append(out, magic...)
	for _, word := range d.h {
		out = appendUint32(out, word)
	}
	out = append(out, d.x[:d.nx]...)
	out = out[:len(out)+len(d.x)-d.nx] // remaining buffer region is already zero
	out = appendUint64(out, d.len)
	return out, nil
}
// appendUint32 appends v to b in big-endian byte order and returns the
// extended slice.
func appendUint32(b []byte, v uint32) []byte {
	var tmp [4]byte
	binary.BigEndian.PutUint32(tmp[:], v)
	return append(b, tmp[:]...)
}
// appendUint64 appends v to b in big-endian byte order and returns the
// extended slice.
func appendUint64(b []byte, v uint64) []byte {
	var tmp [8]byte
	binary.BigEndian.PutUint64(tmp[:], v)
	return append(b, tmp[:]...)
}
// UnmarshalBinary restores the hash state written by MarshalBinary after
// validating the magic prefix and the total encoded size.
func (d *digest) UnmarshalBinary(b []byte) error {
	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
		return errors.New("crypto/sha1: invalid hash state identifier")
	}
	if len(b) != marshaledSize {
		return errors.New("crypto/sha1: invalid hash state size")
	}

	rest := b[len(magic):]
	for i := range d.h {
		rest, d.h[i] = consumeUint32(rest)
	}
	rest = rest[copy(d.x[:], rest):]
	_, d.len = consumeUint64(rest)

	// Derive the buffered-byte count from the total length.
	d.nx = int(d.len % chunk)
	return nil
}
// consumeUint64 decodes a big-endian uint64 from the front of b and returns
// the remaining bytes alongside the decoded value.
func consumeUint64(b []byte) ([]byte, uint64) {
	return b[8:], binary.BigEndian.Uint64(b)
}
// consumeUint32 decodes a big-endian uint32 from the front of b and returns
// the remaining bytes alongside the decoded value.
func consumeUint32(b []byte) ([]byte, uint32) {
	return b[4:], binary.BigEndian.Uint32(b)
}
// Reset restores the digest to its initial state, including all
// collision-detection bookkeeping.
func (d *digest) Reset() {
	d.h = [5]uint32{init0, init1, init2, init3, init4}
	d.nx = 0
	d.len = 0

	d.col = false
	d.ihv2 = [5]uint32{}
	// Seed values for the temporary intermediate hash value, matching the
	// original element-by-element initialization.
	d.ihvtmp = [5]uint32{0xD5, 0x394, 0x8152A8, 0x0, 0xA7ECE0}
	d.m2 = [msize]uint32{}
	for k := range d.cs {
		delete(d.cs, k)
	}
}
// New returns a new hash.Hash computing the SHA1 checksum. The Hash also
// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
// marshal and unmarshal the internal state of the hash.
func New() hash.Hash {
	d := &digest{
		cs: map[int][5]uint32{},
		m2: [msize]uint32{},
	}
	d.Reset()
	return d
}
// Size returns the length of a SHA-1 checksum in bytes.
func (d *digest) Size() int { return Size }

// BlockSize returns the SHA-1 block size in bytes.
func (d *digest) BlockSize() int { return BlockSize }
// Write adds p to the running hash, compressing full chunks as they become
// available and buffering any remainder. It implements io.Writer and never
// returns a non-nil error.
func (d *digest) Write(p []byte) (nn int, err error) {
	if len(p) == 0 {
		return
	}

	nn = len(p)
	d.len += uint64(nn)

	// Top up a partially filled buffer first.
	if d.nx > 0 {
		n := copy(d.x[d.nx:], p)
		d.nx += n
		if d.nx == chunk {
			block(d, d.x[:])
			d.nx = 0
		}

		p = p[n:]
	}
	// Compress as many whole chunks as possible directly from p.
	if len(p) >= chunk {
		n := len(p) &^ (chunk - 1)
		block(d, p[:n])
		p = p[n:]
	}
	// Buffer the tail for the next Write or the final padding.
	if len(p) > 0 {
		d.nx = copy(d.x[:], p)
	}

	return
}
// Sum appends the current hash to in and returns the resulting slice without
// disturbing the underlying hash state.
func (d *digest) Sum(in []byte) []byte {
	// Finalize a copy so the caller can keep writing and summing.
	dup := *d
	sum := dup.checkSum()
	return append(in, sum[:]...)
}
// checkSum finalizes the hash: it appends the 0x80 separator and zero
// padding so the total is 56 mod 64 bytes, writes the message length in
// bits, and serializes the resulting hash words big endian.
func (d *digest) checkSum() [Size]byte {
	msgLen := d.len

	// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
	var pad [64]byte
	pad[0] = 0x80
	if rem := msgLen % 64; rem < 56 {
		d.Write(pad[0 : 56-rem])
	} else {
		d.Write(pad[0 : 64+56-rem])
	}

	// Length in bits, big endian.
	binary.BigEndian.PutUint64(pad[:], msgLen<<3)
	d.Write(pad[0:8])

	if d.nx != 0 {
		panic("d.nx != 0")
	}

	var out [Size]byte
	for i, word := range d.h {
		binary.BigEndian.PutUint32(out[i*4:], word)
	}
	return out
}
// ConstantTimeSum computes the same result of Sum() but in constant time
func (d *digest) ConstantTimeSum(in []byte) ([]byte, error) {
	// Finalize a copy so the caller can keep writing and summing.
	dup := *d
	sum, err := dup.constSum()
	if err != nil {
		return nil, err
	}
	return append(in, sum[:]...), nil
}
// constSum finalizes the hash without data-dependent branching: both
// candidate final blocks are always compressed, and byte masks select which
// results are kept, so the work performed does not depend on how many bytes
// are buffered.
func (d *digest) constSum() ([Size]byte, error) {
	var length [8]byte
	l := d.len << 3
	for i := uint(0); i < 8; i++ {
		length[i] = byte(l >> (56 - 8*i))
	}

	nx := byte(d.nx)
	t := nx - 56                 // if nx < 56 then the MSB of t is one
	mask1b := byte(int8(t) >> 7) // mask1b is 0xFF iff one block is enough

	separator := byte(0x80) // gets reset to 0x00 once used
	for i := byte(0); i < chunk; i++ {
		mask := byte(int8(i-nx) >> 7) // 0x00 after the end of data

		// if we reached the end of the data, replace with 0x80 or 0x00
		d.x[i] = (^mask & separator) | (mask & d.x[i])

		// zero the separator once used
		separator &= mask

		if i >= 56 {
			// we might have to write the length here if all fit in one block
			d.x[i] |= mask1b & length[i-56]
		}
	}

	// compress, and only keep the digest if all fit in one block
	block(d, d.x[:])

	var digest [Size]byte
	for i, s := range d.h {
		digest[i*4] = mask1b & byte(s>>24)
		digest[i*4+1] = mask1b & byte(s>>16)
		digest[i*4+2] = mask1b & byte(s>>8)
		digest[i*4+3] = mask1b & byte(s)
	}

	for i := byte(0); i < chunk; i++ {
		// second block, it's always past the end of data, might start with 0x80
		if i < 56 {
			d.x[i] = separator
			separator = 0
		} else {
			d.x[i] = length[i-56]
		}
	}

	// compress, and only keep the digest if we actually needed the second block
	block(d, d.x[:])

	for i, s := range d.h {
		digest[i*4] |= ^mask1b & byte(s>>24)
		digest[i*4+1] |= ^mask1b & byte(s>>16)
		digest[i*4+2] |= ^mask1b & byte(s>>8)
		digest[i*4+3] |= ^mask1b & byte(s)
	}

	return digest, nil
}
// Sum returns the SHA-1 checksum of the data, together with a boolean
// reporting whether a collision was detected while hashing.
func Sum(data []byte) ([Size]byte, bool) {
	h := New().(*digest)
	h.Write(data)
	return h.checkSum(), h.col
}
// CollisionResistantSum appends the current hash to in and returns the
// resulting slice, together with a boolean reporting whether a collision
// was detected while hashing.
func (d *digest) CollisionResistantSum(in []byte) ([]byte, bool) {
	// Finalize a copy so the caller can keep writing and summing.
	dup := *d
	sum := dup.checkSum()
	return append(in, sum[:]...), dup.col
}

374
vendor/github.com/pjbgf/sha1cd/ubc/check.go generated vendored Normal file
View File

@ -0,0 +1,374 @@
// Based on the C implementation from Marc Stevens and Dan Shumow.
// https://github.com/cr-marcstevens/sha1collisiondetection

// Package ubc implements the unavoidable bitcondition (ubc) check used to
// decide which disturbance vectors require a recompression check.
package ubc

import "fmt"

// DvInfo describes one SHA-1 disturbance vector and the metadata needed to
// test for it.
type DvInfo struct {
	// DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper).
	// https://marc-stevens.nl/research/papers/C13-S.pdf
	DvType int
	DvK    int
	DvB    int

	// TestT is the step to do the recompression from for collision detection.
	TestT int

	// MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check.
	MaskI int
	MaskB int

	// Dm is the expanded message block XOR-difference defined by the DV.
	Dm [80]uint32
}
// CalculateDvMask takes as input an expanded message block and verifies the
// unavoidable bitconditions for all listed DVs. It returns a dvmask where each
// bit belonging to a DV is set if all unavoidable bitconditions for that DV
// have been met.
// Thus, one needs to do the recompression check for each DV that has its bit set.
//
// The body is a direct translation of ubc_check from the C implementation by
// Marc Stevens and Dan Shumow; statement order is significant and must not be
// rearranged.
func CalculateDvMask(W []uint32) (uint32, error) {
	if len(W) < 80 {
		return 0, fmt.Errorf("invalid input: len(W) must be 80, was %d", len(W))
	}
	// Branchless elimination idiom: for a single-bit condition c,
	// (c - 1) is all-ones (keep everything) when c == 0 and all-zeros except
	// the untouched ^(...) part when c == 1, so each step clears the listed
	// DV bits exactly when its bitcondition fails. The (0 - c) form is the
	// complement: it keeps the listed DV bits only when c == 1.
	mask := uint32(0xFFFFFFFF)
	mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit))
	mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
	mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
	mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
	mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
	mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit))
	mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
	mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit))
	mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit))
	mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit))
	mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit))
	mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit))
	mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit))
	mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit))
	mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit))
	mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
	mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
	mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
	mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit))
	mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit))
	mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit))
	mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit))
	mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
	mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit))
	mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit))
	mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit))
	// From here on, conditions are only evaluated when at least one of the
	// DV bits they could clear is still set, skipping dead work.
	if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
		mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
	}
	mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
	if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 {
		mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
	}
	if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 {
		mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit))
	}
	if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 {
		mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit))
	}
	if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 {
		mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
	}
	if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 {
		mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit))
	}
	if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 {
		mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
	}
	if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 {
		mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit))
	}
	if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 {
		mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit))
	}
	if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 {
		mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit))
	}
	if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 {
		mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit))
	}
	if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 {
		mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit))
	}
	if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 {
		mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit))
	}
	mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit))
	if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 {
		mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit))
	}
	mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit))
	if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 {
		mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit))
	}
	mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit))
	mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit))
	if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 {
		mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit))
	}
	mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit))
	if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 {
		mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit))
	}
	if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
		mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
	}
	if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 {
		mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
	}
	mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit))
	if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 {
		mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit))
	}
	if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 {
		mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit))
	}
	if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
		mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
	}
	if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 {
		mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit))
	}
	if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 {
		mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit))
	}
	mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit))
	if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
		mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
	}
	if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 {
		mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit))
	}
	if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 {
		mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit))
	}
	if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
		mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
	}
	if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 {
		mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit))
	}
	if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 {
		mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit))
	}
	if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 {
		mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit))
	}
	if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 {
		mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit))
	}
	if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 {
		mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit))
	}
	mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
	mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
	if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 {
		mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit))
	}
	mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit))
	mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit))
	mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit))
	mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit))
	mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit))
	mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit))
	mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit))
	mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit))
	mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit))
	mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit))
	if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 {
		mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit))
	}
	if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 {
		mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit))
	}
	if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 {
		mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit))
	}
	if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 {
		mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit))
	}
	if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
		mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
	}
	mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit))
	if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 {
		mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit))
	}
	// Final per-DV refinement: each surviving DV bit gets extra conditions
	// checked with the C-style not() helper (1 iff the argument is zero).
	if mask != 0 {
		if (mask & DV_I_43_0_bit) != 0 {
			if not((W[61]^(W[62]>>5))&(1<<1)) != 0 ||
				not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 ||
				not((W[58]^(W[63]>>30))&(1<<0)) != 0 {
				mask &= ^DV_I_43_0_bit
			}
		}
		if (mask & DV_I_44_0_bit) != 0 {
			if not((W[62]^(W[63]>>5))&(1<<1)) != 0 ||
				not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 ||
				not((W[59]^(W[64]>>30))&(1<<0)) != 0 {
				mask &= ^DV_I_44_0_bit
			}
		}
		if (mask & DV_I_46_2_bit) != 0 {
			mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit)
		}
		if (mask & DV_I_47_2_bit) != 0 {
			if not((W[62]^(W[63]>>5))&(1<<2)) != 0 ||
				not(not((W[41]^W[43])&(1<<6))) != 0 {
				mask &= ^DV_I_47_2_bit
			}
		}
		if (mask & DV_I_48_2_bit) != 0 {
			if not((W[63]^(W[64]>>5))&(1<<2)) != 0 ||
				not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 {
				mask &= ^DV_I_48_2_bit
			}
		}
		if (mask & DV_I_49_2_bit) != 0 {
			if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 ||
				not((W[42]^W[50])&(1<<1)) != 0 ||
				not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 ||
				not((W[38]^W[40])&(1<<1)) != 0 {
				mask &= ^DV_I_49_2_bit
			}
		}
		if (mask & DV_I_50_0_bit) != 0 {
			mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit)
		}
		if (mask & DV_I_50_2_bit) != 0 {
			mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit)
		}
		if (mask & DV_I_51_0_bit) != 0 {
			mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit)
		}
		if (mask & DV_I_51_2_bit) != 0 {
			if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 ||
				not(not((W[49]^W[51])&(1<<6))) != 0 ||
				not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 ||
				not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 {
				mask &= ^DV_I_51_2_bit
			}
		}
		if (mask & DV_I_52_0_bit) != 0 {
			mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit)
		}
		if (mask & DV_II_46_2_bit) != 0 {
			mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit)
		}
		if (mask & DV_II_48_0_bit) != 0 {
			if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 ||
				not((W[35]^(W[40]<<2))&(1<<30)) != 0 {
				mask &= ^DV_II_48_0_bit
			}
		}
		if (mask & DV_II_49_0_bit) != 0 {
			if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 ||
				not((W[36]^(W[41]<<2))&(1<<30)) != 0 {
				mask &= ^DV_II_49_0_bit
			}
		}
		if (mask & DV_II_49_2_bit) != 0 {
			if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 ||
				not(not((W[51]^W[53])&(1<<6))) != 0 ||
				not((W[50]^W[54])&(1<<1)) != 0 ||
				not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 ||
				not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 ||
				not((W[36]^(W[41]>>30))&(1<<0)) != 0 {
				mask &= ^DV_II_49_2_bit
			}
		}
		if (mask & DV_II_50_0_bit) != 0 {
			if not((W[55]^W[58])&(1<<29)) != 0 ||
				not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 ||
				not((W[37]^(W[42]<<2))&(1<<30)) != 0 {
				mask &= ^DV_II_50_0_bit
			}
		}
		if (mask & DV_II_50_2_bit) != 0 {
			if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 ||
				not(not((W[52]^W[54])&(1<<6))) != 0 ||
				not((W[51]^W[55])&(1<<1)) != 0 ||
				not((W[45]^W[47])&(1<<1)) != 0 ||
				not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 ||
				not((W[37]^(W[42]>>30))&(1<<0)) != 0 {
				mask &= ^DV_II_50_2_bit
			}
		}
		if (mask & DV_II_51_0_bit) != 0 {
			if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 ||
				not((W[38]^(W[43]<<2))&(1<<30)) != 0 {
				mask &= ^DV_II_51_0_bit
			}
		}
		if (mask & DV_II_51_2_bit) != 0 {
			if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 ||
				not(not((W[53]^W[55])&(1<<6))) != 0 ||
				not((W[52]^W[56])&(1<<1)) != 0 ||
				not((W[46]^W[48])&(1<<1)) != 0 ||
				not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 ||
				not((W[38]^(W[43]>>30))&(1<<0)) != 0 {
				mask &= ^DV_II_51_2_bit
			}
		}
		if (mask & DV_II_52_0_bit) != 0 {
			if not(not((W[59]^W[60])&(1<<29))) != 0 ||
				not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 ||
				not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 ||
				not((W[39]^(W[44]<<2))&(1<<30)) != 0 {
				mask &= ^DV_II_52_0_bit
			}
		}
		if (mask & DV_II_53_0_bit) != 0 {
			if not((W[58]^W[61])&(1<<29)) != 0 ||
				not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 ||
				not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 ||
				not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 {
				mask &= ^DV_II_53_0_bit
			}
		}
		if (mask & DV_II_54_0_bit) != 0 {
			if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 ||
				not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 ||
				not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 {
				mask &= ^DV_II_54_0_bit
			}
		}
		if (mask & DV_II_55_0_bit) != 0 {
			if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 ||
				not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 ||
				not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 ||
				not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 {
				mask &= ^DV_II_55_0_bit
			}
		}
		if (mask & DV_II_56_0_bit) != 0 {
			if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 ||
				not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 ||
				not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 {
				mask &= ^DV_II_56_0_bit
			}
		}
	}

	return mask, nil
}
// not emulates C's logical negation on a uint32: it yields 1 for a zero
// input and 0 for any non-zero input.
func not(x uint32) uint32 {
	if x != 0 {
		return 0
	}
	return 1
}
// SHA1_dvs returns the package-level list of known SHA-1 disturbance
// vectors used for collision detection. The returned slice is the shared
// backing data and must not be modified by callers.
func SHA1_dvs() []DvInfo {
	return sha1_dvs
}

624
vendor/github.com/pjbgf/sha1cd/ubc/const.go generated vendored Normal file
View File

@ -0,0 +1,624 @@
// Based on the C implementation from Marc Stevens and Dan Shumow.
// https://github.com/cr-marcstevens/sha1collisiondetection
package ubc
const (
	// CheckSize is the number of expanded message words consumed when
	// checking for disturbance vectors.
	CheckSize = 80

	// Each DV_<t>_<K>_<B>_bit constant assigns one bit of the dvmask
	// returned by CalculateDvMask to the disturbance vector I(K,B)
	// (DV_I_*) or II(K,B) (DV_II_*).
	DV_I_43_0_bit  = (uint32)(1 << 0)
	DV_I_44_0_bit  = (uint32)(1 << 1)
	DV_I_45_0_bit  = (uint32)(1 << 2)
	DV_I_46_0_bit  = (uint32)(1 << 3)
	DV_I_46_2_bit  = (uint32)(1 << 4)
	DV_I_47_0_bit  = (uint32)(1 << 5)
	DV_I_47_2_bit  = (uint32)(1 << 6)
	DV_I_48_0_bit  = (uint32)(1 << 7)
	DV_I_48_2_bit  = (uint32)(1 << 8)
	DV_I_49_0_bit  = (uint32)(1 << 9)
	DV_I_49_2_bit  = (uint32)(1 << 10)
	DV_I_50_0_bit  = (uint32)(1 << 11)
	DV_I_50_2_bit  = (uint32)(1 << 12)
	DV_I_51_0_bit  = (uint32)(1 << 13)
	DV_I_51_2_bit  = (uint32)(1 << 14)
	DV_I_52_0_bit  = (uint32)(1 << 15)
	DV_II_45_0_bit = (uint32)(1 << 16)
	DV_II_46_0_bit = (uint32)(1 << 17)
	DV_II_46_2_bit = (uint32)(1 << 18)
	DV_II_47_0_bit = (uint32)(1 << 19)
	DV_II_48_0_bit = (uint32)(1 << 20)
	DV_II_49_0_bit = (uint32)(1 << 21)
	DV_II_49_2_bit = (uint32)(1 << 22)
	DV_II_50_0_bit = (uint32)(1 << 23)
	DV_II_50_2_bit = (uint32)(1 << 24)
	DV_II_51_0_bit = (uint32)(1 << 25)
	DV_II_51_2_bit = (uint32)(1 << 26)
	DV_II_52_0_bit = (uint32)(1 << 27)
	DV_II_53_0_bit = (uint32)(1 << 28)
	DV_II_54_0_bit = (uint32)(1 << 29)
	DV_II_55_0_bit = (uint32)(1 << 30)
	DV_II_56_0_bit = (uint32)(1 << 31)
)
// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) which defines the
// unavoidable bit conditions when a collision attack is in progress.
var sha1_dvs = []DvInfo{
{
DvType: 1, DvK: 43, DvB: 0, TestT: 58, MaskI: 0, MaskB: 0,
Dm: [CheckSize]uint32{
0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6, 0x8000004c,
0x00000803, 0x80000161, 0x80000599},
}, {
DvType: 1, DvK: 44, DvB: 0, TestT: 58, MaskI: 0, MaskB: 1,
Dm: [CheckSize]uint32{
0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6,
0x8000004c, 0x00000803, 0x80000161},
},
{
DvType: 1, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 2,
Dm: [CheckSize]uint32{
0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408,
0x800000e6, 0x8000004c, 0x00000803},
},
{
DvType: 1, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 3,
Dm: [CheckSize]uint32{
0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
0x00000408, 0x800000e6, 0x8000004c},
},
{
DvType: 1, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 4,
Dm: [CheckSize]uint32{
0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043,
0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003,
0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001,
0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003,
0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043,
0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002,
0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000,
0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002,
0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101,
0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c,
0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590, 0x00001020,
0x0000039a, 0x00000132},
},
{
DvType: 1, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 5,
Dm: [CheckSize]uint32{
0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c,
0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008,
0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
0x00000408, 0x800000e6},
},
{
DvType: 1, DvK: 47, DvB: 2, TestT: 58, MaskI: 0, MaskB: 6,
Dm: [CheckSize]uint32{
0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032,
0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020,
0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040,
0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022,
0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002,
0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002,
0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040,
0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040,
0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009,
0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124,
0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590,
0x00001020, 0x0000039a,
},
},
{
DvType: 1, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 7,
Dm: [CheckSize]uint32{
0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000,
0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010,
0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000,
0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000,
0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018,
0x00000164, 0x00000408,
},
},
{
DvType: 1, DvK: 48, DvB: 2, TestT: 58, MaskI: 0, MaskB: 8,
Dm: [CheckSize]uint32{
0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000,
0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001,
0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000,
0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043,
0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001,
0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042,
0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002,
0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000,
0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004,
0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a,
0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060,
0x00000590, 0x00001020},
},
{
DvType: 1, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 9,
Dm: [CheckSize]uint32{
0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008,
0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008,
0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010,
0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000,
0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080,
0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202,
0x00000018, 0x00000164},
},
{
DvType: 1, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 10,
Dm: [CheckSize]uint32{
0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022,
0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002,
0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052,
0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042,
0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022,
0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042,
0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000,
0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202,
0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a,
0x00000060, 0x00000590},
},
{
DvType: 1, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 11,
Dm: [CheckSize]uint32{
0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014,
0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000,
0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010,
0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000,
0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004,
0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012,
0x80000202, 0x00000018,
},
},
{
DvType: 1, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 12,
Dm: [CheckSize]uint32{
0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053,
0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042,
0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040,
0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001,
0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043,
0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001,
0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002,
0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000,
0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012,
0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a,
0x0000080a, 0x00000060},
},
{
DvType: 1, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 13,
Dm: [CheckSize]uint32{
0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010,
0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014,
0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018,
0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010,
0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002,
0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
0x80000012, 0x80000202},
},
{
DvType: 1, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 14,
Dm: [CheckSize]uint32{
0xa0000003, 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040,
0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040,
0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052,
0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060,
0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042,
0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062,
0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040,
0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040,
0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000,
0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009,
0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026,
0x0000004a, 0x0000080a},
},
{
DvType: 1, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 15,
Dm: [CheckSize]uint32{
0x04000010, 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010,
0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010,
0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000,
0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000,
0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
0x80000009, 0x80000012},
},
{
DvType: 2, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 16,
Dm: [CheckSize]uint32{
0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a, 0x000002e4,
0x80000054, 0x00000967},
},
{
DvType: 2, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 17,
Dm: [CheckSize]uint32{
0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a,
0x000002e4, 0x80000054},
},
{
DvType: 2, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 18,
Dm: [CheckSize]uint32{
0x90000070, 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010,
0xf0000062, 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041,
0x20000050, 0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041,
0xc0000032, 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002,
0x00000000, 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
0x80000040, 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003,
0x40000002, 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002,
0x00000040, 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002,
0x00000040, 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105,
0x00000089, 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e,
0x00000224, 0x00000050, 0x0000092e, 0x0000046c, 0x000005b6, 0x0000106a,
0x00000b90, 0x00000152},
},
{
DvType: 2, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 19,
Dm: [CheckSize]uint32{
0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d,
0x8000041a, 0x000002e4},
},
{
DvType: 2, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 20,
Dm: [CheckSize]uint32{
0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b,
0x8000016d, 0x8000041a},
},
{
DvType: 2, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 21,
Dm: [CheckSize]uint32{
0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b,
0x0000011b, 0x8000016d},
},
{
DvType: 2, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 22,
Dm: [CheckSize]uint32{
0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053, 0x30000008,
0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042, 0x00000030,
0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041, 0xe0000072,
0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001, 0xc0000002,
0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000, 0x80000000,
0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040, 0xc0000002,
0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002, 0x80000000,
0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016, 0x0000020b,
0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050, 0x0000092e,
0x0000046c, 0x000005b6},
},
{
DvType: 2, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 23,
Dm: [CheckSize]uint32{
0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014,
0x8000024b, 0x0000011b},
},
{
DvType: 2, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 24,
Dm: [CheckSize]uint32{
0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053,
0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042,
0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041,
0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001,
0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000,
0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000,
0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040,
0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002,
0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016,
0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050,
0x0000092e, 0x0000046c},
},
{
DvType: 2, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 25,
Dm: [CheckSize]uint32{
0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c,
0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
0x00000014, 0x8000024b},
},
{
DvType: 2, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 26,
Dm: [CheckSize]uint32{
0x00000043, 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070,
0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062,
0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050,
0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032,
0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000,
0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002,
0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040,
0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040,
0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089,
0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224,
0x00000050, 0x0000092e},
},
{
DvType: 2, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 27,
Dm: [CheckSize]uint32{
0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010,
0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
0x00000089, 0x00000014},
},
{
DvType: 2, DvK: 53, DvB: 0, TestT: 65, MaskI: 0, MaskB: 28,
Dm: [CheckSize]uint32{
0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a,
0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
0x80000107, 0x00000089},
},
{
DvType: 2, DvK: 54, DvB: 0, TestT: 65, MaskI: 0, MaskB: 29,
Dm: [CheckSize]uint32{
0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004,
0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
0x4000004b, 0x80000107},
},
{
DvType: 2, DvK: 55, DvB: 0, TestT: 65, MaskI: 0, MaskB: 30,
Dm: [CheckSize]uint32{
0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
0xc0000046, 0x4000004b},
},
{
DvType: 2, DvK: 56, DvB: 0, TestT: 65, MaskI: 0, MaskB: 31,
Dm: [CheckSize]uint32{
0x2600001a, 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
0xc0000082, 0xc0000046},
},
{
DvType: 0, DvK: 0, DvB: 0, TestT: 0, MaskI: 0, MaskB: 0,
Dm: [CheckSize]uint32{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0},
},
}

3
vendor/github.com/pjbgf/sha1cd/ubc/doc.go generated vendored Normal file
View File

@ -0,0 +1,3 @@
// Package ubc provides ways for SHA1 blocks to be checked for
// Unavoidable Bit Conditions that arise from cryptanalysis attacks.
package ubc

Some files were not shown because too many files have changed in this diff Show More