chore(deps): bump github.com/go-git/go-git/v5 from 5.4.2 to 5.6.0

Bumps [github.com/go-git/go-git/v5](https://github.com/go-git/go-git) from 5.4.2 to 5.6.0.
- [Release notes](https://github.com/go-git/go-git/releases)
- [Commits](https://github.com/go-git/go-git/compare/v5.4.2...v5.6.0)

---
updated-dependencies:
- dependency-name: github.com/go-git/go-git/v5
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
dependabot[bot] 2023-03-01 23:00:13 +00:00, committed by GitHub
parent 0d9c92c8c0
commit ef48bbb732
114 changed files with 6496 additions and 470 deletions

go.mod

@ -6,7 +6,7 @@ require (
github.com/alecthomas/chroma v0.10.0
github.com/davecgh/go-spew v1.1.1
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815
github.com/go-git/go-git/v5 v5.4.2
github.com/go-git/go-git/v5 v5.6.0
github.com/mattn/go-isatty v0.0.16
github.com/mitchellh/go-homedir v1.1.0
gopkg.in/yaml.v2 v2.4.0
@ -20,16 +20,18 @@ require (
github.com/dlclark/regexp2 v1.7.0 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-git/go-billy/v5 v5.3.1 // indirect
github.com/go-git/go-billy/v5 v5.4.0 // indirect
github.com/imdario/mergo v0.3.13 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/sergi/go-diff v1.2.0 // indirect
github.com/xanzy/ssh-agent v0.3.2 // indirect
golang.org/x/crypto v0.1.0 // indirect
github.com/skeema/knownhosts v1.1.0 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
golang.org/x/crypto v0.3.0 // indirect
golang.org/x/mod v0.6.0 // indirect
golang.org/x/net v0.1.0 // indirect
golang.org/x/sys v0.1.0 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/tools v0.2.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
)
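As a reference, a bump of this shape is normally reproduced locally with the Go toolchain before re-vendoring; a sketch of the usual commands (module version taken from the require line above):

    go get github.com/go-git/go-git/v5@v5.6.0
    go mod tidy
    go mod vendor

The go.sum and vendor/ diffs below are the mechanical result of that step, including the transitive bump of go-billy to v5.4.0.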

go.sum

@ -1,17 +1,14 @@
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4 h1:ra2OtmuW0AE5csawV4YXMNGNQQXvLRps3z2Z59OPO+I=
github.com/ProtonMail/go-crypto v0.0.0-20221026131551-cf6655e29de4/go.mod h1:UBYPn8k0D56RtnR8RFQMjmh4KrZzWJ5o7Z9SYjossQ8=
github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/alecthomas/chroma v0.10.0 h1:7XDcGkCQopCNKjZHfYrNLraA+M7e0fMiJ/Mfikbfjek=
github.com/alecthomas/chroma v0.10.0/go.mod h1:jtJATyUxlIORhUOFNA9NZDWGAQ8wpxQQqNSB4rjA/1s=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
@ -28,33 +25,28 @@ github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo
github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/gliderlabs/ssh v0.3.5 h1:OcaySEmAQJgyYcArR+gGGTHCyE7nvhEMTlYY+Dp8CpY=
github.com/gliderlabs/ssh v0.3.5/go.mod h1:8XB4KraRrX39qHhT6yxPsHedjA08I/uBVwj4xC+/+z4=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/go-git/go-billy/v5 v5.4.0 h1:Vaw7LaSTRJOUric7pe4vnzBSgyuf2KrLsu2Y4ZpQBDE=
github.com/go-git/go-billy/v5 v5.4.0/go.mod h1:vjbugF6Fz7JIflbVpl1hJsGjSHNltrSw45YK/ukIvQg=
github.com/go-git/go-git-fixtures/v4 v4.3.1 h1:y5z6dd3qi8Hl+stezc8p3JxDkoTRqMAlKnXHuzrfjTQ=
github.com/go-git/go-git-fixtures/v4 v4.3.1/go.mod h1:8LHG1a3SRW71ettAD/jW13h8c6AqjVSeL11RAdgaqpo=
github.com/go-git/go-git/v5 v5.6.0 h1:JvBdYfcttd+0kdpuWO7KTu0FYgCf5W0t5VwkWGobaa4=
github.com/go-git/go-git/v5 v5.6.0/go.mod h1:6nmJ0tJ3N4noMV1Omv7rC5FG3/o8Cm51TB4CJp7mRmE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -68,8 +60,10 @@ github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peK
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -77,58 +71,78 @@ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZN
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.1.0 h1:Wvr9V0MxhjRbl3f9nMnKnFfiWTJmtECJ9Njkea3ysW0=
github.com/skeema/knownhosts v1.1.0/go.mod h1:sKFq3RD6/TKZkSWn8boUbDC7Qkgcv+8XXijpFO6roag=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/xanzy/ssh-agent v0.3.2 h1:eKj4SX2Fe7mui28ZgnFW5fmTz1EIr7ugo5s6wDxdHBM=
github.com/xanzy/ssh-agent v0.3.2/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/arch v0.1.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.0.0-20220826181053-bd7e27e6170d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.3.0 h1:a06MkbcxBrEFc0w0QIZWXrH/9cCX6KJyWbBOIwAn+7A=
golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220315194320-039c03cc5b86/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.0.0-20220825204002-c680a09ffe64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20220722155259-a9ba230a4035/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0 h1:z85xZCsEl7bi/KwbNADeBYoOP0++7W1ipu+aGnpwzRM=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -138,9 +152,9 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

vendor/github.com/go-git/go-billy/v5/Makefile generated vendored Normal file

@ -0,0 +1,11 @@
# Go parameters
GOCMD = go
GOTEST = $(GOCMD) test
.PHONY: test
test:
$(GOTEST) -race ./...
test-coverage:
echo "" > $(COVERAGE_REPORT); \
$(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./...

View File

@ -6,6 +6,7 @@ import (
"io"
"os"
"path/filepath"
"sync"
)
type storage struct {
@ -174,6 +175,8 @@ func clean(path string) string {
type content struct {
name string
bytes []byte
m sync.RWMutex
}
func (c *content) WriteAt(p []byte, off int64) (int, error) {
@ -185,6 +188,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
}
}
c.m.Lock()
prev := len(c.bytes)
diff := int(off) - prev
@ -196,6 +200,7 @@ func (c *content) WriteAt(p []byte, off int64) (int, error) {
if len(c.bytes) < prev {
c.bytes = c.bytes[:prev]
}
c.m.Unlock()
return len(p), nil
}
@ -209,8 +214,10 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
}
c.m.RLock()
size := int64(len(c.bytes))
if off >= size {
c.m.RUnlock()
return 0, io.EOF
}
@ -220,10 +227,12 @@ func (c *content) ReadAt(b []byte, off int64) (n int, err error) {
}
btr := c.bytes[off : off+l]
n = copy(b, btr)
if len(btr) < len(b) {
err = io.EOF
}
n = copy(b, btr)
c.m.RUnlock()
return
}

vendor/github.com/go-git/go-billy/v5/util/walk.go generated vendored Normal file

@ -0,0 +1,72 @@
package util
import (
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
)
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
if !info.IsDir() {
return walkFn(path, info, nil)
}
names, err := readdirnames(fs, path)
err1 := walkFn(path, info, err)
// If err != nil, walk can't walk into this directory.
// err1 != nil means walkFn want walk to skip this directory or stop walking.
// Therefore, if one of err and err1 isn't nil, walk will return.
if err != nil || err1 != nil {
// The caller's behavior is controlled by the return value, which is decided
// by walkFn. walkFn may ignore err and return nil.
// If walkFn returns SkipDir, it will be handled by the caller.
// So walk should return whatever walkFn returns.
return err1
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := fs.Lstat(filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by fn: see the WalkFunc documentation for
// details.
//
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
info, err := fs.Lstat(root)
if err != nil {
err = walkFn(root, nil, err)
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
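A minimal sketch of driving this new helper, assuming go-billy's in-memory filesystem (memfs) and the WriteFile helper from the same util package:

    package main

    import (
        "fmt"
        "os"

        "github.com/go-git/go-billy/v5/memfs"
        "github.com/go-git/go-billy/v5/util"
    )

    func main() {
        fs := memfs.New()
        _ = util.WriteFile(fs, "docs/a.txt", []byte("a"), 0o644)
        _ = util.WriteFile(fs, "docs/b.txt", []byte("b"), 0o644)
        // Files are visited in lexical order; Walk does not follow symlinks.
        _ = util.Walk(fs, "docs", func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            fmt.Println(path)
            return nil
        })
    }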

View File

@ -2,3 +2,4 @@ coverage.out
*~
coverage.txt
profile.out
.tmp/

View File

@ -2,6 +2,7 @@ package config
import (
"errors"
"strings"
"github.com/go-git/go-git/v5/plumbing"
format "github.com/go-git/go-git/v5/plumbing/format/config"
@ -26,6 +27,12 @@ type Branch struct {
// "true" and "interactive". "false" is undocumented and
// typically represented by the non-existence of this field
Rebase string
// Description explains what the branch is for.
// Multi-line explanations may be used.
//
// Original git command to edit:
// git branch --edit-description
Description string
raw *format.Subsection
}
@ -75,9 +82,27 @@ func (b *Branch) marshal() *format.Subsection {
b.raw.SetOption(rebaseKey, b.Rebase)
}
if b.Description == "" {
b.raw.RemoveOption(descriptionKey)
} else {
desc := quoteDescription(b.Description)
b.raw.SetOption(descriptionKey, desc)
}
return b.raw
}
// hack to trigger conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
//
// Current Encoder implementation uses Go %q format if value contains a backslash character,
// which is not consistent with reference git implementation.
// git just replaces newline characters with \n, while Encoder prints them directly.
// Until value quoting fix, we should escape description value by replacing newline characters with \n.
func quoteDescription(desc string) string {
return strings.ReplaceAll(desc, "\n", `\n`)
}
func (b *Branch) unmarshal(s *format.Subsection) error {
b.raw = s
@ -85,6 +110,14 @@ func (b *Branch) unmarshal(s *format.Subsection) error {
b.Remote = b.raw.Options.Get(remoteSection)
b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey))
b.Rebase = b.raw.Options.Get(rebaseKey)
b.Description = unquoteDescription(b.raw.Options.Get(descriptionKey))
return b.Validate()
}
// hack to enable conditional quoting in the
// plumbing/format/config/Encoder.encodeOptions
// goto quoteDescription for details.
func unquoteDescription(desc string) string {
return strings.ReplaceAll(desc, `\n`, "\n")
}
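A hedged sketch of the new Description field round-tripping through Marshal (branch and remote names are illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5/config"
        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        cfg := config.NewConfig()
        cfg.Branches["main"] = &config.Branch{
            Name:        "main",
            Remote:      "origin",
            Merge:       plumbing.ReferenceName("refs/heads/main"),
            Description: "Release branch.\nSecond line survives via the \\n escaping above.",
        }
        out, err := cfg.Marshal()
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s", out) // emits a quoted "description" option under [branch "main"]
    }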

View File

@ -15,7 +15,6 @@ import (
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/internal/url"
format "github.com/go-git/go-git/v5/plumbing/format/config"
"github.com/mitchellh/go-homedir"
)
const (
@ -150,7 +149,7 @@ func ReadConfig(r io.Reader) (*Config, error) {
// config file to the given scope, a empty one is returned.
func LoadConfig(scope Scope) (*Config, error) {
if scope == LocalScope {
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.")
return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer")
}
files, err := Paths(scope)
@ -185,7 +184,7 @@ func Paths(scope Scope) ([]string, error) {
files = append(files, filepath.Join(xdg, "git/config"))
}
home, err := homedir.Dir()
home, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -247,6 +246,7 @@ const (
rebaseKey = "rebase"
nameKey = "name"
emailKey = "email"
descriptionKey = "description"
defaultBranchKey = "defaultBranch"
// DefaultPackWindow holds the number of previous objects used to
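
With the mitchellh/go-homedir dependency dropped in favour of os.UserHomeDir, loading non-local scopes is unchanged from the caller's side; a minimal sketch:

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5/config"
    )

    func main() {
        // LocalScope must still be read through a ConfigStorer, per the error above.
        cfg, err := config.LoadConfig(config.GlobalScope)
        if err != nil {
            panic(err)
        }
        fmt.Println(cfg.User.Name, cfg.User.Email)
    }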

View File

@ -64,7 +64,7 @@ func (s RefSpec) IsExactSHA1() bool {
return plumbing.IsHash(s.Src())
}
// Src return the src side.
// Src returns the src side.
func (s RefSpec) Src() string {
spec := string(s)

View File

@ -322,6 +322,8 @@ func (p *Parser) parseAt() (Revisioner, error) {
}
return AtDate{t}, nil
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in @{<data>} structure`}
default:
date += lit
}
@ -424,6 +426,8 @@ func (p *Parser) parseCaretBraces() (Revisioner, error) {
p.unscan()
case tok != slash && start:
return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)}
case tok == eof:
return nil, &ErrInvalidRevision{s: `missing "}" in ^{<data>} structure`}
case tok != cbrace:
p.unscan()
re += lit
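These parser paths sit behind Repository.ResolveRevision; a short, hedged sketch of the call they back (repository path and revision are illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-git/v5"
        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }
        // Revision expressions go through the parser above; malformed @{...} or
        // ^{...} input now returns ErrInvalidRevision via the new eof cases.
        h, err := repo.ResolveRevision(plumbing.Revision("HEAD~1"))
        if err != nil {
            panic(err)
        }
        fmt.Println(h)
    }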

View File

@ -60,7 +60,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
// Fetch the object.
obj, err := object.GetObject(p.Storer, hash)
if err != nil {
return fmt.Errorf("Getting object %s failed: %v", hash, err)
return fmt.Errorf("getting object %s failed: %v", hash, err)
}
// Walk all children depending on object type.
switch obj := obj.(type) {
@ -98,7 +98,7 @@ func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error {
return p.walkObjectTree(obj.Target)
default:
// Error out on unhandled object types.
return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj)
return fmt.Errorf("unknown object %X %s %T", obj.ID(), obj.Type(), obj)
}
return nil
}

View File

@ -91,6 +91,8 @@ func (o *CloneOptions) Validate() error {
type PullOptions struct {
// Name of the remote to be pulled. If empty, uses the default.
RemoteName string
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// Remote branch to clone. If empty, uses HEAD.
ReferenceName plumbing.ReferenceName
// Fetch only ReferenceName if true.
@ -147,7 +149,9 @@ const (
type FetchOptions struct {
// Name of the remote to fetch from. Defaults to origin.
RemoteName string
RefSpecs []config.RefSpec
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
RefSpecs []config.RefSpec
// Depth limit fetching to the specified number of commits from the tip of
// each remote branch history.
Depth int
@ -192,8 +196,16 @@ func (o *FetchOptions) Validate() error {
type PushOptions struct {
// RemoteName is the name of the remote to be pushed to.
RemoteName string
// RefSpecs specify what destination ref to update with what source
// object. A refspec with empty src can be used to delete a reference.
// RemoteURL overrides the remote repo address with a custom URL
RemoteURL string
// RefSpecs specify what destination ref to update with what source object.
//
// The format of a <refspec> parameter is an optional plus +, followed by
// the source object <src>, followed by a colon :, followed by the destination ref <dst>.
// The <src> is often the name of the branch you would want to push, but it can be a SHA-1.
// The <dst> tells which ref on the remote side is updated with this push.
//
// A refspec with empty src can be used to delete a reference.
RefSpecs []config.RefSpec
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
@ -206,13 +218,35 @@ type PushOptions struct {
// Force allows the push to update a remote branch even when the local
// branch does not descend from it.
Force bool
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
// RequireRemoteRefs only allows a remote ref to be updated if its current
// value is the one specified here.
RequireRemoteRefs []config.RefSpec
// FollowTags will send any annotated tags with a commit target reachable from
// the refs already being pushed
FollowTags bool
// ForceWithLease allows a force push as long as the remote ref adheres to a "lease"
ForceWithLease *ForceWithLease
// PushOptions sets options to be transferred to the server during push.
Options map[string]string
// Atomic sets option to be an atomic push
Atomic bool
}
// ForceWithLease sets fields on the lease
// If neither RefName nor Hash are set, ForceWithLease protects
// all refs in the refspec by ensuring the ref of the remote in the local repository
// matches the one in the ref advertisement.
type ForceWithLease struct {
// RefName, when set will protect the ref by ensuring it matches the
// hash in the ref advertisement.
RefName plumbing.ReferenceName
// Hash is the expected object id of RefName. The push will be rejected unless this
// matches the corresponding object id of RefName in the refs advertisement.
Hash plumbing.Hash
}
// Validate validates the fields and sets the default values.
@ -274,6 +308,8 @@ type CheckoutOptions struct {
// target branch. Force and Keep are mutually exclusive, should not be both
// set to true.
Keep bool
// SparseCheckoutDirectories
SparseCheckoutDirectories []string
}
// Validate validates the fields and sets the default values.
@ -366,7 +402,7 @@ type LogOptions struct {
// Show only those commits in which the specified file was inserted/updated.
// It is equivalent to running `git log -- <file-name>`.
// this field is kept for compatility, it can be replaced with PathFilter
// this field is kept for compatibility, it can be replaced with PathFilter
FileName *string
// Filter commits based on the path of files that are updated
@ -422,6 +458,10 @@ type CommitOptions struct {
// All automatically stage files that have been modified and deleted, but
// new files you have not told Git about are not affected.
All bool
// AllowEmptyCommits enable empty commits to be created. An empty commit
// is when no changes to the tree were made, but a new commit message is
// provided. The default behavior is false, which results in ErrEmptyCommit.
AllowEmptyCommits bool
// Author is the author's signature of the commit. If Author is empty the
// Name and Email is read from the config, and time.Now it's used as When.
Author *object.Signature
@ -571,7 +611,7 @@ func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
type ListOptions struct {
// Auth credentials, if required, to use with the remote repository.
Auth transport.AuthMethod
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CABundle specify additional ca bundle with system cert pool
CABundle []byte
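A hedged sketch pulling together several of the new push options (FollowTags, Atomic, ForceWithLease); the refspec and branch name are illustrative:

    package main

    import (
        "github.com/go-git/go-git/v5"
        "github.com/go-git/go-git/v5/config"
        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }
        err = repo.Push(&git.PushOptions{
            RemoteName: "origin",
            RefSpecs:   []config.RefSpec{"refs/heads/main:refs/heads/main"},
            FollowTags: true, // also send reachable annotated tags
            Atomic:     true, // ask the server to apply all ref updates or none
            ForceWithLease: &git.ForceWithLease{
                RefName: plumbing.ReferenceName("refs/heads/main"),
            },
        })
        if err != nil && err != git.NoErrAlreadyUpToDate {
            panic(err)
        }
    }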

View File

@ -11,6 +11,10 @@ type Encoder struct {
w io.Writer
}
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@ -48,8 +52,7 @@ func (e *Encoder) encodeSection(s *Section) error {
}
func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
//TODO: escape
if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil {
if err := e.printf("[%s \"%s\"]\n", sectionName, subsectionReplacer.Replace(s.Name)); err != nil {
return err
}
@ -58,12 +61,14 @@ func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error {
func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
pattern := "\t%s = %s\n"
if strings.Contains(o.Value, "\\") {
pattern = "\t%s = %q\n"
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}
if err := e.printf(pattern, o.Key, o.Value); err != nil {
if err := e.printf("\t%s = %s\n", o.Key, value); err != nil {
return err
}
}
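A sketch of what the new quoting produces, using the plumbing/format/config API directly (key and value are illustrative):

    package main

    import (
        "bytes"
        "fmt"

        format "github.com/go-git/go-git/v5/plumbing/format/config"
    )

    func main() {
        cfg := format.New()
        cfg.Section("core").SetOption("editor", "vi \"readonly\"\t-n")
        var buf bytes.Buffer
        _ = format.NewEncoder(&buf).Encode(cfg)
        // Values containing #, ;, ", tab, newline or backslash are now written
        // as "..." with \" \\ \n \t \b escapes instead of Go %q quoting.
        fmt.Print(buf.String())
    }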

View File

@ -103,7 +103,7 @@ func (s *Section) RemoveSubsection(name string) *Section {
return s
}
// Option return the value for the specified key. Empty string is returned if
// Option returns the value for the specified key. Empty string is returned if
// key does not exists.
func (s *Section) Option(key string) string {
return s.Options.Get(key)

View File

@ -9,7 +9,7 @@ import (
type Operation int
const (
// Equal item represents a equals diff.
// Equal item represents an equals diff.
Equal Operation = iota
// Add item represents an insert diff.
Add
@ -26,15 +26,15 @@ type Patch interface {
Message() string
}
// FilePatch represents the necessary steps to transform one file to another.
// FilePatch represents the necessary steps to transform one file into another.
type FilePatch interface {
// IsBinary returns true if this patch is representing a binary file.
IsBinary() bool
// Files returns the from and to Files, with all the necessary metadata to
// Files returns the from and to Files, with all the necessary metadata
// about them. If the patch creates a new file, "from" will be nil.
// If the patch deletes a file, "to" will be nil.
Files() (from, to File)
// Chunks returns a slice of ordered changes to transform "from" File to
// Chunks returns a slice of ordered changes to transform "from" File into
// "to" File. If the file is a binary one, Chunks will be empty.
Chunks() []Chunk
}
@ -49,7 +49,7 @@ type File interface {
Path() string
}
// Chunk represents a portion of a file transformation to another.
// Chunk represents a portion of a file transformation into another.
type Chunk interface {
// Content contains the portion of the file.
Content() string

View File

@ -13,13 +13,14 @@ import (
)
const (
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
commentPrefix = "#"
coreSection = "core"
excludesfile = "excludesfile"
gitDir = ".git"
gitignoreFile = ".gitignore"
gitconfigFile = ".gitconfig"
systemFile = "/etc/gitconfig"
infoExcludeFile = gitDir + "/info/exclude"
)
// readIgnoreFile reads a specific git ignore file.
@ -42,10 +43,14 @@ func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps [
return
}
// ReadPatterns reads gitignore patterns recursively traversing through the directory
// structure. The result is in the ascending order of priority (last higher).
// ReadPatterns reads the .git/info/exclude and then the gitignore patterns
// recursively traversing through the directory structure. The result is in
// the ascending order of priority (last higher).
func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
ps, _ = readIgnoreFile(fs, path, gitignoreFile)
ps, _ = readIgnoreFile(fs, path, infoExcludeFile)
subps, _ := readIgnoreFile(fs, path, gitignoreFile)
ps = append(ps, subps...)
var fis []os.FileInfo
fis, err = fs.ReadDir(fs.Join(path...))
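A minimal sketch of the caller-visible effect: patterns from .git/info/exclude are now honoured alongside .gitignore files (paths are illustrative):

    package main

    import (
        "fmt"

        "github.com/go-git/go-billy/v5/osfs"
        "github.com/go-git/go-git/v5/plumbing/format/gitignore"
    )

    func main() {
        fs := osfs.New("/path/to/worktree")
        ps, err := gitignore.ReadPatterns(fs, nil)
        if err != nil {
            panic(err)
        }
        m := gitignore.NewMatcher(ps)
        fmt.Println(m.Match([]string{"build", "out.bin"}, false))
    }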

View File

@ -12,9 +12,9 @@ import (
var (
// ErrUnsupportedVersion is returned by Decode when the idx file version
// is not supported.
ErrUnsupportedVersion = errors.New("Unsupported version")
ErrUnsupportedVersion = errors.New("unsupported version")
// ErrMalformedIdxFile is returned by Decode when the idx file is corrupted.
ErrMalformedIdxFile = errors.New("Malformed IDX file")
ErrMalformedIdxFile = errors.New("malformed IDX file")
)
const (

View File

@ -1,10 +1,10 @@
package idxfile
import (
"crypto/sha1"
"hash"
"crypto"
"io"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -16,7 +16,7 @@ type Encoder struct {
// NewEncoder returns a new stream encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
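The new plumbing/hash package picks the digest algorithm in one place instead of importing crypto/sha1 directly; a minimal sketch of the call pattern used by the encoders and decoders in this commit:

    package main

    import (
        "crypto"
        "fmt"

        "github.com/go-git/go-git/v5/plumbing/hash"
    )

    func main() {
        h := hash.New(crypto.SHA1)
        h.Write([]byte("blob 11\x00hello world")) // git-style header + content
        fmt.Printf("%x\n", h.Sum(nil))
    }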

View File

@ -3,15 +3,15 @@ package index
import (
"bufio"
"bytes"
"crypto/sha1"
"crypto"
"errors"
"hash"
"io"
"io/ioutil"
"strconv"
"time"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
@ -49,7 +49,7 @@ type Decoder struct {
// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
return &Decoder{
r: io.TeeReader(r, h),
hash: h,

View File

@ -2,19 +2,19 @@ package index
import (
"bytes"
"crypto/sha1"
"crypto"
"errors"
"hash"
"io"
"sort"
"time"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/utils/binary"
)
var (
// EncodeVersionSupported is the range of supported index versions
EncodeVersionSupported uint32 = 2
EncodeVersionSupported uint32 = 3
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
// negative timestamp values
@ -29,16 +29,16 @@ type Encoder struct {
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
h := sha1.New()
h := hash.New(crypto.SHA1)
mw := io.MultiWriter(w, h)
return &Encoder{mw, h}
}
// Encode writes the Index to the stream of the encoder.
func (e *Encoder) Encode(idx *Index) error {
// TODO: support versions v3 and v4
// TODO: support v4
// TODO: support extensions
if idx.Version != EncodeVersionSupported {
if idx.Version > EncodeVersionSupported {
return ErrUnsupportedVersion
}
@ -68,8 +68,12 @@ func (e *Encoder) encodeEntries(idx *Index) error {
if err := e.encodeEntry(entry); err != nil {
return err
}
entryLength := entryHeaderLength
if entry.IntentToAdd || entry.SkipWorktree {
entryLength += 2
}
wrote := entryHeaderLength + len(entry.Name)
wrote := entryLength + len(entry.Name)
if err := e.padEntry(wrote); err != nil {
return err
}
@ -79,10 +83,6 @@ func (e *Encoder) encodeEntries(idx *Index) error {
}
func (e *Encoder) encodeEntry(entry *Entry) error {
if entry.IntentToAdd || entry.SkipWorktree {
return ErrUnsupportedVersion
}
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
if err != nil {
return err
@ -110,9 +110,25 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
entry.GID,
entry.Size,
entry.Hash[:],
flags,
}
flagsFlow := []interface{}{flags}
if entry.IntentToAdd || entry.SkipWorktree {
var extendedFlags uint16
if entry.IntentToAdd {
extendedFlags |= intentToAddMask
}
if entry.SkipWorktree {
extendedFlags |= skipWorkTreeMask
}
flagsFlow = []interface{}{flags | entryExtended, extendedFlags}
}
flow = append(flow, flagsFlow...)
if err := binary.Write(e.w, flow...); err != nil {
return err
}

View File

@ -5,6 +5,7 @@ import (
"errors"
"fmt"
"path/filepath"
"strings"
"time"
"github.com/go-git/go-git/v5/plumbing"
@ -211,3 +212,20 @@ type EndOfIndexEntry struct {
// their contents).
Hash plumbing.Hash
}
// SkipUnless applies patterns in the form of A, A/B, A/B/C
// to the index to prevent the files from being checked out
func (i *Index) SkipUnless(patterns []string) {
for _, e := range i.Entries {
var include bool
for _, pattern := range patterns {
if strings.HasPrefix(e.Name, pattern) {
include = true
break
}
}
if !include {
e.SkipWorktree = true
}
}
}
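SkipUnless is what backs the new sparse-checkout support; a hedged sketch of how it is reached from the public API (directory names are illustrative):

    package main

    import (
        "github.com/go-git/go-git/v5"
        "github.com/go-git/go-git/v5/plumbing"
    )

    func main() {
        repo, err := git.PlainOpen(".")
        if err != nil {
            panic(err)
        }
        wt, err := repo.Worktree()
        if err != nil {
            panic(err)
        }
        // Entries outside these prefixes get SkipWorktree set and are not checked out.
        err = wt.Checkout(&git.CheckoutOptions{
            Branch:                    plumbing.NewBranchReferenceName("main"),
            SparseCheckoutDirectories: []string{"docs", "cmd"},
        })
        if err != nil {
            panic(err)
        }
    }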

View File

@ -1,13 +1,13 @@
package objfile
import (
"compress/zlib"
"errors"
"io"
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -20,20 +20,22 @@ var (
// Reader implements io.ReadCloser. Close should be called when finished with
// the Reader. Close will not close the underlying io.Reader.
type Reader struct {
multi io.Reader
zlib io.ReadCloser
hasher plumbing.Hasher
multi io.Reader
zlib io.Reader
zlibref sync.ZLibReader
hasher plumbing.Hasher
}
// NewReader returns a new Reader reading from r.
func NewReader(r io.Reader) (*Reader, error) {
zlib, err := zlib.NewReader(r)
zlib, err := sync.GetZlibReader(r)
if err != nil {
return nil, packfile.ErrZLib.AddDetails(err.Error())
}
return &Reader{
zlib: zlib,
zlib: zlib.Reader,
zlibref: zlib,
}, nil
}
@ -110,5 +112,6 @@ func (r *Reader) Hash() plumbing.Hash {
// Close releases any resources consumed by the Reader. Calling Close does not
// close the wrapped io.Reader originally passed to NewReader.
func (r *Reader) Close() error {
return r.zlib.Close()
sync.PutZlibReader(r.zlibref)
return nil
}

View File

@ -7,6 +7,7 @@ import (
"strconv"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -18,9 +19,9 @@ var (
// not close the underlying io.Writer.
type Writer struct {
raw io.Writer
zlib io.WriteCloser
hasher plumbing.Hasher
multi io.Writer
zlib *zlib.Writer
closed bool
pending int64 // number of unwritten bytes
@ -31,9 +32,10 @@ type Writer struct {
// The returned Writer implements io.WriteCloser. Close should be called when
// finished with the Writer. Close will not close the underlying io.Writer.
func NewWriter(w io.Writer) *Writer {
zlib := sync.GetZlibWriter(w)
return &Writer{
raw: w,
zlib: zlib.NewWriter(w),
zlib: zlib,
}
}
@ -100,6 +102,7 @@ func (w *Writer) Hash() plumbing.Hash {
// Calling Close does not close the wrapped io.Writer originally passed to
// NewWriter.
func (w *Writer) Close() error {
defer sync.PutZlibWriter(w.zlib)
if err := w.zlib.Close(); err != nil {
return err
}

View File

@ -1,10 +1,7 @@
package packfile
import (
"bytes"
"compress/zlib"
"io"
"sync"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -61,18 +58,3 @@ func WritePackfileToObjectStorage(
return err
}
var bufPool = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
var zlibReaderPool = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return r
},
}
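The pools removed here are replaced by the shared helpers in utils/sync that the following files switch to; a sketch of that pooled buffer/zlib pattern (the package is aliased to gsync to avoid clashing with the standard library):

    package main

    import (
        "bytes"
        "fmt"
        "io"

        gsync "github.com/go-git/go-git/v5/utils/sync"
    )

    func main() {
        // Pooled buffer, returned to the pool when done.
        buf := gsync.GetBytesBuffer()
        defer gsync.PutBytesBuffer(buf)

        // Build a small zlib stream so the pooled reader has something to decode.
        zw := gsync.GetZlibWriter(buf)
        zw.Write([]byte("hello"))
        zw.Close()
        gsync.PutZlibWriter(zw)

        zr, err := gsync.GetZlibReader(bytes.NewReader(buf.Bytes()))
        if err != nil {
            panic(err)
        }
        defer gsync.PutZlibReader(zr)
        out, _ := io.ReadAll(zr.Reader)
        fmt.Println(string(out))
    }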

View File

@ -5,6 +5,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and
@ -43,18 +44,16 @@ func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbin
defer ioutil.CheckClose(tr, &err)
bb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(bb)
bb.Reset()
bb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(bb)
_, err = bb.ReadFrom(br)
if err != nil {
return nil, err
}
tb := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(tb)
tb.Reset()
tb := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(tb)
_, err = tb.ReadFrom(tr)
if err != nil {
@ -80,9 +79,8 @@ func DiffDelta(src, tgt []byte) []byte {
}
func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
buf.Write(deltaEncodeSize(len(src)))
buf.Write(deltaEncodeSize(len(tgt)))
@ -90,9 +88,8 @@ func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte {
index.init(src)
}
ibuf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(ibuf)
ibuf.Reset()
ibuf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(ibuf)
for i := 0; i < len(tgt); i++ {
offset, l := index.findMatch(src, tgt, i)

View File

@ -2,11 +2,12 @@ package packfile
import (
"compress/zlib"
"crypto/sha1"
"crypto"
"fmt"
"io"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/hash"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
@ -28,7 +29,7 @@ type Encoder struct {
// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true.
func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder {
h := plumbing.Hasher{
Hash: sha1.New(),
Hash: hash.New(crypto.SHA1),
}
mw := io.MultiWriter(w, h)
ow := newOffsetWriter(mw)

View File

@ -7,19 +7,20 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
// FSObject is an object from the packfile on the filesystem.
type FSObject struct {
hash plumbing.Hash
h *ObjectHeader
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
hash plumbing.Hash
offset int64
size int64
typ plumbing.ObjectType
index idxfile.Index
fs billy.Filesystem
path string
cache cache.Object
largeObjectThreshold int64
}
// NewFSObject creates a new filesystem object.
@ -32,16 +33,18 @@ func NewFSObject(
fs billy.Filesystem,
path string,
cache cache.Object,
largeObjectThreshold int64,
) *FSObject {
return &FSObject{
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
hash: hash,
offset: offset,
size: contentSize,
typ: finalType,
index: index,
fs: fs,
path: path,
cache: cache,
largeObjectThreshold: largeObjectThreshold,
}
}
@ -62,7 +65,21 @@ func (o *FSObject) Reader() (io.ReadCloser, error) {
return nil, err
}
p := NewPackfileWithCache(o.index, nil, f, o.cache)
p := NewPackfileWithCache(o.index, nil, f, o.cache, o.largeObjectThreshold)
if o.largeObjectThreshold > 0 && o.size > o.largeObjectThreshold {
// We have a big object
h, err := p.objectHeaderAtOffset(o.offset)
if err != nil {
return nil, err
}
r, err := p.getReaderDirect(h)
if err != nil {
_ = f.Close()
return nil, err
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
r, err := p.getObjectContent(o.offset)
if err != nil {
_ = f.Close()
@ -100,17 +117,3 @@ func (o *FSObject) Type() plumbing.ObjectType {
func (o *FSObject) Writer() (io.WriteCloser, error) {
return nil, nil
}
type objectReader struct {
io.ReadCloser
f billy.File
}
func (r *objectReader) Close() error {
if err := r.ReadCloser.Close(); err != nil {
_ = r.f.Close()
return err
}
return r.f.Close()
}

View File

@ -2,6 +2,7 @@ package packfile
import (
"bytes"
"fmt"
"io"
"os"
@ -11,6 +12,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/format/idxfile"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -35,11 +37,12 @@ const smallObjectThreshold = 16 * 1024
// Packfile allows retrieving information from inside a packfile.
type Packfile struct {
idxfile.Index
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
fs billy.Filesystem
file billy.File
s *Scanner
deltaBaseCache cache.Object
offsetToType map[int64]plumbing.ObjectType
largeObjectThreshold int64
}
// NewPackfileWithCache creates a new Packfile with the given object cache.
@ -50,6 +53,7 @@ func NewPackfileWithCache(
fs billy.Filesystem,
file billy.File,
cache cache.Object,
largeObjectThreshold int64,
) *Packfile {
s := NewScanner(file)
return &Packfile{
@ -59,6 +63,7 @@ func NewPackfileWithCache(
s,
cache,
make(map[int64]plumbing.ObjectType),
largeObjectThreshold,
}
}
@ -66,8 +71,8 @@ func NewPackfileWithCache(
// and packfile idx.
// If the filesystem is provided, the packfile will return FSObjects, otherwise
// it will return MemoryObjects.
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault())
func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File, largeObjectThreshold int64) *Packfile {
return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault(), largeObjectThreshold)
}
// Get retrieves the encoded object in the packfile with the given hash.
@ -133,9 +138,8 @@ func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return h.Length, nil
case plumbing.REFDeltaObject, plumbing.OFSDeltaObject:
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return 0, err
@ -222,9 +226,9 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
// For delta objects we read the delta data and apply the small object
// optimization only if the expanded version of the object still meets
// the small object threshold condition.
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
if _, _, err := p.s.NextObject(buf); err != nil {
return nil, err
}
@ -263,6 +267,7 @@ func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.
p.fs,
p.file.Name(),
p.deltaBaseCache,
p.largeObjectThreshold,
), nil
}
@ -282,6 +287,49 @@ func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) {
return obj.Reader()
}
func asyncReader(p *Packfile) (io.ReadCloser, error) {
reader := ioutil.NewReaderUsingReaderAt(p.file, p.s.r.offset)
zr, err := sync.GetZlibReader(reader)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
func (p *Packfile) getReaderDirect(h *ObjectHeader) (io.ReadCloser, error) {
switch h.Type {
case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject:
return asyncReader(p)
case plumbing.REFDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readREFDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
case plumbing.OFSDeltaObject:
deltaRc, err := asyncReader(p)
if err != nil {
return nil, err
}
r, err := p.readOFSDeltaObjectContent(h, deltaRc)
if err != nil {
return nil, err
}
return r, nil
default:
return nil, ErrInvalidObject.AddDetails("type %q", h.Type)
}
}
func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) {
var obj = new(plumbing.MemoryObject)
obj.SetSize(h.Length)
@ -323,9 +371,9 @@ func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err err
}
func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -334,6 +382,20 @@ func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plu
return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf)
}
func (p *Packfile) readREFDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
var err error
base, ok := p.cacheGet(h.Reference)
if !ok {
base, err = p.Get(h.Reference)
if err != nil {
return nil, err
}
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error {
var err error
@ -353,9 +415,9 @@ func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObjec
}
func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error {
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, _, err := p.s.NextObject(buf)
if err != nil {
return err
@ -364,6 +426,20 @@ func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset
return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf)
}
func (p *Packfile) readOFSDeltaObjectContent(h *ObjectHeader, deltaRC io.Reader) (io.ReadCloser, error) {
hash, err := p.FindHash(h.OffsetReference)
if err != nil {
return nil, err
}
base, err := p.objectAtOffset(h.OffsetReference, hash)
if err != nil {
return nil, err
}
return ReaderFromDelta(base, deltaRC)
}
func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error {
hash, err := p.FindHash(offset)
if err != nil {

View File

@ -10,6 +10,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -46,7 +47,6 @@ type Parser struct {
oi []*objectInfo
oiByHash map[plumbing.Hash]*objectInfo
oiByOffset map[int64]*objectInfo
hashOffset map[plumbing.Hash]int64
checksum plumbing.Hash
cache *cache.BufferLRU
@ -176,7 +176,8 @@ func (p *Parser) init() error {
}
func (p *Parser) indexObjects() error {
buf := new(bytes.Buffer)
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for i := uint32(0); i < p.count; i++ {
buf.Reset()
@ -220,6 +221,7 @@ func (p *Parser) indexObjects() error {
ota = newBaseObject(oh.Offset, oh.Length, t)
}
buf.Grow(int(oh.Length))
_, crc, err := p.scanner.NextObject(buf)
if err != nil {
return err
@ -265,7 +267,9 @@ func (p *Parser) indexObjects() error {
}
func (p *Parser) resolveDeltas() error {
buf := &bytes.Buffer{}
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
for _, obj := range p.oi {
buf.Reset()
err := p.get(obj, buf)
@ -287,6 +291,7 @@ func (p *Parser) resolveDeltas() error {
if err := p.resolveObject(stdioutil.Discard, child, content); err != nil {
return err
}
p.resolveExternalRef(child)
}
// Remove the delta from the cache.
@ -299,6 +304,16 @@ func (p *Parser) resolveDeltas() error {
return nil
}
func (p *Parser) resolveExternalRef(o *objectInfo) {
if ref, ok := p.oiByHash[o.SHA1]; ok && ref.ExternalRef {
p.oiByHash[o.SHA1] = o
o.Children = ref.Children
for _, c := range o.Children {
c.Parent = o
}
}
}
func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
if !o.ExternalRef { // skip cache check for placeholder parents
b, ok := p.cache.Get(o.Offset)
@ -336,9 +351,8 @@ func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) {
}
if o.DiskType.IsDelta() {
b := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(b)
b.Reset()
b := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(b)
err := p.get(o.Parent, b)
if err != nil {
return err
@ -372,9 +386,8 @@ func (p *Parser) resolveObject(
if !o.DiskType.IsDelta() {
return nil
}
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
err := p.readData(buf, o)
if err != nil {
return err

View File

@ -1,12 +1,15 @@
package packfile
import (
"bufio"
"bytes"
"errors"
"io"
"math"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h
@ -32,18 +35,16 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
defer ioutil.CheckClose(w, &err)
buf := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(buf)
buf.Reset()
buf := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(buf)
_, err = buf.ReadFrom(r)
if err != nil {
return err
}
src := buf.Bytes()
dst := bufPool.Get().(*bytes.Buffer)
defer bufPool.Put(dst)
dst.Reset()
dst := sync.GetBytesBuffer()
defer sync.PutBytesBuffer(dst)
err = patchDelta(dst, src, delta)
if err != nil {
return err
@ -51,9 +52,9 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
target.SetSize(int64(dst.Len()))
b := byteSlicePool.Get().([]byte)
_, err = io.CopyBuffer(w, dst, b)
byteSlicePool.Put(b)
b := sync.GetByteSlice()
_, err = io.CopyBuffer(w, dst, *b)
sync.PutByteSlice(b)
return err
}
@ -73,6 +74,131 @@ func PatchDelta(src, delta []byte) ([]byte, error) {
return b.Bytes(), nil
}
func ReaderFromDelta(base plumbing.EncodedObject, deltaRC io.Reader) (io.ReadCloser, error) {
deltaBuf := bufio.NewReaderSize(deltaRC, 1024)
srcSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
if srcSz != uint(base.Size()) {
return nil, ErrInvalidDelta
}
targetSz, err := decodeLEB128ByteReader(deltaBuf)
if err != nil {
if err == io.EOF {
return nil, ErrInvalidDelta
}
return nil, err
}
remainingTargetSz := targetSz
dstRd, dstWr := io.Pipe()
go func() {
baseRd, err := base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
defer baseRd.Close()
baseBuf := bufio.NewReader(baseRd)
basePos := uint(0)
for {
cmd, err := deltaBuf.ReadByte()
if err == io.EOF {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if isCopyFromSrc(cmd) {
offset, err := decodeOffsetByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
sz, err := decodeSizeByteReader(cmd, deltaBuf)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
if invalidSize(sz, targetSz) ||
invalidOffsetSize(offset, sz, srcSz) {
_ = dstWr.Close()
return
}
discard := offset - basePos
if basePos > offset {
_ = baseRd.Close()
baseRd, err = base.Reader()
if err != nil {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
baseBuf.Reset(baseRd)
discard = offset
}
for discard > math.MaxInt32 {
n, err := baseBuf.Discard(math.MaxInt32)
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
for discard > 0 {
n, err := baseBuf.Discard(int(discard))
if err != nil {
_ = dstWr.CloseWithError(err)
return
}
basePos += uint(n)
discard -= uint(n)
}
if _, err := io.Copy(dstWr, io.LimitReader(baseBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
basePos += sz
} else if isCopyFromDelta(cmd) {
sz := uint(cmd) // cmd is the size itself
if invalidSize(sz, targetSz) {
_ = dstWr.CloseWithError(ErrInvalidDelta)
return
}
if _, err := io.Copy(dstWr, io.LimitReader(deltaBuf, int64(sz))); err != nil {
_ = dstWr.CloseWithError(err)
return
}
remainingTargetSz -= sz
} else {
_ = dstWr.CloseWithError(ErrDeltaCmd)
return
}
if remainingTargetSz <= 0 {
_ = dstWr.Close()
return
}
}
}()
return dstRd, nil
}
func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
if len(delta) < deltaSizeMin {
return ErrInvalidDelta
@ -161,6 +287,25 @@ func decodeLEB128(input []byte) (uint, []byte) {
return num, input[sz:]
}
func decodeLEB128ByteReader(input io.ByteReader) (uint, error) {
var num, sz uint
for {
b, err := input.ReadByte()
if err != nil {
return 0, err
}
num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks
sz++
if uint(b)&continuation == 0 {
break
}
}
return num, nil
}
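The size headers in git deltas use little-endian base-128 varints, which is what decodeLEB128ByteReader implements above. A self-contained sketch of the same loop (constants renamed locally; not part of this change):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

const (
	payloadMask      = 0x7f // low 7 bits carry data
	continuationMask = 0x80 // high bit set means another byte follows
)

// decodeLEB128 mirrors decodeLEB128ByteReader: 7-bit groups, least
// significant group first, until a byte without the continuation bit.
func decodeLEB128(r io.ByteReader) (uint, error) {
	var num, shift uint
	for {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		num |= (uint(b) & payloadMask) << (shift * 7)
		shift++
		if b&continuationMask == 0 {
			return num, nil
		}
	}
}

func main() {
	// 0xe5 0x8e 0x26 decodes to 624485.
	n, err := decodeLEB128(bytes.NewReader([]byte{0xe5, 0x8e, 0x26}))
	fmt.Println(n, err)
}
```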
const (
payload = 0x7f // 0111 1111
continuation = 0x80 // 1000 0000
@ -174,6 +319,40 @@ func isCopyFromDelta(cmd byte) bool {
return (cmd&0x80) == 0 && cmd != 0
}
func decodeOffsetByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var offset uint
if (cmd & 0x01) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset = uint(next)
}
if (cmd & 0x02) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 8
}
if (cmd & 0x04) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 16
}
if (cmd & 0x08) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
offset |= uint(next) << 24
}
return offset, nil
}
func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
var offset uint
if (cmd & 0x01) != 0 {
@ -208,6 +387,36 @@ func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) {
return offset, delta, nil
}
func decodeSizeByteReader(cmd byte, delta io.ByteReader) (uint, error) {
var sz uint
if (cmd & 0x10) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz = uint(next)
}
if (cmd & 0x20) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 8
}
if (cmd & 0x40) != 0 {
next, err := delta.ReadByte()
if err != nil {
return 0, err
}
sz |= uint(next) << 16
}
if sz == 0 {
sz = 0x10000
}
return sz, nil
}
func decodeSize(cmd byte, delta []byte) (uint, []byte, error) {
var sz uint
if (cmd & 0x10) != 0 {

View File

@ -3,17 +3,16 @@ package packfile
import (
"bufio"
"bytes"
"compress/zlib"
"fmt"
"hash"
"hash/crc32"
"io"
stdioutil "io/ioutil"
"sync"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/utils/binary"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -114,7 +113,7 @@ func (s *Scanner) Header() (version, objects uint32, err error) {
return
}
// readSignature reads an returns the signature field in the packfile.
// readSignature reads and returns the signature field in the packfile.
func (s *Scanner) readSignature() ([]byte, error) {
var sig = make([]byte, 4)
if _, err := io.ReadFull(s.r, sig); err != nil {
@ -320,29 +319,38 @@ func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err erro
return
}
// ReadObject returns a reader for the object content and an error
func (s *Scanner) ReadObject() (io.ReadCloser, error) {
s.pendingObject = nil
zr, err := sync.GetZlibReader(s.r)
if err != nil {
return nil, fmt.Errorf("zlib reset error: %s", err)
}
return ioutil.NewReadCloserWithCloser(zr.Reader, func() error {
sync.PutZlibReader(zr)
return nil
}), nil
}
// ReadRegularObject reads and writes a non-deltified object
// from its zlib stream in an object entry in the packfile.
func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
zr := zlibReaderPool.Get().(io.ReadCloser)
defer zlibReaderPool.Put(zr)
zr, err := sync.GetZlibReader(s.r)
defer sync.PutZlibReader(zr)
if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
if err != nil {
return 0, fmt.Errorf("zlib reset error: %s", err)
}
defer ioutil.CheckClose(zr, &err)
buf := byteSlicePool.Get().([]byte)
n, err = io.CopyBuffer(w, zr, buf)
byteSlicePool.Put(buf)
defer ioutil.CheckClose(zr.Reader, &err)
buf := sync.GetByteSlice()
n, err = io.CopyBuffer(w, zr.Reader, *buf)
sync.PutByteSlice(buf)
return
}
var byteSlicePool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
// SeekFromStart sets a new offset from start, returns the old position before
// the change.
func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
@ -372,9 +380,10 @@ func (s *Scanner) Checksum() (plumbing.Hash, error) {
// Close reads the reader until io.EOF
func (s *Scanner) Close() error {
buf := byteSlicePool.Get().([]byte)
_, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
byteSlicePool.Put(buf)
buf := sync.GetByteSlice()
_, err := io.CopyBuffer(stdioutil.Discard, s.r, *buf)
sync.PutByteSlice(buf)
return err
}
@ -384,13 +393,13 @@ func (s *Scanner) Flush() error {
}
// scannerReader has the following characteristics:
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penality for performing small writes
// to the crc32 hash writer.
// - Provides an io.SeekReader impl for bufio.Reader, when the underlying
// reader supports it.
// - Keeps track of the current read position, for when the underlying reader
// isn't an io.SeekReader, but we still want to know the current offset.
// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
// The buffer helps avoid a performance penalty for performing small writes
// to the crc32 hash writer.
type scannerReader struct {
reader io.Reader
crc io.Writer

View File

@ -2,11 +2,12 @@ package plumbing
import (
"bytes"
"crypto/sha1"
"crypto"
"encoding/hex"
"hash"
"sort"
"strconv"
"github.com/go-git/go-git/v5/plumbing/hash"
)
// Hash SHA1 hashed content
@ -46,7 +47,7 @@ type Hasher struct {
}
func NewHasher(t ObjectType, size int64) Hasher {
h := Hasher{sha1.New()}
h := Hasher{hash.New(crypto.SHA1)}
h.Write(t.Bytes())
h.Write([]byte(" "))
h.Write([]byte(strconv.FormatInt(size, 10)))

View File

@ -0,0 +1,57 @@
// Package hash provides a way of managing the
// underlying hash implementations used across go-git.
package hash
import (
"crypto"
"fmt"
"hash"
"github.com/pjbgf/sha1cd"
)
// algos is a map of hash algorithms.
var algos = map[crypto.Hash]func() hash.Hash{}
func init() {
reset()
}
// reset resets the default algos value. Can be used after running tests
// that register new algorithms, to avoid side effects.
func reset() {
algos[crypto.SHA1] = sha1cd.New
}
// RegisterHash allows the hash algorithm used to be overridden.
// This ensures the hash selection for go-git must be explicit, when
// overriding the default value.
func RegisterHash(h crypto.Hash, f func() hash.Hash) error {
if f == nil {
return fmt.Errorf("cannot register hash: f is nil")
}
switch h {
case crypto.SHA1:
algos[h] = f
default:
return fmt.Errorf("unsupported hash function: %v", h)
}
return nil
}
// Hash is the same as hash.Hash. This allows consumers
// to avoid having to import this package alongside "hash".
type Hash interface {
hash.Hash
}
// New returns a new Hash for the given hash function.
// It panics if the hash function is not registered.
func New(h crypto.Hash) Hash {
hh, ok := algos[h]
if !ok {
panic(fmt.Sprintf("hash algorithm not registered: %v", h))
}
return hh()
}
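As a usage sketch (not part of this change), a consumer could swap the default sha1cd implementation back to crypto/sha1 and hash through the package like this:

```go
package main

import (
	"crypto"
	"crypto/sha1"
	"fmt"
	stdhash "hash"

	"github.com/go-git/go-git/v5/plumbing/hash"
)

func main() {
	// Override the default (sha1cd) implementation; any func() hash.Hash works.
	if err := hash.RegisterHash(crypto.SHA1, func() stdhash.Hash { return sha1.New() }); err != nil {
		panic(err)
	}

	h := hash.New(crypto.SHA1)
	h.Write([]byte("blob 5\x00hello"))
	fmt.Printf("%x\n", h.Sum(nil))
}
```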

View File

@ -25,13 +25,13 @@ func (o *MemoryObject) Hash() Hash {
return o.h
}
// Type return the ObjectType
// Type returns the ObjectType
func (o *MemoryObject) Type() ObjectType { return o.t }
// SetType sets the ObjectType
func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
// Size return the size of the object
// Size returns the size of the object
func (o *MemoryObject) Size() int64 { return o.sz }
// SetSize set the object size, a content of the given size should be written

View File

@ -39,7 +39,7 @@ func (c *Change) Action() (merkletrie.Action, error) {
return merkletrie.Modify, nil
}
// Files return the files before and after a change.
// Files returns the files before and after a change.
// For insertions from will be nil. For deletions to will be nil.
func (c *Change) Files() (from, to *File, err error) {
action, err := c.Action()

View File

@ -16,11 +16,11 @@ func newChange(c merkletrie.Change) (*Change, error) {
var err error
if ret.From, err = newChangeEntry(c.From); err != nil {
return nil, fmt.Errorf("From field: %s", err)
return nil, fmt.Errorf("from field: %s", err)
}
if ret.To, err = newChangeEntry(c.To); err != nil {
return nil, fmt.Errorf("To field: %s", err)
return nil, fmt.Errorf("to field: %s", err)
}
return ret, nil

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"bytes"
"context"
"errors"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@ -180,9 +180,8 @@ func (c *Commit) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
var message bool
var pgpsig bool

View File

@ -1,12 +0,0 @@
package object
import (
"bufio"
"sync"
)
var bufPool = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}

View File

@ -96,10 +96,6 @@ func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, erro
}
func filePatch(c *Change) (fdiff.FilePatch, error) {
return filePatchWithContext(context.Background(), c)
}
func fileContent(f *File) (content string, isBinary bool, err error) {
if f == nil {
return

View File

@ -403,10 +403,16 @@ func min(a, b int) int {
return b
}
const maxMatrixSize = 10000
func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) {
// Allocate for the worst-case scenario where every pair has a score
// that we need to consider. We might not need that many.
matrix := make(similarityMatrix, 0, len(srcs)*len(dsts))
matrixSize := len(srcs) * len(dsts)
if matrixSize > maxMatrixSize {
matrixSize = maxMatrixSize
}
matrix := make(similarityMatrix, 0, matrixSize)
srcSizes := make([]int64, len(srcs))
dstSizes := make([]int64, len(dsts))
dstTooLarge := make(map[int]bool)

View File

@ -0,0 +1,101 @@
package object
import "bytes"
const (
signatureTypeUnknown signatureType = iota
signatureTypeOpenPGP
signatureTypeX509
signatureTypeSSH
)
var (
// openPGPSignatureFormat is the format of an OpenPGP signature.
openPGPSignatureFormat = signatureFormat{
[]byte("-----BEGIN PGP SIGNATURE-----"),
[]byte("-----BEGIN PGP MESSAGE-----"),
}
// x509SignatureFormat is the format of an X509 signature, which is
// a PKCS#7 (S/MIME) signature.
x509SignatureFormat = signatureFormat{
[]byte("-----BEGIN CERTIFICATE-----"),
}
// sshSignatureFormat is the format of an SSH signature.
sshSignatureFormat = signatureFormat{
[]byte("-----BEGIN SSH SIGNATURE-----"),
}
)
var (
// knownSignatureFormats is a map of known signature formats, indexed by
// their signatureType.
knownSignatureFormats = map[signatureType]signatureFormat{
signatureTypeOpenPGP: openPGPSignatureFormat,
signatureTypeX509: x509SignatureFormat,
signatureTypeSSH: sshSignatureFormat,
}
)
// signatureType represents the type of the signature.
type signatureType int8
// signatureFormat represents the beginning of a signature.
type signatureFormat [][]byte
// typeForSignature returns the type of the signature based on its format.
func typeForSignature(b []byte) signatureType {
for t, i := range knownSignatureFormats {
for _, begin := range i {
if bytes.HasPrefix(b, begin) {
return t
}
}
}
return signatureTypeUnknown
}
// parseSignedBytes returns the position of the last signature block found in
// the given bytes. If no signature block is found, it returns -1.
//
// When multiple signature blocks are found, the position of the last one is
// returned. Any trailing bytes after this signature block start should be
// considered part of the signature.
//
// Given this, it would be safe to use the returned position to split the bytes
// into two parts: the first part containing the message, the second part
// containing the signature.
//
// Example:
//
// message := []byte(`Message with signature
//
// -----BEGIN SSH SIGNATURE-----
// ...`)
//
// var signature string
// if pos, _ := parseSignedBytes(message); pos != -1 {
// signature = string(message[pos:])
// message = message[:pos]
// }
//
// This logic is on par with git's gpg-interface.c:parse_signed_buffer().
// https://github.com/git/git/blob/7c2ef319c52c4997256f5807564523dfd4acdfc7/gpg-interface.c#L668
func parseSignedBytes(b []byte) (int, signatureType) {
var n, match = 0, -1
var t signatureType
for n < len(b) {
var i = b[n:]
if st := typeForSignature(i); st != signatureTypeUnknown {
match = n
t = st
}
if eol := bytes.IndexByte(i, '\n'); eol >= 0 {
n += eol + 1
continue
}
// If we reach this point, we've reached the end.
break
}
return match, t
}

View File

@ -1,18 +1,16 @@
package object
import (
"bufio"
"bytes"
"fmt"
"io"
stdioutil "io/ioutil"
"strings"
"github.com/ProtonMail/go-crypto/openpgp"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
// Tag represents an annotated tag object. It points to a single git object of
@ -93,9 +91,9 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
var line []byte
line, err = r.ReadBytes('\n')
@ -128,40 +126,15 @@ func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
}
}
data, err := stdioutil.ReadAll(r)
data, err := io.ReadAll(r)
if err != nil {
return err
}
var pgpsig bool
// Check if data contains PGP signature.
if bytes.Contains(data, []byte(beginpgp)) {
// Split the lines at newline.
messageAndSig := bytes.Split(data, []byte("\n"))
for _, l := range messageAndSig {
if pgpsig {
if bytes.Contains(l, []byte(endpgp)) {
t.PGPSignature += endpgp + "\n"
break
} else {
t.PGPSignature += string(l) + "\n"
}
continue
}
// Check if it's the beginning of a PGP signature.
if bytes.Contains(l, []byte(beginpgp)) {
t.PGPSignature += beginpgp + "\n"
pgpsig = true
continue
}
t.Message += string(l) + "\n"
}
} else {
t.Message = string(data)
if sm, _ := parseSignedBytes(data); sm >= 0 {
t.PGPSignature = string(data[sm:])
data = data[:sm]
}
t.Message = string(data)
return nil
}

View File

@ -1,7 +1,6 @@
package object
import (
"bufio"
"context"
"errors"
"fmt"
@ -14,6 +13,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/filemode"
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/sync"
)
const (
@ -230,9 +230,9 @@ func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
}
defer ioutil.CheckClose(reader, &err)
r := bufPool.Get().(*bufio.Reader)
defer bufPool.Put(r)
r.Reset(reader)
r := sync.GetBufioReader(reader)
defer sync.PutBufioReader(r)
for {
str, err := r.ReadString(' ')
if err != nil {

View File

@ -38,6 +38,10 @@ func NewTreeRootNode(t *Tree) noder.Noder {
}
}
func (t *treeNoder) Skip() bool {
return false
}
func (t *treeNoder) isRoot() bool {
return t.name == ""
}

View File

@ -1,6 +1,11 @@
// Package capability defines the server and client capabilities.
package capability
import (
"fmt"
"os"
)
// Capability describes a server or client capability.
type Capability string
@ -238,7 +243,15 @@ const (
Filter Capability = "filter"
)
const DefaultAgent = "go-git/4.x"
const userAgent = "go-git/5.x"
// DefaultAgent provides the user agent string.
func DefaultAgent() string {
if envUserAgent, ok := os.LookupEnv("GO_GIT_USER_AGENT_EXTRA"); ok {
return fmt.Sprintf("%s %s", userAgent, envUserAgent)
}
return userAgent
}
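A small sketch of how the new agent hook behaves at runtime (the extra value is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)

func main() {
	fmt.Println(capability.DefaultAgent()) // "go-git/5.x"

	// Append an extra identifier; servers see it in the agent capability.
	os.Setenv("GO_GIT_USER_AGENT_EXTRA", "my-tool/1.0")
	fmt.Println(capability.DefaultAgent()) // "go-git/5.x my-tool/1.0"
}
```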
var known = map[Capability]bool{
MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,

View File

@ -86,7 +86,9 @@ func (l *List) Get(capability Capability) []string {
// Set sets a capability removing the previous values
func (l *List) Set(capability Capability, values ...string) error {
delete(l.m, capability)
if _, ok := l.m[capability]; ok {
l.m[capability].Values = l.m[capability].Values[:0]
}
return l.Add(capability, values...)
}

View File

@ -19,7 +19,6 @@ var (
// common
sp = []byte(" ")
eol = []byte("\n")
eq = []byte{'='}
// advertised-refs
null = []byte("\x00")

View File

@ -21,11 +21,6 @@ type ServerResponse struct {
// Decode decodes the response into the struct, isMultiACK should be true, if
// the request was done with multi_ack or multi_ack_detailed capabilities.
func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
// TODO: implement support for multi_ack or multi_ack_detailed responses
if isMultiACK {
return errors.New("multi_ack and multi_ack_detailed are not supported")
}
s := pktline.NewScanner(reader)
for s.Scan() {
@ -48,7 +43,23 @@ func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
}
}
return s.Err()
// isMultiACK is true when the remote server advertises the related
// capabilities when they are not in transport.UnsupportedCapabilities.
//
// Users may decide to remove multi_ack and multi_ack_detailed from the
// unsupported capabilities list, which allows them to do initial clones
// from Azure DevOps.
//
// Follow-up fetches may error, therefore errors are wrapped with additional
// information highlighting that these capabilities are not supported by go-git.
//
// TODO: Implement support for multi_ack or multi_ack_detailed responses.
err := s.Err()
if err != nil && isMultiACK {
return fmt.Errorf("multi_ack and multi_ack_detailed are not supported: %w", err)
}
return err
}
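The workaround referenced above would look roughly like this, assuming the exported transport.UnsupportedCapabilities variable and a placeholder Azure DevOps URL:

```go
package main

import (
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
	"github.com/go-git/go-git/v5/plumbing/transport"
	"github.com/go-git/go-git/v5/storage/memory"
)

func main() {
	// Stop listing multi_ack/multi_ack_detailed as unsupported so that
	// servers such as Azure DevOps accept the negotiation for the clone.
	transport.UnsupportedCapabilities = []capability.Capability{
		capability.ThinPack,
	}

	_, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL: "https://dev.azure.com/org/project/_git/repo", // placeholder
	})
	if err != nil {
		panic(err)
	}
}
```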
// stopReading detects when a valid command such as ACK or NAK is found to be
@ -113,8 +124,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}
// Encode encodes the ServerResponse into a writer.
func (r *ServerResponse) Encode(w io.Writer) error {
if len(r.ACKs) > 1 {
func (r *ServerResponse) Encode(w io.Writer, isMultiACK bool) error {
if len(r.ACKs) > 1 && !isMultiACK {
// For further information, refer to comments in the Decode func above.
return errors.New("multi_ack and multi_ack_detailed are not supported")
}

View File

@ -95,7 +95,7 @@ func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
}
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
return r

View File

@ -19,6 +19,7 @@ var (
type ReferenceUpdateRequest struct {
Capabilities *capability.List
Commands []*Command
Options []*Option
Shallow *plumbing.Hash
// Packfile contains an optional packfile reader.
Packfile io.ReadCloser
@ -58,7 +59,7 @@ func NewReferenceUpdateRequestFromCapabilities(adv *capability.List) *ReferenceU
r := NewReferenceUpdateRequest()
if adv.Supports(capability.Agent) {
r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
r.Capabilities.Set(capability.Agent, capability.DefaultAgent())
}
if adv.Supports(capability.ReportStatus) {
@ -86,9 +87,9 @@ type Action string
const (
Create Action = "create"
Update = "update"
Delete = "delete"
Invalid = "invalid"
Update Action = "update"
Delete Action = "delete"
Invalid Action = "invalid"
)
type Command struct {
@ -120,3 +121,8 @@ func (c *Command) validate() error {
return nil
}
type Option struct {
Key string
Value string
}

View File

@ -9,10 +9,6 @@ import (
"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
)
var (
zeroHashString = plumbing.ZeroHash.String()
)
// Encode writes the ReferenceUpdateRequest encoding to the stream.
func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
if err := req.validate(); err != nil {
@ -29,6 +25,12 @@ func (req *ReferenceUpdateRequest) Encode(w io.Writer) error {
return err
}
if req.Capabilities.Supports(capability.PushOptions) {
if err := req.encodeOptions(e, req.Options); err != nil {
return err
}
}
if req.Packfile != nil {
if _, err := io.Copy(w, req.Packfile); err != nil {
return err
@ -73,3 +75,15 @@ func formatCommand(cmd *Command) string {
n := cmd.New.String()
return fmt.Sprintf("%s %s %s", o, n, cmd.Name)
}
func (req *ReferenceUpdateRequest) encodeOptions(e *pktline.Encoder,
opts []*Option) error {
for _, opt := range opts {
if err := e.Encodef("%s=%s", opt.Key, opt.Value); err != nil {
return err
}
}
return e.Flush()
}

View File

@ -24,7 +24,6 @@ type UploadPackResponse struct {
r io.ReadCloser
isShallow bool
isMultiACK bool
isOk bool
}
// NewUploadPackResponse create a new UploadPackResponse instance, the request
@ -79,7 +78,7 @@ func (r *UploadPackResponse) Encode(w io.Writer) (err error) {
}
}
if err := r.ServerResponse.Encode(w); err != nil {
if err := r.ServerResponse.Encode(w, r.isMultiACK); err != nil {
return err
}

View File

@ -168,22 +168,22 @@ func NewHashReference(n ReferenceName, h Hash) *Reference {
}
}
// Type return the type of a reference
// Type returns the type of a reference
func (r *Reference) Type() ReferenceType {
return r.t
}
// Name return the name of a reference
// Name returns the name of a reference
func (r *Reference) Name() ReferenceName {
return r.n
}
// Hash return the hash of a hash reference
// Hash returns the hash of a hash reference
func (r *Reference) Hash() Hash {
return r.h
}
// Target return the target of a symbolic reference
// Target returns the target of a symbolic reference
func (r *Reference) Target() ReferenceName {
return r.target
}
@ -204,6 +204,21 @@ func (r *Reference) Strings() [2]string {
}
func (r *Reference) String() string {
s := r.Strings()
return fmt.Sprintf("%s %s", s[1], s[0])
ref := ""
switch r.Type() {
case HashReference:
ref = r.Hash().String()
case SymbolicReference:
ref = symrefPrefix + r.Target().String()
default:
return ""
}
name := r.Name().String()
var v strings.Builder
v.Grow(len(ref) + len(name) + 1)
v.WriteString(ref)
v.WriteString(" ")
v.WriteString(name)
return v.String()
}

View File

@ -52,8 +52,8 @@ type DeltaObjectStorer interface {
DeltaObject(plumbing.ObjectType, plumbing.Hash) (plumbing.EncodedObject, error)
}
// Transactioner is a optional method for ObjectStorer, it enable transaction
// base write and read operations in the storage
// Transactioner is an optional method for ObjectStorer; it enables transactional read and write
// operations.
type Transactioner interface {
// Begin starts a transaction.
Begin() Transaction
@ -87,8 +87,8 @@ type PackedObjectStorer interface {
DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) error
}
// PackfileWriter is a optional method for ObjectStorer, it enable direct write
// of packfile to the storage
// PackfileWriter is an optional method for ObjectStorer, it enables directly writing
// a packfile to storage.
type PackfileWriter interface {
// PackfileWriter returns a writer for writing a packfile to the storage
//

View File

@ -112,7 +112,7 @@ type Endpoint struct {
Port int
// Path is the repository path.
Path string
// InsecureSkipTLS skips ssl verify if protocal is https
// InsecureSkipTLS skips ssl verify if protocol is https
InsecureSkipTLS bool
// CaBundle specify additional ca bundle with system cert pool
CaBundle []byte

View File

@ -77,14 +77,14 @@ func (c *command) StderrPipe() (io.Reader, error) {
return nil, nil
}
// StdinPipe return the underlying connection as WriteCloser, wrapped to prevent
// StdinPipe returns the underlying connection as WriteCloser, wrapped to prevent
// call to the Close function from the connection, a command execution in git
// protocol can't be closed or killed
func (c *command) StdinPipe() (io.WriteCloser, error) {
return ioutil.WriteNopCloser(c.conn), nil
}
// StdoutPipe return the underlying connection as Reader
// StdoutPipe returns the underlying connection as Reader
func (c *command) StdoutPipe() (io.Reader, error) {
return c.conn, nil
}

View File

@ -428,11 +428,6 @@ func isRepoNotFoundError(s string) bool {
return false
}
var (
nak = []byte("NAK")
eol = []byte("\n")
)
// uploadPack implements the git-upload-pack protocol.
func uploadPack(w io.WriteCloser, r io.Reader, req *packp.UploadPackRequest) error {
// TODO support multi_ack mode

View File

@ -189,7 +189,7 @@ func (s *upSession) objectsToUpload(req *packp.UploadPackRequest) ([]plumbing.Ha
}
func (*upSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}
@ -355,7 +355,7 @@ func (s *rpSession) reportStatus() *packp.ReportStatus {
}
func (*rpSession) setSupportedCapabilities(c *capability.List) error {
if err := c.Set(capability.Agent, capability.DefaultAgent); err != nil {
if err := c.Set(capability.Agent, capability.DefaultAgent()); err != nil {
return err
}

View File

@ -10,10 +10,9 @@ import (
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/mitchellh/go-homedir"
"github.com/skeema/knownhosts"
sshagent "github.com/xanzy/ssh-agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/knownhosts"
)
const DefaultUsername = "git"
@ -44,7 +43,6 @@ const (
type KeyboardInteractive struct {
User string
Challenge ssh.KeyboardInteractiveChallenge
HostKeyCallbackHelper
}
func (a *KeyboardInteractive) Name() string {
@ -56,19 +54,18 @@ func (a *KeyboardInteractive) String() string {
}
func (a *KeyboardInteractive) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{
a.Challenge,
},
})
}, nil
}
// Password implements AuthMethod by using the given password.
type Password struct {
User string
Password string
HostKeyCallbackHelper
}
func (a *Password) Name() string {
@ -80,10 +77,10 @@ func (a *Password) String() string {
}
func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.Password(a.Password)},
})
}, nil
}
// PasswordCallback implements AuthMethod by using a callback
@ -91,7 +88,6 @@ func (a *Password) ClientConfig() (*ssh.ClientConfig, error) {
type PasswordCallback struct {
User string
Callback func() (pass string, err error)
HostKeyCallbackHelper
}
func (a *PasswordCallback) Name() string {
@ -103,17 +99,16 @@ func (a *PasswordCallback) String() string {
}
func (a *PasswordCallback) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PasswordCallback(a.Callback)},
})
}, nil
}
// PublicKeys implements AuthMethod by using the given key pairs.
type PublicKeys struct {
User string
Signer ssh.Signer
HostKeyCallbackHelper
}
// NewPublicKeys returns a PublicKeys from a PEM encoded private key. An
@ -152,10 +147,10 @@ func (a *PublicKeys) String() string {
}
func (a *PublicKeys) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PublicKeys(a.Signer)},
})
}, nil
}
func username() (string, error) {
@ -178,7 +173,6 @@ func username() (string, error) {
type PublicKeysCallback struct {
User string
Callback func() (signers []ssh.Signer, err error)
HostKeyCallbackHelper
}
// NewSSHAgentAuth returns a PublicKeysCallback based on a SSH agent, it opens
@ -213,10 +207,10 @@ func (a *PublicKeysCallback) String() string {
}
func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
return a.SetHostKeyCallback(&ssh.ClientConfig{
return &ssh.ClientConfig{
User: a.User,
Auth: []ssh.AuthMethod{ssh.PublicKeysCallback(a.Callback)},
})
}, nil
}
// NewKnownHostsCallback returns ssh.HostKeyCallback based on a file based on a
@ -224,12 +218,19 @@ func (a *PublicKeysCallback) ClientConfig() (*ssh.ClientConfig, error) {
//
// If list of files is empty, then it will be read from the SSH_KNOWN_HOSTS
// environment variable, example:
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// /home/foo/custom_known_hosts_file:/etc/custom_known/hosts_file
//
// If SSH_KNOWN_HOSTS is not set the following file locations will be used:
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
//
// ~/.ssh/known_hosts
// /etc/ssh/ssh_known_hosts
func NewKnownHostsCallback(files ...string) (ssh.HostKeyCallback, error) {
kh, err := newKnownHosts(files...)
return ssh.HostKeyCallback(kh), err
}
func newKnownHosts(files ...string) (knownhosts.HostKeyCallback, error) {
var err error
if len(files) == 0 {
@ -251,7 +252,7 @@ func getDefaultKnownHostsFiles() ([]string, error) {
return files, nil
}
homeDirPath, err := homedir.Dir()
homeDirPath, err := os.UserHomeDir()
if err != nil {
return nil, err
}
@ -285,6 +286,9 @@ func filterKnownHostsFiles(files ...string) ([]string, error) {
// HostKeyCallbackHelper is a helper that provides common functionality to
// configure HostKeyCallback into a ssh.ClientConfig.
// Deprecated in favor of SetConfigHostKeyFields (see common.go) which provides
// a mechanism for also setting ClientConfig.HostKeyAlgorithms for a specific
// host.
type HostKeyCallbackHelper struct {
// HostKeyCallback is the function type used for verifying server keys.
// If nil, a default callback will be created using NewKnownHostsCallback

View File

@ -121,10 +121,15 @@ func (c *command) connect() error {
if err != nil {
return err
}
hostWithPort := c.getHostWithPort()
config, err = SetConfigHostKeyFields(config, hostWithPort)
if err != nil {
return err
}
overrideConfig(c.config, config)
c.client, err = dial("tcp", c.getHostWithPort(), config)
c.client, err = dial("tcp", hostWithPort, config)
if err != nil {
return err
}
@ -162,6 +167,23 @@ func dial(network, addr string, config *ssh.ClientConfig) (*ssh.Client, error) {
return ssh.NewClient(c, chans, reqs), nil
}
// SetConfigHostKeyFields sets cfg.HostKeyCallback and cfg.HostKeyAlgorithms
// based on OpenSSH known_hosts. cfg is modified in-place. hostWithPort must be
// supplied, since the algorithms will be set based on the known host keys for
// that specific host. Otherwise, golang.org/x/crypto/ssh can return an error
// upon connecting to a host whose *first* key is not known, even though other
// keys (of different types) are known and match properly.
// For background see https://github.com/go-git/go-git/issues/411 as well as
// https://github.com/golang/go/issues/29286 for root cause.
func SetConfigHostKeyFields(cfg *ssh.ClientConfig, hostWithPort string) (*ssh.ClientConfig, error) {
kh, err := newKnownHosts()
if err == nil {
cfg.HostKeyCallback = kh.HostKeyCallback()
cfg.HostKeyAlgorithms = kh.HostKeyAlgorithms(hostWithPort)
}
return cfg, err
}
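For callers that assemble the ssh.ClientConfig themselves, a sketch of using the new helper (key path and host are placeholders):

```go
package main

import (
	"fmt"

	gitssh "github.com/go-git/go-git/v5/plumbing/transport/ssh"
)

func main() {
	// Placeholder key path and host; adjust for a real setup.
	auth, err := gitssh.NewPublicKeysFromFile("git", "/home/me/.ssh/id_ed25519", "")
	if err != nil {
		panic(err)
	}

	cfg, err := auth.ClientConfig()
	if err != nil {
		panic(err)
	}

	// Populate HostKeyCallback and HostKeyAlgorithms for this specific
	// host:port, avoiding the "first key type wins" mismatch described above.
	cfg, err = gitssh.SetConfigHostKeyFields(cfg, "github.com:22")
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg.HostKeyAlgorithms)
}
```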
func (c *command) getHostWithPort() string {
if addr, found := c.doGetHostWithPortFromSSHConfig(); found {
return addr

View File

@ -17,7 +17,7 @@ type PruneOptions struct {
Handler PruneHandler
}
var ErrLooseObjectsNotSupported = errors.New("Loose objects not supported")
var ErrLooseObjectsNotSupported = errors.New("loose objects not supported")
// DeleteObject deletes an object from a repository.
// The type conveniently matches PruneHandler.

View File

@ -5,10 +5,12 @@ import (
"errors"
"fmt"
"io"
"strings"
"time"
"github.com/go-git/go-billy/v5/osfs"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/internal/url"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/cache"
"github.com/go-git/go-git/v5/plumbing/format/packfile"
@ -103,7 +105,11 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
}
s, err := newSendPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle)
if err != nil {
return err
}
@ -183,12 +189,12 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
var hashesToPush []plumbing.Hash
// Avoid the expensive revlist operation if we're only doing deletes.
if !allDelete {
if r.c.IsFirstURLLocal() {
if url.IsLocalEndpoint(o.RemoteURL) {
// If we're are pushing to a local repo, it might be much
// faster to use a local storage layer to get the commits
// to ignore, when calculating the object revlist.
localStorer := filesystem.NewStorage(
osfs.New(r.c.URLs[0]), cache.NewObjectLRUDefault())
osfs.New(o.RemoteURL), cache.NewObjectLRUDefault())
hashesToPush, err = revlist.ObjectsWithStorageForIgnores(
r.s, localStorer, objects, haves)
} else {
@ -225,6 +231,74 @@ func (r *Remote) useRefDeltas(ar *packp.AdvRefs) bool {
return !ar.Capabilities.Supports(capability.OFSDelta)
}
func (r *Remote) addReachableTags(localRefs []*plumbing.Reference, remoteRefs storer.ReferenceStorer, req *packp.ReferenceUpdateRequest) error {
tags := make(map[plumbing.Reference]struct{})
// get a list of all tags locally
for _, ref := range localRefs {
if strings.HasPrefix(string(ref.Name()), "refs/tags") {
tags[*ref] = struct{}{}
}
}
remoteRefIter, err := remoteRefs.IterReferences()
if err != nil {
return err
}
// remove any that are already on the remote
if err := remoteRefIter.ForEach(func(reference *plumbing.Reference) error {
delete(tags, *reference)
return nil
}); err != nil {
return err
}
for tag := range tags {
tagObject, err := object.GetObject(r.s, tag.Hash())
var tagCommit *object.Commit
if err != nil {
return fmt.Errorf("get tag object: %w", err)
}
if tagObject.Type() != plumbing.TagObject {
continue
}
annotatedTag, ok := tagObject.(*object.Tag)
if !ok {
return errors.New("could not get annotated tag object")
}
tagCommit, err = object.GetCommit(r.s, annotatedTag.Target)
if err != nil {
return fmt.Errorf("get annotated tag commit: %w", err)
}
// only include tags that are reachable from one of the refs
// already being pushed
for _, cmd := range req.Commands {
if tag.Name() == cmd.Name {
continue
}
if strings.HasPrefix(cmd.Name.String(), "refs/tags") {
continue
}
c, err := object.GetCommit(r.s, cmd.New)
if err != nil {
return fmt.Errorf("get commit %v: %w", cmd.Name, err)
}
if isAncestor, err := tagCommit.IsAncestor(c); err == nil && isAncestor {
req.Commands = append(req.Commands, &packp.Command{Name: tag.Name(), New: tag.Hash()})
}
}
}
return nil
}
func (r *Remote) newReferenceUpdateRequest(
o *PushOptions,
localRefs []*plumbing.Reference,
@ -242,10 +316,28 @@ func (r *Remote) newReferenceUpdateRequest(
}
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune); err != nil {
if ar.Capabilities.Supports(capability.PushOptions) {
_ = req.Capabilities.Set(capability.PushOptions)
for k, v := range o.Options {
req.Options = append(req.Options, &packp.Option{Key: k, Value: v})
}
}
if o.Atomic && ar.Capabilities.Supports(capability.Atomic) {
_ = req.Capabilities.Set(capability.Atomic)
}
if err := r.addReferencesToUpdate(o.RefSpecs, localRefs, remoteRefs, req, o.Prune, o.ForceWithLease); err != nil {
return nil, err
}
if o.FollowTags {
if err := r.addReachableTags(localRefs, remoteRefs, req); err != nil {
return nil, err
}
}
return req, nil
}
@ -314,7 +406,11 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
o.RefSpecs = r.c.Fetch
}
s, err := newUploadPackSession(r.c.URLs[0], o.Auth, o.InsecureSkipTLS, o.CABundle)
if o.RemoteURL == "" {
o.RemoteURL = r.c.URLs[0]
}
s, err := newUploadPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle)
if err != nil {
return nil, err
}
@ -474,6 +570,7 @@ func (r *Remote) addReferencesToUpdate(
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
prune bool,
forceWithLease *ForceWithLease,
) error {
// This references dictionary will be used to search references by name.
refsDict := make(map[string]*plumbing.Reference)
@ -487,7 +584,7 @@ func (r *Remote) addReferencesToUpdate(
return err
}
} else {
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req)
err := r.addOrUpdateReferences(rs, localRefs, refsDict, remoteRefs, req, forceWithLease)
if err != nil {
return err
}
@ -509,20 +606,25 @@ func (r *Remote) addOrUpdateReferences(
refsDict map[string]*plumbing.Reference,
remoteRefs storer.ReferenceStorer,
req *packp.ReferenceUpdateRequest,
forceWithLease *ForceWithLease,
) error {
// If it is not a wildcard refspec we can directly search for the reference
// in the references dictionary.
if !rs.IsWildcard() {
ref, ok := refsDict[rs.Src()]
if !ok {
commit, err := object.GetCommit(r.s, plumbing.NewHash(rs.Src()))
if err == nil {
return r.addCommit(rs, remoteRefs, commit.Hash, req)
}
return nil
}
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
return r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
}
for _, ref := range localRefs {
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req)
err := r.addReferenceIfRefSpecMatches(rs, remoteRefs, ref, req, forceWithLease)
if err != nil {
return err
}
@ -569,9 +671,46 @@ func (r *Remote) deleteReferences(rs config.RefSpec,
})
}
func (r *Remote) addCommit(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localCommit plumbing.Hash,
req *packp.ReferenceUpdateRequest) error {
if rs.IsWildcard() {
return errors.New("can't use wildcard together with hash refspecs")
}
cmd := &packp.Command{
Name: rs.Dst(""),
Old: plumbing.ZeroHash,
New: localCommit,
}
remoteRef, err := remoteRefs.Reference(cmd.Name)
if err == nil {
if remoteRef.Type() != plumbing.HashReference {
//TODO: check actual git behavior here
return nil
}
cmd.Old = remoteRef.Hash()
} else if err != plumbing.ErrReferenceNotFound {
return err
}
if cmd.Old == cmd.New {
return nil
}
if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
}
req.Commands = append(req.Commands, cmd)
return nil
}
func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
remoteRefs storer.ReferenceStorer, localRef *plumbing.Reference,
req *packp.ReferenceUpdateRequest) error {
req *packp.ReferenceUpdateRequest, forceWithLease *ForceWithLease) error {
if localRef.Type() != plumbing.HashReference {
return nil
@ -603,7 +742,11 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
if !rs.IsForceUpdate() {
if forceWithLease != nil {
if err = r.checkForceWithLease(localRef, cmd, forceWithLease); err != nil {
return err
}
} else if !rs.IsForceUpdate() {
if err := checkFastForwardUpdate(r.s, remoteRefs, cmd); err != nil {
return err
}
@ -613,6 +756,31 @@ func (r *Remote) addReferenceIfRefSpecMatches(rs config.RefSpec,
return nil
}
func (r *Remote) checkForceWithLease(localRef *plumbing.Reference, cmd *packp.Command, forceWithLease *ForceWithLease) error {
remotePrefix := fmt.Sprintf("refs/remotes/%s/", r.Config().Name)
ref, err := storer.ResolveReference(
r.s,
plumbing.ReferenceName(remotePrefix+strings.Replace(localRef.Name().String(), "refs/heads/", "", -1)))
if err != nil {
return err
}
if forceWithLease.RefName.String() == "" || (forceWithLease.RefName == cmd.Name) {
expectedOID := ref.Hash()
if !forceWithLease.Hash.IsZero() {
expectedOID = forceWithLease.Hash
}
if cmd.Old != expectedOID {
return fmt.Errorf("non-fast-forward update: %s", cmd.Name.String())
}
}
return nil
}
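Taken together, the push-side changes (push options, atomic updates, force-with-lease, follow-tags) surface through PushOptions; a sketch with a placeholder remote and refspec:

```go
package main

import (
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/config"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	err = repo.Push(&git.PushOptions{
		RemoteName: "origin",
		RefSpecs:   []config.RefSpec{"refs/heads/main:refs/heads/main"},
		// Push options delivered to the server's pre/post-receive hooks.
		Options: map[string]string{"ci.skip": "true"},
		// All-or-nothing ref updates when the server supports atomic.
		Atomic: true,
		// Compare-and-swap against the local remote-tracking ref,
		// like --force-with-lease.
		ForceWithLease: &git.ForceWithLease{},
		// Also push annotated tags reachable from the pushed refs.
		FollowTags: true,
	})
	if err != nil && err != git.NoErrAlreadyUpToDate {
		panic(err)
	}
}
```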
func (r *Remote) references() ([]*plumbing.Reference, error) {
var localRefs []*plumbing.Reference

View File

@ -56,7 +56,7 @@ var (
ErrWorktreeNotProvided = errors.New("worktree should be provided")
ErrIsBareRepository = errors.New("worktree not available in a bare repository")
ErrUnableToResolveCommit = errors.New("unable to resolve commit")
ErrPackedObjectsNotSupported = errors.New("Packed objects not supported")
ErrPackedObjectsNotSupported = errors.New("packed objects not supported")
)
// Repository represents a git repository
@ -280,6 +280,9 @@ func dotGitToOSFilesystems(path string, detect bool) (dot, wt billy.Filesystem,
pathinfo, err := fs.Stat("/")
if !os.IsNotExist(err) {
if pathinfo == nil {
return nil, nil, err
}
if !pathinfo.IsDir() && detect {
fs = osfs.New(filepath.Dir(path))
}
@ -1547,7 +1550,7 @@ func (r *Repository) ResolveRevision(rev plumbing.Revision) (*plumbing.Hash, err
}
if c == nil {
return &plumbing.ZeroHash, fmt.Errorf(`No commit message match regexp : "%s"`, re.String())
return &plumbing.ZeroHash, fmt.Errorf("no commit message match regexp: %q", re.String())
}
commit = c

View File

@ -0,0 +1,79 @@
package dotgit
import (
"fmt"
"io"
"os"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/format/objfile"
"github.com/go-git/go-git/v5/utils/ioutil"
)
var _ (plumbing.EncodedObject) = &EncodedObject{}
type EncodedObject struct {
dir *DotGit
h plumbing.Hash
t plumbing.ObjectType
sz int64
}
func (e *EncodedObject) Hash() plumbing.Hash {
return e.h
}
func (e *EncodedObject) Reader() (io.ReadCloser, error) {
f, err := e.dir.Object(e.h)
if err != nil {
if os.IsNotExist(err) {
return nil, plumbing.ErrObjectNotFound
}
return nil, err
}
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
}
t, size, err := r.Header()
if err != nil {
_ = r.Close()
return nil, err
}
if t != e.t {
_ = r.Close()
return nil, objfile.ErrHeader
}
if size != e.sz {
_ = r.Close()
return nil, objfile.ErrHeader
}
return ioutil.NewReadCloserWithCloser(r, f.Close), nil
}
func (e *EncodedObject) SetType(plumbing.ObjectType) {}
func (e *EncodedObject) Type() plumbing.ObjectType {
return e.t
}
func (e *EncodedObject) Size() int64 {
return e.sz
}
func (e *EncodedObject) SetSize(int64) {}
func (e *EncodedObject) Writer() (io.WriteCloser, error) {
return nil, fmt.Errorf("not supported")
}
func NewEncodedObject(dir *DotGit, h plumbing.Hash, t plumbing.ObjectType, size int64) *EncodedObject {
return &EncodedObject{
dir: dir,
h: h,
t: t,
sz: size,
}
}

View File

@ -4,6 +4,7 @@ import (
"bytes"
"io"
"os"
"sync"
"time"
"github.com/go-git/go-git/v5/plumbing"
@ -204,9 +205,9 @@ func (s *ObjectStorage) packfile(idx idxfile.Index, pack plumbing.Hash) (*packfi
var p *packfile.Packfile
if s.objectCache != nil {
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache)
p = packfile.NewPackfileWithCache(idx, s.dir.Fs(), f, s.objectCache, s.options.LargeObjectThreshold)
} else {
p = packfile.NewPackfile(idx, s.dir.Fs(), f)
p = packfile.NewPackfile(idx, s.dir.Fs(), f, s.options.LargeObjectThreshold)
}
return p, s.storePackfileInCache(pack, p)
@ -389,7 +390,6 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return cacheObj, nil
}
obj = s.NewEncodedObject()
r, err := objfile.NewReader(f)
if err != nil {
return nil, err
@ -402,6 +402,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
return nil, err
}
if s.options.LargeObjectThreshold > 0 && size > s.options.LargeObjectThreshold {
obj = dotgit.NewEncodedObject(s.dir, h, t, size)
return obj, nil
}
obj = s.NewEncodedObject()
obj.SetType(t)
obj.SetSize(size)
w, err := obj.Writer()
@ -413,10 +420,21 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
s.objectCache.Put(obj)
_, err = io.Copy(w, r)
bufp := copyBufferPool.Get().(*[]byte)
buf := *bufp
_, err = io.CopyBuffer(w, r, buf)
copyBufferPool.Put(bufp)
return obj, err
}
var copyBufferPool = sync.Pool{
New: func() interface{} {
b := make([]byte, 32*1024)
return &b
},
}
// Get returns the object with the given hash, by searching for it in
// the packfile.
func (s *ObjectStorage) getFromPackfile(h plumbing.Hash, canBeDelta bool) (
@ -595,6 +613,7 @@ func (s *ObjectStorage) buildPackfileIters(
return newPackfileIter(
s.dir.Fs(), pack, t, seen, s.index[h],
s.objectCache, s.options.KeepDescriptors,
s.options.LargeObjectThreshold,
)
},
}, nil
@ -684,6 +703,7 @@ func NewPackfileIter(
idxFile billy.File,
t plumbing.ObjectType,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
idx := idxfile.NewMemoryIndex()
if err := idxfile.NewDecoder(idxFile).Decode(idx); err != nil {
@ -695,7 +715,7 @@ func NewPackfileIter(
}
seen := make(map[plumbing.Hash]struct{})
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack)
return newPackfileIter(fs, f, t, seen, idx, nil, keepPack, largeObjectThreshold)
}
func newPackfileIter(
@ -706,12 +726,13 @@ func newPackfileIter(
index idxfile.Index,
cache cache.Object,
keepPack bool,
largeObjectThreshold int64,
) (storer.EncodedObjectIter, error) {
var p *packfile.Packfile
if cache != nil {
p = packfile.NewPackfileWithCache(index, fs, f, cache)
p = packfile.NewPackfileWithCache(index, fs, f, cache, largeObjectThreshold)
} else {
p = packfile.NewPackfile(index, fs, f)
p = packfile.NewPackfile(index, fs, f, largeObjectThreshold)
}
iter, err := p.GetByType(t)

View File

@ -34,7 +34,7 @@ func (s *ShallowStorage) SetShallow(commits []plumbing.Hash) error {
return err
}
// Shallow return the shallow commits reading from shallo file from .git
// Shallow returns the shallow commits read from the shallow file in .git
func (s *ShallowStorage) Shallow() ([]plumbing.Hash, error) {
f, err := s.dir.Shallow()
if f == nil || err != nil {

View File

@ -34,6 +34,9 @@ type Options struct {
// MaxOpenDescriptors is the max number of file descriptors to keep
// open. If KeepDescriptors is true, all file descriptors will remain open.
MaxOpenDescriptors int
// LargeObjectThreshold is the maximum object size (in bytes) that will be read into memory.
// If left unset or set to 0, there is no limit.
LargeObjectThreshold int64
}
// NewStorage returns a new Storage backed by a given `fs.Filesystem` and cache.
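A sketch of enabling the new threshold via the options-aware constructor (NewStorageWithOptions), with a placeholder path and an assumed 8 MiB cutoff:

```go
package main

import (
	"github.com/go-git/go-billy/v5/osfs"
	"github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/cache"
	"github.com/go-git/go-git/v5/storage/filesystem"
)

func main() {
	fs := osfs.New("/path/to/repo/.git") // placeholder path

	st := filesystem.NewStorageWithOptions(fs, cache.NewObjectLRUDefault(), filesystem.Options{
		// Objects larger than 8 MiB are streamed from disk instead of
		// being decompressed fully into memory.
		LargeObjectThreshold: 8 * 1024 * 1024,
	})

	if _, err := git.Open(st, nil); err != nil {
		panic(err)
	}
}
```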

View File

@ -193,7 +193,7 @@ func (o *ObjectStorage) DeleteOldObjectPackAndIndex(plumbing.Hash, time.Time) er
return nil
}
var errNotSupported = fmt.Errorf("Not supported")
var errNotSupported = fmt.Errorf("not supported")
func (o *ObjectStorage) LooseObjectTime(hash plumbing.Hash) (time.Time, error) {
return time.Time{}, errNotSupported

View File

@ -55,6 +55,28 @@ func NewReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
return &readCloser{Reader: r, closer: c}
}
type readCloserCloser struct {
io.ReadCloser
closer func() error
}
func (r *readCloserCloser) Close() (err error) {
defer func() {
if err == nil {
err = r.closer()
return
}
_ = r.closer()
}()
return r.ReadCloser.Close()
}
// NewReadCloserWithCloser creates an `io.ReadCloser` with the given `io.ReaderCloser` and
// `io.Closer` that ensures that the closer is closed on close
func NewReadCloserWithCloser(r io.ReadCloser, c func() error) io.ReadCloser {
return &readCloserCloser{ReadCloser: r, closer: c}
}
type writeCloser struct {
io.Writer
closer io.Closer
@ -82,6 +104,24 @@ func WriteNopCloser(w io.Writer) io.WriteCloser {
return writeNopCloser{w}
}
type readerAtAsReader struct {
io.ReaderAt
offset int64
}
func (r *readerAtAsReader) Read(bs []byte) (int, error) {
n, err := r.ReaderAt.ReadAt(bs, r.offset)
r.offset += int64(n)
return n, err
}
func NewReaderUsingReaderAt(r io.ReaderAt, offset int64) io.Reader {
return &readerAtAsReader{
ReaderAt: r,
offset: offset,
}
}
// CheckClose calls Close on the given io.Closer. If the given *error points to
// nil, it will be assigned the error returned by Close. Otherwise, any error
// returned by Close will be ignored. CheckClose is usually called with defer.

View File

@ -304,13 +304,38 @@ func DiffTreeContext(ctx context.Context, fromTree, toTree noder.Noder,
return nil, err
}
case onlyToRemains:
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
} else {
if err = ret.AddRecursiveInsert(to); err != nil {
return nil, err
}
}
if err = ii.nextTo(); err != nil {
return nil, err
}
case bothHaveNodes:
if from.Skip() {
if err = ret.AddRecursiveDelete(from); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if to.Skip() {
if err = ret.AddRecursiveDelete(to); err != nil {
return nil, err
}
if err := ii.nextBoth(); err != nil {
return nil, err
}
break
}
if err = diffNodes(&ret, ii); err != nil {
return nil, err
}

View File

@ -61,6 +61,10 @@ func (n *node) IsDir() bool {
return n.isDir
}
func (n *node) Skip() bool {
return false
}
func (n *node) Children() ([]noder.Noder, error) {
if err := n.calculateChildren(); err != nil {
return nil, err

View File

@ -19,6 +19,7 @@ type node struct {
entry *index.Entry
children []noder.Noder
isDir bool
skip bool
}
// NewRootNode returns the root node of a computed tree from a index.Index,
@ -39,7 +40,7 @@ func NewRootNode(idx *index.Index) noder.Noder {
continue
}
n := &node{path: fullpath}
n := &node{path: fullpath, skip: e.SkipWorktree}
if fullpath == e.Name {
n.entry = e
} else {
@ -58,6 +59,10 @@ func (n *node) String() string {
return n.path
}
func (n *node) Skip() bool {
return n.skip
}
// Hash the hash of a filesystem is a 24-byte slice, is the result of
// concatenating the computed plumbing.Hash of the file as a Blob and its
// plumbing.FileMode; that way the difftree algorithm will detect changes in the

View File

@ -53,6 +53,7 @@ type Noder interface {
// implement NumChildren in O(1) while Children is usually more
// complex.
NumChildren() (int, error)
Skip() bool
}
// NoChildren represents the children of a noder without children.

View File

@ -15,6 +15,14 @@ import (
// not be used.
type Path []Noder
func (p Path) Skip() bool {
if len(p) > 0 {
return p.Last().Skip()
}
return false
}
// String returns the full path of the final noder as a string, using
// "/" as the separator.
func (p Path) String() string {

29
vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
package sync
import (
"bufio"
"io"
"sync"
)
var bufioReader = sync.Pool{
New: func() interface{} {
return bufio.NewReader(nil)
},
}
// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool.
// Returns a bufio.Reader that is reset with reader and ready for use.
//
// After use, the *bufio.Reader should be put back into the sync.Pool
// by calling PutBufioReader.
func GetBufioReader(reader io.Reader) *bufio.Reader {
r := bufioReader.Get().(*bufio.Reader)
r.Reset(reader)
return r
}
// PutBufioReader puts reader back into its sync.Pool.
func PutBufioReader(reader *bufio.Reader) {
bufioReader.Put(reader)
}

51
vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go generated vendored Normal file
View File

@ -0,0 +1,51 @@
package sync
import (
"bytes"
"sync"
)
var (
byteSlice = sync.Pool{
New: func() interface{} {
b := make([]byte, 16*1024)
return &b
},
}
bytesBuffer = sync.Pool{
New: func() interface{} {
return bytes.NewBuffer(nil)
},
}
)
// GetByteSlice returns a *[]byte that is managed by a sync.Pool.
// The initial slice length will be 16384 (16kb).
//
// After use, the *[]byte should be put back into the sync.Pool
// by calling PutByteSlice.
func GetByteSlice() *[]byte {
buf := byteSlice.Get().(*[]byte)
return buf
}
// PutByteSlice puts buf back into its sync.Pool.
func PutByteSlice(buf *[]byte) {
byteSlice.Put(buf)
}
// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool.
// Returns a buffer that is reset and ready for use.
//
// After use, the *bytes.Buffer should be put back into the sync.Pool
// by calling PutBytesBuffer.
func GetBytesBuffer() *bytes.Buffer {
buf := bytesBuffer.Get().(*bytes.Buffer)
buf.Reset()
return buf
}
// PutBytesBuffer puts buf back into its sync.Pool.
func PutBytesBuffer(buf *bytes.Buffer) {
bytesBuffer.Put(buf)
}
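The Get/Put pairing these helpers expect mirrors the call sites above; a minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/go-git/go-git/v5/utils/sync"
)

func main() {
	// Pooled bytes.Buffer: always return it when done.
	buf := sync.GetBytesBuffer()
	defer sync.PutBytesBuffer(buf)
	buf.WriteString("hello")

	// Pooled copy buffer for io.CopyBuffer, as in ApplyDelta and checkoutFile.
	b := sync.GetByteSlice()
	defer sync.PutByteSlice(b)

	n, err := io.CopyBuffer(io.Discard, strings.NewReader(buf.String()), *b)
	fmt.Println(n, err) // 5 <nil>
}
```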

74
vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go generated vendored Normal file
View File

@ -0,0 +1,74 @@
package sync
import (
"bytes"
"compress/zlib"
"io"
"sync"
)
var (
zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01}
zlibReader = sync.Pool{
New: func() interface{} {
r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes))
return ZLibReader{
Reader: r.(zlibReadCloser),
}
},
}
zlibWriter = sync.Pool{
New: func() interface{} {
return zlib.NewWriter(nil)
},
}
)
type zlibReadCloser interface {
io.ReadCloser
zlib.Resetter
}
type ZLibReader struct {
dict *[]byte
Reader zlibReadCloser
}
// GetZlibReader returns a ZLibReader that is managed by a sync.Pool.
// Returns a ZLibReader that is reset using a dictionary that is
// also managed by a sync.Pool.
//
// After use, the ZLibReader should be put back into the sync.Pool
// by calling PutZlibReader.
func GetZlibReader(r io.Reader) (ZLibReader, error) {
z := zlibReader.Get().(ZLibReader)
z.dict = GetByteSlice()
err := z.Reader.Reset(r, *z.dict)
return z, err
}
// PutZlibReader puts z back into its sync.Pool, first closing the reader.
// The Byte slice dictionary is also put back into its sync.Pool.
func PutZlibReader(z ZLibReader) {
z.Reader.Close()
PutByteSlice(z.dict)
zlibReader.Put(z)
}
// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool.
// Returns a writer that is reset with w and ready for use.
//
// After use, the *zlib.Writer should be put back into the sync.Pool
// by calling PutZlibWriter.
func GetZlibWriter(w io.Writer) *zlib.Writer {
z := zlibWriter.Get().(*zlib.Writer)
z.Reset(w)
return z
}
// PutZlibWriter puts w back into its sync.Pool.
func PutZlibWriter(w *zlib.Writer) {
zlibWriter.Put(w)
}
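And a round-trip sketch for the pooled zlib writer and reader (the reader's pooled dictionary is ignored for streams that do not request one):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/utils/sync"
)

func main() {
	// Compress with a pooled writer...
	var compressed bytes.Buffer
	zw := sync.GetZlibWriter(&compressed)
	zw.Write([]byte("hello"))
	zw.Close()
	sync.PutZlibWriter(zw)

	// ...and inflate with a pooled reader.
	zr, err := sync.GetZlibReader(&compressed)
	if err != nil {
		panic(err)
	}
	defer sync.PutZlibReader(zr)

	out, err := io.ReadAll(zr.Reader)
	fmt.Println(string(out), err) // hello <nil>
}
```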

View File

@ -9,8 +9,9 @@ import (
"os"
"path/filepath"
"strings"
"sync"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/config"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/plumbing/filemode"
@ -20,9 +21,7 @@ import (
"github.com/go-git/go-git/v5/plumbing/storer"
"github.com/go-git/go-git/v5/utils/ioutil"
"github.com/go-git/go-git/v5/utils/merkletrie"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-billy/v5/util"
"github.com/go-git/go-git/v5/utils/sync"
)
var (
@ -73,6 +72,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
fetchHead, err := remote.fetch(ctx, &FetchOptions{
RemoteName: o.RemoteName,
RemoteURL: o.RemoteURL,
Depth: o.Depth,
Auth: o.Auth,
Progress: o.Progress,
@ -182,6 +182,10 @@ func (w *Worktree) Checkout(opts *CheckoutOptions) error {
return err
}
if len(opts.SparseCheckoutDirectories) > 0 {
return w.ResetSparsely(ro, opts.SparseCheckoutDirectories)
}
return w.Reset(ro)
}
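The new sparse path feeds straight into checkout; a sketch with placeholder directories (branch selection falls back to the checkout options' defaults):

```go
package main

import (
	"github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen(".")
	if err != nil {
		panic(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Materialize only these directories; other index entries keep
	// SkipWorktree and are treated as deletes by the tree diff above.
	err = wt.Checkout(&git.CheckoutOptions{
		SparseCheckoutDirectories: []string{"docs", "cmd"},
	})
	if err != nil {
		panic(err)
	}
}
```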
func (w *Worktree) createBranch(opts *CheckoutOptions) error {
@ -262,8 +266,7 @@ func (w *Worktree) setHEADToBranch(branch plumbing.ReferenceName, commit plumbin
return w.r.Storer.SetReference(head)
}
// Reset the worktree to a specified state.
func (w *Worktree) Reset(opts *ResetOptions) error {
func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error {
if err := opts.Validate(w.r); err != nil {
return err
}
@ -293,7 +296,7 @@ func (w *Worktree) Reset(opts *ResetOptions) error {
}
if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset {
if err := w.resetIndex(t); err != nil {
if err := w.resetIndex(t, dirs); err != nil {
return err
}
}
@ -307,8 +310,17 @@ func (w *Worktree) Reset(opts *ResetOptions) error {
return nil
}
func (w *Worktree) resetIndex(t *object.Tree) error {
// Reset the worktree to a specified state.
func (w *Worktree) Reset(opts *ResetOptions) error {
return w.ResetSparsely(opts, nil)
}
func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
idx, err := w.r.Storer.Index()
if len(dirs) > 0 {
idx.SkipUnless(dirs)
}
if err != nil {
return err
}
@ -398,7 +410,7 @@ func (w *Worktree) checkoutChange(ch merkletrie.Change, t *object.Tree, idx *ind
isSubmodule = e.Mode == filemode.Submodule
case merkletrie.Delete:
return rmFileAndDirIfEmpty(w.Filesystem, ch.From.String())
return rmFileAndDirsIfEmpty(w.Filesystem, ch.From.String())
}
if isSubmodule {
@ -520,12 +532,6 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
return nil
}
var copyBufferPool = sync.Pool{
New: func() interface{} {
return make([]byte, 32*1024)
},
}
func (w *Worktree) checkoutFile(f *object.File) (err error) {
mode, err := f.Mode.ToOSFileMode()
if err != nil {
@ -549,9 +555,9 @@ func (w *Worktree) checkoutFile(f *object.File) (err error) {
}
defer ioutil.CheckClose(to, &err)
buf := copyBufferPool.Get().([]byte)
_, err = io.CopyBuffer(to, from, buf)
copyBufferPool.Put(buf)
buf := sync.GetByteSlice()
_, err = io.CopyBuffer(to, from, *buf)
sync.PutByteSlice(buf)
return
}
@ -772,8 +778,10 @@ func (w *Worktree) doClean(status Status, opts *CleanOptions, dir string, files
}
if opts.Dir && dir != "" {
return doCleanDirectories(w.Filesystem, dir)
_, err := removeDirIfEmpty(w.Filesystem, dir)
return err
}
return nil
}
@ -914,25 +922,52 @@ func findMatchInFile(file *object.File, treeName string, opts *GrepOptions) ([]G
return grepResults, nil
}
func rmFileAndDirIfEmpty(fs billy.Filesystem, name string) error {
// rmFileAndDirsIfEmpty removes the named file and then walks up the directory
// tree, removing every empty directory it encounters, not just the one that
// contained the file.
func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error {
if err := util.RemoveAll(fs, name); err != nil {
return err
}
dir := filepath.Dir(name)
return doCleanDirectories(fs, dir)
for {
removed, err := removeDirIfEmpty(fs, dir)
if err != nil {
return err
}
if !removed {
// directory was not empty and not removed,
// stop checking parents
break
}
// move to parent directory
dir = filepath.Dir(dir)
}
return nil
}
// doCleanDirectories removes empty subdirs (without files)
func doCleanDirectories(fs billy.Filesystem, dir string) error {
// removeDirIfEmpty removes the supplied directory `dir` if it is empty.
// It returns true if the directory was removed.
func removeDirIfEmpty(fs billy.Filesystem, dir string) (bool, error) {
files, err := fs.ReadDir(dir)
if err != nil {
return err
return false, err
}
if len(files) == 0 {
return fs.Remove(dir)
if len(files) > 0 {
return false, nil
}
return nil
err = fs.Remove(dir)
if err != nil {
return false, err
}
return true, nil
}
type indexBuilder struct {

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Atimespec.Sec), int64(os.Atimespec.Nsec))
e.CreatedAt = time.Unix(os.Atimespec.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

View File

@ -2,6 +2,7 @@ package git
import (
"bytes"
"errors"
"path"
"sort"
"strings"
@ -16,6 +17,12 @@ import (
"github.com/go-git/go-billy/v5"
)
var (
// ErrEmptyCommit occurs when a commit is attempted using a clean
// working tree, with no changes to be committed.
ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree")
)
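ErrEmptyCommit pairs with the AllowEmptyCommits option checked in BuildTree further down in this file. A hedged sketch of how a caller might handle it; the repository path, author details, and message are placeholders.

```golang
package main

import (
	"errors"
	"fmt"
	"time"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/object"
)

func main() {
	repo, err := git.PlainOpen(".") // placeholder path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	_, err = wt.Commit("chore: example commit", &git.CommitOptions{
		// Set AllowEmptyCommits to true to permit committing a clean tree.
		AllowEmptyCommits: false,
		Author: &object.Signature{
			Name:  "example",
			Email: "example@example.com",
			When:  time.Now(),
		},
	})
	if errors.Is(err, git.ErrEmptyCommit) {
		fmt.Println("nothing to commit")
		return
	}
	if err != nil {
		panic(err)
	}
}
```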
// Commit stores the current contents of the index in a new commit along with
// a log message from the user describing the changes.
func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error) {
@ -39,7 +46,7 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error
s: w.r.Storer,
}
tree, err := h.BuildTree(idx)
tree, err := h.BuildTree(idx, opts)
if err != nil {
return plumbing.ZeroHash, err
}
@ -145,7 +152,11 @@ type buildTreeHelper struct {
// BuildTree builds the tree objects and pushes them to the storer; the hash
// of the root tree is returned.
func (h *buildTreeHelper) BuildTree(idx *index.Index) (plumbing.Hash, error) {
func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) {
if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) {
return plumbing.ZeroHash, ErrEmptyCommit
}
const rootNode = ""
h.trees = map[string]*object.Tree{rootNode: {}}
h.entries = map[string]*object.TreeEntry{}

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Ctim.Sec), int64(os.Ctim.Nsec))
e.CreatedAt = time.Unix(os.Ctim.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

View File

@ -270,10 +270,6 @@ func (w *Worktree) Add(path string) (plumbing.Hash, error) {
}
func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string, ignorePattern []gitignore.Pattern) (added bool, err error) {
files, err := w.Filesystem.ReadDir(directory)
if err != nil {
return false, err
}
if len(ignorePattern) > 0 {
m := gitignore.NewMatcher(ignorePattern)
matchPath := strings.Split(directory, string(os.PathSeparator))
@ -283,20 +279,13 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string,
}
}
for _, file := range files {
name := path.Join(directory, file.Name())
var a bool
if file.IsDir() {
if file.Name() == GitDirName {
// ignore special git directory
continue
}
a, err = w.doAddDirectory(idx, s, name, ignorePattern)
} else {
a, _, err = w.doAddFile(idx, s, name, ignorePattern)
for name := range s {
if !isPathInDirectory(name, filepath.ToSlash(filepath.Clean(directory))) {
continue
}
var a bool
a, _, err = w.doAddFile(idx, s, name, ignorePattern)
if err != nil {
return
}
@ -309,6 +298,26 @@ func (w *Worktree) doAddDirectory(idx *index.Index, s Status, directory string,
return
}
func isPathInDirectory(path, directory string) bool {
ps := strings.Split(path, "/")
ds := strings.Split(directory, "/")
if len(ds) == 1 && ds[0] == "." {
return true
}
if len(ps) < len(ds) {
return false
}
for i := 0; i < len(ds); i++ {
if ps[i] != ds[i] {
return false
}
}
return true
}
// AddWithOptions file contents to the index, updates the index using the
// current content found in the working tree, to prepare the content staged for
// the next commit.

View File

@ -12,7 +12,7 @@ import (
func init() {
fillSystemInfo = func(e *index.Entry, sys interface{}) {
if os, ok := sys.(*syscall.Stat_t); ok {
e.CreatedAt = time.Unix(int64(os.Atim.Sec), int64(os.Atim.Nsec))
e.CreatedAt = time.Unix(os.Atim.Unix())
e.Dev = uint32(os.Dev)
e.Inode = uint32(os.Ino)
e.GID = os.Gid

23
vendor/github.com/pjbgf/sha1cd/Dockerfile.arm generated vendored Normal file
View File

@ -0,0 +1,23 @@
FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d
ENV GOOS=linux
ENV GOARCH=arm
ENV CGO_ENABLED=1
ENV CC=arm-linux-gnueabihf-gcc
ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
ENV PKG_CONFIG_PATH=/usr/lib/arm-linux-gnueabihf/pkgconfig
RUN dpkg --add-architecture armhf \
&& apt update \
&& apt install -y --no-install-recommends \
upx \
gcc-arm-linux-gnueabihf \
libc6-dev-armhf-cross \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
COPY . /src/workdir
WORKDIR /src/workdir
RUN go build ./...

23
vendor/github.com/pjbgf/sha1cd/Dockerfile.arm64 generated vendored Normal file
View File

@ -0,0 +1,23 @@
FROM golang:1.20@sha256:2edf6aab2d57644f3fe7407132a0d1770846867465a39c2083770cf62734b05d
ENV GOOS=linux
ENV GOARCH=arm64
ENV CGO_ENABLED=1
ENV CC=aarch64-linux-gnu-gcc
ENV PATH="/go/bin/${GOOS}_${GOARCH}:${PATH}"
ENV PKG_CONFIG_PATH=/usr/lib/aarch64-linux-gnu/pkgconfig
# install build & runtime dependencies
RUN dpkg --add-architecture arm64 \
&& apt update \
&& apt install -y --no-install-recommends \
gcc-aarch64-linux-gnu \
libc6-dev-arm64-cross \
pkg-config \
&& rm -rf /var/lib/apt/lists/*
COPY . /src/workdir
WORKDIR /src/workdir
RUN go build ./...

201
vendor/github.com/pjbgf/sha1cd/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

40
vendor/github.com/pjbgf/sha1cd/Makefile generated vendored Normal file
View File

@ -0,0 +1,40 @@
FUZZ_TIME ?= 1m
export CGO_ENABLED := 1
.PHONY: test
test:
go test ./...
.PHONY: bench
bench:
go test -benchmem -run=^$$ -bench ^Benchmark ./...
.PHONY: fuzz
fuzz:
go test -tags gofuzz -fuzz=. -fuzztime=$(FUZZ_TIME) ./test/
# Cross build project in arm/v7.
build-arm:
docker build -t sha1cd-arm -f Dockerfile.arm .
docker run --rm sha1cd-arm
# Cross build project in arm64.
build-arm64:
docker build -t sha1cd-arm64 -f Dockerfile.arm64 .
docker run --rm sha1cd-arm64
# Build with cgo disabled.
build-nocgo:
CGO_ENABLED=0 go build ./cgo
# Run cross-compilation to ensure supported architectures build.
cross-build: build-arm build-arm64 build-nocgo
generate:
go run sha1cdblock_amd64_asm.go -out sha1cdblock_amd64.s
sed -i 's;&\samd64;&\n// +build !noasm,gc,amd64;g' sha1cdblock_amd64.s
verify: generate
git diff --exit-code
go vet ./...

58
vendor/github.com/pjbgf/sha1cd/README.md generated vendored Normal file
View File

@ -0,0 +1,58 @@
# sha1cd
A Go implementation of SHA1 with counter-cryptanalysis, which detects
collision attacks.
The `cgo/lib` code is a carbon copy of the [original code], based on
the award-winning [white paper] by Marc Stevens.
The Go implementation is largely based on Go's generic SHA1.
At present, no SIMD optimisations have been implemented.
## Usage
`sha1cd` can be used as a drop-in replacement for `crypto/sha1`:
```golang
import "github.com/pjbgf/sha1cd"
func test(){
data := []byte("data to be sha1 hashed")
h, _ := sha1cd.Sum(data)
fmt.Printf("hash: %q\n", hex.EncodeToString(h[:]))
}
```
To obtain information as to whether a collision was found, use the
func `CollisionResistantSum`.
```golang
import "github.com/pjbgf/sha1cd"
func test(){
data := []byte("data to be sha1 hashed")
h, col := sha1cd.CollisionResistantSum(data)
if col {
fmt.Println("collision found!")
}
fmt.Printf("hash: %q", hex.EncodeToString(h))
}
```
Note that the algorithm will automatically avoid collisions by extending
SHA1 to 240 steps, instead of 80, when a collision attempt is detected.
Therefore, inputs that contain the unavoidable bit conditions will yield a
different hash from `sha1cd` when compared with results from `crypto/sha1`.
Valid inputs will yield matching outputs.
## References
- https://shattered.io/
- https://github.com/cr-marcstevens/sha1collisiondetection
- https://csrc.nist.gov/Projects/Cryptographic-Algorithm-Validation-Program/Secure-Hashing#shavs
## Use of the Original Implementation
- https://github.com/git/git/commit/28dc98e343ca4eb370a29ceec4c19beac9b5c01e
- https://github.com/libgit2/libgit2/pull/4136
[original code]: https://github.com/cr-marcstevens/sha1collisiondetection
[white paper]: https://marc-stevens.nl/research/papers/C13-S.pdf

11
vendor/github.com/pjbgf/sha1cd/detection.go generated vendored Normal file
View File

@ -0,0 +1,11 @@
package sha1cd
import "hash"
type CollisionResistantHash interface {
// CollisionResistantSum extends Sum by returning an additional boolean
// which indicates whether a collision was found during the hashing process.
CollisionResistantSum(b []byte) ([]byte, bool)
hash.Hash
}
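A small, hedged usage sketch, assuming (as sha1cd.go in this change suggests) that the hash returned by sha1cd.New also satisfies CollisionResistantHash:

```golang
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/pjbgf/sha1cd"
)

func main() {
	h, ok := sha1cd.New().(sha1cd.CollisionResistantHash)
	if !ok {
		panic("sha1cd.New() does not implement CollisionResistantHash")
	}

	h.Write([]byte("data to be sha1 hashed"))
	sum, collided := h.CollisionResistantSum(nil)
	fmt.Printf("hash: %s collision: %v\n", hex.EncodeToString(sum), collided)
}
```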

42
vendor/github.com/pjbgf/sha1cd/internal/const.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package shared
const (
// Constants for the SHA-1 hash function.
K0 = 0x5A827999
K1 = 0x6ED9EBA1
K2 = 0x8F1BBCDC
K3 = 0xCA62C1D6
// Initial values for the buffer variables: h0, h1, h2, h3, h4.
Init0 = 0x67452301
Init1 = 0xEFCDAB89
Init2 = 0x98BADCFE
Init3 = 0x10325476
Init4 = 0xC3D2E1F0
// Initial values for the temporary variables (ihvtmp0, ihvtmp1, ihvtmp2, ihvtmp3, ihvtmp4) during the SHA recompression step.
InitTmp0 = 0xD5
InitTmp1 = 0x394
InitTmp2 = 0x8152A8
InitTmp3 = 0x0
InitTmp4 = 0xA7ECE0
// SHA1 contains 2 buffers, each based on 5 32-bit words.
WordBuffers = 5
// The output of SHA1 is 20 bytes (160 bits).
Size = 20
// Rounds represents the number of steps required to process each chunk.
Rounds = 80
// SHA1 processes the input data in chunks. Each chunk contains 64 bytes.
Chunk = 64
// The number of pre-step compression states to store.
// Currently there are 3 pre-step compression states required: 0, 58, 65.
PreStepState = 3
Magic = "shacd\x01"
MarshaledSize = len(Magic) + 5*4 + Chunk + 8
)

227
vendor/github.com/pjbgf/sha1cd/sha1cd.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package sha1cd implements collision detection based on the whitepaper
// Counter-cryptanalysis from Marc Stevens. The original ubc implementation
// was done by Marc Stevens and Dan Shumow, and can be found at:
// https://github.com/cr-marcstevens/sha1collisiondetection
package sha1cd
// This SHA1 implementation is based on Go's generic SHA1.
// Original: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1.go
import (
"crypto"
"encoding/binary"
"errors"
"hash"
shared "github.com/pjbgf/sha1cd/internal"
)
func init() {
crypto.RegisterHash(crypto.SHA1, New)
}
// The size of a SHA-1 checksum in bytes.
const Size = shared.Size
// The blocksize of SHA-1 in bytes.
const BlockSize = shared.Chunk
// digest represents the partial evaluation of a checksum.
type digest struct {
h [shared.WordBuffers]uint32
x [shared.Chunk]byte
nx int
len uint64
// col defines whether a collision has been found.
col bool
blockFunc func(dig *digest, p []byte)
}
func (d *digest) MarshalBinary() ([]byte, error) {
b := make([]byte, 0, shared.MarshaledSize)
b = append(b, shared.Magic...)
b = appendUint32(b, d.h[0])
b = appendUint32(b, d.h[1])
b = appendUint32(b, d.h[2])
b = appendUint32(b, d.h[3])
b = appendUint32(b, d.h[4])
b = append(b, d.x[:d.nx]...)
b = b[:len(b)+len(d.x)-d.nx] // already zero
b = appendUint64(b, d.len)
return b, nil
}
func appendUint32(b []byte, v uint32) []byte {
return append(b,
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func appendUint64(b []byte, v uint64) []byte {
return append(b,
byte(v>>56),
byte(v>>48),
byte(v>>40),
byte(v>>32),
byte(v>>24),
byte(v>>16),
byte(v>>8),
byte(v),
)
}
func (d *digest) UnmarshalBinary(b []byte) error {
if len(b) < len(shared.Magic) || string(b[:len(shared.Magic)]) != shared.Magic {
return errors.New("crypto/sha1: invalid hash state identifier")
}
if len(b) != shared.MarshaledSize {
return errors.New("crypto/sha1: invalid hash state size")
}
b = b[len(shared.Magic):]
b, d.h[0] = consumeUint32(b)
b, d.h[1] = consumeUint32(b)
b, d.h[2] = consumeUint32(b)
b, d.h[3] = consumeUint32(b)
b, d.h[4] = consumeUint32(b)
b = b[copy(d.x[:], b):]
b, d.len = consumeUint64(b)
d.nx = int(d.len % shared.Chunk)
return nil
}
func consumeUint64(b []byte) ([]byte, uint64) {
_ = b[7]
x := uint64(b[7]) | uint64(b[6])<<8 | uint64(b[shared.WordBuffers])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
return b[8:], x
}
func consumeUint32(b []byte) ([]byte, uint32) {
_ = b[3]
x := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
return b[4:], x
}
func (d *digest) Reset() {
d.h[0] = shared.Init0
d.h[1] = shared.Init1
d.h[2] = shared.Init2
d.h[3] = shared.Init3
d.h[4] = shared.Init4
d.nx = 0
d.len = 0
d.col = false
}
// New returns a new hash.Hash computing the SHA1 checksum. The Hash also
// implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler to
// marshal and unmarshal the internal state of the hash.
func New() hash.Hash {
d := new(digest)
d.blockFunc = block
d.Reset()
return d
}
// NewGeneric is equivalent to New but uses the Go generic implementation,
// avoiding any processor-specific optimizations.
func NewGeneric() hash.Hash {
d := new(digest)
d.blockFunc = blockGeneric
d.Reset()
return d
}
func (d *digest) Size() int { return Size }
func (d *digest) BlockSize() int { return BlockSize }
func (d *digest) Write(p []byte) (nn int, err error) {
if len(p) == 0 {
return
}
nn = len(p)
d.len += uint64(nn)
if d.nx > 0 {
n := copy(d.x[d.nx:], p)
d.nx += n
if d.nx == shared.Chunk {
d.blockFunc(d, d.x[:])
d.nx = 0
}
p = p[n:]
}
if len(p) >= shared.Chunk {
n := len(p) &^ (shared.Chunk - 1)
d.blockFunc(d, p[:n])
p = p[n:]
}
if len(p) > 0 {
d.nx = copy(d.x[:], p)
}
return
}
func (d *digest) Sum(in []byte) []byte {
// Make a copy of d so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...)
}
func (d *digest) checkSum() [Size]byte {
len := d.len
// Padding. Add a 1 bit and 0 bits until 56 bytes mod 64.
var tmp [64]byte
tmp[0] = 0x80
if len%64 < 56 {
d.Write(tmp[0 : 56-len%64])
} else {
d.Write(tmp[0 : 64+56-len%64])
}
// Length in bits.
len <<= 3
binary.BigEndian.PutUint64(tmp[:], len)
d.Write(tmp[0:8])
if d.nx != 0 {
panic("d.nx != 0")
}
var digest [Size]byte
binary.BigEndian.PutUint32(digest[0:], d.h[0])
binary.BigEndian.PutUint32(digest[4:], d.h[1])
binary.BigEndian.PutUint32(digest[8:], d.h[2])
binary.BigEndian.PutUint32(digest[12:], d.h[3])
binary.BigEndian.PutUint32(digest[16:], d.h[4])
return digest
}
// Sum returns the SHA-1 checksum of the data.
func Sum(data []byte) ([Size]byte, bool) {
d := New().(*digest)
d.Write(data)
return d.checkSum(), d.col
}
func (d *digest) CollisionResistantSum(in []byte) ([]byte, bool) {
// Make a copy of d so that caller can keep writing and summing.
d0 := *d
hash := d0.checkSum()
return append(in, hash[:]...), d0.col
}
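Since the digest implements encoding.BinaryMarshaler and encoding.BinaryUnmarshaler (see MarshalBinary/UnmarshalBinary above), the partial hash state can be snapshotted and resumed. A minimal sketch:

```golang
package main

import (
	"encoding"
	"encoding/hex"
	"fmt"

	"github.com/pjbgf/sha1cd"
)

func main() {
	h := sha1cd.New()
	h.Write([]byte("first chunk "))

	// Snapshot the partial state.
	state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore it into a fresh hash and keep writing.
	h2 := sha1cd.New()
	if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
		panic(err)
	}
	h2.Write([]byte("second chunk"))

	fmt.Println(hex.EncodeToString(h2.Sum(nil)))
}
```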

50
vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
//go:build !noasm && gc && amd64
// +build !noasm,gc,amd64
package sha1cd
import (
"math"
"unsafe"
shared "github.com/pjbgf/sha1cd/internal"
)
type sliceHeader struct {
base uintptr
len int
cap int
}
// blockAMD64 hashes the message p into the current state in dig.
// Both m1 and cs are used to store intermediate results which are used by the collision detection logic.
//
//go:noescape
func blockAMD64(dig *digest, p sliceHeader, m1 []uint32, cs [][5]uint32)
func block(dig *digest, p []byte) {
m1 := [shared.Rounds]uint32{}
cs := [shared.PreStepState][shared.WordBuffers]uint32{}
for len(p) >= shared.Chunk {
// Only send a block to be processed, as the collision detection
// works on a block-by-block basis.
ips := sliceHeader{
base: uintptr(unsafe.Pointer(&p[0])),
len: int(math.Min(float64(len(p)), float64(shared.Chunk))),
cap: shared.Chunk,
}
blockAMD64(dig, ips, m1[:], cs[:])
col := checkCollision(m1, cs, dig.h)
if col {
dig.col = true
blockAMD64(dig, ips, m1[:], cs[:])
blockAMD64(dig, ips, m1[:], cs[:])
}
p = p[shared.Chunk:]
}
}

2274
vendor/github.com/pjbgf/sha1cd/sha1cdblock_amd64.s generated vendored Normal file

File diff suppressed because it is too large

268
vendor/github.com/pjbgf/sha1cd/sha1cdblock_generic.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Originally from: https://github.com/golang/go/blob/master/src/crypto/sha1/sha1block.go
// It has been modified to support collision detection.
package sha1cd
import (
"fmt"
"math/bits"
shared "github.com/pjbgf/sha1cd/internal"
"github.com/pjbgf/sha1cd/ubc"
)
// blockGeneric is a portable, pure Go version of the SHA-1 block step.
// It's used by sha1block_generic.go and tests.
func blockGeneric(dig *digest, p []byte) {
var w [16]uint32
// cs stores the pre-step compression state for only the steps required for the
// collision detection, which are 0, 58 and 65.
// Refer to ubc/const.go for more details.
cs := [shared.PreStepState][shared.WordBuffers]uint32{}
h0, h1, h2, h3, h4 := dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4]
for len(p) >= shared.Chunk {
m1 := [shared.Rounds]uint32{}
hi := 1
// Collision attacks are thwarted by hashing a detected near-collision block 3 times.
// Think of it as extending SHA-1 from 80-steps to 240-steps for such blocks:
// The best collision attacks against SHA-1 have complexity about 2^60,
// thus for 240-steps an immediate lower-bound for the best cryptanalytic attacks would be 2^180.
// An attacker would be better off using a generic birthday search of complexity 2^80.
rehash:
a, b, c, d, e := h0, h1, h2, h3, h4
// Each of the four 20-iteration rounds
// differs only in the computation of f and
// the choice of K (K0, K1, etc).
i := 0
// Store pre-step compression state for the collision detection.
cs[0] = [shared.WordBuffers]uint32{a, b, c, d, e}
for ; i < 16; i++ {
// load step
j := i * 4
w[i] = uint32(p[j])<<24 | uint32(p[j+1])<<16 | uint32(p[j+2])<<8 | uint32(p[j+3])
f := b&c | (^b)&d
t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
// Store compression state for the collision detection.
m1[i] = w[i&0xf]
}
for ; i < 20; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b&c | (^b)&d
t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K0
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
// Store compression state for the collision detection.
m1[i] = w[i&0xf]
}
for ; i < 40; i++ {
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K1
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
// Store compression state for the collision detection.
m1[i] = w[i&0xf]
}
for ; i < 60; i++ {
if i == 58 {
// Store pre-step compression state for the collision detection.
cs[1] = [shared.WordBuffers]uint32{a, b, c, d, e}
}
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := ((b | c) & d) | (b & c)
t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K2
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
// Store compression state for the collision detection.
m1[i] = w[i&0xf]
}
for ; i < 80; i++ {
if i == 65 {
// Store pre-step compression state for the collision detection.
cs[2] = [shared.WordBuffers]uint32{a, b, c, d, e}
}
tmp := w[(i-3)&0xf] ^ w[(i-8)&0xf] ^ w[(i-14)&0xf] ^ w[(i)&0xf]
w[i&0xf] = tmp<<1 | tmp>>(32-1)
f := b ^ c ^ d
t := bits.RotateLeft32(a, 5) + f + e + w[i&0xf] + shared.K3
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
// Store compression state for the collision detection.
m1[i] = w[i&0xf]
}
h0 += a
h1 += b
h2 += c
h3 += d
h4 += e
if hi == 2 {
hi++
goto rehash
}
if hi == 1 {
col := checkCollision(m1, cs, [shared.WordBuffers]uint32{h0, h1, h2, h3, h4})
if col {
dig.col = true
hi++
goto rehash
}
}
p = p[shared.Chunk:]
}
dig.h[0], dig.h[1], dig.h[2], dig.h[3], dig.h[4] = h0, h1, h2, h3, h4
}
func checkCollision(
m1 [shared.Rounds]uint32,
cs [shared.PreStepState][shared.WordBuffers]uint32,
state [shared.WordBuffers]uint32) bool {
if mask := ubc.CalculateDvMask(m1); mask != 0 {
dvs := ubc.SHA1_dvs()
for i := 0; dvs[i].DvType != 0; i++ {
if (mask & ((uint32)(1) << uint32(dvs[i].MaskB))) != 0 {
var csState [shared.WordBuffers]uint32
switch dvs[i].TestT {
case 58:
csState = cs[1]
case 65:
csState = cs[2]
case 0:
csState = cs[0]
default:
panic(fmt.Sprintf("dvs data is trying to use a testT that isn't available: %d", dvs[i].TestT))
}
col := hasCollided(
dvs[i].TestT, // testT is the step number
// m2 is a secondary message created XORing with
// ubc's DM prior to the SHA recompression step.
m1, dvs[i].Dm,
csState,
state)
if col {
return true
}
}
}
}
return false
}
func hasCollided(step uint32, m1, dm [shared.Rounds]uint32,
state [shared.WordBuffers]uint32, h [shared.WordBuffers]uint32) bool {
// Intermediary Hash Value.
ihv := [shared.WordBuffers]uint32{}
a, b, c, d, e := state[0], state[1], state[2], state[3], state[4]
// Walk backwards from current step to undo previous compression.
// The existing collision detection does not have DVs higher than 65,
// so the start value of i is set accordingly.
for i := uint32(64); i >= 60; i-- {
a, b, c, d, e = b, c, d, e, a
if step > i {
b = bits.RotateLeft32(b, -30)
f := b ^ c ^ d
e -= bits.RotateLeft32(a, 5) + f + shared.K3 + (m1[i] ^ dm[i]) // m2 = m1 ^ dm.
}
}
for i := uint32(59); i >= 40; i-- {
a, b, c, d, e = b, c, d, e, a
if step > i {
b = bits.RotateLeft32(b, -30)
f := ((b | c) & d) | (b & c)
e -= bits.RotateLeft32(a, 5) + f + shared.K2 + (m1[i] ^ dm[i])
}
}
for i := uint32(39); i >= 20; i-- {
a, b, c, d, e = b, c, d, e, a
if step > i {
b = bits.RotateLeft32(b, -30)
f := b ^ c ^ d
e -= bits.RotateLeft32(a, 5) + f + shared.K1 + (m1[i] ^ dm[i])
}
}
for i := uint32(20); i > 0; i-- {
j := i - 1
a, b, c, d, e = b, c, d, e, a
if step > j {
b = bits.RotateLeft32(b, -30) // undo the rotate left
f := b&c | (^b)&d
// subtract from e
e -= bits.RotateLeft32(a, 5) + f + shared.K0 + (m1[j] ^ dm[j])
}
}
ihv[0] = a
ihv[1] = b
ihv[2] = c
ihv[3] = d
ihv[4] = e
a = state[0]
b = state[1]
c = state[2]
d = state[3]
e = state[4]
// Recompress blocks based on the current step.
// The existing collision detection does not have DVs below 58, so that logic has been
// removed from the source code. If new DVs are added which target rounds below 40,
// that logic will need to be re-added here.
for i := uint32(40); i < 60; i++ {
if step <= i {
f := ((b | c) & d) | (b & c)
t := bits.RotateLeft32(a, 5) + f + e + shared.K2 + (m1[i] ^ dm[i])
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
}
}
for i := uint32(60); i < 80; i++ {
if step <= i {
f := b ^ c ^ d
t := bits.RotateLeft32(a, 5) + f + e + shared.K3 + (m1[i] ^ dm[i])
a, b, c, d, e = t, a, bits.RotateLeft32(b, 30), c, d
}
}
ihv[0] += a
ihv[1] += b
ihv[2] += c
ihv[3] += d
ihv[4] += e
if ((ihv[0] ^ h[0]) | (ihv[1] ^ h[1]) |
(ihv[2] ^ h[2]) | (ihv[3] ^ h[3]) | (ihv[4] ^ h[4])) == 0 {
return true
}
return false
}

8
vendor/github.com/pjbgf/sha1cd/sha1cdblock_noasm.go generated vendored Normal file
View File

@ -0,0 +1,8 @@
//go:build !amd64 || noasm || !gc
// +build !amd64 noasm !gc
package sha1cd
func block(dig *digest, p []byte) {
blockGeneric(dig, p)
}

368
vendor/github.com/pjbgf/sha1cd/ubc/check.go generated vendored Normal file
View File

@ -0,0 +1,368 @@
// Based on the C implementation from Marc Stevens and Dan Shumow.
// https://github.com/cr-marcstevens/sha1collisiondetection
package ubc
type DvInfo struct {
// DvType, DvK and DvB define the DV: I(K,B) or II(K,B) (see the paper).
// https://marc-stevens.nl/research/papers/C13-S.pdf
DvType uint32
DvK uint32
DvB uint32
// TestT is the step to do the recompression from for collision detection.
TestT uint32
// MaskI and MaskB define the bit to check for each DV in the dvmask returned by ubc_check.
MaskI uint32
MaskB uint32
// Dm is the expanded message block XOR-difference defined by the DV.
Dm [80]uint32
}
// CalculateDvMask takes as input an expanded message block and verifies the unavoidable bit conditions
// for all listed DVs. It returns a dvmask where each bit belonging to a DV is set if all
// unavoidable bit conditions for that DV have been met.
// Thus, one needs to do the recompression check for each DV that has its bit set.
func CalculateDvMask(W [80]uint32) uint32 {
mask := uint32(0xFFFFFFFF)
mask &= (((((W[44] ^ W[45]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_I_51_0_bit | DV_I_52_0_bit | DV_II_45_0_bit | DV_II_46_0_bit | DV_II_50_0_bit | DV_II_51_0_bit))
mask &= (((((W[49] ^ W[50]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_II_45_0_bit | DV_II_50_0_bit | DV_II_51_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
mask &= (((((W[48] ^ W[49]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_52_0_bit | DV_II_49_0_bit | DV_II_50_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
mask &= ((((W[47] ^ (W[50] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
mask &= (((((W[47] ^ W[48]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_51_0_bit | DV_II_48_0_bit | DV_II_49_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
mask &= (((((W[46] >> 4) ^ (W[49] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_50_0_bit | DV_II_55_0_bit))
mask &= (((((W[46] ^ W[47]) >> 29) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_50_0_bit | DV_II_47_0_bit | DV_II_48_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
mask &= (((((W[45] >> 4) ^ (W[48] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit | DV_II_49_0_bit | DV_II_54_0_bit))
mask &= (((((W[45] ^ W[46]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_51_0_bit | DV_II_52_0_bit))
mask &= (((((W[44] >> 4) ^ (W[47] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_53_0_bit))
mask &= (((((W[43] >> 4) ^ (W[46] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_52_0_bit))
mask &= (((((W[43] ^ W[44]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_I_50_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_49_0_bit | DV_II_50_0_bit))
mask &= (((((W[42] >> 4) ^ (W[45] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_51_0_bit))
mask &= (((((W[41] >> 4) ^ (W[44] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_50_0_bit))
mask &= (((((W[40] ^ W[41]) >> 29) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_47_0_bit | DV_I_48_0_bit | DV_II_46_0_bit | DV_II_47_0_bit | DV_II_56_0_bit))
mask &= (((((W[54] ^ W[55]) >> 29) & 1) - 1) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_50_0_bit | DV_II_55_0_bit | DV_II_56_0_bit))
mask &= (((((W[53] ^ W[54]) >> 29) & 1) - 1) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_49_0_bit | DV_II_54_0_bit | DV_II_55_0_bit))
mask &= (((((W[52] ^ W[53]) >> 29) & 1) - 1) | ^(DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit | DV_II_53_0_bit | DV_II_54_0_bit))
mask &= ((((W[50] ^ (W[53] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_48_0_bit | DV_II_54_0_bit))
mask &= (((((W[50] ^ W[51]) >> 29) & 1) - 1) | ^(DV_I_47_0_bit | DV_II_46_0_bit | DV_II_51_0_bit | DV_II_52_0_bit | DV_II_56_0_bit))
mask &= ((((W[49] ^ (W[52] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit | DV_II_47_0_bit | DV_II_53_0_bit))
mask &= ((((W[48] ^ (W[51] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit | DV_II_46_0_bit | DV_II_52_0_bit))
mask &= (((((W[42] ^ W[43]) >> 29) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_I_50_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
mask &= (((((W[41] ^ W[42]) >> 29) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_I_49_0_bit | DV_II_47_0_bit | DV_II_48_0_bit))
mask &= (((((W[40] >> 4) ^ (W[43] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_50_0_bit | DV_II_49_0_bit | DV_II_56_0_bit))
mask &= (((((W[39] >> 4) ^ (W[42] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_49_0_bit | DV_II_48_0_bit | DV_II_55_0_bit))
if (mask & (DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
mask &= (((((W[38] >> 4) ^ (W[41] >> 29)) & 1) - 1) | ^(DV_I_44_0_bit | DV_I_48_0_bit | DV_II_47_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
}
mask &= (((((W[37] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_43_0_bit | DV_I_47_0_bit | DV_II_46_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit)) != 0 {
mask &= (((((W[55] ^ W[56]) >> 29) & 1) - 1) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_51_0_bit | DV_II_56_0_bit))
}
if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit)) != 0 {
mask &= ((((W[52] ^ (W[55] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_50_0_bit | DV_II_56_0_bit))
}
if (mask & (DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit)) != 0 {
mask &= ((((W[51] ^ (W[54] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_47_0_bit | DV_II_49_0_bit | DV_II_55_0_bit))
}
if (mask & (DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit)) != 0 {
mask &= (((((W[51] ^ W[52]) >> 29) & 1) - 1) | ^(DV_I_48_0_bit | DV_II_47_0_bit | DV_II_52_0_bit | DV_II_53_0_bit))
}
if (mask & (DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit)) != 0 {
mask &= (((((W[36] >> 4) ^ (W[40] >> 29)) & 1) - 1) | ^(DV_I_46_0_bit | DV_I_49_0_bit | DV_II_45_0_bit | DV_II_48_0_bit))
}
if (mask & (DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit)) != 0 {
mask &= ((0 - (((W[53] ^ W[56]) >> 29) & 1)) | ^(DV_I_52_0_bit | DV_II_48_0_bit | DV_II_49_0_bit))
}
if (mask & (DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit)) != 0 {
mask &= ((0 - (((W[51] ^ W[54]) >> 29) & 1)) | ^(DV_I_50_0_bit | DV_II_46_0_bit | DV_II_47_0_bit))
}
if (mask & (DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit)) != 0 {
mask &= ((0 - (((W[50] ^ W[52]) >> 29) & 1)) | ^(DV_I_49_0_bit | DV_I_51_0_bit | DV_II_45_0_bit))
}
if (mask & (DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit)) != 0 {
mask &= ((0 - (((W[49] ^ W[51]) >> 29) & 1)) | ^(DV_I_48_0_bit | DV_I_50_0_bit | DV_I_52_0_bit))
}
if (mask & (DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit)) != 0 {
mask &= ((0 - (((W[48] ^ W[50]) >> 29) & 1)) | ^(DV_I_47_0_bit | DV_I_49_0_bit | DV_I_51_0_bit))
}
if (mask & (DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit)) != 0 {
mask &= ((0 - (((W[47] ^ W[49]) >> 29) & 1)) | ^(DV_I_46_0_bit | DV_I_48_0_bit | DV_I_50_0_bit))
}
if (mask & (DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit)) != 0 {
mask &= ((0 - (((W[46] ^ W[48]) >> 29) & 1)) | ^(DV_I_45_0_bit | DV_I_47_0_bit | DV_I_49_0_bit))
}
mask &= ((((W[45] ^ W[47]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit | DV_I_51_2_bit))
if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit)) != 0 {
mask &= ((0 - (((W[45] ^ W[47]) >> 29) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_I_48_0_bit))
}
mask &= (((((W[44] ^ W[46]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit | DV_I_50_2_bit))
if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit)) != 0 {
mask &= ((0 - (((W[44] ^ W[46]) >> 29) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_I_47_0_bit))
}
mask &= ((0 - ((W[41] ^ (W[42] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_II_46_2_bit | DV_II_51_2_bit))
mask &= ((0 - ((W[40] ^ (W[41] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_51_2_bit | DV_II_50_2_bit))
if (mask & (DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit)) != 0 {
mask &= ((0 - (((W[40] ^ W[42]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_I_46_0_bit | DV_II_56_0_bit))
}
mask &= ((0 - ((W[39] ^ (W[40] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_50_2_bit | DV_II_49_2_bit))
if (mask & (DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit)) != 0 {
mask &= ((0 - (((W[39] ^ W[41]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_I_45_0_bit | DV_II_55_0_bit))
}
if (mask & (DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit)) != 0 {
mask &= ((0 - (((W[38] ^ W[40]) >> 4) & 1)) | ^(DV_I_44_0_bit | DV_II_54_0_bit | DV_II_56_0_bit))
}
if (mask & (DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit)) != 0 {
mask &= ((0 - (((W[37] ^ W[39]) >> 4) & 1)) | ^(DV_I_43_0_bit | DV_II_53_0_bit | DV_II_55_0_bit))
}
mask &= ((0 - ((W[36] ^ (W[37] >> 5)) & (1 << 1))) | ^(DV_I_47_2_bit | DV_I_50_2_bit | DV_II_46_2_bit))
if (mask & (DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit)) != 0 {
mask &= (((((W[35] >> 4) ^ (W[39] >> 29)) & 1) - 1) | ^(DV_I_45_0_bit | DV_I_48_0_bit | DV_II_47_0_bit))
}
if (mask & (DV_I_48_0_bit | DV_II_48_0_bit)) != 0 {
mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 0))) | ^(DV_I_48_0_bit | DV_II_48_0_bit))
}
if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
mask &= ((0 - ((W[63] ^ (W[64] >> 5)) & (1 << 1))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
}
if (mask & (DV_I_47_0_bit | DV_II_47_0_bit)) != 0 {
mask &= ((0 - ((W[62] ^ (W[63] >> 5)) & (1 << 0))) | ^(DV_I_47_0_bit | DV_II_47_0_bit))
}
if (mask & (DV_I_46_0_bit | DV_II_46_0_bit)) != 0 {
mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 0))) | ^(DV_I_46_0_bit | DV_II_46_0_bit))
}
mask &= ((0 - ((W[61] ^ (W[62] >> 5)) & (1 << 2))) | ^(DV_I_46_2_bit | DV_II_46_2_bit))
if (mask & (DV_I_45_0_bit | DV_II_45_0_bit)) != 0 {
mask &= ((0 - ((W[60] ^ (W[61] >> 5)) & (1 << 0))) | ^(DV_I_45_0_bit | DV_II_45_0_bit))
}
if (mask & (DV_II_51_0_bit | DV_II_54_0_bit)) != 0 {
mask &= (((((W[58] ^ W[59]) >> 29) & 1) - 1) | ^(DV_II_51_0_bit | DV_II_54_0_bit))
}
if (mask & (DV_II_50_0_bit | DV_II_53_0_bit)) != 0 {
mask &= (((((W[57] ^ W[58]) >> 29) & 1) - 1) | ^(DV_II_50_0_bit | DV_II_53_0_bit))
}
if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
mask &= ((((W[56] ^ (W[59] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
}
if (mask & (DV_II_51_0_bit | DV_II_52_0_bit)) != 0 {
mask &= ((0 - (((W[56] ^ W[59]) >> 29) & 1)) | ^(DV_II_51_0_bit | DV_II_52_0_bit))
}
if (mask & (DV_II_49_0_bit | DV_II_52_0_bit)) != 0 {
mask &= (((((W[56] ^ W[57]) >> 29) & 1) - 1) | ^(DV_II_49_0_bit | DV_II_52_0_bit))
}
if (mask & (DV_II_51_0_bit | DV_II_53_0_bit)) != 0 {
mask &= ((((W[55] ^ (W[58] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_51_0_bit | DV_II_53_0_bit))
}
if (mask & (DV_II_50_0_bit | DV_II_52_0_bit)) != 0 {
mask &= ((((W[54] ^ (W[57] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_50_0_bit | DV_II_52_0_bit))
}
if (mask & (DV_II_49_0_bit | DV_II_51_0_bit)) != 0 {
mask &= ((((W[53] ^ (W[56] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_II_49_0_bit | DV_II_51_0_bit))
}
mask &= ((((W[51] ^ (W[50] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
mask &= ((((W[48] ^ W[50]) & (1 << 6)) - (1 << 6)) | ^(DV_I_50_2_bit | DV_II_46_2_bit))
if (mask & (DV_I_51_0_bit | DV_I_52_0_bit)) != 0 {
mask &= ((0 - (((W[48] ^ W[55]) >> 29) & 1)) | ^(DV_I_51_0_bit | DV_I_52_0_bit))
}
mask &= ((((W[47] ^ W[49]) & (1 << 6)) - (1 << 6)) | ^(DV_I_49_2_bit | DV_I_51_2_bit))
mask &= ((((W[48] ^ (W[47] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_47_2_bit | DV_II_51_2_bit))
mask &= ((((W[46] ^ W[48]) & (1 << 6)) - (1 << 6)) | ^(DV_I_48_2_bit | DV_I_50_2_bit))
mask &= ((((W[47] ^ (W[46] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_46_2_bit | DV_II_50_2_bit))
mask &= ((0 - ((W[44] ^ (W[45] >> 5)) & (1 << 1))) | ^(DV_I_51_2_bit | DV_II_49_2_bit))
mask &= ((((W[43] ^ W[45]) & (1 << 6)) - (1 << 6)) | ^(DV_I_47_2_bit | DV_I_49_2_bit))
mask &= (((((W[42] ^ W[44]) >> 6) & 1) - 1) | ^(DV_I_46_2_bit | DV_I_48_2_bit))
mask &= ((((W[43] ^ (W[42] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_II_46_2_bit | DV_II_51_2_bit))
mask &= ((((W[42] ^ (W[41] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_51_2_bit | DV_II_50_2_bit))
mask &= ((((W[41] ^ (W[40] >> 5)) & (1 << 1)) - (1 << 1)) | ^(DV_I_50_2_bit | DV_II_49_2_bit))
if (mask & (DV_I_52_0_bit | DV_II_51_0_bit)) != 0 {
mask &= ((((W[39] ^ (W[43] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_52_0_bit | DV_II_51_0_bit))
}
if (mask & (DV_I_51_0_bit | DV_II_50_0_bit)) != 0 {
mask &= ((((W[38] ^ (W[42] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_51_0_bit | DV_II_50_0_bit))
}
if (mask & (DV_I_48_2_bit | DV_I_51_2_bit)) != 0 {
mask &= ((0 - ((W[37] ^ (W[38] >> 5)) & (1 << 1))) | ^(DV_I_48_2_bit | DV_I_51_2_bit))
}
if (mask & (DV_I_50_0_bit | DV_II_49_0_bit)) != 0 {
mask &= ((((W[37] ^ (W[41] >> 25)) & (1 << 4)) - (1 << 4)) | ^(DV_I_50_0_bit | DV_II_49_0_bit))
}
if (mask & (DV_II_52_0_bit | DV_II_54_0_bit)) != 0 {
mask &= ((0 - ((W[36] ^ W[38]) & (1 << 4))) | ^(DV_II_52_0_bit | DV_II_54_0_bit))
}
mask &= ((0 - ((W[35] ^ (W[36] >> 5)) & (1 << 1))) | ^(DV_I_46_2_bit | DV_I_49_2_bit))
if (mask & (DV_I_51_0_bit | DV_II_47_0_bit)) != 0 {
mask &= ((((W[35] ^ (W[39] >> 25)) & (1 << 3)) - (1 << 3)) | ^(DV_I_51_0_bit | DV_II_47_0_bit))
}
if mask != 0 {
if (mask & DV_I_43_0_bit) != 0 {
if not((W[61]^(W[62]>>5))&(1<<1)) != 0 ||
not(not((W[59]^(W[63]>>25))&(1<<5))) != 0 ||
not((W[58]^(W[63]>>30))&(1<<0)) != 0 {
mask &= ^DV_I_43_0_bit
}
}
if (mask & DV_I_44_0_bit) != 0 {
if not((W[62]^(W[63]>>5))&(1<<1)) != 0 ||
not(not((W[60]^(W[64]>>25))&(1<<5))) != 0 ||
not((W[59]^(W[64]>>30))&(1<<0)) != 0 {
mask &= ^DV_I_44_0_bit
}
}
if (mask & DV_I_46_2_bit) != 0 {
mask &= ((^((W[40] ^ W[42]) >> 2)) | ^DV_I_46_2_bit)
}
if (mask & DV_I_47_2_bit) != 0 {
if not((W[62]^(W[63]>>5))&(1<<2)) != 0 ||
not(not((W[41]^W[43])&(1<<6))) != 0 {
mask &= ^DV_I_47_2_bit
}
}
if (mask & DV_I_48_2_bit) != 0 {
if not((W[63]^(W[64]>>5))&(1<<2)) != 0 ||
not(not((W[48]^(W[49]<<5))&(1<<6))) != 0 {
mask &= ^DV_I_48_2_bit
}
}
if (mask & DV_I_49_2_bit) != 0 {
if not(not((W[49]^(W[50]<<5))&(1<<6))) != 0 ||
not((W[42]^W[50])&(1<<1)) != 0 ||
not(not((W[39]^(W[40]<<5))&(1<<6))) != 0 ||
not((W[38]^W[40])&(1<<1)) != 0 {
mask &= ^DV_I_49_2_bit
}
}
if (mask & DV_I_50_0_bit) != 0 {
mask &= (((W[36] ^ W[37]) << 7) | ^DV_I_50_0_bit)
}
if (mask & DV_I_50_2_bit) != 0 {
mask &= (((W[43] ^ W[51]) << 11) | ^DV_I_50_2_bit)
}
if (mask & DV_I_51_0_bit) != 0 {
mask &= (((W[37] ^ W[38]) << 9) | ^DV_I_51_0_bit)
}
if (mask & DV_I_51_2_bit) != 0 {
if not(not((W[51]^(W[52]<<5))&(1<<6))) != 0 ||
not(not((W[49]^W[51])&(1<<6))) != 0 ||
not(not((W[37]^(W[37]>>5))&(1<<1))) != 0 ||
not(not((W[35]^(W[39]>>25))&(1<<5))) != 0 {
mask &= ^DV_I_51_2_bit
}
}
if (mask & DV_I_52_0_bit) != 0 {
mask &= (((W[38] ^ W[39]) << 11) | ^DV_I_52_0_bit)
}
if (mask & DV_II_46_2_bit) != 0 {
mask &= (((W[47] ^ W[51]) << 17) | ^DV_II_46_2_bit)
}
if (mask & DV_II_48_0_bit) != 0 {
if not(not((W[36]^(W[40]>>25))&(1<<3))) != 0 ||
not((W[35]^(W[40]<<2))&(1<<30)) != 0 {
mask &= ^DV_II_48_0_bit
}
}
if (mask & DV_II_49_0_bit) != 0 {
if not(not((W[37]^(W[41]>>25))&(1<<3))) != 0 ||
not((W[36]^(W[41]<<2))&(1<<30)) != 0 {
mask &= ^DV_II_49_0_bit
}
}
if (mask & DV_II_49_2_bit) != 0 {
if not(not((W[53]^(W[54]<<5))&(1<<6))) != 0 ||
not(not((W[51]^W[53])&(1<<6))) != 0 ||
not((W[50]^W[54])&(1<<1)) != 0 ||
not(not((W[45]^(W[46]<<5))&(1<<6))) != 0 ||
not(not((W[37]^(W[41]>>25))&(1<<5))) != 0 ||
not((W[36]^(W[41]>>30))&(1<<0)) != 0 {
mask &= ^DV_II_49_2_bit
}
}
if (mask & DV_II_50_0_bit) != 0 {
if not((W[55]^W[58])&(1<<29)) != 0 ||
not(not((W[38]^(W[42]>>25))&(1<<3))) != 0 ||
not((W[37]^(W[42]<<2))&(1<<30)) != 0 {
mask &= ^DV_II_50_0_bit
}
}
if (mask & DV_II_50_2_bit) != 0 {
if not(not((W[54]^(W[55]<<5))&(1<<6))) != 0 ||
not(not((W[52]^W[54])&(1<<6))) != 0 ||
not((W[51]^W[55])&(1<<1)) != 0 ||
not((W[45]^W[47])&(1<<1)) != 0 ||
not(not((W[38]^(W[42]>>25))&(1<<5))) != 0 ||
not((W[37]^(W[42]>>30))&(1<<0)) != 0 {
mask &= ^DV_II_50_2_bit
}
}
if (mask & DV_II_51_0_bit) != 0 {
if not(not((W[39]^(W[43]>>25))&(1<<3))) != 0 ||
not((W[38]^(W[43]<<2))&(1<<30)) != 0 {
mask &= ^DV_II_51_0_bit
}
}
if (mask & DV_II_51_2_bit) != 0 {
if not(not((W[55]^(W[56]<<5))&(1<<6))) != 0 ||
not(not((W[53]^W[55])&(1<<6))) != 0 ||
not((W[52]^W[56])&(1<<1)) != 0 ||
not((W[46]^W[48])&(1<<1)) != 0 ||
not(not((W[39]^(W[43]>>25))&(1<<5))) != 0 ||
not((W[38]^(W[43]>>30))&(1<<0)) != 0 {
mask &= ^DV_II_51_2_bit
}
}
if (mask & DV_II_52_0_bit) != 0 {
if not(not((W[59]^W[60])&(1<<29))) != 0 ||
not(not((W[40]^(W[44]>>25))&(1<<3))) != 0 ||
not(not((W[40]^(W[44]>>25))&(1<<4))) != 0 ||
not((W[39]^(W[44]<<2))&(1<<30)) != 0 {
mask &= ^DV_II_52_0_bit
}
}
if (mask & DV_II_53_0_bit) != 0 {
if not((W[58]^W[61])&(1<<29)) != 0 ||
not(not((W[57]^(W[61]>>25))&(1<<4))) != 0 ||
not(not((W[41]^(W[45]>>25))&(1<<3))) != 0 ||
not(not((W[41]^(W[45]>>25))&(1<<4))) != 0 {
mask &= ^DV_II_53_0_bit
}
}
if (mask & DV_II_54_0_bit) != 0 {
if not(not((W[58]^(W[62]>>25))&(1<<4))) != 0 ||
not(not((W[42]^(W[46]>>25))&(1<<3))) != 0 ||
not(not((W[42]^(W[46]>>25))&(1<<4))) != 0 {
mask &= ^DV_II_54_0_bit
}
}
if (mask & DV_II_55_0_bit) != 0 {
if not(not((W[59]^(W[63]>>25))&(1<<4))) != 0 ||
not(not((W[57]^(W[59]>>25))&(1<<4))) != 0 ||
not(not((W[43]^(W[47]>>25))&(1<<3))) != 0 ||
not(not((W[43]^(W[47]>>25))&(1<<4))) != 0 {
mask &= ^DV_II_55_0_bit
}
}
if (mask & DV_II_56_0_bit) != 0 {
if not(not((W[60]^(W[64]>>25))&(1<<4))) != 0 ||
not(not((W[44]^(W[48]>>25))&(1<<3))) != 0 ||
not(not((W[44]^(W[48]>>25))&(1<<4))) != 0 {
mask &= ^DV_II_56_0_bit
}
}
}
return mask
}
func not(x uint32) uint32 {
if x == 0 {
return 1
}
return 0
}
func SHA1_dvs() []DvInfo {
return sha1_dvs
}
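A hedged sketch of how the mask returned by CalculateDvMask is consumed; it mirrors the checkCollision loop in sha1cdblock_generic.go earlier in this change. The all-zero message block is purely illustrative, and the bounds check on the slice is an extra precaution on top of checkCollision's zero-DvType termination condition.

```golang
package main

import (
	"fmt"

	"github.com/pjbgf/sha1cd/ubc"
)

func main() {
	// W would normally hold the 80 expanded message words of the block being
	// hashed; an all-zero block is used here purely for illustration.
	var W [80]uint32

	mask := ubc.CalculateDvMask(W)
	if mask == 0 {
		fmt.Println("no unavoidable bit conditions met; no recompression needed")
		return
	}

	// For every DV whose bit is set, a recompression check from dv.TestT would
	// be required (this mirrors checkCollision in sha1cdblock_generic.go).
	dvs := ubc.SHA1_dvs()
	for i := 0; i < len(dvs) && dvs[i].DvType != 0; i++ {
		if mask&(uint32(1)<<dvs[i].MaskB) != 0 {
			fmt.Printf("DV %d(%d,%d): recompress from step %d\n",
				dvs[i].DvType, dvs[i].DvK, dvs[i].DvB, dvs[i].TestT)
		}
	}
}
```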

624
vendor/github.com/pjbgf/sha1cd/ubc/const.go generated vendored Normal file
View File

@ -0,0 +1,624 @@
// Based on the C implementation from Marc Stevens and Dan Shumow.
// https://github.com/cr-marcstevens/sha1collisiondetection
package ubc
const (
CheckSize = 80
DV_I_43_0_bit = (uint32)(1 << 0)
DV_I_44_0_bit = (uint32)(1 << 1)
DV_I_45_0_bit = (uint32)(1 << 2)
DV_I_46_0_bit = (uint32)(1 << 3)
DV_I_46_2_bit = (uint32)(1 << 4)
DV_I_47_0_bit = (uint32)(1 << 5)
DV_I_47_2_bit = (uint32)(1 << 6)
DV_I_48_0_bit = (uint32)(1 << 7)
DV_I_48_2_bit = (uint32)(1 << 8)
DV_I_49_0_bit = (uint32)(1 << 9)
DV_I_49_2_bit = (uint32)(1 << 10)
DV_I_50_0_bit = (uint32)(1 << 11)
DV_I_50_2_bit = (uint32)(1 << 12)
DV_I_51_0_bit = (uint32)(1 << 13)
DV_I_51_2_bit = (uint32)(1 << 14)
DV_I_52_0_bit = (uint32)(1 << 15)
DV_II_45_0_bit = (uint32)(1 << 16)
DV_II_46_0_bit = (uint32)(1 << 17)
DV_II_46_2_bit = (uint32)(1 << 18)
DV_II_47_0_bit = (uint32)(1 << 19)
DV_II_48_0_bit = (uint32)(1 << 20)
DV_II_49_0_bit = (uint32)(1 << 21)
DV_II_49_2_bit = (uint32)(1 << 22)
DV_II_50_0_bit = (uint32)(1 << 23)
DV_II_50_2_bit = (uint32)(1 << 24)
DV_II_51_0_bit = (uint32)(1 << 25)
DV_II_51_2_bit = (uint32)(1 << 26)
DV_II_52_0_bit = (uint32)(1 << 27)
DV_II_53_0_bit = (uint32)(1 << 28)
DV_II_54_0_bit = (uint32)(1 << 29)
DV_II_55_0_bit = (uint32)(1 << 30)
DV_II_56_0_bit = (uint32)(1 << 31)
)
// sha1_dvs contains a list of SHA-1 Disturbance Vectors (DV) which define the
// unavoidable bit conditions when a collision attack is in progress.
var sha1_dvs = []DvInfo{
{
DvType: 1, DvK: 43, DvB: 0, TestT: 58, MaskI: 0, MaskB: 0,
Dm: [CheckSize]uint32{
0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6, 0x8000004c,
0x00000803, 0x80000161, 0x80000599},
}, {
DvType: 1, DvK: 44, DvB: 0, TestT: 58, MaskI: 0, MaskB: 1,
Dm: [CheckSize]uint32{
0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408, 0x800000e6,
0x8000004c, 0x00000803, 0x80000161},
},
{
DvType: 1, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 2,
Dm: [CheckSize]uint32{
0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164, 0x00000408,
0x800000e6, 0x8000004c, 0x00000803},
},
{
DvType: 1, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 3,
Dm: [CheckSize]uint32{
0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
0x00000408, 0x800000e6, 0x8000004c},
},
{
DvType: 1, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 4,
Dm: [CheckSize]uint32{
0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043,
0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003,
0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001,
0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003,
0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043,
0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002,
0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000,
0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002,
0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101,
0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c,
0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590, 0x00001020,
0x0000039a, 0x00000132},
},
{
DvType: 1, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 5,
Dm: [CheckSize]uint32{
0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c,
0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008,
0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010,
0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008,
0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000,
0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000,
0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049,
0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018, 0x00000164,
0x00000408, 0x800000e6},
},
{
DvType: 1, DvK: 47, DvB: 2, TestT: 58, MaskI: 0, MaskB: 6,
Dm: [CheckSize]uint32{
0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000, 0x60000032,
0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001, 0x00000020,
0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000, 0x80000040,
0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043, 0x40000022,
0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001, 0x40000002,
0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042, 0x40000002,
0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002, 0x80000040,
0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000, 0x00000040,
0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009,
0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a, 0x00000124,
0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060, 0x00000590,
0x00001020, 0x0000039a,
},
},
{
DvType: 1, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 7,
Dm: [CheckSize]uint32{
0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008, 0x08000000,
0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000, 0x60000000,
0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014, 0x28000000,
0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010, 0xf0000010,
0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008, 0x40000000,
0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010, 0x90000010,
0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000, 0xa0000000,
0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x20000000,
0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080, 0x80000006,
0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202, 0x00000018,
0x00000164, 0x00000408,
},
},
{
DvType: 1, DvK: 48, DvB: 2, TestT: 58, MaskI: 0, MaskB: 8,
Dm: [CheckSize]uint32{
0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022, 0x20000000,
0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002, 0x80000001,
0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052, 0xa0000000,
0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042, 0xc0000043,
0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022, 0x00000001,
0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042, 0x40000042,
0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000, 0x80000002,
0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040, 0x80000000,
0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080, 0x00000004,
0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202, 0x0000001a,
0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a, 0x00000060,
0x00000590, 0x00001020},
},
{
DvType: 1, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 9,
Dm: [CheckSize]uint32{
0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014, 0xb4000008,
0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010, 0x98000000,
0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010, 0xb8000014,
0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000, 0x90000010,
0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010, 0xb0000008,
0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000, 0x90000010,
0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000, 0x20000000,
0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004, 0x80000080,
0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012, 0x80000202,
0x00000018, 0x00000164},
},
{
DvType: 1, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 10,
Dm: [CheckSize]uint32{
0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053, 0xd0000022,
0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042, 0x60000002,
0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040, 0xe0000052,
0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001, 0x40000042,
0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043, 0xc0000022,
0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001, 0x40000042,
0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002, 0x80000000,
0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012, 0x00000202,
0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a, 0x0000080a,
0x00000060, 0x00000590},
},
{
DvType: 1, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 11,
Dm: [CheckSize]uint32{
0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010, 0xf4000014,
0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010, 0xb8000010,
0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014, 0x10000010,
0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018, 0x60000000,
0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010, 0xf0000010,
0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018, 0x60000000,
0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010, 0xa0000000,
0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002, 0x80000004,
0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009, 0x80000012,
0x80000202, 0x00000018,
},
},
{
DvType: 1, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 12,
Dm: [CheckSize]uint32{
0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040, 0xd0000053,
0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040, 0xe0000042,
0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052, 0x40000040,
0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060, 0x80000001,
0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042, 0xc0000043,
0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062, 0x80000001,
0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040, 0x80000002,
0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040, 0x80000000,
0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000, 0x80000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009, 0x00000012,
0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026, 0x0000004a,
0x0000080a, 0x00000060},
},
{
DvType: 1, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 13,
Dm: [CheckSize]uint32{
0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010, 0x2c000010,
0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010, 0x08000010,
0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000, 0x90000014,
0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000, 0x08000018,
0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000, 0x90000010,
0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010, 0x90000018,
0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000, 0x00000010,
0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040, 0x40000002,
0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103, 0x80000009,
0x80000012, 0x80000202},
},
{
DvType: 1, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 14,
Dm: [CheckSize]uint32{
0xa0000003, 0x20000030, 0x60000000, 0xe000002a, 0x20000043, 0xb0000040,
0xd0000053, 0xd0000022, 0x20000000, 0x60000032, 0x60000043, 0x20000040,
0xe0000042, 0x60000002, 0x80000001, 0x00000020, 0x00000003, 0x40000052,
0x40000040, 0xe0000052, 0xa0000000, 0x80000040, 0x20000001, 0x20000060,
0x80000001, 0x40000042, 0xc0000043, 0x40000022, 0x00000003, 0x40000042,
0xc0000043, 0xc0000022, 0x00000001, 0x40000002, 0xc0000043, 0x40000062,
0x80000001, 0x40000042, 0x40000042, 0x40000002, 0x00000002, 0x00000040,
0x80000002, 0x80000000, 0x80000002, 0x80000040, 0x00000000, 0x80000040,
0x80000000, 0x00000040, 0x80000000, 0x00000040, 0x80000002, 0x00000000,
0x80000000, 0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000101, 0x00000009,
0x00000012, 0x00000202, 0x0000001a, 0x00000124, 0x0000040c, 0x00000026,
0x0000004a, 0x0000080a},
},
{
DvType: 1, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 15,
Dm: [CheckSize]uint32{
0x04000010, 0xe8000000, 0x0800000c, 0x18000000, 0xb800000a, 0xc8000010,
0x2c000010, 0xf4000014, 0xb4000008, 0x08000000, 0x9800000c, 0xd8000010,
0x08000010, 0xb8000010, 0x98000000, 0x60000000, 0x00000008, 0xc0000000,
0x90000014, 0x10000010, 0xb8000014, 0x28000000, 0x20000010, 0x48000000,
0x08000018, 0x60000000, 0x90000010, 0xf0000010, 0x90000008, 0xc0000000,
0x90000010, 0xf0000010, 0xb0000008, 0x40000000, 0x90000000, 0xf0000010,
0x90000018, 0x60000000, 0x90000010, 0x90000010, 0x90000000, 0x80000000,
0x00000010, 0xa0000000, 0x20000000, 0xa0000000, 0x20000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x20000000, 0x00000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000040,
0x40000002, 0x80000004, 0x80000080, 0x80000006, 0x00000049, 0x00000103,
0x80000009, 0x80000012},
},
{
DvType: 2, DvK: 45, DvB: 0, TestT: 58, MaskI: 0, MaskB: 16,
Dm: [CheckSize]uint32{
0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a, 0x000002e4,
0x80000054, 0x00000967},
},
{
DvType: 2, DvK: 46, DvB: 0, TestT: 58, MaskI: 0, MaskB: 17,
Dm: [CheckSize]uint32{
0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d, 0x8000041a,
0x000002e4, 0x80000054},
},
{
DvType: 2, DvK: 46, DvB: 2, TestT: 58, MaskI: 0, MaskB: 18,
Dm: [CheckSize]uint32{
0x90000070, 0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010,
0xf0000062, 0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041,
0x20000050, 0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041,
0xc0000032, 0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002,
0x00000000, 0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
0x80000040, 0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003,
0x40000002, 0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002,
0x00000040, 0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002,
0x00000040, 0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000,
0x80000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105,
0x00000089, 0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e,
0x00000224, 0x00000050, 0x0000092e, 0x0000046c, 0x000005b6, 0x0000106a,
0x00000b90, 0x00000152},
},
{
DvType: 2, DvK: 47, DvB: 0, TestT: 58, MaskI: 0, MaskB: 19,
Dm: [CheckSize]uint32{
0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b, 0x8000016d,
0x8000041a, 0x000002e4},
},
{
DvType: 2, DvK: 48, DvB: 0, TestT: 58, MaskI: 0, MaskB: 20,
Dm: [CheckSize]uint32{
0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b, 0x0000011b,
0x8000016d, 0x8000041a},
},
{
DvType: 2, DvK: 49, DvB: 0, TestT: 58, MaskI: 0, MaskB: 21,
Dm: [CheckSize]uint32{
0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014, 0x8000024b,
0x0000011b, 0x8000016d},
},
{
DvType: 2, DvK: 49, DvB: 2, TestT: 58, MaskI: 0, MaskB: 22,
Dm: [CheckSize]uint32{
0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053, 0x30000008,
0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042, 0x00000030,
0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041, 0xe0000072,
0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001, 0xc0000002,
0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000, 0x80000000,
0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000, 0x00000040,
0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040, 0xc0000002,
0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002, 0x80000000,
0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004, 0x00000080,
0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016, 0x0000020b,
0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050, 0x0000092e,
0x0000046c, 0x000005b6},
},
{
DvType: 2, DvK: 50, DvB: 0, TestT: 65, MaskI: 0, MaskB: 23,
Dm: [CheckSize]uint32{
0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089, 0x00000014,
0x8000024b, 0x0000011b},
},
{
DvType: 2, DvK: 50, DvB: 2, TestT: 65, MaskI: 0, MaskB: 24,
Dm: [CheckSize]uint32{
0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070, 0xb0000053,
0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062, 0xc0000042,
0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050, 0xc0000041,
0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032, 0x20000001,
0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000, 0x00000000,
0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040, 0x80000000,
0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002, 0xc0000040,
0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040, 0x00000002,
0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000,
0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000004,
0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089, 0x00000016,
0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224, 0x00000050,
0x0000092e, 0x0000046c},
},
{
DvType: 2, DvK: 51, DvB: 0, TestT: 65, MaskI: 0, MaskB: 25,
Dm: [CheckSize]uint32{
0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c,
0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018,
0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014,
0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c,
0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000,
0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000,
0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010,
0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022,
0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107, 0x00000089,
0x00000014, 0x8000024b},
},
{
DvType: 2, DvK: 51, DvB: 2, TestT: 65, MaskI: 0, MaskB: 26,
Dm: [CheckSize]uint32{
0x00000043, 0xd0000072, 0xf0000010, 0xf000006a, 0x80000040, 0x90000070,
0xb0000053, 0x30000008, 0x00000043, 0xd0000072, 0xb0000010, 0xf0000062,
0xc0000042, 0x00000030, 0xe0000042, 0x20000060, 0xe0000041, 0x20000050,
0xc0000041, 0xe0000072, 0xa0000003, 0xc0000012, 0x60000041, 0xc0000032,
0x20000001, 0xc0000002, 0xe0000042, 0x60000042, 0x80000002, 0x00000000,
0x00000000, 0x80000000, 0x00000002, 0x00000040, 0x00000000, 0x80000040,
0x80000000, 0x00000040, 0x80000001, 0x00000060, 0x80000003, 0x40000002,
0xc0000040, 0xc0000002, 0x80000000, 0x80000000, 0x80000002, 0x00000040,
0x00000002, 0x80000000, 0x80000000, 0x80000000, 0x00000002, 0x00000040,
0x00000000, 0x80000040, 0x80000002, 0x00000000, 0x80000000, 0x80000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000004, 0x00000080, 0x00000004, 0x00000009, 0x00000105, 0x00000089,
0x00000016, 0x0000020b, 0x0000011b, 0x0000012d, 0x0000041e, 0x00000224,
0x00000050, 0x0000092e},
},
{
DvType: 2, DvK: 52, DvB: 0, TestT: 65, MaskI: 0, MaskB: 27,
Dm: [CheckSize]uint32{
0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010,
0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004,
0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018, 0x78000010,
0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004, 0x58000010,
0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010, 0xa0000000,
0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018, 0xe0000000,
0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000, 0xa0000000,
0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000, 0x20000000,
0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002, 0x40000041,
0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b, 0x80000107,
0x00000089, 0x00000014},
},
{
DvType: 2, DvK: 53, DvB: 0, TestT: 65, MaskI: 0, MaskB: 28,
Dm: [CheckSize]uint32{
0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004, 0xbc00001a,
0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010, 0x08000018,
0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000, 0xb0000004,
0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010, 0x98000010,
0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000, 0x00000010,
0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000, 0x00000018,
0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000, 0x20000000,
0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000, 0x00000000,
0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001, 0x40000002,
0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046, 0x4000004b,
0x80000107, 0x00000089},
},
{
DvType: 2, DvK: 54, DvB: 0, TestT: 65, MaskI: 0, MaskB: 29,
Dm: [CheckSize]uint32{
0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c, 0x3c000004,
0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c, 0xb8000010,
0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c, 0xe8000000,
0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000, 0xb8000010,
0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000, 0x80000000,
0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010, 0x60000000,
0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000, 0x20000000,
0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000, 0x20000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0xa0000000,
0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020, 0x00000001,
0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082, 0xc0000046,
0x4000004b, 0x80000107},
},
{
DvType: 2, DvK: 55, DvB: 0, TestT: 65, MaskI: 0, MaskB: 30,
Dm: [CheckSize]uint32{
0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010, 0xb400001c,
0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014, 0x0c000002,
0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010, 0x0000000c,
0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010, 0xb800001c,
0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000, 0xb0000000,
0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000, 0x20000000,
0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000, 0x00000010,
0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010, 0xb0000000,
0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000, 0x20000000,
0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010,
0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001, 0x00000020,
0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005, 0xc0000082,
0xc0000046, 0x4000004b},
},
{
DvType: 2, DvK: 56, DvB: 0, TestT: 65, MaskI: 0, MaskB: 31,
Dm: [CheckSize]uint32{
0x2600001a, 0x00000010, 0x0400001c, 0xcc000014, 0x0c000002, 0xc0000010,
0xb400001c, 0x3c000004, 0xbc00001a, 0x20000010, 0x2400001c, 0xec000014,
0x0c000002, 0xc0000010, 0xb400001c, 0x2c000004, 0xbc000018, 0xb0000010,
0x0000000c, 0xb8000010, 0x08000018, 0x78000010, 0x08000014, 0x70000010,
0xb800001c, 0xe8000000, 0xb0000004, 0x58000010, 0xb000000c, 0x48000000,
0xb0000000, 0xb8000010, 0x98000010, 0xa0000000, 0x00000000, 0x00000000,
0x20000000, 0x80000000, 0x00000010, 0x00000000, 0x20000010, 0x20000000,
0x00000010, 0x60000000, 0x00000018, 0xe0000000, 0x90000000, 0x30000010,
0xb0000000, 0x20000000, 0x20000000, 0xa0000000, 0x00000010, 0x80000000,
0x20000000, 0x20000000, 0x20000000, 0x80000000, 0x00000010, 0x00000000,
0x20000010, 0xa0000000, 0x00000000, 0x20000000, 0x20000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000001,
0x00000020, 0x00000001, 0x40000002, 0x40000041, 0x40000022, 0x80000005,
0xc0000082, 0xc0000046},
},
{
DvType: 0, DvK: 0, DvB: 0, TestT: 0, MaskI: 0, MaskB: 0,
Dm: [CheckSize]uint32{
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0},
},
}
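Each entry in the table above carries a (MaskI, MaskB) pair naming that entry's bit in the DV mask; with a single 32-bit mask word, MaskB lines up with the DV_*_bit constants at the top of the file (for example, the DvK: 51, DvB: 0 entry has MaskB: 13, matching DV_I_51_0_bit = 1 << 13). A minimal sketch of that mapping follows; maskBitFor is an illustrative helper written as if inside package ubc, not part of the vendored file, and it assumes MaskI is always 0, as it is in every entry here.

// maskBitFor maps a DvInfo entry to its bit in the single DV mask word.
// Illustrative only; assumes MaskI == 0 for all entries, as in this table.
func maskBitFor(dv DvInfo) uint32 {
	return uint32(1) << dv.MaskB
}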

3
vendor/github.com/pjbgf/sha1cd/ubc/doc.go generated vendored Normal file

@ -0,0 +1,3 @@
// Package ubc provides ways to check SHA1 blocks for
// Unavoidable Bit Conditions that arise from cryptanalysis attacks.
package ubc
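As a rough end-to-end sketch of how these vendored pieces fit together: the package's check routine (partially shown earlier in this diff; its exported name is not visible here, so it is passed in as a stand-in parameter below) produces a bitmask, and callers cross-reference the set bits against the table returned by SHA1_dvs(). Only CheckSize, DvInfo, and SHA1_dvs are taken from the diff; checkBlock and dvMaskOf are hypothetical names.

package main

import (
	"fmt"

	"github.com/pjbgf/sha1cd/ubc"
)

// checkBlock reports which SHA-1 disturbance vectors remain possible for one
// expanded 80-word message block. dvMaskOf stands in for the package's
// unavoidable-bit-condition check, whose exported name is not shown in this diff.
func checkBlock(W [ubc.CheckSize]uint32, dvMaskOf func([ubc.CheckSize]uint32) uint32) {
	mask := dvMaskOf(W)
	for _, dv := range ubc.SHA1_dvs() {
		if dv.DvType == 0 {
			break // the zeroed sentinel entry terminates the table
		}
		if mask&(uint32(1)<<dv.MaskB) != 0 {
			fmt.Printf("DV(type=%d, K=%d, B=%d) still unavoidable; test step %d\n",
				dv.DvType, dv.DvK, dv.DvB, dv.TestT)
		}
	}
}

func main() {
	var W [ubc.CheckSize]uint32 // expanded message words of one SHA-1 block
	checkBlock(W, func([ubc.CheckSize]uint32) uint32 { return 0 }) // placeholder check
}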

201
vendor/github.com/skeema/knownhosts/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
vendor/github.com/skeema/knownhosts/NOTICE generated vendored Normal file

@ -0,0 +1,13 @@
Copyright 2022 Skeema LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Some files were not shown because too many files have changed in this diff