From 24b54e688ff9dd43d97f92b9954c6e8a76c268f3 Mon Sep 17 00:00:00 2001 From: huzhangying Date: Fri, 21 Nov 2025 09:46:57 +0800 Subject: [PATCH] fix CVE --- ...58183-archive-tar-set-a-limit-on-the.patch | 90 +++ ...-58189-crypto-tls-quote-protocols-in.patch | 44 ++ ...-61724-net-textproto-avoid-quadratic.patch | 70 +++ ...8188-crypto-x509-mitigate-Dos-vector.patch | 193 +++++++ ...oding-asn1-prevent-memory-exhaustion.patch | 150 +++++ ...509-improve-domain-name-verification.patch | 364 ++++++++++++ ...o-x509-rework-fix-for-CVE-2025-58187.patch | 538 ++++++++++++++++++ ...ng-pem-make-Decode-complexity-linear.patch | 220 +++++++ ...912-net-url-enforce-stricter-parsing.patch | 224 ++++++++ ...-58186-net-http-add-httpcookiemaxnum.patch | 194 +++++++ golang.spec | 18 +- 11 files changed, 2104 insertions(+), 1 deletion(-) create mode 100644 backport-0037-CVE-2025-58183-archive-tar-set-a-limit-on-the.patch create mode 100644 backport-0038-CVE-2025-58189-crypto-tls-quote-protocols-in.patch create mode 100644 backport-0039-CVE-2025-61724-net-textproto-avoid-quadratic.patch create mode 100644 backport-0040-CVE-2025-58188-crypto-x509-mitigate-Dos-vector.patch create mode 100644 backport-0041-CVE-2025-58185-encoding-asn1-prevent-memory-exhaustion.patch create mode 100644 backport-0042-CVE-2025-58187-crypto-x509-improve-domain-name-verification.patch create mode 100644 backport-0043-CVE-2025-58187-crypto-x509-rework-fix-for-CVE-2025-58187.patch create mode 100644 backport-0044-CVE-2025-61723-encoding-pem-make-Decode-complexity-linear.patch create mode 100644 backport-0045-CVE-2025-47912-net-url-enforce-stricter-parsing.patch create mode 100644 backport-0046-CVE-2025-58186-net-http-add-httpcookiemaxnum.patch diff --git a/backport-0037-CVE-2025-58183-archive-tar-set-a-limit-on-the.patch b/backport-0037-CVE-2025-58183-archive-tar-set-a-limit-on-the.patch new file mode 100644 index 0000000..c9a1285 --- /dev/null +++ b/backport-0037-CVE-2025-58183-archive-tar-set-a-limit-on-the.patch 
@@ -0,0 +1,90 @@ +From f7a68d3804efabd271f0338391858bc1e7e57422 Mon Sep 17 00:00:00 2001 +From: Damien Neil +Date: Thu, 11 Sep 2025 13:32:10 -0700 +Subject: [PATCH] archive/tar: set a limit on the size of GNU + sparse file 1.0 regions + +Sparse files in tar archives contain only the non-zero components +of the file. There are several different encodings for sparse +files. When reading GNU tar pax 1.0 sparse files, archive/tar did +not set a limit on the size of the sparse region data. A malicious +archive containing a large number of sparse blocks could cause +archive/tar to read an unbounded amount of data from the archive +into memory. + +Reference: https://go-review.googlesource.com/c/go/+/709861 +Conflict: no + +Since a malicious input can be highly compressable, a small +compressed input could cause very large allocations. + +Cap the size of the sparse block data to the same limit used +for PAX headers (1 MiB). + +Thanks to Harshit Gupta (Mr HAX) (https://www.linkedin.com/in/iam-harshit-gupta/) +for reporting this issue. 
+ +Fixes CVE-2025-58183 +Fixes #75677 + +Change-Id: I70b907b584a7b8676df8a149a1db728ae681a770 +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2800 +Reviewed-by: Roland Shoemaker +Reviewed-by: Nicholas Husin +Reviewed-on: https://go-review.googlesource.com/c/go/+/709861 +Auto-Submit: Michael Pratt +TryBot-Bypass: Michael Pratt +Reviewed-by: Carlos Amedee +--- + src/archive/tar/common.go | 1 + + src/archive/tar/reader.go | 9 +++++++-- + 2 files changed, 8 insertions(+), 2 deletions(-) + +diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go +index dc9d350..da1511b 100644 +--- a/src/archive/tar/common.go ++++ b/src/archive/tar/common.go +@@ -38,6 +38,7 @@ var ( + errMissData = errors.New("archive/tar: sparse file references non-existent data") + errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") + errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") ++ errSparseTooLong = errors.New("archive/tar: sparse map too long") + ) + + type headerError []string +diff --git a/src/archive/tar/reader.go b/src/archive/tar/reader.go +index cfa5044..7ab9c3a 100644 +--- a/src/archive/tar/reader.go ++++ b/src/archive/tar/reader.go +@@ -531,12 +531,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + cntNewline int64 + buf bytes.Buffer + blk block ++ totalSize int + ) + + // feedTokens copies data in blocks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + feedTokens := func(n int64) error { + for cntNewline < n { ++ totalSize += len(blk) ++ if totalSize > maxSpecialFileSize { ++ return errSparseTooLong ++ } + if _, err := mustReadFull(r, blk[:]); err != nil { + return err + } +@@ -569,8 +574,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { + } + + // Parse for all member entries. 
+- // numEntries is trusted after this since a potential attacker must have +- // committed resources proportional to what this library used. ++ // numEntries is trusted after this since feedTokens limits the number of ++ // tokens based on maxSpecialFileSize. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } +-- +2.43.0 + diff --git a/backport-0038-CVE-2025-58189-crypto-tls-quote-protocols-in.patch b/backport-0038-CVE-2025-58189-crypto-tls-quote-protocols-in.patch new file mode 100644 index 0000000..28a88a0 --- /dev/null +++ b/backport-0038-CVE-2025-58189-crypto-tls-quote-protocols-in.patch @@ -0,0 +1,44 @@ +From 4e9006a716533fe1c7ee08df02dfc73078f7dc19 Mon Sep 17 00:00:00 2001 +From: Roland Shoemaker +Date: Mon, 29 Sep 2025 10:11:56 -0700 +Subject: [PATCH] crypto/tls: quote protocols in ALPN + error message + +Quote the protocols sent by the client when returning the ALPN +negotiation error message. + +Reference: https://go-review.googlesource.com/c/go/+/707776 +Conclict: no + +Fixes CVE-2025-58189 +Fixes #75652 + +Change-Id: Ie7b3a1ed0b6efcc1705b71f0f1e8417126661330 +Reviewed-on: https://go-review.googlesource.com/c/go/+/707776 +Auto-Submit: Roland Shoemaker +Reviewed-by: Neal Patel +Reviewed-by: Nicholas Husin +Auto-Submit: Nicholas Husin +Reviewed-by: Nicholas Husin +TryBot-Bypass: Roland Shoemaker +Reviewed-by: Daniel McCarney +--- + src/crypto/tls/handshake_server.go | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/src/crypto/tls/handshake_server.go b/src/crypto/tls/handshake_server.go +index 7c75977..6aebb74 100644 +--- a/src/crypto/tls/handshake_server.go ++++ b/src/crypto/tls/handshake_server.go +@@ -338,7 +338,7 @@ func negotiateALPN(serverProtos, clientProtos []string, quic bool) (string, erro + if http11fallback { + return "", nil + } +- return "", fmt.Errorf("tls: client requested unsupported application protocols (%s)", clientProtos) ++ return "", fmt.Errorf("tls: client requested unsupported application 
protocols (%q)", clientProtos) + } + + // supportsECDHE returns whether ECDHE key exchanges can be used with this +-- +2.43.0 + diff --git a/backport-0039-CVE-2025-61724-net-textproto-avoid-quadratic.patch b/backport-0039-CVE-2025-61724-net-textproto-avoid-quadratic.patch new file mode 100644 index 0000000..35021cc --- /dev/null +++ b/backport-0039-CVE-2025-61724-net-textproto-avoid-quadratic.patch @@ -0,0 +1,70 @@ +From 5ede095649db7783726c28390812bca9ce2c684a Mon Sep 17 00:00:00 2001 +From: Damien Neil +Date: Tue, 30 Sep 2025 15:11:16 -0700 +Subject: [PATCH] net/textproto: avoid quadratic complexity in + Reader.ReadResponse + +Reader.ReadResponse constructed a response string from repeated +string concatenation, permitting a malicious sender to cause excessive +memory allocation and CPU consumption by sending a response consisting +of many short lines. + +Reference: https://go-review.googlesource.com/c/go/+/709859 +Conclict: no + +Use a strings.Builder to construct the string instead. + +Thanks to Jakub Ciolek for reporting this issue. + +Fixes CVE-2025-61724 +Fixes #75716 + +Change-Id: I1a98ce85a21b830cb25799f9ac9333a67400d736 +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2940 +Reviewed-by: Roland Shoemaker +Reviewed-by: Nicholas Husin +Reviewed-on: https://go-review.googlesource.com/c/go/+/709859 +TryBot-Bypass: Michael Pratt +Auto-Submit: Michael Pratt +Reviewed-by: Carlos Amedee +--- + src/net/textproto/reader.go | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go +index f98e05b..2c926ca 100644 +--- a/src/net/textproto/reader.go ++++ b/src/net/textproto/reader.go +@@ -284,8 +284,10 @@ func (r *Reader) ReadCodeLine(expectCode int) (code int, message string, err err + // + // An expectCode <= 0 disables the check of the status code. 
+ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err error) { +- code, continued, message, err := r.readCodeLine(expectCode) ++ code, continued, first, err := r.readCodeLine(expectCode) + multi := continued ++ var messageBuilder strings.Builder ++ messageBuilder.WriteString(first) + for continued { + line, err := r.ReadLine() + if err != nil { +@@ -296,12 +298,15 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err + var moreMessage string + code2, continued, moreMessage, err = parseCodeLine(line, 0) + if err != nil || code2 != code { +- message += "\n" + strings.TrimRight(line, "\r\n") ++ messageBuilder.WriteByte('\n') ++ messageBuilder.WriteString(strings.TrimRight(line, "\r\n")) + continued = true + continue + } +- message += "\n" + moreMessage ++ messageBuilder.WriteByte('\n') ++ messageBuilder.WriteString(moreMessage) + } ++ message = messageBuilder.String() + if err != nil && multi && message != "" { + // replace one line error message with all lines (full message) + err = &Error{code, message} +-- +2.43.0 + diff --git a/backport-0040-CVE-2025-58188-crypto-x509-mitigate-Dos-vector.patch b/backport-0040-CVE-2025-58188-crypto-x509-mitigate-Dos-vector.patch new file mode 100644 index 0000000..15914d2 --- /dev/null +++ b/backport-0040-CVE-2025-58188-crypto-x509-mitigate-Dos-vector.patch @@ -0,0 +1,193 @@ +From 6e4007e8cffbb870e6b606307ab7308236ecefb9 Mon Sep 17 00:00:00 2001 +From: Neal Patel +Date: Thu, 11 Sep 2025 16:27:04 -0400 +Subject: [PATCH] crypto/x509: mitigate DoS vector when + intermediate certificate contains DSA public key + +An attacker could craft an intermediate X.509 certificate +containing a DSA public key and can crash a remote host +with an unauthenticated call to any endpoint that +verifies the certificate chain. + +Reference: https://go-review.googlesource.com/c/go/+/709853 +Conclict: no + +Thank you to Jakub Ciolek for reporting this issue. 
+ +Fixes CVE-2025-58188 +Fixes #75675 + +Change-Id: I2ecbb87b9b8268dbc55c8795891e596ab60f0088 +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2780 +Reviewed-by: Damien Neil +Reviewed-by: Roland Shoemaker +Reviewed-on: https://go-review.googlesource.com/c/go/+/709853 +Reviewed-by: Carlos Amedee +Auto-Submit: Michael Pratt +LUCI-TryBot-Result: Go LUCI +--- + src/crypto/x509/verify.go | 5 +- + src/crypto/x509/verify_test.go | 127 +++++++++++++++++++++++++++++++++ + 2 files changed, 131 insertions(+), 1 deletion(-) + +diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go +index 1da13d8..c49e233 100644 +--- a/src/crypto/x509/verify.go ++++ b/src/crypto/x509/verify.go +@@ -867,7 +867,10 @@ func alreadyInChain(candidate *Certificate, chain []*Certificate) bool { + if !bytes.Equal(candidate.RawSubject, cert.RawSubject) { + continue + } +- if !candidate.PublicKey.(pubKeyEqual).Equal(cert.PublicKey) { ++ // We enforce the canonical encoding of SPKI (by only allowing the ++ // correct AI paremeter encodings in parseCertificate), so it's safe to ++ // directly compare the raw bytes. 
++ if !bytes.Equal(candidate.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { + continue + } + var certSAN *pkix.Extension +diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go +index d8678d0..a003422 100644 +--- a/src/crypto/x509/verify_test.go ++++ b/src/crypto/x509/verify_test.go +@@ -6,6 +6,7 @@ package x509 + + import ( + "crypto" ++ "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" +@@ -2712,3 +2713,129 @@ func TestVerifyNilPubKey(t *testing.T) { + t.Fatalf("buildChains returned unexpected error, got: %v, want %v", err, UnknownAuthorityError{}) + } + } ++ ++func TestCertificateChainSignedByECDSA(t *testing.T) { ++ caKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) ++ if err != nil { ++ t.Fatal(err) ++ } ++ root := &Certificate{ ++ SerialNumber: big.NewInt(1), ++ Subject: pkix.Name{CommonName: "X"}, ++ NotBefore: time.Now().Add(-time.Hour), ++ NotAfter: time.Now().Add(365 * 24 * time.Hour), ++ IsCA: true, ++ KeyUsage: KeyUsageCertSign | KeyUsageCRLSign, ++ BasicConstraintsValid: true, ++ } ++ caDER, err := CreateCertificate(rand.Reader, root, root, &caKey.PublicKey, caKey) ++ if err != nil { ++ t.Fatal(err) ++ } ++ root, err = ParseCertificate(caDER) ++ if err != nil { ++ t.Fatal(err) ++ } ++ ++ leafKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) ++ leaf := &Certificate{ ++ SerialNumber: big.NewInt(42), ++ Subject: pkix.Name{CommonName: "leaf"}, ++ NotBefore: time.Now().Add(-10 * time.Minute), ++ NotAfter: time.Now().Add(24 * time.Hour), ++ KeyUsage: KeyUsageDigitalSignature, ++ ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, ++ BasicConstraintsValid: true, ++ } ++ leafDER, err := CreateCertificate(rand.Reader, leaf, root, &leafKey.PublicKey, caKey) ++ if err != nil { ++ t.Fatal(err) ++ } ++ leaf, err = ParseCertificate(leafDER) ++ if err != nil { ++ t.Fatal(err) ++ } ++ ++ inter, err := ParseCertificate(dsaSelfSignedCNX(t)) ++ if err != nil { ++ t.Fatal(err) ++ } ++ ++ inters := 
NewCertPool() ++ inters.AddCert(root) ++ inters.AddCert(inter) ++ ++ wantErr := "certificate signed by unknown authority" ++ _, err = leaf.Verify(VerifyOptions{Intermediates: inters, Roots: NewCertPool()}) ++ if !strings.Contains(err.Error(), wantErr) { ++ t.Errorf("got %v, want %q", err, wantErr) ++ } ++} ++ ++// dsaSelfSignedCNX produces DER-encoded ++// certificate with the properties: ++// ++// Subject=Issuer=CN=X ++// DSA SPKI ++// Matching inner/outer signature OIDs ++// Dummy ECDSA signature ++func dsaSelfSignedCNX(t *testing.T) []byte { ++ t.Helper() ++ var params dsa.Parameters ++ if err := dsa.GenerateParameters(¶ms, rand.Reader, dsa.L1024N160); err != nil { ++ t.Fatal(err) ++ } ++ ++ var dsaPriv dsa.PrivateKey ++ dsaPriv.Parameters = params ++ if err := dsa.GenerateKey(&dsaPriv, rand.Reader); err != nil { ++ t.Fatal(err) ++ } ++ dsaPub := &dsaPriv.PublicKey ++ ++ type dsaParams struct{ P, Q, G *big.Int } ++ paramDER, err := asn1.Marshal(dsaParams{dsaPub.P, dsaPub.Q, dsaPub.G}) ++ if err != nil { ++ t.Fatal(err) ++ } ++ yDER, err := asn1.Marshal(dsaPub.Y) ++ if err != nil { ++ t.Fatal(err) ++ } ++ ++ spki := publicKeyInfo{ ++ Algorithm: pkix.AlgorithmIdentifier{ ++ Algorithm: oidPublicKeyDSA, ++ Parameters: asn1.RawValue{FullBytes: paramDER}, ++ }, ++ PublicKey: asn1.BitString{Bytes: yDER, BitLength: 8 * len(yDER)}, ++ } ++ ++ rdn := pkix.Name{CommonName: "X"}.ToRDNSequence() ++ b, err := asn1.Marshal(rdn) ++ if err != nil { ++ t.Fatal(err) ++ } ++ rawName := asn1.RawValue{FullBytes: b} ++ ++ algoIdent := pkix.AlgorithmIdentifier{Algorithm: oidSignatureDSAWithSHA256} ++ tbs := tbsCertificate{ ++ Version: 0, ++ SerialNumber: big.NewInt(1002), ++ SignatureAlgorithm: algoIdent, ++ Issuer: rawName, ++ Validity: validity{NotBefore: time.Now().Add(-time.Hour), NotAfter: time.Now().Add(24 * time.Hour)}, ++ Subject: rawName, ++ PublicKey: spki, ++ } ++ c := certificate{ ++ TBSCertificate: tbs, ++ SignatureAlgorithm: algoIdent, ++ SignatureValue: 
asn1.BitString{Bytes: []byte{0}, BitLength: 8}, ++ } ++ dsaDER, err := asn1.Marshal(c) ++ if err != nil { ++ t.Fatal(err) ++ } ++ return dsaDER ++} +-- +2.43.0 + diff --git a/backport-0041-CVE-2025-58185-encoding-asn1-prevent-memory-exhaustion.patch b/backport-0041-CVE-2025-58185-encoding-asn1-prevent-memory-exhaustion.patch new file mode 100644 index 0000000..9406edf --- /dev/null +++ b/backport-0041-CVE-2025-58185-encoding-asn1-prevent-memory-exhaustion.patch @@ -0,0 +1,150 @@ +From 8709a41d5ef7321f486a1857f189c3fee20e8edd Mon Sep 17 00:00:00 2001 +From: Nicholas Husin +Date: Wed, 03 Sep 2025 09:30:56 -0400 +Subject: [PATCH] encoding/asn1: prevent memory exhaustion when + parsing using internal/saferio + +Within parseSequenceOf, reflect.MakeSlice is being used to pre-allocate +a slice that is needed in order to fully validate the given DER payload. +The size of the slice allocated are also multiple times larger than the +input DER: + +Reference: https://go-review.googlesource.com/c/go/+/709856 +Conflict: no + +- When using asn1.Unmarshal directly, the allocated slice is ~28x + larger. +- When passing in DER using x509.ParseCertificateRequest, the allocated + slice is ~48x larger. +- When passing in DER using ocsp.ParseResponse, the allocated slice is + ~137x larger. + +As a result, a malicious actor can craft a big empty DER payload, +resulting in an unnecessary large allocation of memories. This can be a +way to cause memory exhaustion. + +To prevent this, we now use SliceCapWithSize within internal/saferio to +enforce a memory allocation cap. + +Thanks to Jakub Ciolek for reporting this issue. 
+ +For #75671 +Fixes CVE-2025-58185 + +Change-Id: Id50e76187eda43f594be75e516b9ca1d2ae6f428 +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2700 +Reviewed-by: Roland Shoemaker +Reviewed-by: Damien Neil +Reviewed-on: https://go-review.googlesource.com/c/go/+/709856 +Reviewed-by: Carlos Amedee +LUCI-TryBot-Result: Go LUCI +Auto-Submit: Michael Pratt +--- + src/encoding/asn1/asn1.go | 9 +++++++- + src/encoding/asn1/asn1_test.go | 38 ++++++++++++++++++++++++++++++++++ + src/go/build/deps_test.go | 2 +- + 3 files changed, 47 insertions(+), 2 deletions(-) + +diff --git a/src/encoding/asn1/asn1.go b/src/encoding/asn1/asn1.go +index e7bf793..f4d0dd1 100644 +--- a/src/encoding/asn1/asn1.go ++++ b/src/encoding/asn1/asn1.go +@@ -22,6 +22,7 @@ package asn1 + import ( + "errors" + "fmt" ++ "internal/saferio" + "math" + "math/big" + "reflect" +@@ -643,10 +644,16 @@ func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type + offset += t.length + numElements++ + } +- ret = reflect.MakeSlice(sliceType, numElements, numElements) ++ safeCap := saferio.SliceCap(reflect.Zero(reflect.PtrTo(elemType)).Interface(), uint64(numElements)) ++ if safeCap < 0 { ++ err = SyntaxError{fmt.Sprintf("%s slice too big: %d elements of %d bytes", elemType.Kind(), numElements, elemType.Size())} ++ return ++ } ++ ret = reflect.MakeSlice(sliceType, 0, safeCap) + params := fieldParameters{} + offset := 0 + for i := 0; i < numElements; i++ { ++ ret = reflect.Append(ret, reflect.Zero(elemType)) + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return +diff --git a/src/encoding/asn1/asn1_test.go b/src/encoding/asn1/asn1_test.go +index 9a605e2..249d4e4 100644 +--- a/src/encoding/asn1/asn1_test.go ++++ b/src/encoding/asn1/asn1_test.go +@@ -7,10 +7,12 @@ package asn1 + import ( + "bytes" + "encoding/hex" ++ "errors" + "fmt" + "math" + "math/big" + "reflect" ++ "runtime" + "strings" + "testing" + "time" +@@ -1175,3 +1177,39 @@ func 
BenchmarkObjectIdentifierString(b *testing.B) { + _ = oidPublicKeyRSA.String() + } + } ++ ++func TestParsingMemoryConsumption(t *testing.T) { ++ // Craft a syntatically valid, but empty, ~10 MB DER bomb. A successful ++ // unmarshal of this bomb should yield ~280 MB. However, the parsing should ++ // fail due to the empty content; and, in such cases, we want to make sure ++ // that we do not unnecessarily allocate memories. ++ derBomb := make([]byte, 10_000_000) ++ for i := range derBomb { ++ derBomb[i] = 0x30 ++ } ++ derBomb = append([]byte{0x30, 0x83, 0x98, 0x96, 0x80}, derBomb...) ++ ++ var m runtime.MemStats ++ runtime.GC() ++ runtime.ReadMemStats(&m) ++ memBefore := m.TotalAlloc ++ ++ var out []struct { ++ Id []int ++ Critical bool `asn1:"optional"` ++ Value []byte ++ } ++ _, err := Unmarshal(derBomb, &out) ++ if !errors.As(err, &SyntaxError{}) { ++ t.Fatalf("Incorrect error result: want (%v), but got (%v) instead", &SyntaxError{}, err) ++ } ++ ++ runtime.ReadMemStats(&m) ++ memDiff := m.TotalAlloc - memBefore ++ ++ // Ensure that the memory allocated does not exceed 10<<21 (~20 MB) when ++ // the parsing fails. ++ if memDiff > 10<<21 { ++ t.Errorf("Too much memory allocated while parsing DER: %v MiB", memDiff/1024/1024) ++ } ++} +diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go +index 592f2fd..36b3a38 100644 +--- a/src/go/build/deps_test.go ++++ b/src/go/build/deps_test.go +@@ -457,7 +457,7 @@ var depsRules = ` + CGO, fmt, net !< CRYPTO; + + # CRYPTO-MATH is core bignum-based crypto - no cgo, net; fmt now ok. 
+- CRYPTO, FMT, math/big ++ CRYPTO, FMT, math/big, internal/saferio + < crypto/internal/boring/bbig + < crypto/rand + < crypto/ed25519 +-- +2.43.0 + diff --git a/backport-0042-CVE-2025-58187-crypto-x509-improve-domain-name-verification.patch b/backport-0042-CVE-2025-58187-crypto-x509-improve-domain-name-verification.patch new file mode 100644 index 0000000..543bc56 --- /dev/null +++ b/backport-0042-CVE-2025-58187-crypto-x509-improve-domain-name-verification.patch @@ -0,0 +1,364 @@ +From 3fc4c79fdbb17b9b29ea9f8c29dd780df075d4c4 Mon Sep 17 00:00:00 2001 +From: Neal Patel +Date: Mon, 15 Sep 2025 16:31:22 -0400 +Subject: [PATCH] crypto/x509: improve domain name verification + +Don't use domainToReverseLabels to check if domain names are valid, +since it is not particularly performant, and can contribute to DoS +vectors. Instead just iterate over the name and enforce the properties +we care about. + +This also enforces that DNS names, both in SANs and name constraints, +are valid. We previously allowed invalid SANs, because some +intermediates had these weird names (see #23995), but there are +currently no trusted intermediates that have this property, and since we +target the web PKI, supporting this particular case is not a high +priority. + +Thank you to Jakub Ciolek for reporting this issue. 
+ +Fixes CVE-2025-58187 +Fixes #75681 + +Change-Id: I6ebce847dcbe5fc63ef2f9a74f53f11c4c56d3d1 +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2820 +Reviewed-by: Damien Neil +Reviewed-by: Roland Shoemaker +Reviewed-on: https://go-review.googlesource.com/c/go/+/709854 +Auto-Submit: Michael Pratt +Reviewed-by: Carlos Amedee +LUCI-TryBot-Result: Go LUCI + +Backported-by: zhaoyifan +Reference: https://go-review.googlesource.com/c/go/+/709854 +Conflict: parser.go +--- + src/crypto/x509/name_constraints_test.go | 68 ++---------------- + src/crypto/x509/parser.go | 90 +++++++++++++++++------- + src/crypto/x509/parser_test.go | 37 ++++++++++ + src/crypto/x509/verify.go | 1 + + 4 files changed, 110 insertions(+), 86 deletions(-) + +diff --git a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go +index f73eeca..e1c5f2a 100644 +--- a/src/crypto/x509/name_constraints_test.go ++++ b/src/crypto/x509/name_constraints_test.go +@@ -1456,63 +1456,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + expectedError: "incompatible key usage", + }, + +- // An invalid DNS SAN should be detected only at validation time so +- // that we can process CA certificates in the wild that have invalid SANs. +- // See https://github.com/golang/go/issues/23995 +- +- // #77: an invalid DNS or mail SAN will not be detected if name constraint +- // checking is not triggered. +- { +- roots: make([]constraintsSpec, 1), +- intermediates: [][]constraintsSpec{ +- { +- {}, +- }, +- }, +- leaf: leafSpec{ +- sans: []string{"dns:this is invalid", "email:this @ is invalid"}, +- }, +- }, +- +- // #78: an invalid DNS SAN will be detected if any name constraint checking +- // is triggered. 
+- { +- roots: []constraintsSpec{ +- { +- bad: []string{"uri:"}, +- }, +- }, +- intermediates: [][]constraintsSpec{ +- { +- {}, +- }, +- }, +- leaf: leafSpec{ +- sans: []string{"dns:this is invalid"}, +- }, +- expectedError: "cannot parse dnsName", +- }, +- +- // #79: an invalid email SAN will be detected if any name constraint +- // checking is triggered. +- { +- roots: []constraintsSpec{ +- { +- bad: []string{"uri:"}, +- }, +- }, +- intermediates: [][]constraintsSpec{ +- { +- {}, +- }, +- }, +- leaf: leafSpec{ +- sans: []string{"email:this @ is invalid"}, +- }, +- expectedError: "cannot parse rfc822Name", +- }, +- +- // #80: if several EKUs are requested, satisfying any of them is sufficient. ++ // #77: if several EKUs are requested, satisfying any of them is sufficient. + { + roots: make([]constraintsSpec, 1), + intermediates: [][]constraintsSpec{ +@@ -1527,7 +1471,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection}, + }, + +- // #81: EKUs that are not asserted in VerifyOpts are not required to be ++ // #78: EKUs that are not asserted in VerifyOpts are not required to be + // nested. + { + roots: make([]constraintsSpec, 1), +@@ -1546,7 +1490,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #82: a certificate without SANs and CN is accepted in a constrained chain. ++ // #79: a certificate without SANs and CN is accepted in a constrained chain. + { + roots: []constraintsSpec{ + { +@@ -1563,7 +1507,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #83: a certificate without SANs and with a CN that does not parse as a ++ // #80: a certificate without SANs and with a CN that does not parse as a + // hostname is accepted in a constrained chain. + { + roots: []constraintsSpec{ +@@ -1582,7 +1526,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #84: a certificate with SANs and CN is accepted in a constrained chain. 
++ // #81: a certificate with SANs and CN is accepted in a constrained chain. + { + roots: []constraintsSpec{ + { +@@ -1599,7 +1543,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + cn: "foo.bar", + }, + }, +- // #86: URIs with IPv6 addresses with zones and ports are rejected ++ // #83: URIs with IPv6 addresses with zones and ports are rejected + { + roots: []constraintsSpec{ + { +diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go +index 6695212..c67fdd0 100644 +--- a/src/crypto/x509/parser.go ++++ b/src/crypto/x509/parser.go +@@ -370,6 +370,60 @@ func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error + return nil + } + ++// domainNameValid checks the structure of a DNS name or similar name from a ++// certificate. If constraint is true, it checks additional properties required ++// for name constraints. ++// ++// This enforces the property that each label of a domain has a maximum length ++// of 63 characters, and so on. See ++// https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4. ++func domainNameValid(s string, constraint bool) bool { ++ if len(s) == 0 { ++ return false ++ } ++ ++ // There must not be a trailing period, although a trailing period during ++ // matching is handled by trimming it in matchDomainConstraint and ++ // matchHostnames. ++ if s[len(s)-1] == '.' { ++ return false ++ } ++ ++ // https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4 ++ // 255 octets or less. ++ if len(s) > 253 { ++ return false ++ } ++ ++ labelStart := 0 ++ for i := 0; i < len(s); i++ { ++ c := s[i] ++ if c == '.' { ++ labelLen := i - labelStart ++ if labelLen == 0 { ++ // Empty label, invalid unless this is the first character and ++ // constraint is true, in which case it's used to match the ++ // domain and all subdomains. ++ if i != 0 || !constraint { ++ return false ++ } ++ } ++ // https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4 ++ // labels 63 octets or less. 
++ if labelLen > 63 { ++ return false ++ } ++ labelStart = i + 1 ++ } ++ } ++ // Check the last label. ++ if labelLen := len(s) - labelStart; labelLen == 0 || labelLen > 63 { ++ return false ++ } ++ ++ return true ++} ++ + func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(der, func(tag int, data []byte) error { + switch tag { +@@ -378,12 +432,18 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } ++ if _, ok := parseRFC2821Mailbox(email); !ok { ++ return fmt.Errorf("x509: cannot parse rfc822Name %q", email) ++ } + emailAddresses = append(emailAddresses, email) + case nameTypeDNS: + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } ++ if !domainNameValid(name, false) { ++ return fmt.Errorf("x509: cannot parse dnsName %q", name) ++ } + dnsNames = append(dnsNames, string(name)) + case nameTypeURI: + uriStr := string(data) +@@ -395,7 +455,9 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) + } + if len(uri.Host) > 0 { +- if _, ok := domainToReverseLabels(uri.Host); !ok { ++ if _, _, err := net.SplitHostPort(uri.Host); err == nil { ++ // Ignore the error: it's valid to have a host with a port. ++ } else if !domainNameValid(uri.Host, false) && net.ParseIP(uri.Host) == nil { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } + } +@@ -538,15 +600,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + +- trimmedDomain := domain +- if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' 
{ +- // constraints can have a leading +- // period to exclude the domain +- // itself, but that's not valid in a +- // normal domain name. +- trimmedDomain = trimmedDomain[1:] +- } +- if _, ok := domainToReverseLabels(trimmedDomain); !ok { ++ if !domainNameValid(domain, true) { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse dnsName constraint %q", domain) + } + dnsNames = append(dnsNames, domain) +@@ -588,11 +642,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle + } + } else { + // Otherwise it's a domain name. +- domain := constraint +- if len(domain) > 0 && domain[0] == '.' { +- domain = domain[1:] +- } +- if _, ok := domainToReverseLabels(domain); !ok { ++ if !domainNameValid(constraint, true) { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint) + } + } +@@ -608,15 +658,7 @@ func parseNameConstraintsExtension(out *Certificate, e pkix.Extension) (unhandle + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + +- trimmedDomain := domain +- if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { +- // constraints can have a leading +- // period to exclude the domain itself, +- // but that's not valid in a normal +- // domain name. 
+- trimmedDomain = trimmedDomain[1:] +- } +- if _, ok := domainToReverseLabels(trimmedDomain); !ok { ++ if !domainNameValid(domain, true) { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q", domain) + } + uriDomains = append(uriDomains, domain) +diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go +index b31f9cd..ede5235 100644 +--- a/src/crypto/x509/parser_test.go ++++ b/src/crypto/x509/parser_test.go +@@ -6,6 +6,7 @@ package x509 + + import ( + "encoding/asn1" ++ "strings" + "testing" + + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +@@ -101,3 +102,39 @@ func TestParseASN1String(t *testing.T) { + }) + } + } ++ ++func TestDomainNameValid(t *testing.T) { ++ tests := []struct { ++ name string ++ domain string ++ constraint bool ++ want bool ++ }{ ++ {"empty string", "", false, false}, ++ {"valid domain", "example.com", false, true}, ++ {"valid subdomain", "sub.example.com", false, true}, ++ {"trailing period", "example.com.", false, false}, ++ {"leading period non-constraint", ".example.com", false, false}, ++ {"leading period constraint", ".example.com", true, true}, ++ {"double period", "example..com", false, false}, ++ {"too long", strings.Repeat("a", 254), false, false}, ++ {"exactly 253 chars", strings.Repeat("a", 240) + ".example.com", false, true}, ++ {"label too long", strings.Repeat("a", 64) + ".example.com", false, false}, ++ {"label exactly 63 chars", strings.Repeat("a", 63) + ".example.com", false, true}, ++ {"single label", "localhost", false, true}, ++ {"only period non-constraint", ".", false, false}, ++ {"only period constraint", ".", true, false}, ++ {"multiple labels", "a.b.c.d.e.f.g", false, true}, ++ {"empty label at start", ".example", false, false}, ++ {"empty label at end", "example.", false, false}, ++ {"hyphen at start", "-example.com", false, true}, ++ } ++ ++ for _, tt := range tests { ++ t.Run(tt.name, func(t *testing.T) { ++ if got := domainNameValid(tt.domain, 
tt.constraint); got != tt.want { ++ t.Errorf("domainNameValid(%q, %v) = %v, want %v", tt.domain, tt.constraint, got, tt.want) ++ } ++ }) ++ } ++} +diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go +index c49e233..237cfeb 100644 +--- a/src/crypto/x509/verify.go ++++ b/src/crypto/x509/verify.go +@@ -360,6 +360,7 @@ func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + // domainToReverseLabels converts a textual domain name like foo.example.com to + // the list of labels in reverse order, e.g. ["com", "example", "foo"]. + func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { ++ reverseLabels = make([]string, 0, strings.Count(domain, ".")+1) + for len(domain) > 0 { + if i := strings.LastIndexByte(domain, '.'); i == -1 { + reverseLabels = append(reverseLabels, domain) +-- +2.33.0 + diff --git a/backport-0043-CVE-2025-58187-crypto-x509-rework-fix-for-CVE-2025-58187.patch b/backport-0043-CVE-2025-58187-crypto-x509-rework-fix-for-CVE-2025-58187.patch new file mode 100644 index 0000000..dcb27f4 --- /dev/null +++ b/backport-0043-CVE-2025-58187-crypto-x509-rework-fix-for-CVE-2025-58187.patch @@ -0,0 +1,538 @@ +From 1cd71689f2ed8f07031a0cc58fc3586ca501839f Mon Sep 17 00:00:00 2001 +From: Roland Shoemaker +Date: Thu, 9 Oct 2025 13:35:24 -0700 +Subject: [PATCH] crypto/x509: rework fix for CVE-2025-58187 + +In CL 709854 we enabled strict validation for a number of properties of +domain names (and their constraints). This caused significant breakage, +since we didn't previously disallow the creation of certificates which +contained these malformed domains. + +Rollback a number of the properties we enforced, making domainNameValid +only enforce the same properties that domainToReverseLabels does. Since +this also undoes some of the DoS protections our initial fix enabled, +this change also adds caching of constraints in isValid (which perhaps +is the fix we should've initially chosen). 
+ +Updates #75835 +Fixes #75828 + +Change-Id: Ie6ca6b4f30e9b8a143692b64757f7bbf4671ed0e +Reviewed-on: https://go-review.googlesource.com/c/go/+/710735 +LUCI-TryBot-Result: Go LUCI +Reviewed-by: Damien Neil + +Backported-by: zhaoyifan +Reference: https://go-review.googlesource.com/c/go/+/710735 +Conflict: parser.go parser_test.go +--- + src/crypto/x509/name_constraints_test.go | 77 ++++++++++++++++++-- + src/crypto/x509/parser.go | 89 +++++++++++------------- + src/crypto/x509/parser_test.go | 82 +++++++++++++++------- + src/crypto/x509/verify.go | 53 ++++++++++---- + src/crypto/x509/verify_test.go | 2 +- + 5 files changed, 208 insertions(+), 95 deletions(-) + +diff --git a/src/crypto/x509/name_constraints_test.go b/src/crypto/x509/name_constraints_test.go +index e1c5f2a..afc1409 100644 +--- a/src/crypto/x509/name_constraints_test.go ++++ b/src/crypto/x509/name_constraints_test.go +@@ -1456,7 +1456,63 @@ var nameConstraintsTests = []nameConstraintsTest{ + expectedError: "incompatible key usage", + }, + +- // #77: if several EKUs are requested, satisfying any of them is sufficient. ++ // An invalid DNS SAN should be detected only at validation time so ++ // that we can process CA certificates in the wild that have invalid SANs. ++ // See https://github.com/golang/go/issues/23995 ++ ++ // #77: an invalid DNS or mail SAN will not be detected if name constraint ++ // checking is not triggered. ++ { ++ roots: make([]constraintsSpec, 1), ++ intermediates: [][]constraintsSpec{ ++ { ++ {}, ++ }, ++ }, ++ leaf: leafSpec{ ++ sans: []string{"dns:this is invalid", "email:this @ is invalid"}, ++ }, ++ }, ++ ++ // #78: an invalid DNS SAN will be detected if any name constraint checking ++ // is triggered. 
++ { ++ roots: []constraintsSpec{ ++ { ++ bad: []string{"uri:"}, ++ }, ++ }, ++ intermediates: [][]constraintsSpec{ ++ { ++ {}, ++ }, ++ }, ++ leaf: leafSpec{ ++ sans: []string{"dns:this is invalid"}, ++ }, ++ expectedError: "cannot parse dnsName", ++ }, ++ ++ // #79: an invalid email SAN will be detected if any name constraint ++ // checking is triggered. ++ { ++ roots: []constraintsSpec{ ++ { ++ bad: []string{"uri:"}, ++ }, ++ }, ++ intermediates: [][]constraintsSpec{ ++ { ++ {}, ++ }, ++ }, ++ leaf: leafSpec{ ++ sans: []string{"email:this @ is invalid"}, ++ }, ++ expectedError: "cannot parse rfc822Name", ++ }, ++ ++ // #80: if several EKUs are requested, satisfying any of them is sufficient. + { + roots: make([]constraintsSpec, 1), + intermediates: [][]constraintsSpec{ +@@ -1471,7 +1527,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + requestedEKUs: []ExtKeyUsage{ExtKeyUsageClientAuth, ExtKeyUsageEmailProtection}, + }, + +- // #78: EKUs that are not asserted in VerifyOpts are not required to be ++ // #81: EKUs that are not asserted in VerifyOpts are not required to be + // nested. + { + roots: make([]constraintsSpec, 1), +@@ -1490,7 +1546,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #79: a certificate without SANs and CN is accepted in a constrained chain. ++ // #82: a certificate without SANs and CN is accepted in a constrained chain. + { + roots: []constraintsSpec{ + { +@@ -1507,7 +1563,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #80: a certificate without SANs and with a CN that does not parse as a ++ // #83: a certificate without SANs and with a CN that does not parse as a + // hostname is accepted in a constrained chain. + { + roots: []constraintsSpec{ +@@ -1526,7 +1582,7 @@ var nameConstraintsTests = []nameConstraintsTest{ + }, + }, + +- // #81: a certificate with SANs and CN is accepted in a constrained chain. ++ // #84: a certificate with SANs and CN is accepted in a constrained chain. 
+ { + roots: []constraintsSpec{ + { +@@ -1543,7 +1599,16 @@ var nameConstraintsTests = []nameConstraintsTest{ + cn: "foo.bar", + }, + }, +- // #83: URIs with IPv6 addresses with zones and ports are rejected ++ ++ // #85: .example.com is an invalid DNS name, it should not match the ++ // constraint example.com. ++ { ++ roots: []constraintsSpec{{ok: []string{"dns:example.com"}}}, ++ leaf: leafSpec{sans: []string{"dns:.example.com"}}, ++ expectedError: "cannot parse dnsName \".example.com\"", ++ }, ++ ++ // #86: URIs with IPv6 addresses with zones and ports are rejected + { + roots: []constraintsSpec{ + { +diff --git a/src/crypto/x509/parser.go b/src/crypto/x509/parser.go +index c67fdd0..4c89dab 100644 +--- a/src/crypto/x509/parser.go ++++ b/src/crypto/x509/parser.go +@@ -370,56 +370,61 @@ func forEachSAN(der cryptobyte.String, callback func(tag int, data []byte) error + return nil + } + +-// domainNameValid checks the structure of a DNS name or similar name from a +-// certificate. If constraint is true, it checks additional properties required +-// for name constraints. +-// +-// This enforces the property that each label of a domain has a maximum length +-// of 63 characters, and so on. See +-// https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4. ++// domainNameValid is an alloc-less version of the checks that ++// domainToReverseLabels does. + func domainNameValid(s string, constraint bool) bool { ++ // TODO(#75835): This function omits a number of checks which we ++ // really should be doing to enforce that domain names are valid names per ++ // RFC 1034. We previously enabled these checks, but this broke a ++ // significant number of certificates we previously considered valid, and we ++ // happily create via CreateCertificate (et al). We should enable these ++ // checks, but will need to gate them behind a GODEBUG. 
++ // ++ // I have left the checks we previously enabled, noted with "TODO(#75835)" so ++ // that we can easily re-enable them once we unbreak everyone. ++ ++ // TODO(#75835): this should only be true for constraints. + if len(s) == 0 { +- return false ++ return true + } + +- // There must not be a trailing period, although a trailing period during +- // matching is handled by trimming it in matchDomainConstraint and +- // matchHostnames. ++ // Do not allow trailing period (FQDN format is not allowed in SANs or ++ // constraints). + if s[len(s)-1] == '.' { + return false + } + +- // https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4 +- // 255 octets or less. +- if len(s) > 253 { +- return false ++ // TODO(#75835): domains must have at least one label, cannot have ++ // a leading empty label, and cannot be longer than 253 characters. ++ // if len(s) == 0 || (!constraint && s[0] == '.') || len(s) > 253 { ++ // return false ++ // } ++ ++ lastDot := -1 ++ if constraint && s[0] == '.' { ++ s = s[1:] + } + +- labelStart := 0 +- for i := 0; i < len(s); i++ { +- c := s[i] +- if c == '.' { +- labelLen := i - labelStart +- if labelLen == 0 { +- // Empty label, invalid unless this is the first character and +- // constraint is true, in which case it's used to match the +- // domain and all subdomains. +- if i != 0 || !constraint { +- return false +- } ++ for i := 0; i <= len(s); i++ { ++ if i < len(s) && (s[i] < 33 || s[i] > 126) { ++ // Invalid character. ++ return false ++ } ++ if i == len(s) || s[i] == '.' { ++ labelLen := i ++ if lastDot >= 0 { ++ labelLen -= lastDot + 1 + } +- // https://datatracker.ietf.org/doc/html/rfc1035#section-2.3.4 +- // labels 63 octets or less. +- if labelLen > 63 { ++ if labelLen == 0 { + return false + } +- labelStart = i + 1 ++ // TODO(#75835): labels cannot be longer than 63 characters. ++ // if labelLen > 63 { ++ // return false ++ // } ++ lastDot = i + } + } +- // Check the last label. 
+- if labelLen := len(s) - labelStart; labelLen == 0 || labelLen > 63 { +- return false +- } + + return true + } +@@ -432,18 +437,12 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string + if err := isIA5String(email); err != nil { + return errors.New("x509: SAN rfc822Name is malformed") + } +- if _, ok := parseRFC2821Mailbox(email); !ok { +- return fmt.Errorf("x509: cannot parse rfc822Name %q", email) +- } + emailAddresses = append(emailAddresses, email) + case nameTypeDNS: + name := string(data) + if err := isIA5String(name); err != nil { + return errors.New("x509: SAN dNSName is malformed") + } +- if !domainNameValid(name, false) { +- return fmt.Errorf("x509: cannot parse dnsName %q", name) +- } + dnsNames = append(dnsNames, string(name)) + case nameTypeURI: + uriStr := string(data) +@@ -454,12 +453,8 @@ func parseSANExtension(der cryptobyte.String) (dnsNames, emailAddresses []string + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", uriStr, err) + } +- if len(uri.Host) > 0 { +- if _, _, err := net.SplitHostPort(uri.Host); err == nil { +- // Ignore the error: it's valid to have a host with a port. 
+- } else if !domainNameValid(uri.Host, false) && net.ParseIP(uri.Host) == nil { +- return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) +- } ++ if len(uri.Host) > 0 && !domainNameValid(uri.Host, false) { ++ return fmt.Errorf("x509: cannot parse URI %q: invalid domain", uriStr) + } + uris = append(uris, uri) + case nameTypeIP: +diff --git a/src/crypto/x509/parser_test.go b/src/crypto/x509/parser_test.go +index ede5235..d322d59 100644 +--- a/src/crypto/x509/parser_test.go ++++ b/src/crypto/x509/parser_test.go +@@ -104,37 +104,67 @@ func TestParseASN1String(t *testing.T) { + } + + func TestDomainNameValid(t *testing.T) { +- tests := []struct { ++ for _, tc := range []struct { + name string +- domain string ++ dnsName string + constraint bool +- want bool ++ valid bool + }{ +- {"empty string", "", false, false}, +- {"valid domain", "example.com", false, true}, +- {"valid subdomain", "sub.example.com", false, true}, +- {"trailing period", "example.com.", false, false}, +- {"leading period non-constraint", ".example.com", false, false}, +- {"leading period constraint", ".example.com", true, true}, +- {"double period", "example..com", false, false}, +- {"too long", strings.Repeat("a", 254), false, false}, +- {"exactly 253 chars", strings.Repeat("a", 240) + ".example.com", false, true}, +- {"label too long", strings.Repeat("a", 64) + ".example.com", false, false}, +- {"label exactly 63 chars", strings.Repeat("a", 63) + ".example.com", false, true}, +- {"single label", "localhost", false, true}, +- {"only period non-constraint", ".", false, false}, +- {"only period constraint", ".", true, false}, +- {"multiple labels", "a.b.c.d.e.f.g", false, true}, +- {"empty label at start", ".example", false, false}, +- {"empty label at end", "example.", false, false}, +- {"hyphen at start", "-example.com", false, true}, +- } ++ // TODO(#75835): these tests are for stricter name validation, which we ++ // had to disable. 
Once we reenable these strict checks, behind a ++ // GODEBUG, we should add them back in. ++ // {"empty name, name", "", false, false}, ++ // {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, false}, ++ // {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, false}, ++ // {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, false}, ++ // {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, false}, ++ // {"64 char single label, name", strings.Repeat("a", 64), false, false}, ++ // {"64 char single label, constraint", strings.Repeat("a", 64), true, false}, ++ // {"64 char label, name", "a." + strings.Repeat("a", 64), false, false}, ++ // {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, false}, ++ ++ // TODO(#75835): these are the inverse of the tests above, they should be removed ++ // once the strict checking is enabled. ++ {"254 char label, name", strings.Repeat("a.a", 84) + "aaa", false, true}, ++ {"254 char label, constraint", strings.Repeat("a.a", 84) + "aaa", true, true}, ++ {"253 char label, name", strings.Repeat("a.a", 84) + "aa", false, true}, ++ {"253 char label, constraint", strings.Repeat("a.a", 84) + "aa", true, true}, ++ {"64 char single label, name", strings.Repeat("a", 64), false, true}, ++ {"64 char single label, constraint", strings.Repeat("a", 64), true, true}, ++ {"64 char label, name", "a." + strings.Repeat("a", 64), false, true}, ++ {"64 char label, constraint", "a." + strings.Repeat("a", 64), true, true}, + +- for _, tt := range tests { +- t.Run(tt.name, func(t *testing.T) { +- if got := domainNameValid(tt.domain, tt.constraint); got != tt.want { +- t.Errorf("domainNameValid(%q, %v) = %v, want %v", tt.domain, tt.constraint, got, tt.want) ++ // Check we properly enforce properties of domain names. 
++ {"empty name, constraint", "", true, true}, ++ {"empty label, name", "a..a", false, false}, ++ {"empty label, constraint", "a..a", true, false}, ++ {"period, name", ".", false, false}, ++ {"period, constraint", ".", true, false}, // TODO(roland): not entirely clear if this is a valid constraint (require at least one label?) ++ {"valid, name", "a.b.c", false, true}, ++ {"valid, constraint", "a.b.c", true, true}, ++ {"leading period, name", ".a.b.c", false, false}, ++ {"leading period, constraint", ".a.b.c", true, true}, ++ {"trailing period, name", "a.", false, false}, ++ {"trailing period, constraint", "a.", true, false}, ++ {"bare label, name", "a", false, true}, ++ {"bare label, constraint", "a", true, true}, ++ {"63 char single label, name", strings.Repeat("a", 63), false, true}, ++ {"63 char single label, constraint", strings.Repeat("a", 63), true, true}, ++ {"63 char label, name", "a." + strings.Repeat("a", 63), false, true}, ++ {"63 char label, constraint", "a." + strings.Repeat("a", 63), true, true}, ++ } { ++ t.Run(tc.name, func(t *testing.T) { ++ valid := domainNameValid(tc.dnsName, tc.constraint) ++ if tc.valid != valid { ++ t.Errorf("domainNameValid(%q, %t) = %v; want %v", tc.dnsName, tc.constraint, !tc.valid, tc.valid) + } + }) + } + } ++ ++func FuzzDomainNameValid(f *testing.F) { ++ f.Fuzz(func(t *testing.T, data string) { ++ domainNameValid(data, false) ++ domainNameValid(data, true) ++ }) ++} +diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go +index 237cfeb..028c4a7 100644 +--- a/src/crypto/x509/verify.go ++++ b/src/crypto/x509/verify.go +@@ -393,7 +393,7 @@ func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) { + return reverseLabels, true + } + +-func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) { ++func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { + // 
If the constraint contains an @, then it specifies an exact mailbox + // name. + if strings.Contains(constraint, "@") { +@@ -406,10 +406,10 @@ func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, erro + + // Otherwise the constraint is like a DNS constraint of the domain part + // of the mailbox. +- return matchDomainConstraint(mailbox.domain, constraint) ++ return matchDomainConstraint(mailbox.domain, constraint, reversedDomainsCache, reversedConstraintsCache) + } + +-func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { ++func matchURIConstraint(uri *url.URL, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { + // From RFC 5280, Section 4.2.1.10: + // “a uniformResourceIdentifier that does not include an authority + // component with a host name specified as a fully qualified domain +@@ -438,7 +438,7 @@ func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { + return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) + } + +- return matchDomainConstraint(host, constraint) ++ return matchDomainConstraint(host, constraint, reversedDomainsCache, reversedConstraintsCache) + } + + func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { +@@ -455,16 +455,21 @@ func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { + return true, nil + } + +-func matchDomainConstraint(domain, constraint string) (bool, error) { ++func matchDomainConstraint(domain, constraint string, reversedDomainsCache map[string][]string, reversedConstraintsCache map[string][]string) (bool, error) { + // The meaning of zero length constraints is not specified, but this + // code follows NSS and accepts them as matching everything. 
+ if len(constraint) == 0 { + return true, nil + } + +- domainLabels, ok := domainToReverseLabels(domain) +- if !ok { +- return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) ++ domainLabels, found := reversedDomainsCache[domain] ++ if !found { ++ var ok bool ++ domainLabels, ok = domainToReverseLabels(domain) ++ if !ok { ++ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) ++ } ++ reversedDomainsCache[domain] = domainLabels + } + + // RFC 5280 says that a leading period in a domain name means that at +@@ -478,9 +483,14 @@ func matchDomainConstraint(domain, constraint string) (bool, error) { + constraint = constraint[1:] + } + +- constraintLabels, ok := domainToReverseLabels(constraint) +- if !ok { +- return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) ++ constraintLabels, found := reversedConstraintsCache[constraint] ++ if !found { ++ var ok bool ++ constraintLabels, ok = domainToReverseLabels(constraint) ++ if !ok { ++ return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) ++ } ++ reversedConstraintsCache[constraint] = constraintLabels + } + + if len(domainLabels) < len(constraintLabels) || +@@ -601,6 +611,19 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V + } + } + ++ // Each time we do constraint checking, we need to check the constraints in ++ // the current certificate against all of the names that preceded it. We ++ // reverse these names using domainToReverseLabels, which is a relatively ++ // expensive operation. Since we check each name against each constraint, ++ // this requires us to do N*C calls to domainToReverseLabels (where N is the ++ // total number of names that preceed the certificate, and C is the total ++ // number of constraints in the certificate). 
By caching the results of ++ // calling domainToReverseLabels, we can reduce that to N+C calls at the ++ // cost of keeping all of the parsed names and constraints in memory until ++ // we return from isValid. ++ reversedDomainsCache := map[string][]string{} ++ reversedConstraintsCache := map[string][]string{} ++ + if (certType == intermediateCertificate || certType == rootCertificate) && + c.hasNameConstraints() { + toCheck := []*Certificate{} +@@ -621,20 +644,20 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox, + func(parsedName, constraint any) (bool, error) { +- return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string)) ++ return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil { + return err + } + + case nameTypeDNS: + name := string(data) +- if _, ok := domainToReverseLabels(name); !ok { ++ if !domainNameValid(name, false) { + return fmt.Errorf("x509: cannot parse dnsName %q", name) + } + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name, + func(parsedName, constraint any) (bool, error) { +- return matchDomainConstraint(parsedName.(string), constraint.(string)) ++ return matchDomainConstraint(parsedName.(string), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil { + return err + } +@@ -648,7 +671,7 @@ func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *V + + if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri, + func(parsedName, constraint any) (bool, error) { +- return matchURIConstraint(parsedName.(*url.URL), constraint.(string)) ++ return 
matchURIConstraint(parsedName.(*url.URL), constraint.(string), reversedDomainsCache, reversedConstraintsCache) + }, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil { + return err + } +diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go +index a003422..9480059 100644 +--- a/src/crypto/x509/verify_test.go ++++ b/src/crypto/x509/verify_test.go +@@ -1547,7 +1547,7 @@ var nameConstraintTests = []struct { + + func TestNameConstraints(t *testing.T) { + for i, test := range nameConstraintTests { +- result, err := matchDomainConstraint(test.domain, test.constraint) ++ result, err := matchDomainConstraint(test.domain, test.constraint, map[string][]string{}, map[string][]string{}) + + if err != nil && !test.expectError { + t.Errorf("unexpected error for test #%d: domain=%s, constraint=%s, err=%s", i, test.domain, test.constraint, err) +-- +2.33.0 diff --git a/backport-0044-CVE-2025-61723-encoding-pem-make-Decode-complexity-linear.patch b/backport-0044-CVE-2025-61723-encoding-pem-make-Decode-complexity-linear.patch new file mode 100644 index 0000000..254619d --- /dev/null +++ b/backport-0044-CVE-2025-61723-encoding-pem-make-Decode-complexity-linear.patch @@ -0,0 +1,220 @@ +From 5ce8cd16f3859ec5ac4106ad8ec15d6236f4501b Mon Sep 17 00:00:00 2001 +From: Roland Shoemaker +Date: Tue, 30 Sep 2025 11:16:56 -0700 +Subject: [PATCH] encoding/pem: make Decode complexity linear + +Because Decode scanned the input first for the first BEGIN line, and +then the first END line, the complexity of Decode is quadratic. If the +input contained a large number of BEGINs and then a single END right at +the end of the input, we would find the first BEGIN, and then scan the +entire input for the END, and fail to parse the block, so move onto the +next BEGIN, scan the entire input for the END, etc. + +Instead, look for the first END in the input, and then the first BEGIN +that precedes the found END. 
We then process the bytes between the BEGIN +and END, and move onto the bytes after the END for further processing. +This gives us linear complexity. + +Fixes CVE-2025-61723 +Fixes #75676 + +Change-Id: I813c4f63e78bca4054226c53e13865c781564ccf +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2921 +Reviewed-by: Nicholas Husin +Reviewed-by: Damien Neil +Reviewed-on: https://go-review.googlesource.com/c/go/+/709858 +TryBot-Bypass: Michael Pratt +Auto-Submit: Michael Pratt +Reviewed-by: Carlos Amedee + +Backported-by: zhaoyifan +Reference: https://go-review.googlesource.com/c/go/+/709858 +Conflict: no +--- + src/encoding/pem/pem.go | 67 ++++++++++++++++++++---------------- + src/encoding/pem/pem_test.go | 13 +++---- + 2 files changed, 44 insertions(+), 36 deletions(-) + +diff --git a/src/encoding/pem/pem.go b/src/encoding/pem/pem.go +index d26e4c8..610bd42 100644 +--- a/src/encoding/pem/pem.go ++++ b/src/encoding/pem/pem.go +@@ -37,7 +37,7 @@ type Block struct { + // line bytes. The remainder of the byte array (also not including the new line + // bytes) is also returned and this will always be smaller than the original + // argument. +-func getLine(data []byte) (line, rest []byte) { ++func getLine(data []byte) (line, rest []byte, consumed int) { + i := bytes.IndexByte(data, '\n') + var j int + if i < 0 { +@@ -49,7 +49,7 @@ func getLine(data []byte) (line, rest []byte) { + i-- + } + } +- return bytes.TrimRight(data[0:i], " \t"), data[j:] ++ return bytes.TrimRight(data[0:i], " \t"), data[j:], j + } + + // removeSpacesAndTabs returns a copy of its input with all spaces and tabs +@@ -90,20 +90,32 @@ func Decode(data []byte) (p *Block, rest []byte) { + // pemStart begins with a newline. However, at the very beginning of + // the byte array, we'll accept the start string without it. 
+ rest = data ++ + for { +- if bytes.HasPrefix(rest, pemStart[1:]) { +- rest = rest[len(pemStart)-1:] +- } else if _, after, ok := bytes.Cut(rest, pemStart); ok { +- rest = after +- } else { ++ // Find the first END line, and then find the last BEGIN line before ++ // the end line. This lets us skip any repeated BEGIN lines that don't ++ // have a matching END. ++ endIndex := bytes.Index(rest, pemEnd) ++ if endIndex < 0 { ++ return nil, data ++ } ++ endTrailerIndex := endIndex + len(pemEnd) ++ beginIndex := bytes.LastIndex(rest[:endIndex], pemStart[1:]) ++ if beginIndex < 0 || beginIndex > 0 && rest[beginIndex-1] != '\n' { + return nil, data + } ++ rest = rest[beginIndex+len(pemStart)-1:] ++ endIndex -= beginIndex + len(pemStart) - 1 ++ endTrailerIndex -= beginIndex + len(pemStart) - 1 + + var typeLine []byte +- typeLine, rest = getLine(rest) ++ var consumed int ++ typeLine, rest, consumed = getLine(rest) + if !bytes.HasSuffix(typeLine, pemEndOfLine) { + continue + } ++ endIndex -= consumed ++ endTrailerIndex -= consumed + typeLine = typeLine[0 : len(typeLine)-len(pemEndOfLine)] + + p = &Block{ +@@ -117,7 +129,7 @@ func Decode(data []byte) (p *Block, rest []byte) { + if len(rest) == 0 { + return nil, data + } +- line, next := getLine(rest) ++ line, next, consumed := getLine(rest) + + key, val, ok := bytes.Cut(line, colon) + if !ok { +@@ -129,21 +141,13 @@ func Decode(data []byte) (p *Block, rest []byte) { + val = bytes.TrimSpace(val) + p.Headers[string(key)] = string(val) + rest = next ++ endIndex -= consumed ++ endTrailerIndex -= consumed + } + +- var endIndex, endTrailerIndex int +- +- // If there were no headers, the END line might occur +- // immediately, without a leading newline. 
+- if len(p.Headers) == 0 && bytes.HasPrefix(rest, pemEnd[1:]) { +- endIndex = 0 +- endTrailerIndex = len(pemEnd) - 1 +- } else { +- endIndex = bytes.Index(rest, pemEnd) +- endTrailerIndex = endIndex + len(pemEnd) +- } +- +- if endIndex < 0 { ++ // If there were headers, there must be a newline between the headers ++ // and the END line, so endIndex should be >= 0. ++ if len(p.Headers) > 0 && endIndex < 0 { + continue + } + +@@ -163,21 +167,24 @@ func Decode(data []byte) (p *Block, rest []byte) { + } + + // The line must end with only whitespace. +- if s, _ := getLine(restOfEndLine); len(s) != 0 { ++ if s, _, _ := getLine(restOfEndLine); len(s) != 0 { + continue + } + +- base64Data := removeSpacesAndTabs(rest[:endIndex]) +- p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) +- n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) +- if err != nil { +- continue ++ p.Bytes = []byte{} ++ if endIndex > 0 { ++ base64Data := removeSpacesAndTabs(rest[:endIndex]) ++ p.Bytes = make([]byte, base64.StdEncoding.DecodedLen(len(base64Data))) ++ n, err := base64.StdEncoding.Decode(p.Bytes, base64Data) ++ if err != nil { ++ continue ++ } ++ p.Bytes = p.Bytes[:n] + } +- p.Bytes = p.Bytes[:n] + + // the -1 is because we might have only matched pemEnd without the + // leading newline if the PEM block was empty. 
+- _, rest = getLine(rest[endIndex+len(pemEnd)-1:]) ++ _, rest, _ = getLine(rest[endIndex+len(pemEnd)-1:]) + return p, rest + } + } +diff --git a/src/encoding/pem/pem_test.go b/src/encoding/pem/pem_test.go +index 56a7754..7025277 100644 +--- a/src/encoding/pem/pem_test.go ++++ b/src/encoding/pem/pem_test.go +@@ -34,7 +34,7 @@ var getLineTests = []GetLineTest{ + + func TestGetLine(t *testing.T) { + for i, test := range getLineTests { +- x, y := getLine([]byte(test.in)) ++ x, y, _ := getLine([]byte(test.in)) + if string(x) != test.out1 || string(y) != test.out2 { + t.Errorf("#%d got:%+v,%+v want:%s,%s", i, x, y, test.out1, test.out2) + } +@@ -46,6 +46,7 @@ func TestDecode(t *testing.T) { + if !reflect.DeepEqual(result, certificate) { + t.Errorf("#0 got:%#v want:%#v", result, certificate) + } ++ + result, remainder = Decode(remainder) + if !reflect.DeepEqual(result, privateKey) { + t.Errorf("#1 got:%#v want:%#v", result, privateKey) +@@ -68,7 +69,7 @@ func TestDecode(t *testing.T) { + } + + result, remainder = Decode(remainder) +- if result == nil || result.Type != "HEADERS" || len(result.Headers) != 1 { ++ if result == nil || result.Type != "VALID HEADERS" || len(result.Headers) != 1 { + t.Errorf("#5 expected single header block but got :%v", result) + } + +@@ -381,15 +382,15 @@ ZWAaUoVtWIQ52aKS0p19G99hhb+IVANC4akkdHV4SP8i7MVNZhfUmg== + + # This shouldn't be recognised because of the missing newline after the + headers. +------BEGIN HEADERS----- ++-----BEGIN INVALID HEADERS----- + Header: 1 +------END HEADERS----- ++-----END INVALID HEADERS----- + + # This should be valid, however. 
+------BEGIN HEADERS----- ++-----BEGIN VALID HEADERS----- + Header: 1 + +------END HEADERS-----`) ++-----END VALID HEADERS-----`) + + var certificate = &Block{Type: "CERTIFICATE", + Headers: map[string]string{}, +-- +2.33.0 + diff --git a/backport-0045-CVE-2025-47912-net-url-enforce-stricter-parsing.patch b/backport-0045-CVE-2025-47912-net-url-enforce-stricter-parsing.patch new file mode 100644 index 0000000..ab7b76d --- /dev/null +++ b/backport-0045-CVE-2025-47912-net-url-enforce-stricter-parsing.patch @@ -0,0 +1,224 @@ +From f6f4e8b3ef21299db1ea3a343c3e55e91365a7fd Mon Sep 17 00:00:00 2001 +From: Ethan Lee +Date: Fri, 29 Aug 2025 17:35:55 +0000 +Subject: [PATCH] net/url: enforce stricter parsing of + bracketed IPv6 hostnames + +- Previously, url.Parse did not enforce validation of hostnames within + square brackets. +- RFC 3986 stipulates that only IPv6 hostnames can be embedded within + square brackets in a URL. +- Now, the parsing logic should strictly enforce that only IPv6 + hostnames can be resolved when in square brackets. IPv4, IPv4-mapped + addresses and other input will be rejected. +- Update url_test to add test cases that cover the above scenarios. + +Reference: https://go-review.googlesource.com/c/go/+/709857 +Conflict: src/go/build/deps_test.go + +Thanks to Enze Wang, Jingcheng Yang and Zehui Miao of Tsinghua +University for reporting this issue. 
+ +Fixes CVE-2025-47912 +Fixes #75678 + +Change-Id: Iaa41432bf0ee86de95a39a03adae5729e4deb46c +Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2680 +Reviewed-by: Damien Neil +Reviewed-by: Roland Shoemaker +Reviewed-on: https://go-review.googlesource.com/c/go/+/709857 +TryBot-Bypass: Michael Pratt +Reviewed-by: Carlos Amedee +Auto-Submit: Michael Pratt +--- + src/go/build/deps_test.go | 10 ++++++---- + src/net/url/url.go | 42 +++++++++++++++++++++++++++++---------- + src/net/url/url_test.go | 39 ++++++++++++++++++++++++++++++++++++ + 3 files changed, 77 insertions(+), 14 deletions(-) + +diff --git a/src/go/build/deps_test.go b/src/go/build/deps_test.go +index 36b3a38..8f9c304 100644 +--- a/src/go/build/deps_test.go ++++ b/src/go/build/deps_test.go +@@ -193,7 +193,6 @@ var depsRules = ` + internal/types/errors, + mime/quotedprintable, + net/internal/socktest, +- net/url, + runtime/trace, + text/scanner, + text/tabwriter; +@@ -241,6 +240,12 @@ var depsRules = ` + FMT + < text/template/parse; + ++ internal/bytealg, internal/intern, internal/itoa, math/bits, sort, strconv ++ < net/netip; ++ ++ FMT, net/netip ++ < net/url; ++ + net/url, text/template/parse + < text/template + < internal/lazytemplate; +@@ -354,9 +359,6 @@ var depsRules = ` + internal/godebug + < internal/intern; + +- internal/bytealg, internal/intern, internal/itoa, math/bits, sort, strconv +- < net/netip; +- + # net is unavoidable when doing any networking, + # so large dependencies must be kept out. + # This is a long-looking list but most of these +diff --git a/src/net/url/url.go b/src/net/url/url.go +index 501b263..302b5ed 100644 +--- a/src/net/url/url.go ++++ b/src/net/url/url.go +@@ -14,6 +14,7 @@ import ( + "errors" + "fmt" + "path" ++ "net/netip" + "sort" + "strconv" + "strings" +@@ -614,40 +615,61 @@ func parseAuthority(authority string) (user *Userinfo, host string, err error) { + // parseHost parses host as an authority without user + // information. That is, as host[:port]. 
+ func parseHost(host string) (string, error) { +- if strings.HasPrefix(host, "[") { ++ if openBracketIdx := strings.LastIndex(host, "["); openBracketIdx != -1 { + // Parse an IP-Literal in RFC 3986 and RFC 6874. + // E.g., "[fe80::1]", "[fe80::1%25en0]", "[fe80::1]:80". +- i := strings.LastIndex(host, "]") +- if i < 0 { ++ closeBracketIdx := strings.LastIndex(host, "]") ++ if closeBracketIdx < 0 { + return "", errors.New("missing ']' in host") + } +- colonPort := host[i+1:] ++ ++ colonPort := host[closeBracketIdx+1:] + if !validOptionalPort(colonPort) { + return "", fmt.Errorf("invalid port %q after host", colonPort) + } ++ unescapedColonPort, err := unescape(colonPort, encodeHost) ++ if err != nil { ++ return "", err ++ } + ++ hostname := host[openBracketIdx+1 : closeBracketIdx] ++ var unescapedHostname string + // RFC 6874 defines that %25 (%-encoded percent) introduces + // the zone identifier, and the zone identifier can use basically + // any %-encoding it likes. That's different from the host, which + // can only %-encode non-ASCII bytes. + // We do impose some restrictions on the zone, to avoid stupidity + // like newlines. +- zone := strings.Index(host[:i], "%25") +- if zone >= 0 { +- host1, err := unescape(host[:zone], encodeHost) ++ zoneIdx := strings.Index(hostname, "%25") ++ if zoneIdx >= 0 { ++ hostPart, err := unescape(hostname[:zoneIdx], encodeHost) + if err != nil { + return "", err + } +- host2, err := unescape(host[zone:i], encodeZone) ++ zonePart, err := unescape(hostname[zoneIdx:], encodeZone) + if err != nil { + return "", err + } +- host3, err := unescape(host[i:], encodeHost) ++ unescapedHostname = hostPart + zonePart ++ } else { ++ var err error ++ unescapedHostname, err = unescape(hostname, encodeHost) + if err != nil { + return "", err + } +- return host1 + host2 + host3, nil + } ++ ++ // Per RFC 3986, only a host identified by a valid ++ // IPv6 address can be enclosed by square brackets. 
++ // This excludes any IPv4 or IPv4-mapped addresses. ++ addr, err := netip.ParseAddr(unescapedHostname) ++ if err != nil { ++ return "", fmt.Errorf("invalid host: %w", err) ++ } ++ if addr.Is4() || addr.Is4In6() { ++ return "", errors.New("invalid IPv6 host") ++ } ++ return "[" + unescapedHostname + "]" + unescapedColonPort, nil + } else if i := strings.LastIndex(host, ":"); i != -1 { + colonPort := host[i:] + if !validOptionalPort(colonPort) { +diff --git a/src/net/url/url_test.go b/src/net/url/url_test.go +index 23c5c58..d0042c9 100644 +--- a/src/net/url/url_test.go ++++ b/src/net/url/url_test.go +@@ -383,6 +383,16 @@ var urltests = []URLTest{ + }, + "", + }, ++ // valid IPv6 host with port and path ++ { ++ "https://[2001:db8::1]:8443/test/path", ++ &URL{ ++ Scheme: "https", ++ Host: "[2001:db8::1]:8443", ++ Path: "/test/path", ++ }, ++ "", ++ }, + // host subcomponent; IPv6 address with zone identifier in RFC 6874 + { + "http://[fe80::1%25en0]/", // alphanum zone identifier +@@ -707,6 +717,24 @@ var parseRequestURLTests = []struct { + // RFC 6874. 
+ {"http://[fe80::1%en0]/", false}, + {"http://[fe80::1%en0]:8080/", false}, ++ ++ // Tests exercising RFC 3986 compliance ++ {"https://[1:2:3:4:5:6:7:8]", true}, // full IPv6 address ++ {"https://[2001:db8::a:b:c:d]", true}, // compressed IPv6 address ++ {"https://[fe80::1%25eth0]", true}, // link-local address with zone ID (interface name) ++ {"https://[fe80::abc:def%254]", true}, // link-local address with zone ID (interface index) ++ {"https://[2001:db8::1]/path", true}, // compressed IPv6 address with path ++ {"https://[fe80::1%25eth0]/path?query=1", true}, // link-local with zone, path, and query ++ ++ {"https://[::ffff:192.0.2.1]", false}, ++ {"https://[:1] ", false}, ++ {"https://[1:2:3:4:5:6:7:8:9]", false}, ++ {"https://[1::1::1]", false}, ++ {"https://[1:2:3:]", false}, ++ {"https://[ffff::127.0.0.4000]", false}, ++ {"https://[0:0::test.com]:80", false}, ++ {"https://[2001:db8::test.com]", false}, ++ {"https://[test.com]", false}, + } + + func TestParseRequestURI(t *testing.T) { +@@ -1634,6 +1662,17 @@ func TestParseErrors(t *testing.T) { + {"cache_object:foo", true}, + {"cache_object:foo/bar", true}, + {"cache_object/:foo/bar", false}, ++ ++ {"http://[192.168.0.1]/", true}, // IPv4 in brackets ++ {"http://[192.168.0.1]:8080/", true}, // IPv4 in brackets with port ++ {"http://[::ffff:192.168.0.1]/", true}, // IPv4-mapped IPv6 in brackets ++ {"http://[::ffff:192.168.0.1]:8080/", true}, // IPv4-mapped IPv6 in brackets with port ++ {"http://[::ffff:c0a8:1]/", true}, // IPv4-mapped IPv6 in brackets (hex) ++ {"http://[not-an-ip]/", true}, // invalid IP string in brackets ++ {"http://[fe80::1%foo]/", true}, // invalid zone format in brackets ++ {"http://[fe80::1", true}, // missing closing bracket ++ {"http://fe80::1]/", true}, // missing opening bracket ++ {"http://[test.com]/", true}, // domain name in brackets + } + for _, tt := range tests { + u, err := Parse(tt.in) +-- +2.43.0 + diff --git a/backport-0046-CVE-2025-58186-net-http-add-httpcookiemaxnum.patch 
b/backport-0046-CVE-2025-58186-net-http-add-httpcookiemaxnum.patch new file mode 100644 index 0000000..3d2e45b --- /dev/null +++ b/backport-0046-CVE-2025-58186-net-http-add-httpcookiemaxnum.patch @@ -0,0 +1,194 @@ +From 9b9d02c5a015910ce57024788de2ff254c6cfca6 Mon Sep 17 00:00:00 2001 +From: Nicholas Husin +Date: Tue, 30 Sep 2025 14:02:38 -0400 +Subject: [PATCH] net/http: add httpcookiemaxnum GODEBUG option + to limit number of cookies parsed + +Reference: https://go-review.googlesource.com/c/go/+/709855 +Conflict: no + +When handling HTTP headers, net/http does not currently limit the number +of cookies that can be parsed. The only limitation that exists is for +the size of the entire HTTP header, which is controlled by +MaxHeaderBytes (defaults to 1 MB). + +Unfortunately, this allows a malicious actor to send HTTP headers which +contain a massive amount of small cookies, such that as much cookies as +possible can be fitted within the MaxHeaderBytes limitation. Internally, +this causes us to allocate a massive number of Cookie struct. + +For example, a 1 MB HTTP header with cookies that repeats "a=;" will +cause an allocation of ~66 MB in the heap. This can serve as a way for +malicious actors to induce memory exhaustion. + +To fix this, we will now limit the number of cookies we are willing to +parse to 3000 by default. This behavior can be changed by setting a new +GODEBUG option: GODEBUG=httpcookiemaxnum. httpcookiemaxnum can be set to +allow a higher or lower cookie limit. Setting it to 0 will also allow an +infinite number of cookies to be parsed. + +Thanks to jub0bs for reporting this issue. 
+
+For #75672
+Fixes CVE-2025-58186
+
+Change-Id: Ied58b3bc8acf5d11c880f881f36ecbf1d5d52622
+Reviewed-on: https://go-internal-review.googlesource.com/c/go/+/2720
+Reviewed-by: Roland Shoemaker
+Reviewed-by: Damien Neil
+Reviewed-on: https://go-review.googlesource.com/c/go/+/709855
+Reviewed-by: Carlos Amedee
+LUCI-TryBot-Result: Go LUCI
+Auto-Submit: Michael Pratt
+---
+ doc/godebug.md                 | 11 ++++++++
+ src/internal/godebugs/table.go |  1 +
+ src/net/http/cookie.go         | 47 +++++++++++++++++++++++++++++++++-
+ src/runtime/metrics/doc.go     |  5 ++++
+ 4 files changed, 63 insertions(+), 1 deletion(-)
+
+diff --git a/doc/godebug.md b/doc/godebug.md
+index d7fbb27..10556f3 100644
+--- a/doc/godebug.md
++++ b/doc/godebug.md
+@@ -126,6 +126,17 @@ for example,
+ see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables)
+ and the [go command documentation](/cmd/go#hdr-Build_and_test_caching).
+ 
++
++### Go 1.26
++
++Go 1.26 added a new `httpcookiemaxnum` setting that controls the maximum number
++of cookies that net/http will accept when parsing HTTP headers. If the number of
++cookies in a header exceeds the number set in `httpcookiemaxnum`, cookie parsing
++will fail early. The default value is `httpcookiemaxnum=3000`. Setting
++`httpcookiemaxnum=0` will allow the cookie parsing to accept an indefinite
++number of cookies. To avoid denial of service attacks, this setting and default
++were backported to Go 1.25.2 and Go 1.24.8.
++ + ### Go 1.23 + + Go 1.23.11 disabled build information stamping when multiple VCS are detected due +diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go +index 16b6fb3..ecb38ff 100644 +--- a/src/internal/godebugs/table.go ++++ b/src/internal/godebugs/table.go +@@ -33,6 +33,7 @@ var All = []Info{ + {Name: "http2client", Package: "net/http"}, + {Name: "http2debug", Package: "net/http", Opaque: true}, + {Name: "http2server", Package: "net/http"}, ++ {Name: "httpcookiemaxnum", Package: "net/http", Changed: 24, Old: "0"}, + {Name: "installgoroot", Package: "go/build"}, + {Name: "jstmpllitinterp", Package: "html/template"}, + //{Name: "multipartfiles", Package: "mime/multipart"}, +diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go +index 912fde6..9416c0d 100644 +--- a/src/net/http/cookie.go ++++ b/src/net/http/cookie.go +@@ -7,6 +7,7 @@ package http + import ( + "errors" + "fmt" ++ "internal/godebug" + "log" + "net" + "net/http/internal/ascii" +@@ -16,6 +17,8 @@ import ( + "time" + ) + ++var httpcookiemaxnum = godebug.New("httpcookiemaxnum") ++ + // A Cookie represents an HTTP cookie as sent in the Set-Cookie header of an + // HTTP response or the Cookie header of an HTTP request. + // +@@ -55,13 +58,40 @@ const ( + SameSiteNoneMode + ) + ++const defaultCookieMaxNum = 3000 ++ ++func cookieNumWithinMax(cookieNum int) bool { ++ withinDefaultMax := cookieNum <= defaultCookieMaxNum ++ if httpcookiemaxnum.Value() == "" { ++ return withinDefaultMax ++ } ++ if customMax, err := strconv.Atoi(httpcookiemaxnum.Value()); err == nil { ++ withinCustomMax := customMax == 0 || cookieNum <= customMax ++ if withinDefaultMax != withinCustomMax { ++ httpcookiemaxnum.IncNonDefault() ++ } ++ return withinCustomMax ++ } ++ return withinDefaultMax ++} ++ + // readSetCookies parses all "Set-Cookie" values from + // the header h and returns the successfully parsed Cookies. 
++//
++// If the number of cookies exceeds the maximum (3000 by default; configurable
++// via the httpcookiemaxnum GODEBUG option), this function will silently
++// fail and return an empty slice.
+ func readSetCookies(h Header) []*Cookie {
+ 	cookieCount := len(h["Set-Cookie"])
+ 	if cookieCount == 0 {
+ 		return []*Cookie{}
+ 	}
++	// Cookie limit was unfortunately introduced at a later point in time.
++	// As such, we can only fail by returning an empty slice rather than
++	// explicit error.
++	if !cookieNumWithinMax(cookieCount) {
++		return []*Cookie{}
++	}
+ 	cookies := make([]*Cookie, 0, cookieCount)
+ 	for _, line := range h["Set-Cookie"] {
+ 		parts := strings.Split(textproto.TrimString(line), ";")
+@@ -273,13 +303,28 @@ func (c *Cookie) Valid() error {
+ // readCookies parses all "Cookie" values from the header h and
+ // returns the successfully parsed Cookies.
+ //
+-// if filter isn't empty, only cookies of that name are returned.
++// If filter isn't empty, only cookies of that name are returned.
++//
++// If the number of cookies exceeds the maximum (3000 by default; configurable
++// via the httpcookiemaxnum GODEBUG option), this function will silently
++// fail and return an empty slice.
+ func readCookies(h Header, filter string) []*Cookie {
+ 	lines := h["Cookie"]
+ 	if len(lines) == 0 {
+ 		return []*Cookie{}
+ 	}
+ 
++	// Cookie limit was unfortunately introduced at a later point in time.
++	// As such, we can only fail by returning an empty slice rather than
++	// explicit error.
++ cookieCount := 0 ++ for _, line := range lines { ++ cookieCount += strings.Count(line, ";") + 1 ++ } ++ if !cookieNumWithinMax(cookieCount) { ++ return []*Cookie{} ++ } ++ + cookies := make([]*Cookie, 0, len(lines)+strings.Count(lines[0], ";")) + for _, line := range lines { + line = textproto.TrimString(line) +diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go +index b144a70..87f5ca9 100644 +--- a/src/runtime/metrics/doc.go ++++ b/src/runtime/metrics/doc.go +@@ -259,6 +259,11 @@ Below is the full list of supported metrics, ordered lexicographically. + The number of non-default behaviors executed by the net/http + package due to a non-default GODEBUG=http2server=... setting. + ++ /godebug/non-default-behavior/httpcookiemaxnum:events ++ The number of non-default behaviors executed by the net/http ++ package due to a non-default GODEBUG=httpcookiemaxnum=... ++ setting. ++ + /godebug/non-default-behavior/installgoroot:events + The number of non-default behaviors executed by the go/build + package due to a non-default GODEBUG=installgoroot=... setting. 
+-- +2.43.0 + diff --git a/golang.spec b/golang.spec index 0086290..c68bae3 100644 --- a/golang.spec +++ b/golang.spec @@ -69,7 +69,7 @@ Name: golang Version: 1.21.4 -Release: 42 +Release: 43 Summary: The Go Programming Language License: BSD and Public Domain URL: https://golang.org/ @@ -159,6 +159,16 @@ Patch6033: backport-0033-CVE-2025-47907-database-sql-avoid-closing-Rows-while-sc Patch6034: backport-0034-CVE-2025-47906-os-exec-fix-incorrect-expansion-of-.-and-.-in-LookPa.patch Patch6035: backport-0035-CVE-2025-4674-disable-support-for-multiple-vcs-in-one-module.patch Patch6036: backport-0036-CVE-2025-22871-net-http-reject-newlines-in-.patch +Patch6037: backport-0037-CVE-2025-58183-archive-tar-set-a-limit-on-the.patch +Patch6038: backport-0038-CVE-2025-58189-crypto-tls-quote-protocols-in.patch +Patch6039: backport-0039-CVE-2025-61724-net-textproto-avoid-quadratic.patch +Patch6040: backport-0040-CVE-2025-58188-crypto-x509-mitigate-Dos-vector.patch +Patch6041: backport-0041-CVE-2025-58185-encoding-asn1-prevent-memory-exhaustion.patch +Patch6042: backport-0042-CVE-2025-58187-crypto-x509-improve-domain-name-verification.patch +Patch6043: backport-0043-CVE-2025-58187-crypto-x509-rework-fix-for-CVE-2025-58187.patch +Patch6044: backport-0044-CVE-2025-61723-encoding-pem-make-Decode-complexity-linear.patch +Patch6045: backport-0045-CVE-2025-47912-net-url-enforce-stricter-parsing.patch +Patch6046: backport-0046-CVE-2025-58186-net-http-add-httpcookiemaxnum.patch # Part 10001-10999 %ifarch sw_64 @@ -550,6 +560,12 @@ fi %files devel -f go-tests.list -f go-misc.list -f go-src.list %changelog +* Thu Nov 20 2025 huzhangying - 1.21.4-43 +- Type:CVE +- CVE:CVE-2025-58183,CVE-2025-58189,CVE-2025-61724,CVE-2025-58188,CVE-2025-58185,CVE-2025-58187,CVE-2025-61723,CVE-2025-47912,CVE-2025-58186 +- SUG:NA +- DESC:fix CVE-2025-58183,CVE-2025-58189,CVE-2025-61724,CVE-2025-58188,CVE-2025-58185,CVE-2025-58187,CVE-2025-61723,CVE-2025-47912,CVE-2025-58186 + * Tue Nov 18 2025 huang-xiaoquan - 
1.21.4-42 - Type:Feature - CVE:NA -- Gitee