Skip to content

Commit 919aecc

Browse files
committed
fix data-channel-write error wrap to preserve context cancellation identity
pkg/handlers/default.go:139 wrapped the CancellableWrite failure with %v, which dropped context.DeadlineExceeded / context.Canceled from the errors.Is chain on the returned ErrProcessingFatal. The downstream measureLatencyAndHandleErrors classifier uses errors.Is(err, context.DeadlineExceeded) to increment the timeout metric and apply the "error processing chunk" framing; with %v that branch was unreachable for this failure path, causing the timeout metric to under-count and the chunk-processing framing to be missing. Switching to %w preserves both the outer ErrProcessingFatal wrap (so isFatal still classifies correctly) and the inner ctx.Err() identity (so the timeout classifier fires). Multiple %w verbs in a single fmt.Errorf are supported on Go 1.20+; this repo is on Go 1.24. Same shape as #4929's fix at line 137 but feeds a different classifier (metric correctness rather than control flow). Adds a regression test that constructs a dead context, drives a chunk error through handleNonArchiveContent against an unbuffered output channel to force CancellableWrite to fail, and asserts both the outer ErrProcessingFatal wrap and the inner ctx.Err() identity are preserved for both DeadlineExceeded and Canceled.
1 parent cf31c26 commit 919aecc

2 files changed

Lines changed: 66 additions & 1 deletion

File tree

pkg/handlers/default.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -136,7 +136,7 @@ func (h *defaultHandler) handleNonArchiveContent(
136136
h.metrics.incErrors()
137137
dataOrErr.Err = fmt.Errorf("%w: error reading chunk: %v", ErrProcessingWarning, err)
138138
if writeErr := common.CancellableWrite(ctx, dataOrErrChan, dataOrErr); writeErr != nil {
139-
return fmt.Errorf("%w: error writing to data channel: %v", ErrProcessingFatal, writeErr)
139+
return fmt.Errorf("%w: error writing to data channel: %w", ErrProcessingFatal, writeErr)
140140
}
141141
continue
142142
}

pkg/handlers/default_test.go

Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,8 @@
11
package handlers
22

33
import (
4+
stdctx "context"
5+
"errors"
46
"os"
57
"strings"
68
"testing"
@@ -117,3 +119,66 @@ func TestHandleFileLineNumbers(t *testing.T) {
117119
assert.Equal(t, int64(11), results[1].LineNumber, "peek data should not be counted")
118120
})
119121
}
122+
123+
// TestDefaultHandler_DataChannelWriteErrorPreservesIdentity is a regression test
124+
// for the %v wrap at default.go:139. When CancellableWrite fails because the
125+
// handler context terminated mid-processing, the returned error must preserve
126+
// the underlying ctx.Err() identity. measureLatencyAndHandleErrors uses
127+
// errors.Is(err, context.DeadlineExceeded) to drive the timeout metric and the
128+
// "error processing chunk" framing; with %v that branch is unreachable for the
129+
// data-channel write failure path.
130+
func TestDefaultHandler_DataChannelWriteErrorPreservesIdentity(t *testing.T) {
131+
cases := []struct {
132+
name string
133+
makeCtx func() (stdctx.Context, stdctx.CancelFunc)
134+
want error
135+
}{
136+
{
137+
name: "DeadlineExceeded preserved through writeErr wrap",
138+
makeCtx: func() (stdctx.Context, stdctx.CancelFunc) {
139+
return stdctx.WithDeadline(stdctx.Background(), time.Now().Add(-time.Second))
140+
},
141+
want: stdctx.DeadlineExceeded,
142+
},
143+
{
144+
name: "Canceled preserved through writeErr wrap",
145+
makeCtx: func() (stdctx.Context, stdctx.CancelFunc) {
146+
ctx, cancel := stdctx.WithCancel(stdctx.Background())
147+
cancel()
148+
return ctx, cancel
149+
},
150+
want: stdctx.Canceled,
151+
},
152+
}
153+
154+
for _, tc := range cases {
155+
t.Run(tc.name, func(t *testing.T) {
156+
deadCtx, cancel := tc.makeCtx()
157+
defer cancel()
158+
ctx := context.AddLogger(deadCtx)
159+
160+
// Any non-nil error works here; what matters is that the chunk-error
161+
// branch runs and reaches CancellableWrite, which then fails because
162+
// ctx is already terminal.
163+
chunks := []sources.ChunkResult{sources.NewChunkResultError(errors.New("simulated chunk read failure"))}
164+
handler := newDefaultHandler(defaultHandlerType, withChunkReader(mockChunkReader(chunks)))
165+
166+
reader, err := newFileReader(context.Background(), strings.NewReader("ignored"))
167+
require.NoError(t, err)
168+
defer reader.Close()
169+
170+
// Unbuffered channel forces CancellableWrite to consult ctx.Done() and
171+
// return ctx.Err() since no reader is draining.
172+
dataOrErrChan := make(chan DataOrErr)
173+
174+
err = handler.handleNonArchiveContent(ctx, newMimeTypeReaderFromFileReader(reader), dataOrErrChan)
175+
require.Error(t, err)
176+
177+
assert.True(t, errors.Is(err, ErrProcessingFatal),
178+
"outer ErrProcessingFatal wrap must be preserved so isFatal classifies the failure")
179+
assert.True(t, errors.Is(err, tc.want),
180+
"inner ctx.Err() must remain inspectable so measureLatencyAndHandleErrors "+
181+
"can correctly increment the timeout metric")
182+
})
183+
}
184+
}

0 commit comments

Comments
 (0)