Commit 84acde1

Fix various typos and disfluencies (#1118)
1 parent fd64eaa commit 84acde1

9 files changed, +24 -22 lines

balancer.go (+1 -1)

@@ -260,7 +260,7 @@ func (b CRC32Balancer) Balance(msg Message, partitions ...int) (partition int) {
 // determine which partition to route messages to. This ensures that messages
 // with the same key are routed to the same partition. This balancer is
 // compatible with the partitioner used by the Java library and by librdkafka's
-// "murmur2" and "murmur2_random" partitioners. /
+// "murmur2" and "murmur2_random" partitioners.
 //
 // With the Consistent field false (default), this partitioner is equivalent to
 // the "murmur2_random" setting in librdkafka. When Consistent is true, this
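
The comment above documents kafka-go's murmur2-compatible key balancer. As a hedged sketch of how such a balancer is wired into a producer (broker address, topic, and keys are placeholder assumptions, not part of this commit):

    package main

    import (
        "context"
        "log"

        kafka "github.com/segmentio/kafka-go"
    )

    func main() {
        // Messages that share a key hash to the same partition, matching the
        // partitioning of the Java client and librdkafka's "murmur2" family.
        w := &kafka.Writer{
            Addr:     kafka.TCP("localhost:9092"),
            Topic:    "example-topic",
            Balancer: kafka.Murmur2Balancer{}, // Consistent false (default) behaves like "murmur2_random"
        }
        defer w.Close()

        err := w.WriteMessages(context.Background(),
            kafka.Message{Key: []byte("user-42"), Value: []byte("first")},
            kafka.Message{Key: []byte("user-42"), Value: []byte("second")}, // lands on the same partition as "first"
        )
        if err != nil {
            log.Fatal(err)
        }
    }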

compress/compress.go (+1 -1)

@@ -13,7 +13,7 @@ import (
 	"github.com/segmentio/kafka-go/compress/zstd"
 )
 
-// Compression represents the the compression applied to a record set.
+// Compression represents the compression applied to a record set.
 type Compression int8
 
 const (
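
A Compression value is typically set on a producer so that record sets are compressed before being sent; a small sketch reusing the imports from the first example (the choice of Snappy and the addresses are illustrative assumptions):

    w := &kafka.Writer{
        Addr:        kafka.TCP("localhost:9092"),
        Topic:       "example-topic",
        Compression: kafka.Snappy, // Gzip, Snappy, Lz4, and Zstd are wrapped by this package
    }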

fetch.go (+1 -1)

@@ -49,7 +49,7 @@ type FetchResponse struct {
 	Topic     string
 	Partition int
 
-	// Informations about the topic partition layout returned from the broker.
+	// Information about the topic partition layout returned from the broker.
 	//
 	// LastStableOffset requires the kafka broker to support the Fetch API in
 	// version 4 or above (otherwise the value is zero).
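
The fields documented here describe the partition layout reported alongside fetched records; a hedged sketch of reading them through the client API, reusing the imports from the first example (broker address, topic, and offsets are placeholder assumptions):

    client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

    res, err := client.Fetch(context.Background(), &kafka.FetchRequest{
        Topic:     "example-topic",
        Partition: 0,
        Offset:    0,
        MinBytes:  1,
        MaxBytes:  1000000,
    })
    if err != nil {
        log.Fatal(err)
    }
    // Partition layout information returned by the broker alongside the records.
    log.Println("high watermark:", res.HighWatermark)
    log.Println("last stable offset:", res.LastStableOffset) // zero if the broker predates Fetch API v4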

produce.go (+6 -6)

@@ -117,21 +117,21 @@ type ProduceResponse struct {
 
 	// Time at which the broker wrote the records to the topic partition.
 	//
-	// This field will be zero if the kafka broker did no support the Produce
-	// API in version 2 or above.
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 2 or above.
 	LogAppendTime time.Time
 
 	// First offset in the topic partition that the records were written to.
 	//
-	// This field will be zero if the kafka broker did no support the Produce
-	// API in version 5 or above (or if the first offset is zero).
+	// This field will be zero if the kafka broker did not support Produce API
+	// version 5 or above (or if the first offset is zero).
 	LogStartOffset int64
 
 	// If errors occurred writing specific records, they will be reported in
 	// this map.
 	//
-	// This field will always be empty if the kafka broker did no support the
-	// Produce API in version 8 or above.
+	// This field will always be empty if the kafka broker did not support
+	// Produce API version 8 or above.
 	RecordErrors map[int]error
 }
 
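The version-dependent zero values described above can be checked on the response of a produce call; a hedged sketch using the client API, reusing the imports from the first example (topic, partition, and record contents are placeholder assumptions):

    client := &kafka.Client{Addr: kafka.TCP("localhost:9092")}

    res, err := client.Produce(context.Background(), &kafka.ProduceRequest{
        Topic:        "example-topic",
        Partition:    0,
        RequiredAcks: kafka.RequireAll,
        Records: kafka.NewRecordReader(
            kafka.Record{Value: kafka.NewBytes([]byte("hello"))},
        ),
    })
    if err != nil {
        log.Fatal(err)
    }
    // Zero or empty values indicate the broker spoke an older Produce API version.
    log.Println("log append time:", res.LogAppendTime)   // zero before v2
    log.Println("log start offset:", res.LogStartOffset) // zero before v5
    for i, recErr := range res.RecordErrors {            // always empty before v8
        log.Printf("record %d failed: %v", i, recErr)
    }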

reader.go (+3 -3)

@@ -19,7 +19,7 @@ const (
 )
 
 const (
-	// defaultCommitRetries holds the number commit attempts to make
+	// defaultCommitRetries holds the number of commit attempts to make
 	// before giving up.
 	defaultCommitRetries = 3
 )
@@ -785,7 +785,7 @@ func (r *Reader) Close() error {
 // offset when called. Note that this could result in an offset being committed
 // before the message is fully processed.
 //
-// If more fine grained control of when offsets are committed is required, it
+// If more fine-grained control of when offsets are committed is required, it
 // is recommended to use FetchMessage with CommitMessages instead.
 func (r *Reader) ReadMessage(ctx context.Context) (Message, error) {
 	m, err := r.FetchMessage(ctx)
@@ -1220,7 +1220,7 @@ func (r *Reader) start(offsetsByPartition map[topicPartition]int64) {
 }
 
 // A reader reads messages from kafka and produces them on its channels, it's
-// used as an way to asynchronously fetch messages while the main program reads
+// used as a way to asynchronously fetch messages while the main program reads
 // them using the high level reader API.
 type reader struct {
 	dialer *Dialer
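
The FetchMessage/CommitMessages combination recommended in the comment above looks roughly like this (group ID, topic, and broker address are placeholder assumptions):

    package main

    import (
        "context"
        "log"

        kafka "github.com/segmentio/kafka-go"
    )

    func main() {
        r := kafka.NewReader(kafka.ReaderConfig{
            Brokers: []string{"localhost:9092"},
            GroupID: "example-group",
            Topic:   "example-topic",
        })
        defer r.Close()

        ctx := context.Background()
        for {
            // Fetch without committing so the offset only advances after the
            // message has actually been handled.
            m, err := r.FetchMessage(ctx)
            if err != nil {
                break
            }
            log.Printf("processing %s/%d@%d: %s", m.Topic, m.Partition, m.Offset, m.Value)

            if err := r.CommitMessages(ctx, m); err != nil {
                log.Fatalf("failed to commit: %v", err)
            }
        }
    }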

reader_test.go (+1 -1)

@@ -313,7 +313,7 @@ func createTopic(t *testing.T, topic string, partitions int) {
 	})
 	if err != nil {
 		if !errors.Is(err, TopicAlreadyExists) {
-			err = fmt.Errorf("creaetTopic, conn.createtTopics: %w", err)
+			err = fmt.Errorf("createTopic, conn.createTopics: %w", err)
 			t.Error(err)
 			t.FailNow()
 		}
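
The helper being fixed treats an already-existing topic as success; outside the test suite the same pattern looks roughly like this, reusing the imports from the first example plus "errors" (broker address, topic name, and sizing are placeholder assumptions, and in a real cluster the request should be sent to the controller broker):

    conn, err := kafka.Dial("tcp", "localhost:9092")
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    err = conn.CreateTopics(kafka.TopicConfig{
        Topic:             "example-topic",
        NumPartitions:     3,
        ReplicationFactor: 1,
    })
    // A topic that already exists is not treated as a failure.
    if err != nil && !errors.Is(err, kafka.TopicAlreadyExists) {
        log.Fatal(err)
    }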

transport.go (+8 -6)

@@ -60,7 +60,7 @@ type Transport struct {
 
 	// Time limit set for establishing connections to the kafka cluster. This
 	// limit includes all round trips done to establish the connections (TLS
-	// hadbhaske, SASL negotiation, etc...).
+	// handshake, SASL negotiation, etc...).
 	//
 	// Defaults to 5s.
 	DialTimeout time.Duration
@@ -150,7 +150,7 @@ func (t *Transport) CloseIdleConnections() {
 // package.
 //
 // The type of the response message will match the type of the request. For
-// exmple, if RoundTrip was called with a *fetch.Request as argument, the value
+// example, if RoundTrip was called with a *fetch.Request as argument, the value
 // returned will be of type *fetch.Response. It is safe for the program to do a
 // type assertion after checking that no error was returned.
 //
@@ -413,14 +413,16 @@ func (p *connPool) roundTrip(ctx context.Context, req Request) (Response, error)
 	case *meta.Response:
 		m := req.(*meta.Request)
 		// If we get here with allow auto topic creation then
-		// we didn't have that topic in our cache so we should update
+		// we didn't have that topic in our cache, so we should update
 		// the cache.
 		if m.AllowAutoTopicCreation {
 			topicsToRefresh := make([]string, 0, len(resp.Topics))
 			for _, topic := range resp.Topics {
-				// fixes issue 806: don't refresh topics that failed to create,
-				// it may means kafka doesn't enable auto topic creation.
-				// This causes the library to hang indefinitely, same as createtopics process.
+				// Don't refresh topics that failed to create, since that may
+				// mean that enable automatic topic creation is not enabled.
+				// That causes the library to hang indefinitely, same as
+				// don't refresh topics that failed to create,
+				// createtopics process. Fixes issue 806.
 				if topic.ErrorCode != 0 {
 					continue
 				}
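
The DialTimeout documented above bounds the whole connection setup, and the response type returned by the transport mirrors the request type; a hedged sketch of configuring it behind a client, reusing the imports from the first example plus "time" (addresses and timeout values are illustrative assumptions, TLS and SASL omitted):

    transport := &kafka.Transport{
        DialTimeout: 5 * time.Second,  // covers TCP connect, TLS handshake, SASL negotiation
        IdleTimeout: 30 * time.Second, // how long pooled connections may remain idle
    }

    client := &kafka.Client{
        Addr:      kafka.TCP("localhost:9092"),
        Transport: transport,
    }

    // Requests issued through the client are carried by the transport; a
    // metadata request yields a metadata response.
    meta, err := client.Metadata(context.Background(), &kafka.MetadataRequest{})
    if err != nil {
        log.Fatal(err)
    }
    log.Println("brokers:", len(meta.Brokers))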

writer.go (+1 -1)

@@ -533,7 +533,7 @@ func (w *Writer) enter() bool {
 // completed.
 func (w *Writer) leave() { w.group.Done() }
 
-// spawn starts an new asynchronous operation on the writer. This method is used
+// spawn starts a new asynchronous operation on the writer. This method is used
 // instead of starting goroutines inline to help manage the state of the
 // writer's wait group. The wait group is used to block Close calls until all
 // inflight operations have completed, therefore automatically including those
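
The comment describes a common Go pattern: a wait group tracks every spawned goroutine so Close can block until all in-flight work is done. A minimal, generic sketch of that pattern (an illustration only, not the writer's actual implementation; needs the "sync" package):

    // spawner tracks asynchronous operations with a wait group so that
    // Close blocks until every spawned goroutine has finished.
    type spawner struct {
        group sync.WaitGroup
    }

    func (s *spawner) spawn(op func()) {
        s.group.Add(1)
        go func() {
            defer s.group.Done()
            op()
        }()
    }

    func (s *spawner) Close() {
        s.group.Wait() // returns once all in-flight operations have completed
    }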

writer_test.go (+2 -2)

@@ -131,7 +131,7 @@ func TestWriter(t *testing.T) {
 		},
 
 		{
-			scenario: "writing messsages with a small batch byte size",
+			scenario: "writing messages with a small batch byte size",
 			function: testWriterSmallBatchBytes,
 		},
 		{
@@ -159,7 +159,7 @@ func TestWriter(t *testing.T) {
 			function: testWriterInvalidPartition,
 		},
 		{
-			scenario: "writing a message to a non-existant topic creates the topic",
+			scenario: "writing a message to a non-existent topic creates the topic",
 			function: testWriterAutoCreateTopic,
 		},
 		{
