@@ -1,11 +1,16 @@
 package kafka
 
 import (
+	"bytes"
 	"context"
+	"io"
+	"math/rand"
 	"net"
 	"sync"
 	"testing"
 	"time"
+
+	"github.com/segmentio/kafka-go/compress"
 )
 
 func newLocalClientAndTopic() (*Client, string, func()) {
@@ -183,3 +188,115 @@ func testConsumerGroupFetchOffsets(t *testing.T, ctx context.Context, c *Client)
 		}
 	}
 }
+
+func TestClientProduceAndConsume(t *testing.T) {
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	// Tests a typical kafka use case: data is produced to a partition, then
+	// consumed back sequentially. We use snappy compression because kafka
+	// streams are often compressed, and verify that each record produced is
+	// exposed to the consumer and that order is preserved.
+	client, topic, shutdown := newLocalClientAndTopic()
+	defer shutdown()
+
+	epoch := time.Now()
+	seed := int64(0) // deterministic
+	prng := rand.New(rand.NewSource(seed))
+	offset := int64(0)
+
+	const numBatches = 100
+	const recordsPerBatch = 320
+	t.Logf("producing %d batches of %d records...", numBatches, recordsPerBatch)
+
+	for i := 0; i < numBatches; i++ { // produce 100 batches
+		records := make([]Record, recordsPerBatch)
+
+		for i := range records {
+			v := make([]byte, prng.Intn(999)+1)
+			io.ReadFull(prng, v)
+			records[i].Time = epoch
+			records[i].Value = NewBytes(v)
+		}
+
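+		// RequiredAcks of -1 (acks=all) makes the broker wait for the full set
+		// of in-sync replicas to acknowledge each batch before responding.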
+		res, err := client.Produce(ctx, &ProduceRequest{
+			Topic:        topic,
+			Partition:    0,
+			RequiredAcks: -1,
+			Records:      NewRecordReader(records...),
+			Compression:  compress.Snappy,
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.Error != nil {
+			t.Fatal(res.Error)
+		}
+		if res.BaseOffset != offset {
+			t.Fatalf("records were produced at an unexpected offset, want %d but got %d", offset, res.BaseOffset)
+		}
+		offset += int64(len(records))
+	}
+
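+	// Reset the PRNG to the same seed so the consumer side regenerates the
+	// exact payloads produced above and can compare them byte-for-byte.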
+	prng.Seed(seed)
+	offset = 0 // reset
+	numFetches := 0
+	numRecords := 0
+
+	for numRecords < (numBatches * recordsPerBatch) {
+		res, err := client.Fetch(ctx, &FetchRequest{
+			Topic:     topic,
+			Partition: 0,
+			Offset:    offset,
+			MinBytes:  1,
+			MaxBytes:  256 * 1024,
+			MaxWait:   100 * time.Millisecond, // should only hit on the last fetch
+		})
+		if err != nil {
+			t.Fatal(err)
+		}
+		if res.Error != nil {
+			t.Fatal(res.Error)
+		}
+
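+		// Drain every record from this fetch response; the record reader
+		// returns io.EOF once the response is exhausted.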
+		for {
+			r, err := res.Records.ReadRecord()
+			if err != nil {
+				if err != io.EOF {
+					t.Fatal(err)
+				}
+				break
+			}
+
+			if r.Key != nil {
+				r.Key.Close()
+				t.Error("unexpected non-null key on record at offset", r.Offset)
+			}
+
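+			// Regenerate the expected payload for this record from the reseeded
+			// PRNG, then compare it against the value read from the fetch.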
+			n := prng.Intn(999) + 1
+			a := make([]byte, n)
+			b := make([]byte, n)
+			io.ReadFull(prng, a)
+
+			_, err = io.ReadFull(r.Value, b)
+			r.Value.Close()
+			if err != nil {
+				t.Fatalf("reading record at offset %d: %v", r.Offset, err)
+			}
+
+			if !bytes.Equal(a, b) {
+				t.Fatalf("value mismatch for record at offset %d", r.Offset)
+			}
+
+			if r.Offset != offset {
+				t.Fatalf("record read at offset %d, expected offset %d", r.Offset, offset)
+			}
+
+			offset = r.Offset + 1
+			numRecords++
+		}
+
+		numFetches++
+	}
+
+	t.Logf("%d records were read in %d fetches", numRecords, numFetches)
+}