// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package binary

import (
	"bytes"
	"fmt"
	"io"
	"math"
	"reflect"
	"strings"
	"sync"
	"testing"
	"unsafe"
)

type Struct struct {
	Int8       int8
	Int16      int16
	Int32      int32
	Int64      int64
	Uint8      uint8
	Uint16     uint16
	Uint32     uint32
	Uint64     uint64
	Float32    float32
	Float64    float64
	Complex64  complex64
	Complex128 complex128
	Array      [4]uint8
	Bool       bool
	BoolArray  [4]bool
}

type T struct {
	Int     int
	Uint    uint
	Uintptr uintptr
	Array   [4]int
}

var s = Struct{
	0x01,
	0x0203,
	0x04050607,
	0x08090a0b0c0d0e0f,
	0x10,
	0x1112,
	0x13141516,
	0x1718191a1b1c1d1e,

	math.Float32frombits(0x1f202122),
	math.Float64frombits(0x232425262728292a),
	complex(
		math.Float32frombits(0x2b2c2d2e),
		math.Float32frombits(0x2f303132),
	),
	complex(
		math.Float64frombits(0x333435363738393a),
		math.Float64frombits(0x3b3c3d3e3f404142),
	),

	[4]uint8{0x43, 0x44, 0x45, 0x46},

	true,
	[4]bool{true, false, true, false},
}

var big = []byte{
	1,
	2, 3,
	4, 5, 6, 7,
	8, 9, 10, 11, 12, 13, 14, 15,
	16,
	17, 18,
	19, 20, 21, 22,
	23, 24, 25, 26, 27, 28, 29, 30,

	31, 32, 33, 34,
	35, 36, 37, 38, 39, 40, 41, 42,
	43, 44, 45, 46, 47, 48, 49, 50,
	51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,

	67, 68, 69, 70,

	1,
	1, 0, 1, 0,
}

var little = []byte{
	1,
	3, 2,
	7, 6, 5, 4,
	15, 14, 13, 12, 11, 10, 9, 8,
	16,
	18, 17,
	22, 21, 20, 19,
	30, 29, 28, 27, 26, 25, 24, 23,

	34, 33, 32, 31,
	42, 41, 40, 39, 38, 37, 36, 35,
	46, 45, 44, 43, 50, 49, 48, 47,
	58, 57, 56, 55, 54, 53, 52, 51, 66, 65, 64, 63, 62, 61, 60, 59,

	67, 68, 69, 70,

	1,
	1, 0, 1, 0,
}

var src = []byte{1, 2, 3, 4, 5, 6, 7, 8}
var res = []int32{0x01020304, 0x05060708}
var putbuf = []byte{0, 0, 0, 0, 0, 0, 0, 0}

func checkResult(t *testing.T, dir string, order ByteOrder, err error, have, want any) {
	if err != nil {
		t.Errorf("%v %v: %v", dir, order, err)
		return
	}
	if !reflect.DeepEqual(have, want) {
		t.Errorf("%v %v:\n\thave %+v\n\twant %+v", dir, order, have, want)
	}
}

var encoders = []struct {
	name string
	fn   func(order ByteOrder, data any) ([]byte, error)
}{
	{
		"Write",
		func(order ByteOrder, data any) ([]byte, error) {
			buf := new(bytes.Buffer)
			err := Write(buf, order, data)
			return buf.Bytes(), err
		},
	},
	{
		"Encode",
		func(order ByteOrder, data any) ([]byte, error) {
			size := Size(data)

			var buf []byte
			if size > 0 {
				buf = make([]byte, Size(data))
			}

			n, err := Encode(buf, order, data)
			if err == nil && n != size {
				return nil, fmt.Errorf("returned size %d instead of %d", n, size)
			}
			return buf, err
		},
	}, {
		"Append",
		func(order ByteOrder, data any) ([]byte, error) {
			return Append(nil, order, data)
		},
	},
}

var decoders = []struct {
	name string
	fn   func(order ByteOrder, data any, buf []byte) error
}{
	{
		"Read",
		func(order ByteOrder, data any, buf []byte) error {
			return Read(bytes.NewReader(buf), order, data)
		},
	},
	{
		"Decode",
		func(order ByteOrder, data any, buf []byte) error {
			n, err := Decode(buf, order, data)
			if err == nil && n != Size(data) {
				return fmt.Errorf("returned size %d instead of %d", n, Size(data))
			}
			return err
		},
	},
}

func testRead(t *testing.T, order ByteOrder, b []byte, s1 any) {
	for _, dec := range decoders {
		t.Run(dec.name, func(t *testing.T) {
			var s2 Struct
			err := dec.fn(order, &s2, b)
			checkResult(t, dec.name, order, err, s2, s1)
		})
	}
}

func testWrite(t *testing.T, order ByteOrder, b []byte, s1 any) {
	for _, enc := range encoders {
		t.Run(enc.name, func(t *testing.T) {
			buf, err := enc.fn(order, s1)
			checkResult(t, enc.name, order, err, buf, b)
		})
	}
}

func TestLittleEndianRead(t *testing.T)     { testRead(t, LittleEndian, little, s) }
func TestLittleEndianWrite(t *testing.T)    { testWrite(t, LittleEndian, little, s) }
func TestLittleEndianPtrWrite(t *testing.T) { testWrite(t, LittleEndian, little, &s) }

func TestBigEndianRead(t *testing.T)     { testRead(t, BigEndian, big, s) }
func TestBigEndianWrite(t *testing.T)    { testWrite(t, BigEndian, big, s) }
func TestBigEndianPtrWrite(t *testing.T) { testWrite(t, BigEndian, big, &s) }

func TestReadSlice(t *testing.T) {
	t.Run("Read", func(t *testing.T) {
		slice := make([]int32, 2)
		err := Read(bytes.NewReader(src), BigEndian, slice)
		checkResult(t, "ReadSlice", BigEndian, err, slice, res)
	})

	t.Run("Decode", func(t *testing.T) {
		slice := make([]int32, 2)
		_, err := Decode(src, BigEndian, slice)
		checkResult(t, "ReadSlice", BigEndian, err, slice, res)
	})
}

func TestWriteSlice(t *testing.T) {
	testWrite(t, BigEndian, src, res)
}

func TestReadBool(t *testing.T) {
	for _, dec := range decoders {
		t.Run(dec.name, func(t *testing.T) {
			var res bool
			var err error
			err = dec.fn(BigEndian, &res, []byte{0})
			checkResult(t, dec.name, BigEndian, err, res, false)
			res = false
			err = dec.fn(BigEndian, &res, []byte{1})
			checkResult(t, dec.name, BigEndian, err, res, true)
			res = false
			err = dec.fn(BigEndian, &res, []byte{2})
			checkResult(t, dec.name, BigEndian, err, res, true)
		})
	}

}

func TestReadBoolSlice(t *testing.T) {
	for _, dec := range decoders {
		t.Run(dec.name, func(t *testing.T) {
			slice := make([]bool, 4)
			err := dec.fn(BigEndian, slice, []byte{0, 1, 2, 255})
			checkResult(t, dec.name, BigEndian, err, slice, []bool{false, true, true, true})
		})
	}
}

// Addresses of arrays are easier to manipulate with reflection than are slices.
var intArrays = []any{
	&[100]int8{},
	&[100]int16{},
	&[100]int32{},
	&[100]int64{},
	&[100]uint8{},
	&[100]uint16{},
	&[100]uint32{},
	&[100]uint64{},
}

func TestSliceRoundTrip(t *testing.T) {
	for _, enc := range encoders {
		for _, dec := range decoders {
			t.Run(fmt.Sprintf("%s,%s", enc.name, dec.name), func(t *testing.T) {
				for _, array := range intArrays {
					src := reflect.ValueOf(array).Elem()
					t.Run(src.Index(0).Type().Name(), func(t *testing.T) {
						unsigned := false
						switch src.Index(0).Kind() {
						case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
							unsigned = true
						}
						for i := 0; i < src.Len(); i++ {
							if unsigned {
								src.Index(i).SetUint(uint64(i * 0x07654321))
							} else {
								src.Index(i).SetInt(int64(i * 0x07654321))
							}
						}
						srcSlice := src.Slice(0, src.Len())
						buf, err := enc.fn(BigEndian, srcSlice.Interface())
						if err != nil {
							t.Fatal(err)
						}
						dst := reflect.New(src.Type()).Elem()
						dstSlice := dst.Slice(0, dst.Len())
						err = dec.fn(BigEndian, dstSlice.Interface(), buf)
						if err != nil {
							t.Fatal(err)
						}
						if !reflect.DeepEqual(src.Interface(), dst.Interface()) {
							t.Log(dst)
							t.Fatal(src)
						}
					})
				}
			})
		}
	}
}

func TestWriteT(t *testing.T) {
	for _, enc := range encoders {
		t.Run(enc.name, func(t *testing.T) {
			ts := T{}
			if _, err := enc.fn(BigEndian, ts); err == nil {
				t.Errorf("WriteT: have err == nil, want non-nil")
			}

			tv := reflect.Indirect(reflect.ValueOf(ts))
			for i, n := 0, tv.NumField(); i < n; i++ {
				typ := tv.Field(i).Type().String()
				if typ == "[4]int" {
					typ = "int" // the problem is int, not the [4]
				}
				if _, err := enc.fn(BigEndian, tv.Field(i).Interface()); err == nil {
					t.Errorf("WriteT.%v: have err == nil, want non-nil", tv.Field(i).Type())
				} else if !strings.Contains(err.Error(), typ) {
					t.Errorf("WriteT: have err == %q, want it to mention %s", err, typ)
				}
			}
		})
	}
}

type BlankFields struct {
	A uint32
	_ int32
	B float64
	_ [4]int16
	C byte
	_ [7]byte
	_ struct {
		f [8]float32
	}
}

type BlankFieldsProbe struct {
	A  uint32
	P0 int32
	B  float64
	P1 [4]int16
	C  byte
	P2 [7]byte
	P3 struct {
		F [8]float32
	}
}

func TestBlankFields(t *testing.T) {
	for _, enc := range encoders {
		t.Run(enc.name, func(t *testing.T) {
			b1 := BlankFields{A: 1234567890, B: 2.718281828, C: 42}
			buf, err := enc.fn(LittleEndian, &b1)
			if err != nil {
				t.Error(err)
			}

			// zero values must have been written for blank fields
			var p BlankFieldsProbe
			if err := Read(bytes.NewReader(buf), LittleEndian, &p); err != nil {
				t.Error(err)
			}

			// quick test: only check first value of slices
			if p.P0 != 0 || p.P1[0] != 0 || p.P2[0] != 0 || p.P3.F[0] != 0 {
				t.Errorf("non-zero values for originally blank fields: %#v", p)
			}

			// write p and see if we can probe only some fields
			buf, err = enc.fn(LittleEndian, &p)
			if err != nil {
				t.Error(err)
			}

			// read should ignore blank fields in b2
			var b2 BlankFields
			if err := Read(bytes.NewReader(buf), LittleEndian, &b2); err != nil {
				t.Error(err)
			}
			if b1.A != b2.A || b1.B != b2.B || b1.C != b2.C {
				t.Errorf("%#v != %#v", b1, b2)
			}
		})
	}
}

func TestSizeStructCache(t *testing.T) {
	// Reset the cache, otherwise multiple test runs fail.
	structSize = sync.Map{}

	count := func() int {
		var i int
		structSize.Range(func(_, _ any) bool {
			i++
			return true
		})
		return i
	}

	var total int
	added := func() int {
		delta := count() - total
		total += delta
		return delta
	}

	type foo struct {
		A uint32
	}

	type bar struct {
		A Struct
		B foo
		C Struct
	}

	testcases := []struct {
		val  any
		want int
	}{
		{new(foo), 1},
		{new([1]foo), 0},
		{make([]foo, 1), 0},
		{new(bar), 1},
		{new(bar), 0},
		{new(struct{ A Struct }), 1},
		{new(struct{ A Struct }), 0},
		{new([1]struct{ A Struct }), 0},
		{make([]struct{ A Struct }, 1), 0},
	}

	for _, tc := range testcases {
		if Size(tc.val) == -1 {
			t.Fatalf("Can't get the size of %T", tc.val)
		}

		if n := added(); n != tc.want {
			t.Errorf("Sizing %T added %d entries to the cache, want %d", tc.val, n, tc.want)
		}
	}
}

func TestSizeInvalid(t *testing.T) {
	testcases := []any{
		int(0),
		new(int),
		(*int)(nil),
		[1]uint{},
		new([1]uint),
		(*[1]uint)(nil),
		[]int{},
		[]int(nil),
		new([]int),
		(*[]int)(nil),
		(*int8)(nil),
		(*uint8)(nil),
		(*int16)(nil),
		(*uint16)(nil),
		(*int32)(nil),
		(*uint32)(nil),
		(*int64)(nil),
		(*uint64)(nil),
		(*float32)(nil),
		(*float64)(nil),
		(*complex64)(nil),
		(*complex128)(nil),
	}
	for _, tc := range testcases {
		if got := Size(tc); got != -1 {
			t.Errorf("Size(%T) = %d, want -1", tc, got)
		}
	}
}

// An attempt to read into a struct with an unexported field will
// panic. This is probably not the best choice, but at this point
// anything else would be an API change.

type Unexported struct {
	a int32
}

func TestUnexportedRead(t *testing.T) {
	var buf bytes.Buffer
	u1 := Unexported{a: 1}
	if err := Write(&buf, LittleEndian, &u1); err != nil {
		t.Fatal(err)
	}

	for _, dec := range decoders {
		t.Run(dec.name, func(t *testing.T) {
			defer func() {
				if recover() == nil {
					t.Fatal("did not panic")
				}
			}()
			var u2 Unexported
			dec.fn(LittleEndian, &u2, buf.Bytes())
		})
	}

}

func TestReadErrorMsg(t *testing.T) {
	for _, dec := range decoders {
		t.Run(dec.name, func(t *testing.T) {
			read := func(data any) {
				err := dec.fn(LittleEndian, data, nil)
				want := fmt.Sprintf("binary.%s: invalid type %s", dec.name, reflect.TypeOf(data).String())
				if err == nil {
					t.Errorf("%T: got no error; want %q", data, want)
					return
				}
				if got := err.Error(); got != want {
					t.Errorf("%T: got %q; want %q", data, got, want)
				}
			}
			read(0)
			s := new(struct{})
			read(&s)
			p := &s
			read(&p)
		})
	}
}

func TestReadTruncated(t *testing.T) {
	const data = "0123456789abcdef"

	var b1 = make([]int32, 4)
	var b2 struct {
		A, B, C, D byte
		E          int32
		F          float64
	}

	for i := 0; i <= len(data); i++ {
		var errWant error
		switch i {
		case 0:
			errWant = io.EOF
		case len(data):
			errWant = nil
		default:
			errWant = io.ErrUnexpectedEOF
		}

		if err := Read(strings.NewReader(data[:i]), LittleEndian, &b1); err != errWant {
			t.Errorf("Read(%d) with slice: got %v, want %v", i, err, errWant)
		}
		if err := Read(strings.NewReader(data[:i]), LittleEndian, &b2); err != errWant {
			t.Errorf("Read(%d) with struct: got %v, want %v", i, err, errWant)
		}
	}
}

func testUint64SmallSliceLengthPanics() (panicked bool) {
	defer func() {
		panicked = recover() != nil
	}()
	b := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	LittleEndian.Uint64(b[:4])
	return false
}

func testPutUint64SmallSliceLengthPanics() (panicked bool) {
	defer func() {
		panicked = recover() != nil
	}()
	b := [8]byte{}
	LittleEndian.PutUint64(b[:4], 0x0102030405060708)
	return false
}

func TestByteOrder(t *testing.T) {
	type byteOrder interface {
		ByteOrder
		AppendByteOrder
	}
	buf := make([]byte, 8)
	for _, order := range []byteOrder{LittleEndian, BigEndian} {
		const offset = 3
		for _, value := range []uint64{
			0x0000000000000000,
			0x0123456789abcdef,
			0xfedcba9876543210,
			0xffffffffffffffff,
			0xaaaaaaaaaaaaaaaa,
			math.Float64bits(math.Pi),
			math.Float64bits(math.E),
		} {
			want16 := uint16(value)
			order.PutUint16(buf[:2], want16)
			if got := order.Uint16(buf[:2]); got != want16 {
				t.Errorf("PutUint16: Uint16 = %v, want %v", got, want16)
			}
			buf = order.AppendUint16(buf[:offset], want16)
			if got := order.Uint16(buf[offset:]); got != want16 {
				t.Errorf("AppendUint16: Uint16 = %v, want %v", got, want16)
			}
			if len(buf) != offset+2 {
				t.Errorf("AppendUint16: len(buf) = %d, want %d", len(buf), offset+2)
			}

			want32 := uint32(value)
			order.PutUint32(buf[:4], want32)
			if got := order.Uint32(buf[:4]); got != want32 {
				t.Errorf("PutUint32: Uint32 = %v, want %v", got, want32)
			}
			buf = order.AppendUint32(buf[:offset], want32)
			if got := order.Uint32(buf[offset:]); got != want32 {
				t.Errorf("AppendUint32: Uint32 = %v, want %v", got, want32)
			}
			if len(buf) != offset+4 {
				t.Errorf("AppendUint32: len(buf) = %d, want %d", len(buf), offset+4)
			}

			want64 := uint64(value)
			order.PutUint64(buf[:8], want64)
			if got := order.Uint64(buf[:8]); got != want64 {
				t.Errorf("PutUint64: Uint64 = %v, want %v", got, want64)
			}
			buf = order.AppendUint64(buf[:offset], want64)
			if got := order.Uint64(buf[offset:]); got != want64 {
				t.Errorf("AppendUint64: Uint64 = %v, want %v", got, want64)
			}
			if len(buf) != offset+8 {
				t.Errorf("AppendUint64: len(buf) = %d, want %d", len(buf), offset+8)
			}
		}
	}
}

func TestEarlyBoundsChecks(t *testing.T) {
	if testUint64SmallSliceLengthPanics() != true {
		t.Errorf("binary.LittleEndian.Uint64 expected to panic for small slices, but didn't")
	}
	if testPutUint64SmallSliceLengthPanics() != true {
		t.Errorf("binary.LittleEndian.PutUint64 expected to panic for small slices, but didn't")
	}
}

func TestReadInvalidDestination(t *testing.T) {
	testReadInvalidDestination(t, BigEndian)
	testReadInvalidDestination(t, LittleEndian)
}

func testReadInvalidDestination(t *testing.T, order ByteOrder) {
	destinations := []any{
		int8(0),
		int16(0),
		int32(0),
		int64(0),

		uint8(0),
		uint16(0),
		uint32(0),
		uint64(0),

		bool(false),
	}

	for _, dst := range destinations {
		err := Read(bytes.NewReader([]byte{1, 2, 3, 4, 5, 6, 7, 8}), order, dst)
		want := fmt.Sprintf("binary.Read: invalid type %T", dst)
		if err == nil || err.Error() != want {
			t.Fatalf("for type %T: got %q; want %q", dst, err, want)
		}
	}
}

func TestNoFixedSize(t *testing.T) {
	type Person struct {
		Age    int
		Weight float64
		Height float64
	}

	person := Person{
		Age:    27,
		Weight: 67.3,
		Height: 177.8,
	}

	for _, enc := range encoders {
		t.Run(enc.name, func(t *testing.T) {
			_, err := enc.fn(LittleEndian, &person)
			if err == nil {
				t.Fatalf("binary.%s: unexpected success as size of type *binary.Person is not fixed", enc.name)
			}
			errs := fmt.Sprintf("binary.%s: some values are not fixed-sized in type *binary.Person", enc.name)
			if err.Error() != errs {
				t.Fatalf("got %q, want %q", err, errs)
			}
		})
	}
}

func TestAppendAllocs(t *testing.T) {
	buf := make([]byte, 0, Size(&s))
	var err error
	allocs := testing.AllocsPerRun(1, func() {
		_, err = Append(buf, LittleEndian, &s)
	})
	if err != nil {
		t.Fatal("Append failed:", err)
	}
	if allocs != 0 {
		t.Fatalf("Append allocated %v times instead of not allocating at all", allocs)
	}
}

var sizableTypes = []any{
	bool(false),
	int8(0),
	int16(0),
	int32(0),
	int64(0),
	uint8(0),
	uint16(0),
	uint32(0),
	uint64(0),
	float32(0),
	float64(0),
	complex64(0),
	complex128(0),
	Struct{},
	&Struct{},
	[]Struct{},
	([]Struct)(nil),
	[1]Struct{},
}

func TestSizeAllocs(t *testing.T) {
	for _, data := range sizableTypes {
		t.Run(fmt.Sprintf("%T", data), func(t *testing.T) {
			// Size uses a sync.Map behind the scenes. The slow lookup path of
			// that does allocate, so we need a couple of runs here to be
			// allocation free.
			allocs := testing.AllocsPerRun(10, func() {
				_ = Size(data)
			})
			if allocs != 0 {
				t.Fatalf("Expected no allocations, got %v", allocs)
			}
		})
	}
}

type byteSliceReader struct {
	remain []byte
}

func (br *byteSliceReader) Read(p []byte) (int, error) {
	n := copy(p, br.remain)
	br.remain = br.remain[n:]
	return n, nil
}

func BenchmarkReadSlice1000Int32s(b *testing.B) {
	bsr := &byteSliceReader{}
	slice := make([]int32, 1000)
	buf := make([]byte, len(slice)*4)
	b.SetBytes(int64(len(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = buf
		Read(bsr, BigEndian, slice)
	}
}

func BenchmarkReadStruct(b *testing.B) {
	bsr := &byteSliceReader{}
	var buf bytes.Buffer
	Write(&buf, BigEndian, &s)
	b.SetBytes(int64(dataSize(reflect.ValueOf(s))))
	t := s
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = buf.Bytes()
		Read(bsr, BigEndian, &t)
	}
	b.StopTimer()
	if b.N > 0 && !reflect.DeepEqual(s, t) {
		b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", t, s)
	}
}

func BenchmarkWriteStruct(b *testing.B) {
	b.SetBytes(int64(Size(&s)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Write(io.Discard, BigEndian, &s)
	}
}

func BenchmarkAppendStruct(b *testing.B) {
	buf := make([]byte, 0, Size(&s))
	b.SetBytes(int64(cap(buf)))
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		Encode(buf, BigEndian, &s)
	}
}

func BenchmarkWriteSlice1000Structs(b *testing.B) {
	slice := make([]Struct, 1000)
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(int64(Size(slice)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, slice)
	}
	b.StopTimer()
}

func BenchmarkAppendSlice1000Structs(b *testing.B) {
	slice := make([]Struct, 1000)
	buf := make([]byte, 0, Size(slice))
	b.SetBytes(int64(cap(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Append(buf, BigEndian, slice)
	}
	b.StopTimer()
}

func BenchmarkReadSlice1000Structs(b *testing.B) {
	bsr := &byteSliceReader{}
	slice := make([]Struct, 1000)
	buf := make([]byte, Size(slice))
	b.SetBytes(int64(len(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = buf
		Read(bsr, BigEndian, slice)
	}
}

func BenchmarkReadInts(b *testing.B) {
	var ls Struct
	bsr := &byteSliceReader{}
	var r io.Reader = bsr
	b.SetBytes(2 * (1 + 2 + 4 + 8))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = big
		Read(r, BigEndian, &ls.Int8)
		Read(r, BigEndian, &ls.Int16)
		Read(r, BigEndian, &ls.Int32)
		Read(r, BigEndian, &ls.Int64)
		Read(r, BigEndian, &ls.Uint8)
		Read(r, BigEndian, &ls.Uint16)
		Read(r, BigEndian, &ls.Uint32)
		Read(r, BigEndian, &ls.Uint64)
	}
	b.StopTimer()
	want := s
	want.Float32 = 0
	want.Float64 = 0
	want.Complex64 = 0
	want.Complex128 = 0
	want.Array = [4]uint8{0, 0, 0, 0}
	want.Bool = false
	want.BoolArray = [4]bool{false, false, false, false}
	if b.N > 0 && !reflect.DeepEqual(ls, want) {
		b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
	}
}

func BenchmarkWriteInts(b *testing.B) {
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(2 * (1 + 2 + 4 + 8))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, s.Int8)
		Write(w, BigEndian, s.Int16)
		Write(w, BigEndian, s.Int32)
		Write(w, BigEndian, s.Int64)
		Write(w, BigEndian, s.Uint8)
		Write(w, BigEndian, s.Uint16)
		Write(w, BigEndian, s.Uint32)
		Write(w, BigEndian, s.Uint64)
	}
	b.StopTimer()
	if b.N > 0 && !bytes.Equal(buf.Bytes(), big[:30]) {
		b.Fatalf("first half doesn't match: %x %x", buf.Bytes(), big[:30])
	}
}

func BenchmarkAppendInts(b *testing.B) {
	buf := make([]byte, 0, 256)
	b.SetBytes(2 * (1 + 2 + 4 + 8))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf = buf[:0]
		buf, _ = Append(buf, BigEndian, s.Int8)
		buf, _ = Append(buf, BigEndian, s.Int16)
		buf, _ = Append(buf, BigEndian, s.Int32)
		buf, _ = Append(buf, BigEndian, s.Int64)
		buf, _ = Append(buf, BigEndian, s.Uint8)
		buf, _ = Append(buf, BigEndian, s.Uint16)
		buf, _ = Append(buf, BigEndian, s.Uint32)
		buf, _ = Append(buf, BigEndian, s.Uint64)
	}
	b.StopTimer()
	if b.N > 0 && !bytes.Equal(buf, big[:30]) {
		b.Fatalf("first half doesn't match: %x %x", buf, big[:30])
	}
}

func BenchmarkWriteSlice1000Int32s(b *testing.B) {
	slice := make([]int32, 1000)
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(4 * 1000)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, slice)
	}
	b.StopTimer()
}

func BenchmarkAppendSlice1000Int32s(b *testing.B) {
	slice := make([]int32, 1000)
	buf := make([]byte, 0, Size(slice))
	b.SetBytes(int64(cap(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Append(buf, BigEndian, slice)
	}
	b.StopTimer()
}

func BenchmarkPutUint16(b *testing.B) {
	b.SetBytes(2)
	for i := 0; i < b.N; i++ {
		BigEndian.PutUint16(putbuf[:2], uint16(i))
	}
}

func BenchmarkAppendUint16(b *testing.B) {
	b.SetBytes(2)
	for i := 0; i < b.N; i++ {
		putbuf = BigEndian.AppendUint16(putbuf[:0], uint16(i))
	}
}

func BenchmarkPutUint32(b *testing.B) {
	b.SetBytes(4)
	for i := 0; i < b.N; i++ {
		BigEndian.PutUint32(putbuf[:4], uint32(i))
	}
}

func BenchmarkAppendUint32(b *testing.B) {
	b.SetBytes(4)
	for i := 0; i < b.N; i++ {
		putbuf = BigEndian.AppendUint32(putbuf[:0], uint32(i))
	}
}

func BenchmarkPutUint64(b *testing.B) {
	b.SetBytes(8)
	for i := 0; i < b.N; i++ {
		BigEndian.PutUint64(putbuf[:8], uint64(i))
	}
}

func BenchmarkAppendUint64(b *testing.B) {
	b.SetBytes(8)
	for i := 0; i < b.N; i++ {
		putbuf = BigEndian.AppendUint64(putbuf[:0], uint64(i))
	}
}

func BenchmarkLittleEndianPutUint16(b *testing.B) {
	b.SetBytes(2)
	for i := 0; i < b.N; i++ {
		LittleEndian.PutUint16(putbuf[:2], uint16(i))
	}
}

func BenchmarkLittleEndianAppendUint16(b *testing.B) {
	b.SetBytes(2)
	for i := 0; i < b.N; i++ {
		putbuf = LittleEndian.AppendUint16(putbuf[:0], uint16(i))
	}
}

func BenchmarkLittleEndianPutUint32(b *testing.B) {
	b.SetBytes(4)
	for i := 0; i < b.N; i++ {
		LittleEndian.PutUint32(putbuf[:4], uint32(i))
	}
}

func BenchmarkLittleEndianAppendUint32(b *testing.B) {
	b.SetBytes(4)
	for i := 0; i < b.N; i++ {
		putbuf = LittleEndian.AppendUint32(putbuf[:0], uint32(i))
	}
}

func BenchmarkLittleEndianPutUint64(b *testing.B) {
	b.SetBytes(8)
	for i := 0; i < b.N; i++ {
		LittleEndian.PutUint64(putbuf[:8], uint64(i))
	}
}

func BenchmarkLittleEndianAppendUint64(b *testing.B) {
	b.SetBytes(8)
	for i := 0; i < b.N; i++ {
		putbuf = LittleEndian.AppendUint64(putbuf[:0], uint64(i))
	}
}

func BenchmarkReadFloats(b *testing.B) {
	var ls Struct
	bsr := &byteSliceReader{}
	var r io.Reader = bsr
	b.SetBytes(4 + 8)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = big[30:]
		Read(r, BigEndian, &ls.Float32)
		Read(r, BigEndian, &ls.Float64)
	}
	b.StopTimer()
	want := s
	want.Int8 = 0
	want.Int16 = 0
	want.Int32 = 0
	want.Int64 = 0
	want.Uint8 = 0
	want.Uint16 = 0
	want.Uint32 = 0
	want.Uint64 = 0
	want.Complex64 = 0
	want.Complex128 = 0
	want.Array = [4]uint8{0, 0, 0, 0}
	want.Bool = false
	want.BoolArray = [4]bool{false, false, false, false}
	if b.N > 0 && !reflect.DeepEqual(ls, want) {
		b.Fatalf("struct doesn't match:\ngot %v;\nwant %v", ls, want)
	}
}

func BenchmarkWriteFloats(b *testing.B) {
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(4 + 8)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, s.Float32)
		Write(w, BigEndian, s.Float64)
	}
	b.StopTimer()
	if b.N > 0 && !bytes.Equal(buf.Bytes(), big[30:30+4+8]) {
		b.Fatalf("first half doesn't match: %x %x", buf.Bytes(), big[30:30+4+8])
	}
}

func BenchmarkReadSlice1000Float32s(b *testing.B) {
	bsr := &byteSliceReader{}
	slice := make([]float32, 1000)
	buf := make([]byte, len(slice)*4)
	b.SetBytes(int64(len(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = buf
		Read(bsr, BigEndian, slice)
	}
}

func BenchmarkWriteSlice1000Float32s(b *testing.B) {
	slice := make([]float32, 1000)
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(4 * 1000)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, slice)
	}
	b.StopTimer()
}

func BenchmarkReadSlice1000Uint8s(b *testing.B) {
	bsr := &byteSliceReader{}
	slice := make([]uint8, 1000)
	buf := make([]byte, len(slice))
	b.SetBytes(int64(len(buf)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		bsr.remain = buf
		Read(bsr, BigEndian, slice)
	}
}

func BenchmarkWriteSlice1000Uint8s(b *testing.B) {
	slice := make([]uint8, 1000)
	buf := new(bytes.Buffer)
	var w io.Writer = buf
	b.SetBytes(1000)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf.Reset()
		Write(w, BigEndian, slice)
	}
}

func BenchmarkSize(b *testing.B) {
	for _, data := range sizableTypes {
		b.Run(fmt.Sprintf("%T", data), func(b *testing.B) {
			for range b.N {
				_ = Size(data)
			}
		})
	}
}

func TestNativeEndian(t *testing.T) {
	const val = 0x12345678
	i := uint32(val)
	s := unsafe.Slice((*byte)(unsafe.Pointer(&i)), unsafe.Sizeof(i))
	if v := NativeEndian.Uint32(s); v != val {
		t.Errorf("NativeEndian.Uint32 returned %#x, expected %#x", v, val)
	}
}