@@ -877,6 +877,254 @@ macro_rules! shr_impl_all {
877
877
878
878
shr_impl_all! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
879
879
880
+ /// TODO(japaric) docs
881
+ #[ cfg( not( stage0) ) ]
882
+ #[ lang = "add_assign" ]
883
+ pub trait AddAssign < Rhs =Self > {
884
+ /// TODO(japaric) docs
885
+ fn add_assign ( & mut self , Rhs ) ;
886
+ }
887
+
888
+ #[ cfg( not( stage0) ) ]
889
+ macro_rules! add_assign_impl {
890
+ ( $( $t: ty) +) => ( $(
891
+ impl AddAssign for $t {
892
+ #[ inline]
893
+ fn add_assign( & mut self , other: $t) { * self += other }
894
+ }
895
+ ) +)
896
+ }
897
+
898
+ #[ cfg( not( stage0) ) ]
899
+ add_assign_impl ! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
900
+
901
+ /// TODO(japaric) docs
902
+ #[ cfg( not( stage0) ) ]
903
+ #[ lang = "sub_assign" ]
904
+ pub trait SubAssign < Rhs =Self > {
905
+ /// TODO(japaric) docs
906
+ fn sub_assign ( & mut self , Rhs ) ;
907
+ }
908
+
909
+ #[ cfg( not( stage0) ) ]
910
+ macro_rules! sub_assign_impl {
911
+ ( $( $t: ty) +) => ( $(
912
+ impl SubAssign for $t {
913
+ #[ inline]
914
+ fn sub_assign( & mut self , other: $t) { * self -= other }
915
+ }
916
+ ) +)
917
+ }
918
+
919
+ #[ cfg( not( stage0) ) ]
920
+ sub_assign_impl ! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
921
+
922
+ /// TODO(japaric) docs
923
+ #[ cfg( not( stage0) ) ]
924
+ #[ lang = "mul_assign" ]
925
+ pub trait MulAssign < Rhs =Self > {
926
+ /// TODO(japaric) docs
927
+ fn mul_assign ( & mut self , Rhs ) ;
928
+ }
929
+
930
+ #[ cfg( not( stage0) ) ]
931
+ macro_rules! mul_assign_impl {
932
+ ( $( $t: ty) +) => ( $(
933
+ impl MulAssign for $t {
934
+ #[ inline]
935
+ fn mul_assign( & mut self , other: $t) { * self *= other }
936
+ }
937
+ ) +)
938
+ }
939
+
940
+ #[ cfg( not( stage0) ) ]
941
+ mul_assign_impl ! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
942
+
943
+ /// TODO(japaric) docs
944
+ #[ cfg( not( stage0) ) ]
945
+ #[ lang = "div_assign" ]
946
+ pub trait DivAssign < Rhs =Self > {
947
+ /// TODO(japaric) docs
948
+ fn div_assign ( & mut self , Rhs ) ;
949
+ }
950
+
951
+ #[ cfg( not( stage0) ) ]
952
+ macro_rules! div_assign_impl {
953
+ ( $( $t: ty) +) => ( $(
954
+ impl DivAssign for $t {
955
+ #[ inline]
956
+ fn div_assign( & mut self , other: $t) { * self /= other }
957
+ }
958
+ ) +)
959
+ }
960
+
961
+ #[ cfg( not( stage0) ) ]
962
+ div_assign_impl ! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
963
+
964
+ /// TODO(japaric) docs
965
+ #[ cfg( not( stage0) ) ]
966
+ #[ lang = "rem_assign" ]
967
+ pub trait RemAssign < Rhs =Self > {
968
+ /// TODO(japaric) docs
969
+ fn rem_assign ( & mut self , Rhs ) ;
970
+ }
971
+
972
+ #[ cfg( not( stage0) ) ]
973
+ macro_rules! rem_assign_impl {
974
+ ( $( $t: ty) +) => ( $(
975
+ impl RemAssign for $t {
976
+ #[ inline]
977
+ fn rem_assign( & mut self , other: $t) { * self %= other }
978
+ }
979
+ ) +)
980
+ }
981
+
982
+ #[ cfg( not( stage0) ) ]
983
+ rem_assign_impl ! { usize u8 u16 u32 u64 isize i8 i16 i32 i64 f32 f64 }
984
+
985
+ /// TODO(japaric) docs
986
+ #[ cfg( not( stage0) ) ]
987
+ #[ lang = "bitand_assign" ]
988
+ pub trait BitAndAssign < Rhs =Self > {
989
+ /// TODO(japaric) docs
990
+ fn bitand_assign ( & mut self , Rhs ) ;
991
+ }
992
+
993
+ #[ cfg( not( stage0) ) ]
994
+ macro_rules! bitand_assign_impl {
995
+ ( $( $t: ty) +) => ( $(
996
+ impl BitAndAssign for $t {
997
+ #[ inline]
998
+ fn bitand_assign( & mut self , other: $t) { * self &= other }
999
+ }
1000
+ ) +)
1001
+ }
1002
+
1003
+ #[ cfg( not( stage0) ) ]
1004
+ bitand_assign_impl ! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
1005
+
1006
+ /// TODO(japaric) docs
1007
+ #[ cfg( not( stage0) ) ]
1008
+ #[ lang = "bitor_assign" ]
1009
+ pub trait BitOrAssign < Rhs =Self > {
1010
+ /// TODO(japaric) docs
1011
+ fn bitor_assign ( & mut self , Rhs ) ;
1012
+ }
1013
+
1014
+ #[ cfg( not( stage0) ) ]
1015
+ macro_rules! bitor_assign_impl {
1016
+ ( $( $t: ty) +) => ( $(
1017
+ impl BitOrAssign for $t {
1018
+ #[ inline]
1019
+ fn bitor_assign( & mut self , other: $t) { * self |= other }
1020
+ }
1021
+ ) +)
1022
+ }
1023
+
1024
+ #[ cfg( not( stage0) ) ]
1025
+ bitor_assign_impl ! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
1026
+
1027
+ /// TODO(japaric) docs
1028
+ #[ cfg( not( stage0) ) ]
1029
+ #[ lang = "bitxor_assign" ]
1030
+ pub trait BitXorAssign < Rhs =Self > {
1031
+ /// TODO(japaric) docs
1032
+ fn bitxor_assign ( & mut self , Rhs ) ;
1033
+ }
1034
+
1035
+ #[ cfg( not( stage0) ) ]
1036
+ macro_rules! bitxor_assign_impl {
1037
+ ( $( $t: ty) +) => ( $(
1038
+ impl BitXorAssign for $t {
1039
+ #[ inline]
1040
+ fn bitxor_assign( & mut self , other: $t) { * self ^= other }
1041
+ }
1042
+ ) +)
1043
+ }
1044
+
1045
+ #[ cfg( not( stage0) ) ]
1046
+ bitxor_assign_impl ! { bool usize u8 u16 u32 u64 isize i8 i16 i32 i64 }
1047
+
1048
+ /// TODO(japaric) docs
1049
+ #[ cfg( not( stage0) ) ]
1050
+ #[ lang = "shl_assign" ]
1051
+ pub trait ShlAssign < Rhs =Self > {
1052
+ /// TODO(japaric) docs
1053
+ fn shl_assign ( & mut self , Rhs ) ;
1054
+ }
1055
+
1056
+ #[ cfg( not( stage0) ) ]
1057
+ macro_rules! shl_assign_impl {
1058
+ ( $t: ty, $f: ty) => (
1059
+ impl ShlAssign <$f> for $t {
1060
+ #[ inline]
1061
+ fn shl_assign( & mut self , other: $f) {
1062
+ * self <<= other
1063
+ }
1064
+ }
1065
+ )
1066
+ }
1067
+
1068
+ #[ cfg( not( stage0) ) ]
1069
+ macro_rules! shl_assign_impl_all {
1070
+ ( $( $t: ty) * ) => ( $(
1071
+ shl_assign_impl! { $t, u8 }
1072
+ shl_assign_impl! { $t, u16 }
1073
+ shl_assign_impl! { $t, u32 }
1074
+ shl_assign_impl! { $t, u64 }
1075
+ shl_assign_impl! { $t, usize }
1076
+
1077
+ shl_assign_impl! { $t, i8 }
1078
+ shl_assign_impl! { $t, i16 }
1079
+ shl_assign_impl! { $t, i32 }
1080
+ shl_assign_impl! { $t, i64 }
1081
+ shl_assign_impl! { $t, isize }
1082
+ ) * )
1083
+ }
1084
+
1085
+ #[ cfg( not( stage0) ) ]
1086
+ shl_assign_impl_all ! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
1087
+
1088
+ /// TODO(japaric) docs
1089
+ #[ cfg( not( stage0) ) ]
1090
+ #[ lang = "shr_assign" ]
1091
+ pub trait ShrAssign < Rhs =Self > {
1092
+ /// TODO(japaric) docs
1093
+ fn shr_assign ( & mut self , Rhs ) ;
1094
+ }
1095
+
1096
+ #[ cfg( not( stage0) ) ]
1097
+ macro_rules! shr_assign_impl {
1098
+ ( $t: ty, $f: ty) => (
1099
+ impl ShrAssign <$f> for $t {
1100
+ #[ inline]
1101
+ fn shr_assign( & mut self , other: $f) {
1102
+ * self >>= other
1103
+ }
1104
+ }
1105
+ )
1106
+ }
1107
+
1108
+ #[ cfg( not( stage0) ) ]
1109
+ macro_rules! shr_assign_impl_all {
1110
+ ( $( $t: ty) * ) => ( $(
1111
+ shr_assign_impl! { $t, u8 }
1112
+ shr_assign_impl! { $t, u16 }
1113
+ shr_assign_impl! { $t, u32 }
1114
+ shr_assign_impl! { $t, u64 }
1115
+ shr_assign_impl! { $t, usize }
1116
+
1117
+ shr_assign_impl! { $t, i8 }
1118
+ shr_assign_impl! { $t, i16 }
1119
+ shr_assign_impl! { $t, i32 }
1120
+ shr_assign_impl! { $t, i64 }
1121
+ shr_assign_impl! { $t, isize }
1122
+ ) * )
1123
+ }
1124
+
1125
+ #[ cfg( not( stage0) ) ]
1126
+ shr_assign_impl_all ! { u8 u16 u32 u64 usize i8 i16 i32 i64 isize }
1127
+
880
1128
/// The `Index` trait is used to specify the functionality of indexing operations
881
1129
/// like `arr[idx]` when used in an immutable context.
882
1130
///
0 commit comments