+// In C and Rust it is UB to read or write to usize::MAX because if an allocation extends to the
+// last byte of address space (there must be an allocation to do the read or write), in C computing
+// its one-past-the-end pointer would be equal to NULL and in Rust computing the address of a
+// trailing ZST member with a safe place projection would wrap (place projection address computation
+// is non-wrapping).
+//
+// However, some embedded systems have special memory at usize::MAX, and need to access that
+// memory. If they do that with the intrinsics provided by compiler-builtins (such as memcpy!), the
+// ptr::add in these loops will wrap. And if compiler-builtins is compiled with cfg(ub_checks),
+// this will fail a UB check at runtime.
+//
+// Since this scenario is UB, we are within our rights to hit this check and halt execution...
+// But we are also within our rights to try to make it work.
+// We use wrapping_add/wrapping_sub for pointer arithmetic in this module in an attempt to support
+// this use. Of course this is not a guarantee that such use will work, it just means that this
+// crate doing wrapping pointer arithmetic with a method that must not wrap won't be the problem if
+// something does go wrong at runtime.
 use core::intrinsics::likely;

 const WORD_SIZE: usize = core::mem::size_of::<usize>();
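
A minimal sketch (not part of the diff) of the distinction the new comment relies on: `add` must stay within a single allocation and must not wrap, so forming a pointer one past usize::MAX is UB, while `wrapping_add` is defined for any offset and simply wraps modulo the address space.

    fn main() {
        let p = usize::MAX as *const u8;
        // let q = unsafe { p.add(1) }; // UB: the offset would wrap past the end of the address space
        let q = p.wrapping_add(1); // defined: the address wraps around to zero
        assert!(q.is_null());
    }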
@@ -9,7 +26,7 @@ const WORD_MASK: usize = WORD_SIZE - 1;
 // word-wise copy.
 // * The word-wise copy logic needs to perform some checks so it has some small overhead.
 //   ensures that even on 32-bit platforms we have copied at least 8 bytes through
-//   word-wise copy so the saving of word-wise copy outweights the fixed overhead.
+//   word-wise copy so the saving of word-wise copy outweighs the fixed overhead.
 const WORD_COPY_THRESHOLD: usize = if 2 * WORD_SIZE > 16 {
     2 * WORD_SIZE
 } else {
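
For concreteness, a worked evaluation of the threshold expression above (illustration only): with WORD_SIZE = 4 (32-bit) or WORD_SIZE = 8 (64-bit) the else branch is taken, so the threshold is 16 in both cases; only a target with words wider than 8 bytes would use 2 * WORD_SIZE.

    fn main() {
        // Evaluate the threshold expression for common word sizes (illustration only).
        for word_size in [4usize, 8] {
            let threshold = if 2 * word_size > 16 { 2 * word_size } else { 16 };
            assert_eq!(threshold, 16);
        }
    }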
@@ -28,32 +45,32 @@ unsafe fn read_usize_unaligned(x: *const usize) -> usize {
 pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize) {
     #[inline(always)]
     unsafe fn copy_forward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) {
-        let dest_end = dest.add(n);
+        let dest_end = dest.wrapping_add(n);
         while dest < dest_end {
             *dest = *src;
-            dest = dest.add(1);
-            src = src.add(1);
+            dest = dest.wrapping_add(1);
+            src = src.wrapping_add(1);
         }
     }

     #[inline(always)]
     unsafe fn copy_forward_aligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
         let mut src_usize = src as *mut usize;
-        let dest_end = dest.add(n) as *mut usize;
+        let dest_end = dest.wrapping_add(n) as *mut usize;

         while dest_usize < dest_end {
             *dest_usize = *src_usize;
-            dest_usize = dest_usize.add(1);
-            src_usize = src_usize.add(1);
+            dest_usize = dest_usize.wrapping_add(1);
+            src_usize = src_usize.wrapping_add(1);
         }
     }

     #[cfg(not(feature = "mem-unaligned"))]
     #[inline(always)]
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
-        let dest_end = dest.add(n) as *mut usize;
+        let dest_end = dest.wrapping_add(n) as *mut usize;

         // Calculate the misalignment offset and shift needed to reassemble value.
         let offset = src as usize & WORD_MASK;
@@ -70,7 +87,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let mut prev_word = core::ptr::read_volatile(src_aligned);

         while dest_usize < dest_end {
-            src_aligned = src_aligned.add(1);
+            src_aligned = src_aligned.wrapping_add(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
             let resembled = prev_word >> shift | cur_word << (WORD_SIZE * 8 - shift);
@@ -79,7 +96,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
             prev_word = cur_word;

             *dest_usize = resembled;
-            dest_usize = dest_usize.add(1);
+            dest_usize = dest_usize.wrapping_add(1);
         }
     }

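A worked example (illustration only, not part of the diff) of the little-endian reassembly performed in the loop above, assuming an 8-byte word and a source misalignment of 3 bytes (so shift = 24): the low bytes of the result come from the previous aligned word and the high bytes from the current one.

    fn main() {
        let prev_word = u64::from_le_bytes([0, 1, 2, 3, 4, 5, 6, 7]);
        let cur_word = u64::from_le_bytes([8, 9, 10, 11, 12, 13, 14, 15]);
        let shift = 3 * 8;
        let resembled = prev_word >> shift | cur_word << (64 - shift);
        // The unaligned word starting at byte offset 3 of the stream.
        assert_eq!(resembled, u64::from_le_bytes([3, 4, 5, 6, 7, 8, 9, 10]));
    }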
@@ -88,12 +105,12 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
     unsafe fn copy_forward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
         let mut src_usize = src as *mut usize;
-        let dest_end = dest.add(n) as *mut usize;
+        let dest_end = dest.wrapping_add(n) as *mut usize;

         while dest_usize < dest_end {
             *dest_usize = read_usize_unaligned(src_usize);
-            dest_usize = dest_usize.add(1);
-            src_usize = src_usize.add(1);
+            dest_usize = dest_usize.wrapping_add(1);
+            src_usize = src_usize.wrapping_add(1);
         }
     }

@@ -102,8 +119,8 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         // Because of n >= 2 * WORD_SIZE, dst_misalignment < n
         let dest_misalignment = (dest as usize).wrapping_neg() & WORD_MASK;
         copy_forward_bytes(dest, src, dest_misalignment);
-        dest = dest.add(dest_misalignment);
-        src = src.add(dest_misalignment);
+        dest = dest.wrapping_add(dest_misalignment);
+        src = src.wrapping_add(dest_misalignment);
         n -= dest_misalignment;

         let n_words = n & !WORD_MASK;
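
A worked example (illustration only) of the head-alignment computation above, assuming an 8-byte word and a hypothetical destination address of 0x1003: wrapping_neg() & WORD_MASK yields how many bytes must be copied byte-wise before dest is word-aligned.

    fn main() {
        let word_mask = 8usize - 1; // assuming an 8-byte word
        let dest = 0x1003usize; // hypothetical destination address
        let dest_misalignment = dest.wrapping_neg() & word_mask;
        assert_eq!(dest_misalignment, 5); // 0x1003 + 5 == 0x1008, which is 8-byte aligned
    }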
@@ -113,8 +130,8 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         } else {
             copy_forward_misaligned_words(dest, src, n_words);
         }
-        dest = dest.add(n_words);
-        src = src.add(n_words);
+        dest = dest.wrapping_add(n_words);
+        src = src.wrapping_add(n_words);
         n -= n_words;
     }
     copy_forward_bytes(dest, src, n);
@@ -126,10 +143,10 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
     // as their inputs instead of pointers to the start!
     #[inline(always)]
     unsafe fn copy_backward_bytes(mut dest: *mut u8, mut src: *const u8, n: usize) {
-        let dest_start = dest.sub(n);
+        let dest_start = dest.wrapping_sub(n);
         while dest_start < dest {
-            dest = dest.sub(1);
-            src = src.sub(1);
+            dest = dest.wrapping_sub(1);
+            src = src.wrapping_sub(1);
             *dest = *src;
         }
     }
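
A minimal sketch (illustration only, using a hypothetical 4-byte buffer) of the end-pointer convention noted in the comment above: the caller passes pointers one past the end and the helper walks downward, mirroring copy_backward_bytes.

    fn main() {
        let src = [1u8, 2, 3, 4];
        let mut dst = [0u8; 4];
        let n = src.len();
        // Equivalent of calling copy_backward_bytes(dest.wrapping_add(n), src.wrapping_add(n), n).
        let (mut d, mut s) = (dst.as_mut_ptr().wrapping_add(n), src.as_ptr().wrapping_add(n));
        let d_start = d.wrapping_sub(n);
        while d_start < d {
            d = d.wrapping_sub(1);
            s = s.wrapping_sub(1);
            unsafe { *d = *s };
        }
        assert_eq!(dst, src);
    }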
@@ -138,11 +155,11 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
     unsafe fn copy_backward_aligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
         let mut src_usize = src as *mut usize;
-        let dest_start = dest.sub(n) as *mut usize;
+        let dest_start = dest.wrapping_sub(n) as *mut usize;

         while dest_start < dest_usize {
-            dest_usize = dest_usize.sub(1);
-            src_usize = src_usize.sub(1);
+            dest_usize = dest_usize.wrapping_sub(1);
+            src_usize = src_usize.wrapping_sub(1);
             *dest_usize = *src_usize;
         }
     }
@@ -151,7 +168,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
     #[inline(always)]
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
-        let dest_start = dest.sub(n) as *mut usize;
+        let dest_start = dest.wrapping_sub(n) as *mut usize;

         // Calculate the misalignment offset and shift needed to reassemble value.
         let offset = src as usize & WORD_MASK;
@@ -168,15 +185,15 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let mut prev_word = core::ptr::read_volatile(src_aligned);

         while dest_start < dest_usize {
-            src_aligned = src_aligned.sub(1);
+            src_aligned = src_aligned.wrapping_sub(1);
             let cur_word = *src_aligned;
             #[cfg(target_endian = "little")]
             let resembled = prev_word << (WORD_SIZE * 8 - shift) | cur_word >> shift;
             #[cfg(target_endian = "big")]
             let resembled = prev_word >> (WORD_SIZE * 8 - shift) | cur_word << shift;
             prev_word = cur_word;

-            dest_usize = dest_usize.sub(1);
+            dest_usize = dest_usize.wrapping_sub(1);
             *dest_usize = resembled;
         }
     }
@@ -186,25 +203,25 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
     unsafe fn copy_backward_misaligned_words(dest: *mut u8, src: *const u8, n: usize) {
         let mut dest_usize = dest as *mut usize;
         let mut src_usize = src as *mut usize;
-        let dest_start = dest.sub(n) as *mut usize;
+        let dest_start = dest.wrapping_sub(n) as *mut usize;

         while dest_start < dest_usize {
-            dest_usize = dest_usize.sub(1);
-            src_usize = src_usize.sub(1);
+            dest_usize = dest_usize.wrapping_sub(1);
+            src_usize = src_usize.wrapping_sub(1);
             *dest_usize = read_usize_unaligned(src_usize);
         }
     }

-    let mut dest = dest.add(n);
-    let mut src = src.add(n);
+    let mut dest = dest.wrapping_add(n);
+    let mut src = src.wrapping_add(n);

     if n >= WORD_COPY_THRESHOLD {
         // Align dest
         // Because of n >= 2 * WORD_SIZE, dst_misalignment < n
         let dest_misalignment = dest as usize & WORD_MASK;
         copy_backward_bytes(dest, src, dest_misalignment);
-        dest = dest.sub(dest_misalignment);
-        src = src.sub(dest_misalignment);
+        dest = dest.wrapping_sub(dest_misalignment);
+        src = src.wrapping_sub(dest_misalignment);
         n -= dest_misalignment;

         let n_words = n & !WORD_MASK;
@@ -214,8 +231,8 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         } else {
             copy_backward_misaligned_words(dest, src, n_words);
         }
-        dest = dest.sub(n_words);
-        src = src.sub(n_words);
+        dest = dest.wrapping_sub(n_words);
+        src = src.wrapping_sub(n_words);
         n -= n_words;
     }
     copy_backward_bytes(dest, src, n);
@@ -225,10 +242,10 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
 pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) {
     #[inline(always)]
     pub unsafe fn set_bytes_bytes(mut s: *mut u8, c: u8, n: usize) {
-        let end = s.add(n);
+        let end = s.wrapping_add(n);
         while s < end {
             *s = c;
-            s = s.add(1);
+            s = s.wrapping_add(1);
         }
     }

@@ -242,11 +259,11 @@ pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) {
         }

         let mut s_usize = s as *mut usize;
-        let end = s.add(n) as *mut usize;
+        let end = s.wrapping_add(n) as *mut usize;

         while s_usize < end {
             *s_usize = broadcast;
-            s_usize = s_usize.add(1);
+            s_usize = s_usize.wrapping_add(1);
         }
     }

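The broadcast word stored in the loop above is constructed earlier in set_bytes_words, outside this hunk; a sketch of one common construction (an assumption about code not shown in this diff) replicates the fill byte into every byte of a usize so whole words can be written at once.

    fn main() {
        let c: u8 = 0xAB; // hypothetical fill byte
        let mut broadcast = c as usize;
        let mut bits = 8;
        while bits < usize::BITS as usize {
            broadcast |= broadcast << bits;
            bits *= 2;
        }
        // Every byte of the word now equals the fill byte.
        assert!(broadcast.to_ne_bytes().iter().all(|&b| b == c));
    }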
@@ -255,12 +272,12 @@ pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) {
         // Because of n >= 2 * WORD_SIZE, dst_misalignment < n
         let misalignment = (s as usize).wrapping_neg() & WORD_MASK;
         set_bytes_bytes(s, c, misalignment);
-        s = s.add(misalignment);
+        s = s.wrapping_add(misalignment);
         n -= misalignment;

         let n_words = n & !WORD_MASK;
         set_bytes_words(s, c, n_words);
-        s = s.add(n_words);
+        s = s.wrapping_add(n_words);
         n -= n_words;
     }
     set_bytes_bytes(s, c, n);
@@ -270,8 +287,8 @@ pub unsafe fn set_bytes(mut s: *mut u8, c: u8, mut n: usize) {
 pub unsafe fn compare_bytes(s1: *const u8, s2: *const u8, n: usize) -> i32 {
     let mut i = 0;
     while i < n {
-        let a = *s1.add(i);
-        let b = *s2.add(i);
+        let a = *s1.wrapping_add(i);
+        let b = *s2.wrapping_add(i);
         if a != b {
             return a as i32 - b as i32;
         }
@@ -285,7 +302,7 @@ pub unsafe fn c_string_length(mut s: *const core::ffi::c_char) -> usize {
     let mut n = 0;
     while *s != 0 {
         n += 1;
-        s = s.add(1);
+        s = s.wrapping_add(1);
     }
     n
 }