@@ -58,7 +58,7 @@ unsafe fn load_aligned_partial(src: *const usize, load_sz: usize) -> usize {
             let chunk_sz = core::mem::size_of::<$ty>();
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the large reads first, this must still be aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(i).cast::<$ty>() = *src.byte_add(i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(i).cast::<$ty>() = *src.wrapping_byte_add(i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -91,7 +91,7 @@ unsafe fn load_aligned_end_partial(src: *const usize, load_sz: usize) -> usize {
             if (load_sz & chunk_sz) != 0 {
                 // Since we are doing the small reads first, `start_shift + i` has in the mean
                 // time become aligned to `chunk_sz`.
-                *(&raw mut out).byte_add(start_shift + i).cast::<$ty>() = *src.byte_add(start_shift + i).cast::<$ty>();
+                *(&raw mut out).wrapping_byte_add(start_shift + i).cast::<$ty>() = *src.wrapping_byte_add(start_shift + i).cast::<$ty>();
                 i |= chunk_sz;
             }
         )+};
@@ -143,7 +143,7 @@ pub unsafe fn copy_forward(mut dest: *mut u8, mut src: *const u8, mut n: usize)
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_end_partial(src_aligned, WORD_SIZE - offset);
 
         while dest_usize.wrapping_add(1) < dest_end {
@@ -252,7 +252,7 @@ pub unsafe fn copy_backward(dest: *mut u8, src: *const u8, mut n: usize) {
         let shift = offset * 8;
 
         // Realign src
-        let mut src_aligned = src.byte_sub(offset) as *mut usize;
+        let mut src_aligned = src.wrapping_byte_sub(offset) as *mut usize;
         let mut prev_word = load_aligned_partial(src_aligned, offset);
 
         while dest_start.wrapping_add(1) < dest_usize {
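
Not part of the patch: a minimal standalone sketch of the pointer-offset APIs the diff switches between, assuming a recent stable Rust toolchain. Per the standard library documentation, `byte_add`/`byte_sub` require both the starting and the resulting pointer to stay within (or one past the end of) the same allocation, whereas the `wrapping_*` variants place no requirement on the computed address itself, only on any pointer that is eventually dereferenced; that contract difference is presumably what the realignment code above relies on when it steps `src` back past the start of the copied range.

// Standalone illustration (not from the patch) of the two pointer-offset APIs.
fn main() {
    let data: [u8; 8] = [1, 2, 3, 4, 5, 6, 7, 8];
    let p = data.as_ptr();

    // An in-bounds offset works with either API.
    let q = p.wrapping_byte_add(4);
    // SAFETY: `q` points 4 bytes into the 8-byte array `data`.
    unsafe { assert_eq!(*q, 5) };

    // `wrapping_byte_add` may compute an address outside the allocation as
    // long as that pointer is never dereferenced; computing the same offset
    // with `byte_add` would already be undefined behaviour.
    let past = p.wrapping_byte_add(100);
    // Wrapping offsets round-trip exactly, so stepping back yields `p` again.
    let back = past.wrapping_byte_sub(100);
    // SAFETY: `back` equals `p`, which points to the first element of `data`.
    unsafe { assert_eq!(*back, 1) };
}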