@@ -19,6 +19,26 @@ fn apply_mask(buf: &mut [u8], mask: &[u8; 4]) {
     }
 }
 
+/// Faster version of `apply_mask()` which operates on 4-byte blocks.
+///
+/// Safety: `buf` must be at least 4-byte aligned.
+unsafe fn apply_mask_aligned32(buf: &mut [u8], mask: &[u8; 4]) {
+    debug_assert_eq!(buf.as_ptr() as usize % 4, 0);
+    // Reinterpret the 4-byte mask as one native-endian u32 word.
+    let mask_u32: u32 = transmute(*mask);
+    let mut ptr = buf.as_mut_ptr() as *mut u32;
+    for _ in 0..(buf.len() / 4) {
+        *ptr ^= mask_u32;
+        ptr = ptr.offset(1);
+    }
+    // Possible last block with less than 4 bytes.
+    let last_block_start = buf.len() & !3;
+    let last_block = &mut buf[last_block_start..];
+    apply_mask(last_block, mask);
+}
+
 #[inline]
 fn generate_mask() -> [u8; 4] {
     rand::random()
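
For comparison, the same word-at-a-time idea can be expressed without raw pointers or an alignment contract. A minimal safe sketch, assuming later std APIs (`chunks_exact_mut`, `u32::from_ne_bytes`) that this patch does not use; the name `apply_mask_chunked` is illustrative only:

fn apply_mask_chunked(buf: &mut [u8], mask: &[u8; 4]) {
    // The same native-byte-order word the transmute above builds.
    let mask_u32 = u32::from_ne_bytes(*mask);
    let mut chunks = buf.chunks_exact_mut(4);
    for chunk in &mut chunks {
        let mut word = [0u8; 4];
        word.copy_from_slice(chunk);
        // XOR one whole 4-byte word at a time.
        let masked = u32::from_ne_bytes(word) ^ mask_u32;
        chunk.copy_from_slice(&masked.to_ne_bytes());
    }
    // The remainder starts at a multiple of 4, so mask indexing restarts at 0.
    for (byte, &m) in chunks.into_remainder().iter_mut().zip(mask.iter()) {
        *byte ^= m;
    }
}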
@@ -174,7 +194,10 @@ impl Frame {
     #[inline]
     pub fn remove_mask(&mut self) {
         self.mask.and_then(|mask| {
-            Some(apply_mask(&mut self.payload, &mask))
+            // Assumes Vec's backing memory is at least 4-byte aligned.
+            unsafe {
+                Some(apply_mask_aligned32(&mut self.payload, &mask))
+            }
         });
         self.mask = None;
     }
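
The comment above records an assumption, not a guarantee: common allocators happen to hand `Vec<u8>` storage back at word alignment or better, but `Vec`'s API only promises the alignment of `u8` (1 byte). A hedged sketch of a dispatch wrapper (the name `apply_mask_checked` is hypothetical) that verifies alignment at runtime before taking the unsafe fast path:

fn apply_mask_checked(buf: &mut [u8], mask: &[u8; 4]) {
    if buf.as_ptr() as usize % 4 == 0 {
        // Alignment checked, so apply_mask_aligned32's safety contract holds.
        unsafe { apply_mask_aligned32(buf, mask) }
    } else {
        // Unaligned buffer: fall back to the byte-wise loop.
        apply_mask(buf, mask)
    }
}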
@@ -435,7 +458,10 @@ impl Frame {
         if self.is_masked() {
             let mask = self.mask.take().unwrap();
-            apply_mask(&mut self.payload, &mask);
+            // Assumes Vec's backing memory is at least 4-byte aligned.
+            unsafe {
+                apply_mask_aligned32(&mut self.payload, &mask);
+            }
             try!(w.write(&mask));
         }
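
One aside on the unchanged context line: `w.write(&mask)` may perform a short write, and the returned byte count is discarded by `try!`. `Write::write_all` loops until the whole buffer is written. A minimal sketch, assuming the surrounding method's error type converts from `io::Error` as the existing `try!` already implies:

// Retries on short writes instead of silently dropping mask bytes.
try!(w.write_all(&mask));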
@@ -489,6 +515,24 @@ mod tests {
     use super::super::coding::{OpCode, Data};
     use std::io::Cursor;
 
+    #[test]
+    fn test_apply_mask() {
+        let mask = [0x6d, 0xb6, 0xb2, 0x80];
+        let unmasked = vec![0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00];
+
+        let mut masked = unmasked.clone();
+        apply_mask(&mut masked, &mask);
+
+        let mut masked_aligned = unmasked.clone();
+        unsafe { apply_mask_aligned32(&mut masked_aligned, &mask) };
+
+        assert_eq!(masked, masked_aligned);
+    }
+
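
A natural companion property, sketched here rather than proposed for the patch: masking is XOR with a repeating 4-byte key, so applying the same mask twice must restore the input. This is exactly why `remove_mask()` and the write path can share one masking function:

#[test]
fn test_apply_mask_involution() {
    // Hypothetical extra test: XOR masking is its own inverse.
    let mask = [0x6d, 0xb6, 0xb2, 0x80];
    let original = vec![0xf3, 0x00, 0x01, 0x02, 0x03];
    let mut buf = original.clone();
    apply_mask(&mut buf, &mask);
    apply_mask(&mut buf, &mask);
    assert_eq!(buf, original);
}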
     #[test]
     fn parse() {
         let mut raw: Cursor<Vec<u8>> = Cursor::new(vec![