From 01a9211a8432a4f2871bc6ccee1635a1afef3dbf Mon Sep 17 00:00:00 2001
From: Alexey Galakhov
Date: Wed, 5 Apr 2017 11:35:08 +0200
Subject: [PATCH] Move mask functions into a separate file

Signed-off-by: Alexey Galakhov
---
 src/protocol/frame/frame.rs | 62 ++------------------------------
 src/protocol/frame/mask.rs  | 71 +++++++++++++++++++++++++++++++++++++
 src/protocol/frame/mod.rs   |  1 +
 3 files changed, 75 insertions(+), 59 deletions(-)
 create mode 100644 src/protocol/frame/mask.rs

diff --git a/src/protocol/frame/frame.rs b/src/protocol/frame/frame.rs
index 3bdc539..352efab 100644
--- a/src/protocol/frame/frame.rs
+++ b/src/protocol/frame/frame.rs
@@ -8,41 +8,9 @@ use std::result::Result as StdResult;
 use byteorder::{ByteOrder, ReadBytesExt, NetworkEndian};
 use bytes::BufMut;
 
-use rand;
-
 use error::{Error, Result};
 use super::coding::{OpCode, Control, Data, CloseCode};
-
-fn apply_mask(buf: &mut [u8], mask: &[u8; 4]) {
-    for (i, byte) in buf.iter_mut().enumerate() {
-        *byte ^= mask[i & 3];
-    }
-}
-
-/// Faster version of `apply_mask()` which operates on 4-byte blocks.
-///
-/// Safety: `buf` must be at least 4-bytes aligned.
-unsafe fn apply_mask_aligned32(buf: &mut [u8], mask: &[u8; 4]) {
-    debug_assert_eq!(buf.as_ptr() as usize % 4, 0);
-
-    let mask_u32 = transmute(*mask);
-
-    let mut ptr = buf.as_mut_ptr() as *mut u32;
-    for _ in 0..(buf.len() / 4) {
-        *ptr ^= mask_u32;
-        ptr = ptr.offset(1);
-    }
-
-    // Possible last block with less than 4 bytes.
-    let last_block_start = buf.len() & !3;
-    let last_block = &mut buf[last_block_start..];
-    apply_mask(last_block, mask);
-}
-
-#[inline]
-fn generate_mask() -> [u8; 4] {
-    rand::random()
-}
+use super::mask::{generate_mask, apply_mask};
 
 /// A struct representing the close command.
 #[derive(Debug, Clone)]
@@ -219,10 +187,7 @@ impl Frame {
     #[inline]
     pub fn remove_mask(&mut self) {
         self.mask.and_then(|mask| {
-            // Assumes Vec's backing memory is at least 4-bytes aligned.
-            unsafe {
-                Some(apply_mask_aligned32(&mut self.payload, &mask))
-            }
+            Some(apply_mask(&mut self.payload, &mask))
         });
         self.mask = None;
     }
@@ -471,10 +436,7 @@ impl Frame {
 
         if self.is_masked() {
             let mask = self.mask.take().unwrap();
-            // Assumes Vec's backing memory is at least 4-bytes aligned.
-            unsafe {
-                apply_mask_aligned32(&mut self.payload, &mask);
-            }
+            apply_mask(&mut self.payload, &mask);
             try!(w.write(&mask));
         }
 
@@ -528,24 +490,6 @@ mod tests {
     use super::super::coding::{OpCode, Data};
     use std::io::Cursor;
 
-    #[test]
-    fn test_apply_mask() {
-        let mask = [
-            0x6d, 0xb6, 0xb2, 0x80,
-        ];
-        let unmasked = vec![
-            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00,
-        ];
-
-        let mut masked = unmasked.clone();
-        apply_mask(&mut masked, &mask);
-
-        let mut masked_aligned = unmasked.clone();
-        unsafe { apply_mask_aligned32(&mut masked_aligned, &mask) };
-
-        assert_eq!(masked, masked_aligned);
-    }
-
     #[test]
     fn parse() {
         let mut raw: Cursor<Vec<u8>> = Cursor::new(vec![
diff --git a/src/protocol/frame/mask.rs b/src/protocol/frame/mask.rs
new file mode 100644
index 0000000..9e5c984
--- /dev/null
+++ b/src/protocol/frame/mask.rs
@@ -0,0 +1,71 @@
+use std::mem::transmute;
+use rand;
+
+/// Generate a random frame mask.
+#[inline]
+pub fn generate_mask() -> [u8; 4] {
+    rand::random()
+}
+
+/// Mask/unmask a frame.
+#[inline]
+pub fn apply_mask(buf: &mut [u8], mask: &[u8; 4]) {
+    // Assume that the memory is 32-bytes aligned.
+    // FIXME: this assumption is not correct.
+    unsafe { apply_mask_aligned32(buf, mask) }
+}
+
+/// A safe unoptimized mask application.
+#[inline]
+fn apply_mask_fallback(buf: &mut [u8], mask: &[u8; 4]) {
+    for (i, byte) in buf.iter_mut().enumerate() {
+        *byte ^= mask[i & 3];
+    }
+}
+
+/// Faster version of `apply_mask()` which operates on 4-byte blocks.
+///
+/// Safety: `buf` must be at least 4-bytes aligned.
+#[inline]
+unsafe fn apply_mask_aligned32(buf: &mut [u8], mask: &[u8; 4]) {
+    debug_assert_eq!(buf.as_ptr() as usize % 4, 0);
+
+    let mask_u32 = transmute(*mask);
+
+    let mut ptr = buf.as_mut_ptr() as *mut u32;
+    for _ in 0..(buf.len() / 4) {
+        *ptr ^= mask_u32;
+        ptr = ptr.offset(1);
+    }
+
+    // Possible last block with less than 4 bytes.
+    let last_block_start = buf.len() & !3;
+    let last_block = &mut buf[last_block_start..];
+    apply_mask_fallback(last_block, mask);
+}
+
+#[cfg(test)]
+mod tests {
+
+    use super::{apply_mask_fallback, apply_mask_aligned32};
+
+    #[test]
+    fn test_apply_mask() {
+        let mask = [
+            0x6d, 0xb6, 0xb2, 0x80,
+        ];
+        let unmasked = vec![
+            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00,
+        ];
+
+        let mut masked = unmasked.clone();
+        apply_mask_fallback(&mut masked, &mask);
+
+        let mut masked_aligned = unmasked.clone();
+        unsafe { apply_mask_aligned32(&mut masked_aligned, &mask) };
+
+        assert_eq!(masked, masked_aligned);
+    }
+
+}
+
diff --git a/src/protocol/frame/mod.rs b/src/protocol/frame/mod.rs
index 8973db7..4b6a711 100644
--- a/src/protocol/frame/mod.rs
+++ b/src/protocol/frame/mod.rs
@@ -3,6 +3,7 @@
 pub mod coding;
 
 mod frame;
+mod mask;
 
 pub use self::frame::Frame;
 pub use self::frame::CloseFrame;