refactor(http): remove buffer, use WriteBuf
seanmonstar committed Jan 17, 2017
1 parent 6e4739f commit 1868f85
Showing 6 changed files with 123 additions and 132 deletions.
33 changes: 27 additions & 6 deletions src/http/buf.rs
@@ -85,9 +85,7 @@ impl MemBuf {
trace!("MemBuf::reserve unique access, growing");
unsafe {
let mut vec = &mut *self.buf.get();
vec.reserve(needed);
let new_cap = vec.capacity();
grow_zerofill(vec, new_cap - orig_cap);
grow_zerofill(vec, needed);
}
} else {
// we need to allocate more space, but dont have unique
@@ -139,9 +137,32 @@ impl MemBuf {

 #[inline]
 unsafe fn grow_zerofill(buf: &mut Vec<u8>, additional: usize) {
-    let len = buf.len();
-    buf.set_len(len + additional);
-    ::std::ptr::write_bytes(buf.as_mut_ptr().offset(len as isize), 0, additional);
+    let orig_cap = buf.capacity();
+    buf.reserve(additional);
+    let new_cap = buf.capacity();
+    let reserved = new_cap - orig_cap;
+    let orig_len = buf.len();
+    zero(buf, orig_len, reserved);
+    buf.set_len(orig_len + reserved);
+
+
+    unsafe fn zero(buf: &mut Vec<u8>, offset: usize, len: usize) {
+        assert!(buf.capacity() >= len + offset,
+                "offset of {} with len of {} is bigger than capacity of {}",
+                offset, len, buf.capacity());
+        ptr::write_bytes(buf.as_mut_ptr().offset(offset as isize), 0, len);
+    }
 }
 
+#[test]
+fn test_grow_zerofill() {
+    for init in 0..100 {
+        for reserve in (0..100).rev() {
+            let mut vec = vec![0; init];
+            unsafe { grow_zerofill(&mut vec, reserve) }
+            assert_eq!(vec.len(), vec.capacity());
+        }
+    }
+}
+
 impl fmt::Debug for MemBuf {
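For context, a minimal standalone sketch of the contract the rewritten grow_zerofill (and its new test) establish: after growing, the vector's length equals its capacity and every newly reserved byte reads as zero. This is a simplified restatement for illustration, not code from the commit; the demo main and the starting vector are hypothetical.

use std::ptr;

// Grow by at least `additional`, zero the newly reserved region,
// and expose it through `len` (sketch of the post-commit behavior).
unsafe fn grow_zerofill(buf: &mut Vec<u8>, additional: usize) {
    let orig_cap = buf.capacity();
    buf.reserve(additional);
    // `reserve` may round up, so zero everything that was actually reserved.
    let reserved = buf.capacity() - orig_cap;
    let orig_len = buf.len();
    ptr::write_bytes(buf.as_mut_ptr().offset(orig_len as isize), 0, reserved);
    buf.set_len(orig_len + reserved);
}

fn main() {
    // vec![1u8; 10] starts with len == capacity == 10.
    let mut vec = vec![1u8; 10];
    unsafe { grow_zerofill(&mut vec, 100) };
    assert_eq!(vec.len(), vec.capacity());
    assert!(vec[10..].iter().all(|&b| b == 0));
}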
107 changes: 0 additions & 107 deletions src/http/buffer.rs

This file was deleted.

11 changes: 7 additions & 4 deletions src/http/conn.rs
@@ -610,20 +610,22 @@ mod tests {

     #[test]
     fn test_conn_body_write_length() {
+        extern crate pretty_env_logger;
         use ::futures::Future;
+        let _ = pretty_env_logger::init();
         let _: Result<(), ()> = ::futures::lazy(|| {
             let io = AsyncIo::new_buf(vec![], 0);
             let mut conn = Conn::<_, ServerTransaction>::new(io, Default::default());
-            let max = ::http::buffer::MAX_BUFFER_SIZE + 4096;
-            conn.state.writing = Writing::Body(Encoder::length(max as u64), None);
+            let max = ::http::io::MAX_BUFFER_SIZE + 4096;
+            conn.state.writing = Writing::Body(Encoder::length((max * 2) as u64), None);
 
             assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'a'; 1024 * 4].into()) }).unwrap().is_ready());
             match conn.state.writing {
                 Writing::Body(_, None) => {},
                 _ => panic!("writing did not queue chunk: {:?}", conn.state.writing),
             }
 
-            assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; max - 8192].into()) }).unwrap().is_ready());
+            assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'b'; max].into()) }).unwrap().is_ready());
 
             match conn.state.writing {
                 Writing::Body(_, Some(_)) => {},
@@ -636,7 +638,8 @@ mod tests {
             assert!(conn.poll_complete().unwrap().is_not_ready());
             conn.io.io_mut().block_in(1024 * 3);
             assert!(conn.poll_complete().unwrap().is_not_ready());
-            conn.io.io_mut().block_in(max);
+            conn.io.io_mut().block_in(max * 2);
+            assert!(conn.poll_complete().unwrap().is_not_ready());
             assert!(conn.poll_complete().unwrap().is_ready());
 
             assert!(conn.start_send(Frame::Body { chunk: Some(vec![b'c'; 1024 * 4].into()) }).unwrap().is_ready());
102 changes: 88 additions & 14 deletions src/http/io.rs
@@ -1,20 +1,21 @@
 use std::cmp;
 use std::fmt;
 use std::io::{self, Read, Write};
+use std::ptr;
 
 use futures::Async;
 use tokio::io::Io;
 
 use http::{Http1Transaction, h1, MessageHead, ParseResult};
 use http::buf::{MemBuf, MemSlice};
-use http::buffer::Buffer;
 
 const INIT_BUFFER_SIZE: usize = 4096;
+pub const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;
 
 pub struct Buffered<T> {
     io: T,
     read_buf: MemBuf,
-    write_buf: Buffer,
+    write_buf: WriteBuf,
 }
 
 impl<T> fmt::Debug for Buffered<T> {
@@ -31,7 +32,7 @@ impl<T: Io> Buffered<T> {
         Buffered {
             io: io,
             read_buf: MemBuf::new(),
-            write_buf: Buffer::new(),
+            write_buf: WriteBuf::new(),
         }
     }

@@ -91,7 +92,7 @@ impl<T: Io> Buffered<T> {
     }
 
     pub fn buffer<B: AsRef<[u8]>>(&mut self, buf: B) {
-        self.write_buf.write(buf.as_ref());
+        self.write_buf.buffer(buf.as_ref());
     }
 
     #[cfg(test)]
@@ -121,12 +122,12 @@ impl<T: Read> Read for Buffered<T> {

 impl<T: Write> Write for Buffered<T> {
     fn write(&mut self, data: &[u8]) -> io::Result<usize> {
-        Ok(self.write_buf.write(data))
+        Ok(self.write_buf.buffer(data))
     }
 
     fn flush(&mut self) -> io::Result<()> {
         self.write_buf.write_into(&mut self.io).and_then(|_n| {
-            if self.write_buf.is_empty() {
+            if self.write_buf.remaining() == 0 {
                 Ok(())
             } else {
                 Err(io::Error::new(io::ErrorKind::WouldBlock, "wouldblock"))
@@ -177,14 +178,20 @@ impl<T: AsRef<[u8]>> Cursor<T> {
         self.pos >= self.bytes.as_ref().len()
     }
 
-    /*
     pub fn write_to<W: Write>(&mut self, dst: &mut W) -> io::Result<usize> {
-        dst.write(&self.bytes.as_ref()[self.pos..]).map(|n| {
-            self.pos += n;
-            n
-        })
+        if self.remaining() == 0 {
+            Ok(0)
+        } else {
+            dst.write(&self.bytes.as_ref()[self.pos..]).map(|n| {
+                self.pos += n;
+                n
+            })
+        }
     }
+
+    fn remaining(&self) -> usize {
+        self.bytes.as_ref().len() - self.pos
+    }
-    */
 
     #[inline]
     pub fn buf(&self) -> &[u8] {
@@ -201,8 +208,15 @@ impl<T: AsRef<[u8]>> Cursor<T> {
 impl<T: AsRef<[u8]>> fmt::Debug for Cursor<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let bytes = self.buf();
-        let reasonable_max = ::std::cmp::min(bytes.len(), 32);
-        write!(f, "Cursor({:?})", &bytes[..reasonable_max])
+        if bytes.len() > 32 {
+            try!(f.write_str("Cursor(["));
+            for byte in &bytes[..32] {
+                try!(write!(f, "{:?}, ", byte));
+            }
+            write!(f, "... {}])", bytes.len())
+        } else {
+            write!(f, "Cursor({:?})", &bytes)
+        }
     }
 }

@@ -230,6 +244,66 @@ impl<T: Write> AtomicWrite for T {
 }
 //}
 
+// an internal buffer to collect writes before flushes
+#[derive(Debug)]
+struct WriteBuf(Cursor<Vec<u8>>);
+
+impl WriteBuf {
+    fn new() -> WriteBuf {
+        WriteBuf(Cursor::new(Vec::new()))
+    }
+
+    fn write_into<W: Write>(&mut self, w: &mut W) -> io::Result<usize> {
+        self.0.write_to(w)
+    }
+
+    fn buffer(&mut self, data: &[u8]) -> usize {
+        trace!("WriteBuf::buffer() len = {:?}", data.len());
+        self.maybe_reset();
+        self.maybe_reserve(data.len());
+        let mut vec = &mut self.0.bytes;
+        let len = cmp::min(vec.capacity() - vec.len(), data.len());
+        assert!(vec.capacity() - vec.len() >= len);
+        unsafe {
+            // in rust 1.9, we could use slice::copy_from_slice
+            ptr::copy(
+                data.as_ptr(),
+                vec.as_mut_ptr().offset(vec.len() as isize),
+                len
+            );
+            let new_len = vec.len() + len;
+            vec.set_len(new_len);
+        }
+        len
+    }
+
+    fn remaining(&self) -> usize {
+        self.0.remaining()
+    }
+
+    #[inline]
+    fn maybe_reserve(&mut self, needed: usize) {
+        let mut vec = &mut self.0.bytes;
+        let cap = vec.capacity();
+        if cap == 0 {
+            let init = cmp::max(INIT_BUFFER_SIZE, needed);
+            trace!("WriteBuf reserving initial {}", init);
+            vec.reserve(init);
+        } else if cap < MAX_BUFFER_SIZE {
+            vec.reserve(cmp::min(needed, MAX_BUFFER_SIZE - cap));
+            trace!("WriteBuf reserved {}", vec.capacity() - cap);
+        }
+    }
+
+    fn maybe_reset(&mut self) {
+        if self.0.pos != 0 && self.0.remaining() == 0 {
+            self.0.pos = 0;
+            unsafe {
+                self.0.bytes.set_len(0);
+            }
+        }
+    }
+}
 
 #[test]
 fn test_iobuf_write_empty_slice() {
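For orientation, a rough self-contained sketch of the write path that WriteBuf provides: buffer() queues bytes into a Vec that may hold up to MAX_BUFFER_SIZE, write_into() drains the unflushed tail through a cursor position, and a fully flushed buffer is rewound so it can be reused. This simplification uses safe extend_from_slice and clear() in place of the commit's ptr::copy/set_len, folds maybe_reserve and maybe_reset into buffer(), and the main function is a hypothetical usage example; the constants mirror the ones above.

use std::io::{self, Write};

const INIT_BUFFER_SIZE: usize = 4096;
const MAX_BUFFER_SIZE: usize = 8192 + 4096 * 100;

// Simplified stand-ins for Cursor<Vec<u8>> and WriteBuf in src/http/io.rs.
struct Cursor { bytes: Vec<u8>, pos: usize }
struct WriteBuf(Cursor);

impl WriteBuf {
    fn new() -> WriteBuf {
        WriteBuf(Cursor { bytes: Vec::new(), pos: 0 })
    }

    // Queue as much of `data` as fits under MAX_BUFFER_SIZE; returns bytes taken.
    fn buffer(&mut self, data: &[u8]) -> usize {
        if self.0.pos != 0 && self.remaining() == 0 {
            // everything already written out: rewind instead of growing forever
            self.0.pos = 0;
            self.0.bytes.clear();
        }
        if self.0.bytes.capacity() == 0 {
            self.0.bytes.reserve(INIT_BUFFER_SIZE.max(data.len().min(MAX_BUFFER_SIZE)));
        }
        // never let the buffered bytes exceed MAX_BUFFER_SIZE
        let take = data.len().min(MAX_BUFFER_SIZE.saturating_sub(self.0.bytes.len()));
        self.0.bytes.extend_from_slice(&data[..take]);
        take
    }

    // Write the unflushed tail into `w`, advancing the cursor position.
    fn write_into<W: Write>(&mut self, w: &mut W) -> io::Result<usize> {
        if self.remaining() == 0 {
            return Ok(0);
        }
        let n = w.write(&self.0.bytes[self.0.pos..])?;
        self.0.pos += n;
        Ok(n)
    }

    fn remaining(&self) -> usize {
        self.0.bytes.len() - self.0.pos
    }
}

fn main() -> io::Result<()> {
    let mut buf = WriteBuf::new();
    assert_eq!(buf.buffer(b"hello "), 6);
    assert_eq!(buf.buffer(b"world"), 5);
    let mut out = Vec::new();
    buf.write_into(&mut out)?;
    assert_eq!(out, b"hello world".to_vec());
    assert_eq!(buf.remaining(), 0);
    Ok(())
}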
1 change: 0 additions & 1 deletion src/http/mod.rs
@@ -16,7 +16,6 @@ pub use self::chunk::Chunk;

 mod body;
 mod buf;
-mod buffer;
 mod chunk;
 mod conn;
 mod io;
1 change: 1 addition & 0 deletions src/mock.rs
@@ -105,6 +105,7 @@ impl<T: Write> Write for AsyncIo<T> {
         } else if self.bytes_until_block == 0 {
             Err(io::Error::new(io::ErrorKind::WouldBlock, "mock block"))
         } else {
+            trace!("AsyncIo::write() block_in = {}, data.len() = {}", self.bytes_until_block, data.len());
             let n = cmp::min(self.bytes_until_block, data.len());
             let n = try!(self.inner.write(&data[..n]));
             self.bytes_until_block -= n;
