Compare commits

...

6 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Sebastian Dröge | 04a9a07a7d | Fix code style | 1 year ago |
| Alex Butler | b3376f3dd9 | Prepare update to tungstenite 0.20 | 1 year ago |
| Alex Butler | 17866906ea | Do not flush on each poll_ready call | 1 year ago |
| Benoît CORTIER | bee1b5595a | Gracefully handle invalid native root certificates | 1 year ago |
| Daniel Abramov | fde3ff9491 | Update README to include performance note | 1 year ago |
| Daniel Abramov | 56917cd21e | Remove boilerplate in `poll_flush()` | 1 year ago |
1. Cargo.toml (2 changed lines)
2. README.md (15 changed lines)
3. src/lib.rs (36 changed lines)
4. src/tokio/rustls.rs (12 changed lines)

@@ -41,7 +41,7 @@ futures-io = { version = "0.3", default-features = false, features = ["std"] }
 pin-project-lite = "0.2"

 [dependencies.tungstenite]
-version = "0.19"
+version = "0.20"
 default-features = false

 [dependencies.async-std]

@@ -65,9 +65,24 @@ want to process a stream of bytes without regard to those boundaries, try
 [`ws_stream_tungstenite`](https://crates.io/crates/ws_stream_tungstenite),
 which builds upon this crate.
+
+## Is it performant?
+
+In essence, `async-tungstenite` is a wrapper around `tungstenite`, so its performance is capped by the
+performance of `tungstenite`. `tungstenite` has decent performance (it has been used in production for
+real-time communication software, video conferencing, etc.), but it is not the fastest WebSocket library
+available at the time of writing.
+
+If performance is of paramount importance to you (especially if you send **large messages**), you may want
+to look at libraries that were designed with performance as a primary goal, or file a PR against
+`tungstenite` to improve it!
+
+We are aware of changes that both `tungstenite` and `async-tungstenite` need in order to close the ~30%
+performance gap between `tungstenite` and faster libraries such as `fastwebsockets`, but we have not worked
+on them yet, as they were not required for the use case the original authors designed the library for. Over
+the past years we have merged several performance improvements submitted by the awesome Rust community! For
+a quick summary of the pending performance problems and improvements, see
+[the comment](https://github.com/snapview/tungstenite-rs/issues/352#issuecomment-1537488614).
+
 ## tokio-tungstenite

 Originally this crate was created as a fork of
 [tokio-tungstenite](https://github.com/snapview/tokio-tungstenite) and ported
 to the traits of the [`futures`](https://crates.io/crates/futures) crate.
 Integration into async-std, tokio and gio was added on top of that.

@@ -334,11 +334,11 @@ where
         match futures_util::ready!(self.with_context(Some((ContextWaker::Read, cx)), |s| {
             #[cfg(feature = "verbose-logging")]
             trace!(
-                "{}:{} Stream.with_context poll_next -> read_message()",
+                "{}:{} Stream.with_context poll_next -> read()",
                 file!(),
                 line!()
             );
-            cvt(s.read_message())
+            cvt(s.read())
         })) {
             Ok(v) => Poll::Ready(Some(Ok(v))),
             Err(e) => {
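On the consumer side nothing changes here: the rename from `read_message()` to `read()` stays inside the `Stream` implementation. A hypothetical downstream sketch (the `echo_text` helper and its bounds are illustrative, not part of this diff):

```rust
use async_tungstenite::tungstenite::{Error as WsError, Message};
use futures_util::{Stream, StreamExt};

/// Reads messages from any WebSocket stream and prints the text frames.
async fn echo_text<S>(mut ws: S) -> Result<(), WsError>
where
    S: Stream<Item = Result<Message, WsError>> + Unpin,
{
    // `StreamExt::next` drives `poll_next`, which now forwards to tungstenite's `read()`.
    while let Some(msg) = ws.next().await {
        if let Message::Text(text) = msg? {
            println!("received: {text}");
        }
    }
    Ok(())
}
```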
@@ -368,12 +368,12 @@ where
 {
     type Error = WsError;

-    fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        (*self).with_context(Some((ContextWaker::Write, cx)), |s| cvt(s.write_pending()))
+    fn poll_ready(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        Poll::Ready(Ok(()))
     }

     fn start_send(mut self: Pin<&mut Self>, item: Message) -> Result<(), Self::Error> {
-        match (*self).with_context(None, |s| s.write_message(item)) {
+        match (*self).with_context(None, |s| s.write(item)) {
             Ok(()) => Ok(()),
             Err(WsError::Io(err)) if err.kind() == std::io::ErrorKind::WouldBlock => {
                 // the message was accepted and queued
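This is the core of "Do not flush on each poll_ready call": `poll_ready` now always reports readiness, and `start_send` only queues the message via tungstenite's `write()`, so nothing reaches the socket until the sink is flushed. A hypothetical downstream sketch of batching under that behavior (`send_batch` is illustrative, not part of this diff):

```rust
use async_tungstenite::tungstenite::Message;
use futures_util::{Sink, SinkExt};

/// Queues several messages and writes them to the socket with a single flush.
async fn send_batch<S>(ws: &mut S, lines: Vec<String>) -> Result<(), S::Error>
where
    S: Sink<Message> + Unpin,
{
    for line in lines {
        // `feed` goes through poll_ready + start_send only: the frame is
        // buffered by tungstenite but not yet written out.
        ws.feed(Message::Text(line)).await?;
    }
    // A single `flush` drives poll_flush and pushes the whole batch out.
    ws.flush().await?;
    Ok(())
}
```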
@@ -388,25 +388,21 @@ where
     }

     fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        match (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.write_pending()) {
-            Ok(()) => Poll::Ready(Ok(())),
-            Err(WsError::ConnectionClosed) => {
-                // WebSocket is closing and there is nothing to send anymore.
-                // Not an failure, the flush operation is a success.
-                Poll::Ready(Ok(()))
-            }
-            Err(WsError::Io(ref e)) if e.kind() == std::io::ErrorKind::WouldBlock => {
-                trace!("WouldBlock");
-                Poll::Pending
-            }
-            Err(e) => Poll::Ready(Err(e)),
-        }
+        (*self)
+            .with_context(Some((ContextWaker::Write, cx)), |s| cvt(s.flush()))
+            .map(|r| {
+                // WebSocket connection has just been closed. Flushing completed, not an error.
+                match r {
+                    Err(WsError::ConnectionClosed) => Ok(()),
+                    other => other,
+                }
+            })
     }

     fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         let res = if self.closing {
-            // After queueing it, we call `write_pending` to drive the close handshake to completion.
-            (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.write_pending())
+            // After queueing it, we call `flush` to drive the close handshake to completion.
+            (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.flush())
         } else {
             (*self).with_context(Some((ContextWaker::Write, cx)), |s| s.close(None))
         };
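Per-message sending is unaffected in practice: `SinkExt::send` still flushes after each message because `poll_flush` now calls tungstenite's `flush()`, and `SinkExt::close` drives the closing handshake via `poll_close`. A hypothetical sketch (`say_goodbye` is illustrative, not part of this diff):

```rust
use async_tungstenite::tungstenite::Message;
use futures_util::{Sink, SinkExt};

/// Sends one final message and performs the WebSocket closing handshake.
async fn say_goodbye<S>(ws: &mut S) -> Result<(), S::Error>
where
    S: Sink<Message> + Unpin,
{
    // `send` = poll_ready + start_send + poll_flush, so this message hits the wire immediately.
    ws.send(Message::Text("goodbye".into())).await?;
    // `close` queues a Close frame and keeps flushing until the handshake completes.
    ws.close().await?;
    Ok(())
}
```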

@@ -49,11 +49,13 @@ where
 {
-    use real_tokio_rustls::rustls::Certificate;
-    for cert in rustls_native_certs::load_native_certs()? {
-        root_store
-            .add(&Certificate(cert.0))
-            .map_err(TlsError::Rustls)?;
-    }
+    let native_certs = rustls_native_certs::load_native_certs()?;
+    let der_certs: Vec<Vec<u8>> =
+        native_certs.into_iter().map(|cert| cert.0).collect();
+    let total_number = der_certs.len();
+    let (number_added, number_ignored) =
+        root_store.add_parsable_certificates(&der_certs);
+    log::debug!("Added {number_added}/{total_number} native root certificates (ignored {number_ignored})");
 }

 #[cfg(all(
     feature = "tokio-rustls-webpki-roots",
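The change above replaces the fail-fast `add()` loop with `add_parsable_certificates`, which skips certificates it cannot parse instead of aborting connector setup. A standalone sketch of the same pattern, assuming a rustls version whose `RootCertStore::add_parsable_certificates` accepts DER bytes as in the hunk above (the `native_roots` helper is illustrative, not part of this diff):

```rust
// Standalone sketch; inside this crate the same types are reachable through
// `real_tokio_rustls::rustls`.
use rustls::RootCertStore;

/// Builds a root store from the OS trust store, ignoring unparsable certificates.
fn native_roots() -> std::io::Result<RootCertStore> {
    let mut root_store = RootCertStore::empty();
    // DER-encoded certificates provided by the operating system.
    let der_certs: Vec<Vec<u8>> = rustls_native_certs::load_native_certs()?
        .into_iter()
        .map(|cert| cert.0)
        .collect();
    // Invalid entries are counted and skipped rather than turned into an error.
    let (added, ignored) = root_store.add_parsable_certificates(&der_certs);
    log::debug!("Added {added} native root certificates (ignored {ignored})");
    Ok(root_store)
}
```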
