author    | Christian Duerr <contact@christianduerr.com> | 2021-07-03 20:31:50 +0000
committer | GitHub <noreply@github.com>                  | 2021-07-03 20:31:50 +0000
commit    | c6ed855bfa036ffe5e3e3ea12819db6a1a25d4e1 (patch)
tree      | 32e97cf1b3c20c107e1b78a1e49f92d86d9b9b15
parent    | 4dd70ba3a155d1eebe38bcd5a5c4c03ca09f2fea (diff)
download  | alacritty-c6ed855bfa036ffe5e3e3ea12819db6a1a25d4e1.tar.gz
          | alacritty-c6ed855bfa036ffe5e3e3ea12819db6a1a25d4e1.zip
Add buffer for PTY reads during terminal lock
Before this patch, Alacritty's PTY reader would read the PTY into a buffer and then wait to acquire the terminal lock before processing that data. Since acquiring the terminal lock could take some time, the PTY could fill up while the reader thread sat idle waiting for it.

As a solution, this patch keeps reading into the buffer while the renderer holds the terminal lock and processes all buffered data as soon as the lock is released.

This has roughly halved the runtime of a simple `cat` benchmark from ~9 to ~4 seconds with the font size set to `1`. With "normal" grid densities, the patch does not appear to make a significant performance difference in either direction.

A possible future memory optimization would be to reuse this buffer for synchronized updates, but since those currently use a dynamic buffer and the change would add some clutter, it has not been implemented in this patch.
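
The control flow described above is easier to see in isolation. Below is a minimal sketch of the buffered-read pattern, using only `std::sync::Mutex` instead of Alacritty's `FairMutex` (so the lock-reservation lease is omitted); `Terminal`, `bytes_parsed`, `pty_read`, and the slice-based byte source are illustrative stand-ins rather than the crate's real types:

```rust
use std::io::Read;
use std::sync::Mutex;

/// Buffer cap before the reader blocks on the lock (1 MiB, as in the patch).
const READ_BUFFER_SIZE: usize = 0x10_0000;

/// Stand-in for the real terminal state.
struct Terminal {
    bytes_parsed: usize,
}

fn pty_read(terminal: &Mutex<Terminal>, mut pty: impl Read) -> std::io::Result<()> {
    let mut buf = vec![0u8; READ_BUFFER_SIZE];
    let mut unprocessed = 0;

    loop {
        // Keep reading into the buffer even while another thread holds the lock.
        match pty.read(&mut buf[unprocessed..]) {
            Ok(0) if unprocessed == 0 => break, // Nothing left to read or parse.
            Ok(got) => unprocessed += got,
            Err(err) => return Err(err),
        }

        // Only process when the lock is free, unless the buffer has filled up.
        let mut term = match terminal.try_lock() {
            Ok(term) => term,
            Err(_) if unprocessed >= READ_BUFFER_SIZE => {
                terminal.lock().unwrap() // Buffer full: block until the lock frees.
            },
            Err(_) => continue, // Lock busy: go read more instead of waiting.
        };

        // "Parse" everything buffered so far, then reset the buffer.
        term.bytes_parsed += unprocessed;
        unprocessed = 0;
    }

    Ok(())
}

fn main() -> std::io::Result<()> {
    let terminal = Mutex::new(Terminal { bytes_parsed: 0 });
    pty_read(&terminal, &b"some PTY output"[..])?;
    println!("parsed {} bytes", terminal.lock().unwrap().bytes_parsed);
    Ok(())
}
```

The key difference from the old approach is that a busy lock no longer stalls the reader: the `Err(_) => continue` arm sends it back to `read`, so the kernel's PTY buffer keeps draining while the renderer works.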
-rw-r--r-- | CHANGELOG.md                         |  1
-rw-r--r-- | alacritty_terminal/src/event_loop.rs | 79
-rw-r--r-- | alacritty_terminal/src/sync.rs       | 18
3 files changed, 62 insertions, 36 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 373530ff..ee999003 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -17,6 +17,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 - Regression in rendering performance with dense grids since 0.6.0
 - Crash/Freezes with partially visible fullwidth characters due to alt screen resize
 - Incorrect vi cursor position after invoking `ScrollPageHalfUp` action
+- Slow PTY read performance with extremely dense grids
 
 ## 0.8.0
 
diff --git a/alacritty_terminal/src/event_loop.rs b/alacritty_terminal/src/event_loop.rs
index 098ee896..1a6ee8f3 100644
--- a/alacritty_terminal/src/event_loop.rs
+++ b/alacritty_terminal/src/event_loop.rs
@@ -22,8 +22,11 @@ use crate::term::{SizeInfo, Term};
 use crate::thread;
 use crate::tty;
 
-/// Max bytes to read from the PTY.
-const MAX_READ: usize = u16::max_value() as usize;
+/// Max bytes to read from the PTY before forced terminal synchronization.
+const READ_BUFFER_SIZE: usize = 0x10_0000;
+
+/// Max bytes to read from the PTY while the terminal is locked.
+const MAX_LOCKED_READ: usize = u16::max_value() as usize;
 
 /// Messages that may be sent to the `EventLoop`.
 #[derive(Debug)]
@@ -213,48 +216,52 @@ where
     where
         X: Write,
     {
+        let mut unprocessed = 0;
         let mut processed = 0;
-        let mut terminal = None;
 
-        loop {
-            match self.pty.reader().read(buf) {
-                Ok(0) => break,
-                Ok(got) => {
-                    // Record bytes read; used to limit time spent in pty_read.
-                    processed += got;
-
-                    // Send a copy of bytes read to a subscriber. Used for
-                    // example with ref test recording.
-                    writer = writer.map(|w| {
-                        w.write_all(&buf[..got]).unwrap();
-                        w
-                    });
-
-                    // Get reference to terminal. Lock is acquired on initial
-                    // iteration and held until there's no bytes left to parse
-                    // or we've reached `MAX_READ`.
-                    if terminal.is_none() {
-                        terminal = Some(self.terminal.lock());
-                    }
-                    let terminal = terminal.as_mut().unwrap();
-
-                    // Run the parser.
-                    for byte in &buf[..got] {
-                        state.parser.advance(&mut **terminal, *byte);
-                    }
+        // Reserve the next terminal lock for PTY reading.
+        let _terminal_lease = Some(self.terminal.lease());
 
-                    // Exit if we've processed enough bytes.
-                    if processed > MAX_READ {
-                        break;
-                    }
-                },
+        loop {
+            // Read from the PTY.
+            match self.pty.reader().read(&mut buf[unprocessed..]) {
+                Ok(got) => unprocessed += got,
                 Err(err) => match err.kind() {
                     ErrorKind::Interrupted | ErrorKind::WouldBlock => {
-                        break;
+                        // Go back to mio if we're caught up on parsing and the PTY would block.
+                        if unprocessed == 0 {
+                            break;
+                        }
                     },
                     _ => return Err(err),
                 },
             }
+
+            // Attempt to lock the terminal.
+            let mut terminal = match self.terminal.try_lock_unfair() {
+                // Force block if we are at the buffer size limit.
+                None if unprocessed >= READ_BUFFER_SIZE => self.terminal.lock_unfair(),
+                None => continue,
+                Some(terminal) => terminal,
+            };
+
+            // Write a copy of the bytes to the ref test file.
+            if let Some(writer) = &mut writer {
+                writer.write_all(&buf[..unprocessed]).unwrap();
+            }
+
+            // Parse the incoming bytes.
+            for byte in &buf[..unprocessed] {
+                state.parser.advance(&mut *terminal, *byte);
+            }
+
+            processed += unprocessed;
+            unprocessed = 0;
+
+            // Assure we're not blocking the terminal too long unnecessarily.
+            if processed >= MAX_LOCKED_READ {
+                break;
+            }
         }
 
         // Queue terminal redraw unless all processed bytes were synchronized.
@@ -300,7 +307,7 @@ where
     pub fn spawn(mut self) -> JoinHandle<(Self, State)> {
         thread::spawn_named("PTY reader", move || {
             let mut state = State::default();
-            let mut buf = [0u8; MAX_READ];
+            let mut buf = [0u8; READ_BUFFER_SIZE];
 
             let mut tokens = (0..).map(Into::into);
 
diff --git a/alacritty_terminal/src/sync.rs b/alacritty_terminal/src/sync.rs
index 43148a78..848bab62 100644
--- a/alacritty_terminal/src/sync.rs
+++ b/alacritty_terminal/src/sync.rs
@@ -21,6 +21,14 @@ impl<T> FairMutex<T> {
         FairMutex { data: Mutex::new(data), next: Mutex::new(()) }
     }
 
+    /// Acquire a lease to reserve the mutex lock.
+    ///
+    /// This will prevent others from acquiring a terminal lock, but block if anyone else is
+    /// already holding a lease.
+    pub fn lease(&self) -> MutexGuard<'_, ()> {
+        self.next.lock()
+    }
+
     /// Lock the mutex.
     pub fn lock(&self) -> MutexGuard<'_, T> {
         // Must bind to a temporary or the lock will be freed before going
@@ -28,4 +36,14 @@ impl<T> FairMutex<T> {
         let _next = self.next.lock();
         self.data.lock()
     }
+
+    /// Unfairly lock the mutex.
+    pub fn lock_unfair(&self) -> MutexGuard<'_, T> {
+        self.data.lock()
+    }
+
+    /// Unfairly try to lock the mutex.
+    pub fn try_lock_unfair(&self) -> Option<MutexGuard<'_, T>> {
+        self.data.try_lock()
+    }
 }
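
To make the new `FairMutex` semantics concrete, here is a toy, self-contained demonstration of how a lease reserves the lock for the reader while fair `lock()` callers queue behind it. It restates `FairMutex` inline so the example compiles with just the `parking_lot` crate (the source of the guard types in the diff); the thread layout and counter are invented for illustration:

```rust
use std::sync::Arc;
use std::thread;

use parking_lot::{Mutex, MutexGuard};

struct FairMutex<T> {
    data: Mutex<T>,  // Protects the actual state (the terminal, in Alacritty).
    next: Mutex<()>, // Ticket lock: whoever holds it gets `data` next.
}

impl<T> FairMutex<T> {
    fn new(data: T) -> Self {
        Self { data: Mutex::new(data), next: Mutex::new(()) }
    }

    /// Fair lock: queue on `next` before taking `data`.
    fn lock(&self) -> MutexGuard<'_, T> {
        let _next = self.next.lock();
        self.data.lock()
    }

    /// Reserve the next `lock()` without holding `data` itself.
    fn lease(&self) -> MutexGuard<'_, ()> {
        self.next.lock()
    }

    /// Grab `data` directly, skipping the queue; intended for the leaseholder.
    fn try_lock_unfair(&self) -> Option<MutexGuard<'_, T>> {
        self.data.try_lock()
    }
}

fn main() {
    let term = Arc::new(FairMutex::new(0u32));

    // "PTY reader": holds a lease for its whole loop, taking and releasing
    // the data lock repeatedly without re-queueing each time.
    let reader = {
        let term = Arc::clone(&term);
        thread::spawn(move || {
            let _lease = term.lease(); // Fair `lock()` callers now queue behind us.
            for _ in 0..3 {
                if let Some(mut data) = term.try_lock_unfair() {
                    *data += 1; // "Parse some bytes."
                }
            }
        })
    };

    reader.join().unwrap();
    // "Renderer": a fair lock succeeds once the lease has been dropped.
    assert_eq!(*term.lock(), 3);
}
```

In the patch itself, the reader holds its lease for at most one `pty_read` call, and `MAX_LOCKED_READ` bounds how much it parses per call, so a renderer waiting in the fair `lock()` is let in regularly between read batches rather than being starved during sustained PTY output.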