diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml new file mode 100644 index 0000000..3dc33ae --- /dev/null +++ b/.github/workflows/main.yml @@ -0,0 +1,58 @@ +name: CI +on: [push, pull_request] + +jobs: + test: + name: Run tests + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Update rustup + run: rustup self update + - name: Install Rust + run: | + rustup set profile minimal + rustup toolchain install 1.52 -c rust-docs + rustup default 1.52 + - name: Install mdbook + run: | + mkdir bin + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.5/mdbook-v0.4.5-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=bin + echo "$(pwd)/bin" >> ${GITHUB_PATH} + - name: Report versions + run: | + rustup --version + rustc -Vv + mdbook --version + - name: Run tests + run: mdbook test + lint: + name: Run lints + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Update rustup + run: rustup self update + - name: Install Rust + run: | + rustup set profile minimal + rustup toolchain install nightly -c rust-docs + rustup override set nightly + - name: Install mdbook + run: | + mkdir bin + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.5/mdbook-v0.4.5-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=bin + echo "$(pwd)/bin" >> ${GITHUB_PATH} + - name: Report versions + run: | + rustup --version + rustc -Vv + mdbook --version + - name: Spellcheck + run: bash ci/spellcheck.sh list + - name: Lint for local file paths + run: | + mdbook build + cargo run --bin lfp src + - name: Validate references + run: bash ci/validate.sh diff --git a/Cargo.lock b/Cargo.lock new file mode 100644 index 0000000..b3469e3 --- /dev/null +++ b/Cargo.lock @@ -0,0 +1,275 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "adler32" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "aho-corasick" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "crc32fast" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "docopt" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "strsim 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "filetime" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "flate2" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", + "crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + 
+[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "libc" +version = "0.2.66" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "memchr" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "miniz_oxide" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "proc-macro2" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "quote" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "regex" +version = "1.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "regex-syntax" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "rust-book" +version = "0.0.1" +dependencies = [ + "docopt 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 1.4.0 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", + "tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)", + "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "serde_derive" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "strsim" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "syn" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)", + "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "tar" +version = "0.4.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "filetime 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", + "redox_syscall 0.1.56 
(registry+https://github.com/rust-lang/crates.io-index)", + "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "walkdir" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "winapi-util" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + +[[package]] +name = "xattr" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[metadata] +"checksum adler32 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = 
"5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d" +"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +"checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1" +"checksum docopt 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7f525a586d310c87df72ebcd98009e57f1cc030c8c268305287a476beb653969" +"checksum filetime 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd7380b54ced79dda72ecc35cc4fbbd1da6bba54afaa37e96fd1c2a308cd469" +"checksum flate2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" +"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" +"checksum miniz_oxide 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6f3f74f726ae935c3f514300cc6773a0c9492abc5e972d42ba0c0ebb88757625" +"checksum proc-macro2 1.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" +"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" +"checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87" +"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90" +"checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +"checksum serde 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +"checksum serde_derive 1.0.104 (registry+https://github.com/rust-lang/crates.io-index)" = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +"checksum strsim 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c" +"checksum syn 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)" = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +"checksum tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)" = "b3196bfbffbba3e57481b6ea32249fbaf590396a52505a2615adbb79d9d826d3" +"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +"checksum walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "777182bc735b6424e1a57516d35ed72cb8019d85c8c9bf536dccb3445c1a2f7d" +"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = 
"ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +"checksum winapi-util 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "244c3741f4240ef46274860397c7c74e50eb23624996930e484c16679633a54c" diff --git a/Cargo.toml b/Cargo.toml new file mode 100644 index 0000000..19b2036 --- /dev/null +++ b/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "rust-book" +version = "0.0.1" +authors = ["Steve Klabnik "] +description = "The Rust Book" +edition = "2018" + +[[bin]] +name = "concat_chapters" +path = "tools/src/bin/concat_chapters.rs" + +[[bin]] +name = "convert_quotes" +path = "tools/src/bin/convert_quotes.rs" + +[[bin]] +name = "lfp" +path = "tools/src/bin/lfp.rs" + +[[bin]] +name = "link2print" +path = "tools/src/bin/link2print.rs" + +[[bin]] +name = "release_listings" +path = "tools/src/bin/release_listings.rs" + +[[bin]] +name = "remove_hidden_lines" +path = "tools/src/bin/remove_hidden_lines.rs" + +[[bin]] +name = "remove_links" +path = "tools/src/bin/remove_links.rs" + +[[bin]] +name = "remove_markup" +path = "tools/src/bin/remove_markup.rs" + +[dependencies] +walkdir = "2.3.1" +docopt = "1.1.0" +serde = "1.0" +regex = "1.3.3" +lazy_static = "1.4.0" +flate2 = "1.0.13" +tar = "0.4.26" diff --git a/ci/dictionary.txt b/ci/dictionary.txt new file mode 100644 index 0000000..b4dbfa6 --- /dev/null +++ b/ci/dictionary.txt @@ -0,0 +1,555 @@ +personal_ws-1.1 en 0 utf-8 +abcabcabc +abcd +abcdefghijklmnopqrstuvwxyz +adaptor +adaptors +AddAssign +Addr +afdc +aggregator +AGraph +aliasability +alignof +alloc +allocator +Amir +anotherusername +APIs +app's +aren +args +ArgumentV +associativity +async +atomics +attr +autocompletion 
+AveragedCollection +backend +backported +backtrace +backtraces +BACKTRACE +Backtraces +Baz's +benchmarking +bioinformatics +bitand +BitAnd +BitAndAssign +bitor +BitOr +BitOrAssign +bitwise +Bitwise +bitxor +BitXor +BitXorAssign +Bjarne +Boehm +bool +Boolean +Booleans +Bors +BorrowMutError +BoxMeUp +BTreeSet +BuildHasher +Cacher +Cagain +callsite +CamelCase +cargodoc +ChangeColor +ChangeColorMessage +charset +choo +chXX +chYY +clippy +clippy's +cmdlet +coercions +combinator +ConcreteType +config +Config +const +consts +constant's +copyeditor +couldn +CPUs +cratesio +CRLF +cryptocurrencies +cryptographic +cryptographically +CStr +CString +ctrl +Ctrl +customizable +CustomSmartPointer +CustomSmartPointers +data's +DataStruct +deallocate +deallocated +deallocating +deallocation +debuginfo +decl +decrementing +deduplicate +deduplicating +deps +deref +Deref +dereference +Dereference +dereferenced +dereferences +dereferencing +DerefMut +DeriveInput +destructor +destructure +destructured +destructures +destructuring +Destructuring +deterministically +DevOps +didn +Dobrý +doccargo +doccratesio +DOCTYPE +doesn +disambiguating +DisplayBacktrace +DivAssign +DraftPost +DSTs +ebook +ebooks +Edsger +egular +else's +emoji +encodings +enum +Enum +enums +enum's +Enums +eprintln +Erlang +ErrorKind +executables +expr +extern +favicon +ferris +FFFD +FFFF +figcaption +fieldname +filename +Filename +filesystem +Filesystem +filesystem's +filesystems +Firefox +FnMut +FnOnce +formatter +formatters +FrenchToast +FromIterator +frontend +getter +GGraph +GitHub +gitignore +grapheme +Grapheme +growable +gzip +hardcode +hardcoded +hardcoding +hasher +hashers +HashMap +HashSet +Haskell +hasn +HeadB +HeadC +HelloMacro +helloworld +HelloWorld +HelloWorldName +Hmmm +Hoare +Hola +homogenous +html +https +hyperoptimize +hypotheticals +Iceburgh +ident +IDE +IDEs +IDE's +IEEE +impl +implementor +implementors +ImportantExcerpt +incrementing +IndexMut +indices +init +initializer +initializers +inline 
+instantiation +internet +interoperate +IntoIterator +InvalidDigit +invariants +ioerror +iokind +ioresult +IoResult +iostdin +IpAddr +IpAddrKind +irst +isize +iter +iterator's +JavaScript +JoinHandle +Kay's +kinded +Klabnik +lang +LastWriteTime +latin +liballoc +libc +libcollections +libcore +libpanic +librarys +libreoffice +libstd +libunwind +lifecycle +LimitTracker +linter +LLVM +lobally +locators +LockResult +login +lookup +loopback +lossy +lval +macOS +Matsakis +mathematic +memoization +metadata +Metadata +metaprogramming +mibbit +Mibbit +millis +minigrep +mixup +mkdir +MockMessenger +modifiability +modularity +monomorphization +Monomorphization +monomorphized +MoveMessage +Mozilla +mpsc +msvc +MulAssign +multibyte +multithreaded +mutex +mutex's +Mutex +mutexes +Mutexes +MutexGuard +mutext +MyBox +myprogram +namespace +namespaced +namespaces +namespacing +natively +newfound +NewJob +NewsArticle +NewThread +newtype +newtypes +nitty +nocapture +nomicon +nonadministrators +nondeterministic +nonequality +nongeneric +noplayground +NotFound +nsprust +null's +OCaml +offsetof +online +OpenGL +optimizations +OptionalFloatingPointNumber +OptionalNumber +OsStr +OsString +other's +OutlinePrint +overloadable +overread +PanicPayload +param +parameterize +ParseIntError +PartialEq +PartialOrd +pbcopy +PendingReview +PendingReviewPost +PlaceholderType +polymorphism +PoolCreationError +portia +powershell +PowerShell +powi +preallocate +preallocates +preprocessing +Preprocessing +preprocessor +PrimaryColor +println +priv +proc +proto +pthreads +pushups +QuitMessage +quux +RAII +randcrate +RangeFrom +RangeTo +RangeFull +README +READMEs +rect +recurse +recv +redeclaring +Refactoring +refactor +refactoring +refcell +RefCell +refcellt +RefMut +reformats +refutability +reimplement +RemAssign +repr +representable +request's +resizes +resizing +retweet +rewordings +rint +ripgrep +runnable +runtime +runtimes +Rustacean +Rustaceans +rUsT +rustc +rustdoc +Rustonomicon +rustfix +rustfmt 
+rustup +sampleproject +screenshot +searchstring +SecondaryColor +SelectBox +semver +SemVer +serde +ShlAssign +ShrAssign +shouldn +Simula +siphash +situps +sizeof +SliceIndex +Smalltalk +snuck +someproject +someusername +SPDX +spdx +SpreadsheetCell +sqrt +stackoverflow +startup +StaticRef +stderr +stdin +Stdin +stdlib +stdout +steveklabnik's +stringify +Stroustrup +Stroustrup's +struct +Struct +structs +struct's +Structs +StrWrap +SubAssign +subclasses +subcommand +subcommands +subdirectories +subdirectory +submodule +submodules +Submodules +suboptimal +subpath +substring +subteams +subtree +subtyping +summarizable +supertrait +supertraits +TcpListener +TcpStream +templating +test_harness +test's +TextField +That'd +there'd +ThreadPool +timestamp +Tiếng +timeline +tlborm +tlsv +TODO +TokenStream +toml +TOML +toolchain +toolchains +ToString +tradeoff +tradeoffs +TrafficLight +transcoding +trpl +tuesday +tuple +tuples +turbofish +Turon +typeof +TypeName +UFCS +unary +Unary +uncomment +Uncomment +uncommenting +unevaluated +Uninstalling +uninstall +unix +unpopulated +unoptimized +UnsafeCell +unsafety +unsized +unsynchronized +URIs +UsefulType +username +USERPROFILE +usize +UsState +utils +vals +variable's +variant's +vers +versa +vert +Versioning +visualstudio +Vlissides +vscode +vtable +waitlist +wasn +weakt +WeatherForecast +WebSocket +whitespace +wildcard +wildcards +workflow +workspace +workspaces +Workspaces +wouldn +writeln +WriteMessage +xpression +yyyy +ZipImpl diff --git a/ci/spellcheck.sh b/ci/spellcheck.sh new file mode 100644 index 0000000..f1c84a5 --- /dev/null +++ b/ci/spellcheck.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +aspell --version + +# Checks project Markdown files for spelling mistakes. + +# Notes: + +# This script needs dictionary file ($dict_filename) with project-specific +# valid words. If this file is missing, first invocation of a script generates +# a file of words considered typos at the moment. 
The user should remove real typos +# from this file and leave only valid words. When the script generates a false +# positive after source modification, the new valid word should be added +# to the dictionary file. + +# Default mode of this script is interactive. Each source file is scanned for +# typos. aspell opens a window, suggesting fixes for each found typo. Original +# files with errors will be backed up to files with format "filename.md.bak". + +# When running in CI, this script should be run in "list" mode (pass "list" +# as first argument). In this mode the script scans all files and reports found +# errors. Exit code in this case depends on scan result: +# 1 if any errors found, +# 0 if all is clear. + +# Script skips words with length less than or equal to 3. This helps to avoid +# some false positives. + +# We can consider skipping source code in markdown files (```code```) to reduce +# rate of false positives, but then we lose the ability to detect typos in code +# comments/strings etc. + +shopt -s nullglob + +dict_filename=./ci/dictionary.txt +markdown_sources=(./src/*.md) +mode="check" + +# aspell repeatedly modifies the personal dictionary for some reason, +# so we should use a copy of our dictionary. +dict_path="/tmp/dictionary.txt" + +if [[ "$1" == "list" ]]; then + mode="list" +fi + +# Error if running in list (CI) mode and there isn't a dictionary file; +# creating one in CI won't do any good :( +if [[ "$mode" == "list" && ! -f "$dict_filename" ]]; then + echo "No dictionary file found! A dictionary file is required in CI!" + exit 1 +fi + +if [[ ! -f "$dict_filename" ]]; then + # Pre-check mode: generates dictionary of words aspell considers typos. + # After user validates that this file contains only valid words, we can + # look for typos using this dictionary and some default aspell dictionary. + echo "Scanning files to generate dictionary file '$dict_filename'." + echo "Please check that it doesn't contain any misspellings." 
+ + echo "personal_ws-1.1 en 0 utf-8" > "$dict_filename" + cat "${markdown_sources[@]}" | aspell --ignore 3 list | sort -u >> "$dict_filename" +elif [[ "$mode" == "list" ]]; then + # List (default) mode: scan all files, report errors. + declare -i retval=0 + + cp "$dict_filename" "$dict_path" + + if [ ! -f $dict_path ]; then + retval=1 + exit "$retval" + fi + + for fname in "${markdown_sources[@]}"; do + command=$(aspell --ignore 3 --personal="$dict_path" "$mode" < "$fname") + if [[ -n "$command" ]]; then + for error in $command; do + # FIXME: find more correct way to get line number + # (ideally from aspell). Now it can make some false positives, + # because it is just a grep. + grep --with-filename --line-number --color=always "$error" "$fname" + done + retval=1 + fi + done + exit "$retval" +elif [[ "$mode" == "check" ]]; then + # Interactive mode: fix typos. + cp "$dict_filename" "$dict_path" + + if [ ! -f $dict_path ]; then + retval=1 + exit "$retval" + fi + + for fname in "${markdown_sources[@]}"; do + aspell --ignore 3 --dont-backup --personal="$dict_path" "$mode" "$fname" + done +fi diff --git a/ci/validate.sh b/ci/validate.sh new file mode 100644 index 0000000..9e2cfdf --- /dev/null +++ b/ci/validate.sh @@ -0,0 +1,4 @@ +for file in src/*.md ; do + echo Checking references in $file + cargo run --quiet --bin link2print < $file > /dev/null +done \ No newline at end of file diff --git a/rust-toolchain b/rust-toolchain new file mode 100644 index 0000000..d96ae40 --- /dev/null +++ b/rust-toolchain @@ -0,0 +1 @@ +1.52 diff --git a/rustfmt.toml b/rustfmt.toml new file mode 100644 index 0000000..df99c69 --- /dev/null +++ b/rustfmt.toml @@ -0,0 +1 @@ +max_width = 80 diff --git a/tools/convert-quotes.sh b/tools/convert-quotes.sh new file mode 100644 index 0000000..aa51dcb --- /dev/null +++ b/tools/convert-quotes.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -eu + +dir=$1 + +mkdir -p "tmp/$dir" + +for f in $dir/*.md +do + cat "$f" | cargo run --bin convert_quotes > "tmp/$f" 
+ mv "tmp/$f" "$f" +done diff --git a/tools/doc-to-md.sh b/tools/doc-to-md.sh new file mode 100644 index 0000000..170727d --- /dev/null +++ b/tools/doc-to-md.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -eu + +# Get all the docx files in the tmp dir. +ls tmp/*.docx | \ +# Extract just the filename so we can reuse it easily. +xargs -n 1 basename -s .docx | \ +while IFS= read -r filename; do + # Make a directory to put the XML in. + mkdir -p "tmp/$filename" + # Unzip the docx to get at the XML. + unzip -o "tmp/$filename.docx" -d "tmp/$filename" + # Convert to markdown with XSL. + xsltproc tools/docx-to-md.xsl "tmp/$filename/word/document.xml" | \ + # Hard wrap at 80 chars at word bourdaries. + fold -w 80 -s | \ + # Remove trailing whitespace and save in the `nostarch` dir for comparison. + sed -e "s/ *$//" > "nostarch/$filename.md" +done diff --git a/tools/docx-to-md.xsl b/tools/docx-to-md.xsl new file mode 100644 index 0000000..637c7a5 --- /dev/null +++ b/tools/docx-to-md.xsl @@ -0,0 +1,220 @@ + + + + + + + + + + + + + + + + + + + + + + + + [TOC] + # + + + + + + ## + + + + + + ### + + + + + + #### + + + + + + ### + + + + + + 1. + + + + + + 1. 
+ + + + + + * + + + + + + * + + + + + + * + + + + + + + + + + + + + ``` + + + + + + + + + + + + + + + + + + + + + ``` + + + + ``` + + ``` + + + + + + + + + + + + + + > + + + + + + > + + + + + + > + + + + + +Unmatched: + + + + + + + + + + + + + + + ` + + ` + + + + + + + + + + + + + + + + + ** + + ** + + + + + + + + + + + + + + + + + * + + * + + + + + + + + + + + + + + diff --git a/tools/megadiff.sh b/tools/megadiff.sh new file mode 100644 index 0000000..9b0d943 --- /dev/null +++ b/tools/megadiff.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -eu + +# Remove files that are never affected by rustfmt or are otherwise uninteresting +rm -rf tmp/book-before/css/ tmp/book-before/theme/ tmp/book-before/img/ tmp/book-before/*.js \ + tmp/book-before/FontAwesome tmp/book-before/*.css tmp/book-before/*.png \ + tmp/book-before/*.json tmp/book-before/print.html + +rm -rf tmp/book-after/css/ tmp/book-after/theme/ tmp/book-after/img/ tmp/book-after/*.js \ + tmp/book-after/FontAwesome tmp/book-after/*.css tmp/book-after/*.png \ + tmp/book-after/*.json tmp/book-after/print.html + +# Get all the html files before +ls tmp/book-before/*.html | \ +# Extract just the filename so we can reuse it easily. +xargs -n 1 basename | \ +while IFS= read -r filename; do + # Remove any files that are the same before and after + diff "tmp/book-before/$filename" "tmp/book-after/$filename" > /dev/null \ + && rm "tmp/book-before/$filename" "tmp/book-after/$filename" +done diff --git a/tools/nostarch.sh b/tools/nostarch.sh new file mode 100644 index 0000000..d802bf0 --- /dev/null +++ b/tools/nostarch.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +set -eu + +cargo build --release + +mkdir -p tmp +rm -rf tmp/*.md +rm -rf tmp/markdown + +# Render the book as Markdown to include all the code listings +MDBOOK_OUTPUT__MARKDOWN=1 mdbook build -d tmp + +# Get all the Markdown files +ls tmp/markdown/${1:-""}*.md | \ +# Extract just the filename so we can reuse it easily. 
+xargs -n 1 basename | \ +# Remove all links followed by ```, then +# Change all remaining links from Markdown to italicized inline text. +while IFS= read -r filename; do + < "tmp/markdown/$filename" ./target/release/remove_links \ + | ./target/release/link2print \ + | ./target/release/remove_markup \ + | ./target/release/remove_hidden_lines > "tmp/$filename" +done +# Concatenate the files into the `nostarch` dir. +./target/release/concat_chapters tmp nostarch diff --git a/tools/src/bin/concat_chapters.rs b/tools/src/bin/concat_chapters.rs new file mode 100644 index 0000000..71fd86f --- /dev/null +++ b/tools/src/bin/concat_chapters.rs @@ -0,0 +1,115 @@ +#[macro_use] +extern crate lazy_static; + +use std::collections::BTreeMap; +use std::env; +use std::fs::{create_dir, read_dir, File}; +use std::io; +use std::io::{Read, Write}; +use std::path::{Path, PathBuf}; +use std::process::exit; + +use regex::Regex; + +static PATTERNS: &'static [(&'static str, &'static str)] = &[ + (r"ch(\d\d)-\d\d-.*\.md", "chapter$1.md"), + (r"appendix-(\d\d).*\.md", "appendix.md"), +]; + +lazy_static! 
{ + static ref MATCHERS: Vec<(Regex, &'static str)> = { + PATTERNS + .iter() + .map(|&(expr, repl)| (Regex::new(expr).unwrap(), repl)) + .collect() + }; +} + +fn main() { + let args: Vec = env::args().collect(); + + if args.len() < 3 { + println!("Usage: {} ", args[0]); + exit(1); + } + + let source_dir = ensure_dir_exists(&args[1]).unwrap(); + let target_dir = ensure_dir_exists(&args[2]).unwrap(); + + let mut matched_files = match_files(source_dir, target_dir); + matched_files.sort(); + + for (target_path, source_paths) in group_by_target(matched_files) { + concat_files(source_paths, target_path).unwrap(); + } +} + +fn match_files( + source_dir: &Path, + target_dir: &Path, +) -> Vec<(PathBuf, PathBuf)> { + read_dir(source_dir) + .expect("Unable to read source directory") + .filter_map(|maybe_entry| maybe_entry.ok()) + .filter_map(|entry| { + let source_filename = entry.file_name(); + let source_filename = + &source_filename.to_string_lossy().into_owned(); + for &(ref regex, replacement) in MATCHERS.iter() { + if regex.is_match(source_filename) { + let target_filename = + regex.replace_all(source_filename, replacement); + let source_path = entry.path(); + let mut target_path = PathBuf::from(&target_dir); + target_path.push(target_filename.to_string()); + return Some((source_path, target_path)); + } + } + None + }) + .collect() +} + +fn group_by_target( + matched_files: Vec<(PathBuf, PathBuf)>, +) -> BTreeMap> { + let mut grouped: BTreeMap> = BTreeMap::new(); + for (source, target) in matched_files { + if let Some(source_paths) = grouped.get_mut(&target) { + source_paths.push(source); + continue; + } + let source_paths = vec![source]; + grouped.insert(target.clone(), source_paths); + } + grouped +} + +fn concat_files( + source_paths: Vec, + target_path: PathBuf, +) -> io::Result<()> { + println!("Concatenating into {}:", target_path.to_string_lossy()); + let mut target = File::create(target_path)?; + target.write_all(b"\n[TOC]\n")?; + + for path in source_paths { + 
println!(" {}", path.to_string_lossy()); + let mut source = File::open(path)?; + let mut contents: Vec = Vec::new(); + source.read_to_end(&mut contents)?; + + target.write_all(b"\n")?; + target.write_all(&contents)?; + target.write_all(b"\n")?; + } + Ok(()) +} + +fn ensure_dir_exists(dir_string: &str) -> io::Result<&Path> { + let path = Path::new(dir_string); + if !path.exists() { + create_dir(path)?; + } + Ok(&path) +} diff --git a/tools/src/bin/convert_quotes.rs b/tools/src/bin/convert_quotes.rs new file mode 100644 index 0000000..e548c5e --- /dev/null +++ b/tools/src/bin/convert_quotes.rs @@ -0,0 +1,78 @@ +use std::io; +use std::io::{Read, Write}; + +fn main() { + let mut is_in_code_block = false; + let mut is_in_inline_code = false; + let mut is_in_html_tag = false; + + let mut buffer = String::new(); + if let Err(e) = io::stdin().read_to_string(&mut buffer) { + panic!(e); + } + + for line in buffer.lines() { + if line.is_empty() { + is_in_inline_code = false; + } + if line.starts_with("```") { + is_in_code_block = !is_in_code_block; + } + if is_in_code_block { + is_in_inline_code = false; + is_in_html_tag = false; + write!(io::stdout(), "{}\n", line).unwrap(); + } else { + let modified_line = &mut String::new(); + let mut previous_char = std::char::REPLACEMENT_CHARACTER; + let mut chars_in_line = line.chars(); + + while let Some(possible_match) = chars_in_line.next() { + // Check if inside inline code. + if possible_match == '`' { + is_in_inline_code = !is_in_inline_code; + } + // Check if inside HTML tag. + if possible_match == '<' && !is_in_inline_code { + is_in_html_tag = true; + } + if possible_match == '>' && !is_in_inline_code { + is_in_html_tag = false; + } + + // Replace with right/left apostrophe/quote. 
+ let char_to_push = if possible_match == '\'' + && !is_in_inline_code + && !is_in_html_tag + { + if (previous_char != std::char::REPLACEMENT_CHARACTER + && !previous_char.is_whitespace()) + || previous_char == '‘' + { + '’' + } else { + '‘' + } + } else if possible_match == '"' + && !is_in_inline_code + && !is_in_html_tag + { + if (previous_char != std::char::REPLACEMENT_CHARACTER + && !previous_char.is_whitespace()) + || previous_char == '“' + { + '”' + } else { + '“' + } + } else { + // Leave untouched. + possible_match + }; + modified_line.push(char_to_push); + previous_char = char_to_push; + } + write!(io::stdout(), "{}\n", modified_line).unwrap(); + } + } +} diff --git a/tools/src/bin/lfp.rs b/tools/src/bin/lfp.rs new file mode 100644 index 0000000..caab7b2 --- /dev/null +++ b/tools/src/bin/lfp.rs @@ -0,0 +1,252 @@ +// We have some long regex literals, so: +// ignore-tidy-linelength + +use docopt::Docopt; +use serde::Deserialize; +use std::io::BufRead; +use std::{fs, io, path}; + +fn main() { + let args: Args = Docopt::new(USAGE) + .and_then(|d| d.deserialize()) + .unwrap_or_else(|e| e.exit()); + + let src_dir = &path::Path::new(&args.arg_src_dir); + let found_errs = walkdir::WalkDir::new(src_dir) + .min_depth(1) + .into_iter() + .map(|entry| match entry { + Ok(entry) => entry, + Err(err) => { + eprintln!("{:?}", err); + std::process::exit(911) + } + }) + .map(|entry| { + let path = entry.path(); + if is_file_of_interest(path) { + let err_vec = lint_file(path); + for err in &err_vec { + match *err { + LintingError::LineOfInterest(line_num, ref line) => { + eprintln!( + "{}:{}\t{}", + path.display(), + line_num, + line + ) + } + LintingError::UnableToOpenFile => { + eprintln!("Unable to open {}.", path.display()) + } + } + } + !err_vec.is_empty() + } else { + false + } + }) + .collect::>() + .iter() + .any(|result| *result); + + if found_errs { + std::process::exit(1) + } else { + std::process::exit(0) + } +} + +const USAGE: &'static str = " +counter +Usage: + 
lfp + lfp (-h | --help) +Options: + -h --help Show this screen. +"; + +#[derive(Debug, Deserialize)] +struct Args { + arg_src_dir: String, +} + +fn lint_file(path: &path::Path) -> Vec { + match fs::File::open(path) { + Ok(file) => lint_lines(io::BufReader::new(&file).lines()), + Err(_) => vec![LintingError::UnableToOpenFile], + } +} + +fn lint_lines(lines: I) -> Vec +where + I: Iterator>, +{ + lines + .enumerate() + .map(|(line_num, line)| { + let raw_line = line.unwrap(); + if is_line_of_interest(&raw_line) { + Err(LintingError::LineOfInterest(line_num, raw_line)) + } else { + Ok(()) + } + }) + .filter(|result| result.is_err()) + .map(|result| result.unwrap_err()) + .collect() +} + +fn is_file_of_interest(path: &path::Path) -> bool { + path.extension().map_or(false, |ext| ext == "md") +} + +fn is_line_of_interest(line: &str) -> bool { + !line + .split_whitespace() + .filter(|sub_string| { + sub_string.contains("file://") + && !sub_string.contains("file:///projects/") + }) + .collect::>() + .is_empty() +} + +#[derive(Debug)] +enum LintingError { + UnableToOpenFile, + LineOfInterest(usize, String), +} + +#[cfg(test)] +mod tests { + + use std::path; + + #[test] + fn lint_file_returns_a_vec_with_errs_when_lines_of_interest_are_found() { + let string = r#" + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/guessing_game` + Guess the number! + The secret number is: 61 + Please input your guess. + 10 + You guessed: 10 + Too small! + Please input your guess. + 99 + You guessed: 99 + Too big! + Please input your guess. + foo + Please input your guess. + 61 + You guessed: 61 + You win! + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/debug/guessing_game` + Guess the number! + The secret number is: 7 + Please input your guess. + 4 + You guessed: 4 + $ cargo run + Running `target/debug/guessing_game` + Guess the number! 
+ The secret number is: 83 + Please input your guess. + 5 + $ cargo run + Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game) + Running `target/debug/guessing_game` + Hello, world! + "#; + + let raw_lines = string.to_string(); + let lines = raw_lines.lines().map(|line| Ok(line.to_string())); + + let result_vec = super::lint_lines(lines); + + assert!(!result_vec.is_empty()); + assert_eq!(3, result_vec.len()); + } + + #[test] + fn lint_file_returns_an_empty_vec_when_no_lines_of_interest_are_found() { + let string = r#" + $ cargo run + Compiling guessing_game v0.1.0 (file:///projects/guessing_game) + Running `target/guessing_game` + Guess the number! + The secret number is: 61 + Please input your guess. + 10 + You guessed: 10 + Too small! + Please input your guess. + 99 + You guessed: 99 + Too big! + Please input your guess. + foo + Please input your guess. + 61 + You guessed: 61 + You win! + "#; + + let raw_lines = string.to_string(); + let lines = raw_lines.lines().map(|line| Ok(line.to_string())); + + let result_vec = super::lint_lines(lines); + + assert!(result_vec.is_empty()); + } + + #[test] + fn is_file_of_interest_returns_false_when_the_path_is_a_directory() { + let uninteresting_fn = "src/img"; + + assert!(!super::is_file_of_interest(path::Path::new( + uninteresting_fn + ))); + } + + #[test] + fn is_file_of_interest_returns_false_when_the_filename_does_not_have_the_md_extension( + ) { + let uninteresting_fn = "src/img/foo1.png"; + + assert!(!super::is_file_of_interest(path::Path::new( + uninteresting_fn + ))); + } + + #[test] + fn is_file_of_interest_returns_true_when_the_filename_has_the_md_extension() + { + let interesting_fn = "src/ch01-00-introduction.md"; + + assert!(super::is_file_of_interest(path::Path::new(interesting_fn))); + } + + #[test] + fn is_line_of_interest_does_not_report_a_line_if_the_line_contains_a_file_url_which_is_directly_followed_by_the_project_path( + ) { + let sample_line = + "Compiling guessing_game v0.1.0 
(file:///projects/guessing_game)"; + + assert!(!super::is_line_of_interest(sample_line)); + } + + #[test] + fn is_line_of_interest_reports_a_line_if_the_line_contains_a_file_url_which_is_not_directly_followed_by_the_project_path( + ) { + let sample_line = "Compiling guessing_game v0.1.0 (file:///home/you/projects/guessing_game)"; + + assert!(super::is_line_of_interest(sample_line)); + } +} diff --git a/tools/src/bin/link2print.rs b/tools/src/bin/link2print.rs new file mode 100644 index 0000000..33d90ec --- /dev/null +++ b/tools/src/bin/link2print.rs @@ -0,0 +1,415 @@ +// FIXME: we have some long lines that could be refactored, but it's not a big deal. +// ignore-tidy-linelength + +use regex::{Captures, Regex}; +use std::collections::HashMap; +use std::io; +use std::io::{Read, Write}; + +fn main() { + write_md(parse_links(parse_references(read_md()))); +} + +fn read_md() -> String { + let mut buffer = String::new(); + match io::stdin().read_to_string(&mut buffer) { + Ok(_) => buffer, + Err(error) => panic!(error), + } +} + +fn write_md(output: String) { + write!(io::stdout(), "{}", output).unwrap(); +} + +fn parse_references(buffer: String) -> (String, HashMap) { + let mut ref_map = HashMap::new(); + // FIXME: currently doesn't handle "title" in following line. + let re = Regex::new(r###"(?m)\n?^ {0,3}\[([^]]+)\]:[[:blank:]]*(.*)$"###) + .unwrap(); + let output = re.replace_all(&buffer, |caps: &Captures<'_>| { + let key = caps.get(1).unwrap().as_str().to_uppercase(); + let val = caps.get(2).unwrap().as_str().to_string(); + if ref_map.insert(key, val).is_some() { + panic!("Did not expect markdown page to have duplicate reference"); + } + "".to_string() + }).to_string(); + (output, ref_map) +} + +fn parse_links((buffer, ref_map): (String, HashMap)) -> String { + // FIXME: check which punctuation is allowed by spec. + let re = Regex::new(r###"(?:(?P
(?:```(?:[^`]|`[^`])*`?\n```\n)|(?:[^\[]`[^`\n]+[\n]?[^`\n]*`))|(?:\[(?P[^]]+)\](?:(?:\([[:blank:]]*(?P[^")]*[^ ])(?:[[:blank:]]*"[^"]*")?\))|(?:\[(?P[^]]*)\]))?))"###).expect("could not create regex");
+    let error_code =
+        Regex::new(r###"^E\d{4}$"###).expect("could not create regex");
+    let output = re.replace_all(&buffer, |caps: &Captures<'_>| {
+        match caps.name("pre") {
+            Some(pre_section) => format!("{}", pre_section.as_str()),
+            None => {
+                let name = caps.name("name").expect("could not get name").as_str();
+                // Really we should ignore text inside code blocks,
+                // this is a hack to not try to treat `#[derive()]`,
+                // `[profile]`, `[test]`, or `[E\d\d\d\d]` like a link.
+                if name.starts_with("derive(") ||
+                   name.starts_with("profile") ||
+                   name.starts_with("test") ||
+                   name.starts_with("no_mangle") ||
+                   error_code.is_match(name) {
+                    return name.to_string()
+                }
+
+                let val = match caps.name("val") {
+                    // `[name](link)`
+                    Some(value) => value.as_str().to_string(),
+                    None => {
+                        match caps.name("key") {
+                            Some(key) => {
+                                match key.as_str() {
+                                    // `[name][]`
+                                    "" => format!("{}", ref_map.get(&name.to_uppercase()).expect(&format!("could not find url for the link text `{}`", name))),
+                                    // `[name][reference]`
+                                    _ => format!("{}", ref_map.get(&key.as_str().to_uppercase()).expect(&format!("could not find url for the link text `{}`", key.as_str()))),
+                                }
+                            }
+                            // `[name]` as reference
+                            None => format!("{}", ref_map.get(&name.to_uppercase()).expect(&format!("could not find url for the link text `{}`", name))),
+                        }
+                    }
+                };
+                format!("{} at *{}*", name, val)
+            }
+        }
+    });
+    output.to_string()
+}
+
+#[cfg(test)]
+mod tests {
+    fn parse(source: String) -> String {
+        super::parse_links(super::parse_references(source))
+    }
+
+    #[test]
+    fn parses_inline_link() {
+        let source =
+            r"This is a [link](http://google.com) that should be expanded"
+                .to_string();
+        let target =
+            r"This is a link at *http://google.com* that should be expanded"
+                .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_multiline_links() {
+        let source = r"This is a [link](http://google.com) that
+should appear expanded. Another [location](/here/) and [another](http://gogogo)"
+            .to_string();
+        let target = r"This is a link at *http://google.com* that
+should appear expanded. Another location at */here/* and another at *http://gogogo*"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference() {
+        let source = r"This is a [link][theref].
+[theref]: http://example.com/foo
+more text"
+            .to_string();
+        let target = r"This is a link at *http://example.com/foo*.
+more text"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_implicit_link() {
+        let source = r"This is an [implicit][] link.
+[implicit]: /The Link/"
+            .to_string();
+        let target = r"This is an implicit at */The Link/* link.".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_refs_with_one_space_indentation() {
+        let source = r"This is a [link][ref]
+ [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_refs_with_two_space_indentation() {
+        let source = r"This is a [link][ref]
+  [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_refs_with_three_space_indentation() {
+        let source = r"This is a [link][ref]
+   [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    #[should_panic]
+    fn rejects_refs_with_four_space_indentation() {
+        let source = r"This is a [link][ref]
+    [ref]: The link"
+            .to_string();
+        let target = r"This is a link at *The link*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_optional_inline_title() {
+        let source =
+            r###"This is a titled [link](http://example.com "My title")."###
+                .to_string();
+        let target =
+            r"This is a titled link at *http://example.com*.".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    // Titles may contain punctuation (e.g. an apostrophe); the title is
+    // dropped from the rendered output just like a plain title.
+    // (Fixes typo in the test name: "puctuation" -> "punctuation".)
+    fn parses_title_with_punctuation() {
+        let source =
+            r###"[link](http://example.com "It's Title")"###.to_string();
+        let target = r"link at *http://example.com*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_name_with_punctuation() {
+        let source = r###"[I'm here](there)"###.to_string();
+        let target = r###"I'm here at *there*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_name_with_utf8() {
+        let source = r###"[user’s forum](the user’s forum)"###.to_string();
+        let target =
+            r###"user’s forum at *the user’s forum*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference_with_punctuation() {
+        let source = r###"[link][the ref-ref]
+[the ref-ref]:http://example.com/ref-ref"###
+            .to_string();
+        let target = r###"link at *http://example.com/ref-ref*"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_reference_case_insensitively() {
+        let source = r"[link][Ref]
+[ref]: The reference"
+            .to_string();
+        let target = r"link at *The reference*".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_link_as_reference_when_reference_is_empty() {
+        let source = r"[link as reference][]
+[link as reference]: the actual reference"
+            .to_string();
+        let target = r"link as reference at *the actual reference*".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn parses_link_without_reference_as_reference() {
+        let source = r"[link] is alone
+[link]: The contents"
+            .to_string();
+        let target = r"link at *The contents* is alone".to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    #[ignore]
+    fn parses_link_without_reference_as_reference_with_asterisks() {
+        let source = r"*[link]* is alone
+[link]: The contents"
+            .to_string();
+        let target = r"*link* at *The contents* is alone".to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_links_in_pre_sections() {
+        let source = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name "]
+
+[dependencies]
+```
+"###
+        .to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_links_in_quoted_sections() {
+        let source = r###"do not change `[package]`."###.to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_links_in_quoted_sections_containing_newlines() {
+        let source = r"do not change `this [package]
+is still here` [link](ref)"
+            .to_string();
+        let target = r"do not change `this [package]
+is still here` link at *ref*"
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+
+    #[test]
+    fn ignores_links_in_pre_sections_while_still_handling_links() {
+        let source = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name "]
+
+[dependencies]
+```
+Another [link]
+more text
+[link]: http://gohere
+"###
+        .to_string();
+        let target = r###"```toml
+[package]
+name = "hello_cargo"
+version = "0.1.0"
+authors = ["Your Name "]
+
+[dependencies]
+```
+Another link at *http://gohere*
+more text
+"###
+        .to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_quotes_in_pre_sections() {
+        let source = r###"```bash
+$ cargo build
+   Compiling guessing_game v0.1.0 (file:///projects/guessing_game)
+src/main.rs:23:21: 23:35 error: mismatched types [E0308]
+src/main.rs:23     match guess.cmp(&secret_number) {
+                                   ^~~~~~~~~~~~~~
+src/main.rs:23:21: 23:35 help: run `rustc --explain E0308` to see a detailed explanation
+src/main.rs:23:21: 23:35 note: expected type `&std::string::String`
+src/main.rs:23:21: 23:35 note:    found type `&_`
+error: aborting due to previous error
+Could not compile `guessing_game`.
+```
+"###
+            .to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_short_quotes() {
+        let source = r"to `1` at index `[0]` i".to_string();
+        let target = source.clone();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn ignores_pre_sections_with_final_quote() {
+        let source = r###"```bash
+$ cargo run
+   Compiling points v0.1.0 (file:///projects/points)
+error: the trait bound `Point: std::fmt::Display` is not satisfied [--explain E0277]
+ --> src/main.rs:8:29
+8 |>     println!("Point 1: {}", p1);
+  |>                             ^^
+:2:27: 2:58: note: in this expansion of format_args!
+:3:1: 3:54: note: in this expansion of print! (defined in )
+src/main.rs:8:5: 8:33: note: in this expansion of println! (defined in )
+note: `Point` cannot be formatted with the default formatter; try using `:?` instead if you are using a format string
+note: required by `std::fmt::Display::fmt`
+```
+`here` is another [link](the ref)
+"###.to_string();
+        let target = r###"```bash
+$ cargo run
+   Compiling points v0.1.0 (file:///projects/points)
+error: the trait bound `Point: std::fmt::Display` is not satisfied [--explain E0277]
+ --> src/main.rs:8:29
+8 |>     println!("Point 1: {}", p1);
+  |>                             ^^
+:2:27: 2:58: note: in this expansion of format_args!
+:3:1: 3:54: note: in this expansion of print! (defined in )
+src/main.rs:8:5: 8:33: note: in this expansion of println! (defined in )
+note: `Point` cannot be formatted with the default formatter; try using `:?` instead if you are using a format string
+note: required by `std::fmt::Display::fmt`
+```
+`here` is another link at *the ref*
+"###.to_string();
+        assert_eq!(parse(source), target);
+    }
+    #[test]
+    fn parses_adam_p_cheatsheet() {
+        let source = r###"[I'm an inline-style link](https://www.google.com)
+
+[I'm an inline-style link with title](https://www.google.com "Google's Homepage")
+
+[I'm a reference-style link][Arbitrary case-insensitive reference text]
+
+[I'm a relative reference to a repository file](../blob/master/LICENSE)
+
+[You can use numbers for reference-style link definitions][1]
+
+Or leave it empty and use the [link text itself][].
+
+URLs and URLs in angle brackets will automatically get turned into links.
+http://www.example.com or  and sometimes
+example.com (but not on Github, for example).
+
+Some text to show that the reference links can follow later.
+
+[arbitrary case-insensitive reference text]: https://www.mozilla.org
+[1]: http://slashdot.org
+[link text itself]: http://www.reddit.com"###
+            .to_string();
+
+        let target = r###"I'm an inline-style link at *https://www.google.com*
+
+I'm an inline-style link with title at *https://www.google.com*
+
+I'm a reference-style link at *https://www.mozilla.org*
+
+I'm a relative reference to a repository file at *../blob/master/LICENSE*
+
+You can use numbers for reference-style link definitions at *http://slashdot.org*
+
+Or leave it empty and use the link text itself at *http://www.reddit.com*.
+
+URLs and URLs in angle brackets will automatically get turned into links.
+http://www.example.com or  and sometimes
+example.com (but not on Github, for example).
+
+Some text to show that the reference links can follow later.
+"###
+            .to_string();
+        assert_eq!(parse(source), target);
+    }
+}
diff --git a/tools/src/bin/release_listings.rs b/tools/src/bin/release_listings.rs
new file mode 100644
index 0000000..56a38e0
--- /dev/null
+++ b/tools/src/bin/release_listings.rs
@@ -0,0 +1,159 @@
+#[macro_use]
+extern crate lazy_static;
+
+use regex::Regex;
+use std::error::Error;
+use std::fs;
+use std::fs::File;
+use std::io::prelude::*;
+use std::io::{BufReader, BufWriter};
+use std::path::{Path, PathBuf};
+
+/// Build `tmp/listings.tar.gz` from cleaned copies of everything under
+/// `listings/`, mirroring the `chapter/listing` directory layout.
+fn main() -> Result<(), Box<dyn Error>> {
+    // Get all listings from the `listings` directory
+    let listings_dir = Path::new("listings");
+
+    // Put the results in the `tmp/listings` directory
+    let out_dir = Path::new("tmp/listings");
+
+    // Clear out any existing content in `tmp/listings`
+    if out_dir.is_dir() {
+        fs::remove_dir_all(out_dir)?;
+    }
+
+    // Create a new, empty `tmp/listings` directory
+    fs::create_dir(out_dir)?;
+
+    // For each chapter in the `listings` directory,
+    for chapter in fs::read_dir(listings_dir)? {
+        let chapter = chapter?;
+        let chapter_path = chapter.path();
+
+        let chapter_name = chapter_path
+            .file_name()
+            .expect("Chapter should've had a name");
+
+        // Create a corresponding chapter dir in `tmp/listings`
+        let output_chapter_path = out_dir.join(chapter_name);
+        fs::create_dir(&output_chapter_path)?;
+
+        // For each listing in the chapter directory,
+        for listing in fs::read_dir(chapter_path)? {
+            let listing = listing?;
+            let listing_path = listing.path();
+
+            let listing_name = listing_path
+                .file_name()
+                .expect("Listing should've had a name");
+
+            // Create a corresponding listing dir in the tmp chapter dir
+            let output_listing_dir = output_chapter_path.join(listing_name);
+            fs::create_dir(&output_listing_dir)?;
+
+            // Copy all the cleaned files in the listing to the tmp directory
+            copy_cleaned_listing_files(listing_path, output_listing_dir)?;
+        }
+    }
+
+    // Create a compressed archive of all the listings. Finish the tar builder
+    // and the gzip encoder explicitly so that any I/O error surfaces here as
+    // an `Err` instead of being silently ignored when the values are dropped.
+    let tarfile = File::create("tmp/listings.tar.gz")?;
+    let encoder =
+        flate2::write::GzEncoder::new(tarfile, flate2::Compression::default());
+    let mut archive = tar::Builder::new(encoder);
+    archive.append_dir_all("listings", "tmp/listings")?;
+    archive.into_inner()?.finish()?;
+
+    // Assure whoever is running this that the script exited successfully,
+    // and remind them where the generated file ends up.
+    println!("Release tarball of listings in tmp/listings.tar.gz");
+
+    Ok(())
+}
+
+// Cleaned listings will not contain:
+//
+// - `target` directories
+// - `output.txt` files used to display output in the book
+// - `rustfmt-ignore` files used to signal to update-rustc.sh the listing shouldn't be formatted
+// - anchor comments or snip comments
+// - empty `main` functions in `lib.rs` files used to trick rustdoc
+fn copy_cleaned_listing_files(
+    from: PathBuf,
+    to: PathBuf,
+) -> Result<(), Box<dyn Error>> {
+    for item in fs::read_dir(from)? {
+        let item = item?;
+        let item_path = item.path();
+
+        let item_name =
+            item_path.file_name().expect("Item should've had a name");
+        let output_item = to.join(item_name);
+
+        if item_path.is_dir() {
+            // Don't copy `target` directories
+            if item_name != "target" {
+                fs::create_dir(&output_item)?;
+                copy_cleaned_listing_files(item_path, output_item)?;
+            }
+        } else if item_name != "output.txt" && item_name != "rustfmt-ignore" {
+            // Rust sources get their anchor/snip comments stripped; every
+            // other file (besides the exclusions above) is copied verbatim.
+            // `map_or` mirrors the extension check used by lfp's
+            // `is_file_of_interest`.
+            if item_path.extension().map_or(false, |ext| ext == "rs") {
+                copy_cleaned_rust_file(
+                    item_name,
+                    &item_path,
+                    &output_item,
+                )?;
+            } else {
+                // Copy any non-Rust files without modification
+                fs::copy(item_path, output_item)?;
+            }
+        }
+    }
+
+    Ok(())
+}
+
+// Matches any `// ANCHOR: name`, `// ANCHOR_END: name`, or `// --snip--`
+// comment; lines matching this are dropped by `copy_cleaned_rust_file`.
+lazy_static! {
+    static ref ANCHOR_OR_SNIP_COMMENTS: Regex = Regex::new(
+        r"(?x)
+    //\s*ANCHOR:\s*[\w_-]+      # Remove all anchor comments
+    |
+    //\s*ANCHOR_END:\s*[\w_-]+  # Remove all anchor ending comments
+    |
+    //\s*--snip--               # Remove all snip comments
+    "
+    )
+    .unwrap();
+}
+
+// Matches the empty `fn main() {}` added to `lib.rs` listings to satisfy
+// rustdoc; such lines are removed from released listings.
+lazy_static! {
+    static ref EMPTY_MAIN: Regex = Regex::new(r"fn main\(\) \{}").unwrap();
+}
+
+// Cleaned Rust files will not contain:
+//
+// - anchor comments or snip comments
+// - empty `main` functions in `lib.rs` files used to trick rustdoc
+// Copy the Rust source at `from` to `to` line by line, dropping:
+// - any line matching ANCHOR_OR_SNIP_COMMENTS (anchor/snip markers), and
+// - for `lib.rs` only, the empty `fn main() {}` used to trick rustdoc.
+// NOTE(review): the return type reads `Box` here; presumably
+// `Box<dyn Error>` was lost in extraction — confirm against the repo source.
+fn copy_cleaned_rust_file(
+    item_name: &std::ffi::OsStr,
+    from: &PathBuf,
+    to: &PathBuf,
+) -> Result<(), Box> {
+    let from_buf = BufReader::new(File::open(from)?);
+    let mut to_buf = BufWriter::new(File::create(to)?);
+
+    for line in from_buf.lines() {
+        let line = line?;
+        if !ANCHOR_OR_SNIP_COMMENTS.is_match(&line) {
+            // Keep the line unless it is lib.rs's dummy empty main.
+            if item_name != "lib.rs" || !EMPTY_MAIN.is_match(&line) {
+                writeln!(&mut to_buf, "{}", line)?;
+            }
+        }
+    }
+
+    // Flush explicitly so buffered-write errors surface as Err, not on drop.
+    to_buf.flush()?;
+
+    Ok(())
+}
diff --git a/tools/src/bin/remove_hidden_lines.rs b/tools/src/bin/remove_hidden_lines.rs
new file mode 100644
index 0000000..fa3b705
--- /dev/null
+++ b/tools/src/bin/remove_hidden_lines.rs
@@ -0,0 +1,83 @@
+use std::io;
+use std::io::prelude::*;
+
+/// Pipeline: read Markdown from stdin, strip hidden code lines, and print
+/// the result to stdout.
+fn main() {
+    let input = read_md();
+    let cleaned = remove_hidden_lines(&input);
+    write_md(cleaned);
+}
+
+/// Read all of stdin into a `String`, panicking with a readable message on
+/// I/O or UTF-8 failure. (`panic!(error)` with a non-string payload is
+/// deprecated and a hard error in the 2021 edition.)
+fn read_md() -> String {
+    let mut buffer = String::new();
+    match io::stdin().read_to_string(&mut buffer) {
+        Ok(_) => buffer,
+        Err(error) => panic!("failed to read stdin: {}", error),
+    }
+}
+
+// Write the processed Markdown to stdout; panics if the write fails.
+fn write_md(output: String) {
+    write!(io::stdout(), "{}", output).unwrap();
+}
+
+/// Drop mdbook "hidden" lines (a `# ` prefix or a lone `#`) that appear
+/// inside fenced code blocks; text outside fences — including Markdown
+/// headings — is kept verbatim. Returns the surviving lines joined by `\n`.
+fn remove_hidden_lines(input: &str) -> String {
+    let mut in_fence = false;
+
+    let kept: Vec<&str> = input
+        .lines()
+        .filter(|line| {
+            // A fence line toggles state before the keep/drop decision,
+            // so the fence itself is always kept.
+            if line.starts_with("```") {
+                in_fence = !in_fence;
+            }
+            let hidden = line.starts_with("# ") || *line == "#";
+            !(in_fence && hidden)
+        })
+        .collect();
+
+    kept.join("\n")
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::remove_hidden_lines;
+
+    #[test]
+    fn hidden_line_in_code_block_is_removed() {
+        let input = r#"
+In this listing:
+
+```
+fn main() {
+# secret
+}
+```
+
+you can see that...
+        "#;
+        let output = remove_hidden_lines(input);
+
+        let desired_output = r#"
+In this listing:
+
+```
+fn main() {
+}
+```
+
+you can see that...
+        "#;
+
+        assert_eq!(output, desired_output);
+    }
+
+    #[test]
+    fn headings_arent_removed() {
+        let input = r#"
+# Heading 1
+        "#;
+        let output = remove_hidden_lines(input);
+
+        let desired_output = r#"
+# Heading 1
+        "#;
+
+        assert_eq!(output, desired_output);
+    }
+}
diff --git a/tools/src/bin/remove_links.rs b/tools/src/bin/remove_links.rs
new file mode 100644
index 0000000..a096389
--- /dev/null
+++ b/tools/src/bin/remove_links.rs
@@ -0,0 +1,45 @@
+extern crate regex;
+
+use regex::{Captures, Regex};
+use std::collections::HashSet;
+use std::io;
+use std::io::{Read, Write};
+
+/// Read Markdown from stdin, inline every `[title][ref]`/`[title](url)` link
+/// down to its title, delete the reference definitions that were used, and
+/// write the result to stdout.
+fn main() {
+    // `panic!(e)` with a non-string payload is deprecated (a hard error in
+    // the 2021 edition), so format the error message instead.
+    let mut buffer = String::new();
+    if let Err(e) = io::stdin().read_to_string(&mut buffer) {
+        panic!("failed to read stdin: {}", e);
+    }
+
+    let mut refs = HashSet::new();
+
+    // Capture all links and link references.
+    // NOTE(review): the trailing `(?i)` only affects the pattern that follows
+    // it — i.e. nothing here. Confirm whether it was meant as a prefix.
+    let regex =
+        r"\[([^\]]+)\](?:(?:\[([^\]]+)\])|(?:\([^\)]+\)))(?i)";
+    let link_regex = Regex::new(regex).unwrap();
+    let first_pass = link_regex.replace_all(&buffer, |caps: &Captures<'_>| {
+        // Save the link reference we want to delete.
+        if let Some(reference) = caps.get(2) {
+            refs.insert(reference.as_str().to_string());
+        }
+
+        // Put the link title back.
+        caps.get(1).unwrap().as_str().to_string()
+    });
+
+    // Search for the references we need to delete.
+    let ref_regex = Regex::new(r"(?m)^\[([^\]]+)\]:\s.*\n").unwrap();
+    let out = ref_regex.replace_all(&first_pass, |caps: &Captures<'_>| {
+        let capture = caps.get(1).unwrap().to_owned();
+
+        // Check if we've marked this reference for deletion ...
+        if refs.contains(capture.as_str()) {
+            return "".to_string();
+        }
+
+        // ... else we put back everything we captured.
+        caps.get(0).unwrap().as_str().to_string()
+    });
+
+    write!(io::stdout(), "{}", out).unwrap();
+}
diff --git a/tools/src/bin/remove_markup.rs b/tools/src/bin/remove_markup.rs
new file mode 100644
index 0000000..8877e03
--- /dev/null
+++ b/tools/src/bin/remove_markup.rs
@@ -0,0 +1,51 @@
+extern crate regex;
+
+use regex::{Captures, Regex};
+use std::io;
+use std::io::{Read, Write};
+
+// Pipeline: stdin Markdown -> remove_markup -> stdout.
+fn main() {
+    write_md(remove_markup(read_md()));
+}
+
+/// Read all of stdin into a `String`, panicking with a readable message on
+/// I/O or UTF-8 failure. (`panic!(error)` with a non-string payload is
+/// deprecated and a hard error in the 2021 edition.)
+fn read_md() -> String {
+    let mut buffer = String::new();
+    match io::stdin().read_to_string(&mut buffer) {
+        Ok(_) => buffer,
+        Err(error) => panic!("failed to read stdin: {}", error),
+    }
+}
+
+// Write the processed Markdown to stdout; panics if the write fails.
+fn write_md(output: String) {
+    write!(io::stdout(), "{}", output).unwrap();
+}
+
+/// Collapse fenced-code info strings to bare ``` fences and strip wrapper
+/// markup from filename/caption lines, keeping only capture group 1.
+fn remove_markup(input: String) -> String {
+    // NOTE(review): these patterns read as `\A(.*)\z` etc.; the original
+    // presumably matched `<span class="filename">…</span>`-style markup that
+    // was lost in extraction — confirm against the repository source.
+    let filename_regex =
+        Regex::new(r#"\A(.*)\z"#).unwrap();
+    // Captions sometimes take up multiple lines.
+    let caption_start_regex =
+        Regex::new(r#"\A(.*)\z"#).unwrap();
+    let caption_end_regex = Regex::new(r#"(.*)\z"#).unwrap();
+    let regexen = vec![filename_regex, caption_start_regex, caption_end_regex];
+
+    // `map` instead of `flat_map` over `Option`: every line produces exactly
+    // one output line, so the Option wrapping was needless.
+    let lines: Vec<_> = input
+        .lines()
+        .map(|line| {
+            // Remove our syntax highlighting and rustdoc markers.
+            if line.starts_with("```") {
+                String::from("```")
+            // Remove the span around filenames and captions.
+            } else {
+                regexen.iter().fold(line.to_string(), |result, regex| {
+                    regex.replace_all(&result, |caps: &Captures<'_>| {
+                        caps.get(1).unwrap().as_str().to_string()
+                    }).to_string()
+                })
+            }
+        })
+        .collect();
+    lines.join("\n")
+}
diff --git a/tools/update-rustc.sh b/tools/update-rustc.sh
new file mode 100644
index 0000000..465fbe9
--- /dev/null
+++ b/tools/update-rustc.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+
+set -eu
+
+# Build the book before making any changes for comparison of the output.
+echo 'Building book into `tmp/book-before` before updating...'
+mdbook build -d tmp/book-before
+
+# Rustfmt all listings
+echo 'Formatting all listings...'
+find -s listings -name Cargo.toml -print0 | while IFS= read -r -d '' f; do
+    dir_to_fmt=$(dirname $f)
+
+    # There are a handful of listings we don't want to rustfmt and skipping
+    # doesn't work; those will have a file in their directory that explains why.
+    if [ ! -f "${dir_to_fmt}/rustfmt-ignore" ]; then
+        cd $dir_to_fmt
+        cargo fmt --all && true
+        cd - > /dev/null
+    fi
+done
+
+# Get listings without anchor comments in tmp by compiling a release listings
+# artifact
+echo 'Generate listings without anchor comments...'
+cargo run --bin release_listings
+
+root_dir=$(pwd)
+
+echo 'Regenerating output...'
+# For any listings where we show the output, rebuild from scratch and rewrite
+# the saved output.txt, then restore the details we want to keep stable.
+# NB: `find -s` and `sed -i ''` are BSD/macOS-specific invocations.
+find -s listings -name output.txt -print0 | while IFS= read -r -d '' f; do
+    build_directory=$(dirname "$f")
+    full_build_directory="${root_dir}/${build_directory}"
+    full_output_path="${full_build_directory}/output.txt"
+    tmp_build_directory="tmp/${build_directory}"
+
+    cd "$tmp_build_directory"
+
+    # Save the previous compile time; we're going to keep it to minimize diff
+    # churn
+    compile_time=$(sed -E -ne 's/.*Finished (dev|test) \[unoptimized \+ debuginfo] target\(s\) in ([0-9.]*).*/\2/p' "${full_output_path}")
+
+    # Save the hash from the first test binary; we're going to keep it to
+    # minimize diff churn
+    test_binary_hash=$(sed -E -ne 's@.*Running [^[:space:]]+ \(target/debug/deps/[^-]*-([^\s]*)\)@\1@p' "${full_output_path}" | head -n 1)
+
+    # Act like this is the first time this listing has been built
+    cargo clean
+
+    # Run the command in the existing output file (the first line of
+    # output.txt is of the form "$ cargo ...")
+    cargo_command=$(sed -ne 's/$ \(.*\)/\1/p' "${full_output_path}")
+
+    # Clear the output file of everything except the command
+    echo "$ ${cargo_command}" > "${full_output_path}"
+
+    # Regenerate the output and append to the output file. Turn some warnings
+    # off to reduce output noise, and use one test thread to get consistent
+    # ordering of tests in the output when the command is `cargo test`.
+    # ${cargo_command} is intentionally unquoted: it must word-split into a
+    # command plus its arguments.
+    RUSTFLAGS="-A unused_variables -A dead_code" RUST_TEST_THREADS=1 $cargo_command >> "${full_output_path}" 2>&1 || true
+
+    # Set the project file path to the projects directory plus the crate name
+    # instead of a path to the computer of whoever is running this
+    sed -i '' -E -e 's@(Compiling|Checking) ([^\)]*) v0.1.0 (.*)@\1 \2 v0.1.0 (file:///projects/\2)@' "${full_output_path}"
+
+    # Restore the previous compile time, if there is one
+    if [ -n "${compile_time}" ]; then
+        sed -i '' -E -e "s/Finished (dev|test) \[unoptimized \+ debuginfo] target\(s\) in [0-9.]*/Finished \1 [unoptimized + debuginfo] target(s) in ${compile_time}/" "${full_output_path}"
+    fi
+
+    # Restore the previous test binary hash, if there is one
+    if [ -n "${test_binary_hash}" ]; then
+        replacement='s@Running ([^[:space:]]+) \(target/debug/deps/([^-]*)-([^\s]*)\)@Running \1 (target/debug/deps/\2-'
+        replacement+="${test_binary_hash}"
+        replacement+=')@g'
+        sed -i '' -E -e "${replacement}" "${full_output_path}"
+    fi
+
+    cd - > /dev/null
+done
+
+# Build the book after making all the changes
+echo 'Building book into `tmp/book-after` after updating...'
+mdbook build -d tmp/book-after
+
+# Run the megadiff script that removes all files that are the same, leaving only files to audit
+echo 'Removing tmp files that had no changes from the update...'
+./tools/megadiff.sh
+
+echo 'Done.'