Watchexec lib v3 (#601)

Co-authored-by: emilHof <95590295+emilHof@users.noreply.github.com>
Félix Saparelli 2023-11-26 09:33:44 +13:00 committed by GitHub
parent 7f23fbd68a
commit a13bc429eb
110 changed files with 7085 additions and 6196 deletions

Cargo.lock (generated, 1700 lines changed): file diff suppressed because it is too large.


@ -5,6 +5,7 @@ members = [
"crates/cli",
"crates/events",
"crates/signals",
"crates/supervisor",
"crates/filterer/globset",
"crates/filterer/ignore",
"crates/filterer/tagged",


@ -1,6 +1,6 @@
complete -c watchexec -s w -l watch -d 'Watch a specific file or directory' -r -F
complete -c watchexec -s c -l clear -d 'Clear screen before running command' -r -f -a "{clear ,reset }"
complete -c watchexec -s o -l on-busy-update -d 'What to do when receiving events while the command is running' -r -f -a "{queue ,do-nothing ,restart ,signal }"
complete -c watchexec -s c -l clear -d 'Clear screen before running command' -r -f -a "{clear '',reset ''}"
complete -c watchexec -s o -l on-busy-update -d 'What to do when receiving events while the command is running' -r -f -a "{queue '',do-nothing '',restart '',signal ''}"
complete -c watchexec -s s -l signal -d 'Send a signal to the process when it\'s still running' -r
complete -c watchexec -l stop-signal -d 'Signal to send to stop the command' -r
complete -c watchexec -l stop-timeout -d 'Time to wait for the command to exit gracefully' -r
@ -8,7 +8,7 @@ complete -c watchexec -s d -l debounce -d 'Time to wait for new events before ta
complete -c watchexec -l delay-run -d 'Sleep before running the command' -r
complete -c watchexec -l poll -d 'Poll for filesystem changes' -r
complete -c watchexec -l shell -d 'Use a different shell' -r
complete -c watchexec -l emit-events-to -d 'Configure event emission' -r -f -a "{environment ,stdin ,file ,json-stdin ,json-file ,none }"
complete -c watchexec -l emit-events-to -d 'Configure event emission' -r -f -a "{environment '',stdin '',file '',json-stdin '',json-file '',none ''}"
complete -c watchexec -s E -l env -d 'Add env vars to the command' -r
complete -c watchexec -l project-origin -d 'Set the project origin' -r -f -a "(__fish_complete_directories)"
complete -c watchexec -l workdir -d 'Set the working directory' -r -f -a "(__fish_complete_directories)"
@ -17,9 +17,9 @@ complete -c watchexec -s f -l filter -d 'Filename patterns to filter to' -r
complete -c watchexec -l filter-file -d 'Files to load filters from' -r -F
complete -c watchexec -s i -l ignore -d 'Filename patterns to filter out' -r
complete -c watchexec -l ignore-file -d 'Files to load ignores from' -r -F
complete -c watchexec -l fs-events -d 'Filesystem events to filter to' -r -f -a "{access ,create ,remove ,rename ,modify ,metadata }"
complete -c watchexec -l fs-events -d 'Filesystem events to filter to' -r -f -a "{access '',create '',remove '',rename '',modify '',metadata ''}"
complete -c watchexec -l log-file -d 'Write diagnostic logs to a file' -r -F
complete -c watchexec -l completions -d 'Generate a shell completions script' -r -f -a "{bash ,elvish ,fish ,nu ,powershell ,zsh }"
complete -c watchexec -l completions -d 'Generate a shell completions script' -r -f -a "{bash '',elvish '',fish '',nu '',powershell '',zsh ''}"
complete -c watchexec -s W -l watch-when-idle -d 'Deprecated alias for \'--on-busy-update=do-nothing\''
complete -c watchexec -s r -l restart -d 'Restart the process if it\'s still running'
complete -c watchexec -s k -l kill -d 'Hidden legacy shorthand for \'--signal=kill\''


@ -71,4 +71,4 @@ module completions {
}
use completions *
export use completions *


@ -15,11 +15,11 @@ rust-version = "1.64.0"
edition = "2021"
[dependencies.time]
version = "0.3.20"
version = "0.3.30"
features = ["macros", "formatting"]
[dependencies.gix]
version = "0.48"
version = "0.55.2"
optional = true
default-features = false

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -15,6 +15,6 @@ version = "*"
path = "../.."
[dependencies]
leon = { version = "0.0.1", default-features = false }
leon = { version = "2.0.1", default-features = false }
snapbox = "0.4.8"
time = { version = "0.3.20", features = ["formatting", "macros"] }
time = { version = "0.3.30", features = ["formatting", "macros"] }


@ -3,14 +3,56 @@
version = 3
[[package]]
name = "bitflags"
version = "1.3.2"
name = "anstream"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
"windows-sys",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [
"anstyle",
"windows-sys",
]
[[package]]
name = "bosion"
version = "1.0.0"
version = "1.0.1"
dependencies = [
"time",
]
@ -26,104 +68,35 @@ dependencies = [
]
[[package]]
name = "cc"
version = "1.0.79"
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "concolor"
version = "0.0.12"
name = "deranged"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7b3e3c41e9488eeda196b6806dbf487742107d61b2e16485bcca6c25ed5755b"
checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
dependencies = [
"bitflags",
"concolor-query",
"is-terminal",
]
[[package]]
name = "concolor-query"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82a90734b3d5dcf656e7624cca6bce9c3a90ee11f900e80141a7427ccfb3d317"
[[package]]
name = "errno"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
dependencies = [
"errno-dragonfly",
"libc",
"winapi",
]
[[package]]
name = "errno-dragonfly"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]]
name = "io-lifetimes"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "is-terminal"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
dependencies = [
"hermit-abi",
"io-lifetimes",
"rustix",
"windows-sys",
"powerfmt",
]
[[package]]
name = "itoa"
version = "1.0.6"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "leon"
version = "0.0.1"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1afa3794684c32f91a5aa105e5109743bc6f2999a869c28fffa40aeffa30cfd0"
checksum = "52df920dfe9751d43501ff2ee12dd81c457d9e810d3f64b5200ee461fe73800b"
dependencies = [
"thiserror",
]
[[package]]
name = "libc"
version = "0.2.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
[[package]]
name = "linux-raw-sys"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "normalize-line-endings"
version = "0.3.0"
@ -131,73 +104,82 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
[[package]]
name = "proc-macro2"
version = "1.0.51"
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "proc-macro2"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.23"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rustix"
version = "0.36.9"
name = "serde"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc"
checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys",
"windows-sys",
"serde_derive",
]
[[package]]
name = "serde"
version = "1.0.152"
name = "serde_derive"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "similar"
version = "2.2.1"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf"
checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597"
[[package]]
name = "snapbox"
version = "0.4.8"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4389a6395e9925166f19d67b64874e526ec28a4b8455f3321b686c912299c3ea"
checksum = "4b377c0b6e4715c116473d8e40d51e3fa5b0a2297ca9b2a931ba800667b259ed"
dependencies = [
"concolor",
"anstream",
"anstyle",
"normalize-line-endings",
"similar",
"snapbox-macros",
"yansi",
]
[[package]]
name = "snapbox-macros"
version = "0.3.1"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "485e65c1203eb37244465e857d15a26d3a85a5410648ccb53b18bd44cb3a7336"
checksum = "ed1559baff8a696add3322b9be3e940d433e7bb4e38d79017205fd37ff28b28e"
dependencies = [
"anstream",
]
[[package]]
name = "syn"
version = "1.0.109"
version = "2.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
dependencies = [
"proc-macro2",
"quote",
@ -206,18 +188,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.38"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.38"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
@ -226,11 +208,13 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.20"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
dependencies = [
"deranged",
"itoa",
"powerfmt",
"serde",
"time-core",
"time-macros",
@ -238,61 +222,45 @@ dependencies = [
[[package]]
name = "time-core"
version = "0.1.0"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.8"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36"
checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20"
dependencies = [
"time-core",
]
[[package]]
name = "unicode-ident"
version = "1.0.7"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "775c11906edafc97bc378816b94585fbd9a054eabaf86fdd0ced94af449efab7"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "winapi"
version = "0.3.9"
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "windows-sys"
version = "0.45.0"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
@ -305,48 +273,42 @@ dependencies = [
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_i686_gnu"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"


@ -17,6 +17,6 @@ default-features = false
features = ["std"]
[dependencies]
leon = { version = "0.0.1", default-features = false }
leon = { version = "2.0.1", default-features = false }
snapbox = "0.4.8"
time = { version = "0.3.20", features = ["formatting", "macros"] }
time = { version = "0.3.30", features = ["formatting", "macros"] }


@ -3,14 +3,56 @@
version = 3
[[package]]
name = "bitflags"
version = "1.3.2"
name = "anstream"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44"
dependencies = [
"anstyle",
"anstyle-parse",
"anstyle-query",
"anstyle-wincon",
"colorchoice",
"utf8parse",
]
[[package]]
name = "anstyle"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87"
[[package]]
name = "anstyle-parse"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140"
dependencies = [
"utf8parse",
]
[[package]]
name = "anstyle-query"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b"
dependencies = [
"windows-sys",
]
[[package]]
name = "anstyle-wincon"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628"
dependencies = [
"anstyle",
"windows-sys",
]
[[package]]
name = "bosion"
version = "1.0.0"
version = "1.0.1"
dependencies = [
"time",
]
@ -26,104 +68,35 @@ dependencies = [
]
[[package]]
name = "cc"
version = "1.0.79"
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50d30906286121d95be3d479533b458f87493b30a4b5f79a607db8f5d11aa91f"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"
[[package]]
name = "concolor"
version = "0.0.12"
name = "deranged"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7b3e3c41e9488eeda196b6806dbf487742107d61b2e16485bcca6c25ed5755b"
checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3"
dependencies = [
"bitflags",
"concolor-query",
"is-terminal",
]
[[package]]
name = "concolor-query"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "82a90734b3d5dcf656e7624cca6bce9c3a90ee11f900e80141a7427ccfb3d317"
[[package]]
name = "errno"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
dependencies = [
"errno-dragonfly",
"libc",
"winapi",
]
[[package]]
name = "errno-dragonfly"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf"
dependencies = [
"cc",
"libc",
]
[[package]]
name = "hermit-abi"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]]
name = "io-lifetimes"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1abeb7a0dd0f8181267ff8adc397075586500b81b28a73e8a0208b00fc170fb3"
dependencies = [
"libc",
"windows-sys",
]
[[package]]
name = "is-terminal"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
dependencies = [
"hermit-abi",
"io-lifetimes",
"rustix",
"windows-sys",
"powerfmt",
]
[[package]]
name = "itoa"
version = "1.0.6"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
[[package]]
name = "leon"
version = "0.0.1"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1afa3794684c32f91a5aa105e5109743bc6f2999a869c28fffa40aeffa30cfd0"
checksum = "52df920dfe9751d43501ff2ee12dd81c457d9e810d3f64b5200ee461fe73800b"
dependencies = [
"thiserror",
]
[[package]]
name = "libc"
version = "0.2.139"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79"
[[package]]
name = "linux-raw-sys"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4"
[[package]]
name = "normalize-line-endings"
version = "0.3.0"
@ -131,73 +104,82 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be"
[[package]]
name = "proc-macro2"
version = "1.0.51"
name = "powerfmt"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d727cae5b39d21da60fa540906919ad737832fe0b1c165da3a34d6548c849d6"
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
[[package]]
name = "proc-macro2"
version = "1.0.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.23"
version = "1.0.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rustix"
version = "0.36.9"
name = "serde"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd5c6ff11fecd55b40746d1995a02f2eb375bf8c00d192d521ee09f42bef37bc"
checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89"
dependencies = [
"bitflags",
"errno",
"io-lifetimes",
"libc",
"linux-raw-sys",
"windows-sys",
"serde_derive",
]
[[package]]
name = "serde"
version = "1.0.152"
name = "serde_derive"
version = "1.0.193"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb"
checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "similar"
version = "2.2.1"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "420acb44afdae038210c99e69aae24109f32f15500aa708e81d46c9f29d55fcf"
checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597"
[[package]]
name = "snapbox"
version = "0.4.8"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4389a6395e9925166f19d67b64874e526ec28a4b8455f3321b686c912299c3ea"
checksum = "4b377c0b6e4715c116473d8e40d51e3fa5b0a2297ca9b2a931ba800667b259ed"
dependencies = [
"concolor",
"anstream",
"anstyle",
"normalize-line-endings",
"similar",
"snapbox-macros",
"yansi",
]
[[package]]
name = "snapbox-macros"
version = "0.3.1"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "485e65c1203eb37244465e857d15a26d3a85a5410648ccb53b18bd44cb3a7336"
checksum = "ed1559baff8a696add3322b9be3e940d433e7bb4e38d79017205fd37ff28b28e"
dependencies = [
"anstream",
]
[[package]]
name = "syn"
version = "1.0.109"
version = "2.0.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a"
dependencies = [
"proc-macro2",
"quote",
@ -206,18 +188,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.38"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0"
checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.38"
version = "1.0.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f"
checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8"
dependencies = [
"proc-macro2",
"quote",
@ -226,11 +208,13 @@ dependencies = [
[[package]]
name = "time"
version = "0.3.20"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd0cbfecb4d19b5ea75bb31ad904eb5b9fa13f21079c3b92017ebdf4999a5890"
checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5"
dependencies = [
"deranged",
"itoa",
"powerfmt",
"serde",
"time-core",
"time-macros",
@ -238,61 +222,45 @@ dependencies = [
[[package]]
name = "time-core"
version = "0.1.0"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e153e1f1acaef8acc537e68b44906d2db6436e2b35ac2c6b42640fff91f00fd"
checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3"
[[package]]
name = "time-macros"
version = "0.2.8"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd80a657e71da814b8e5d60d3374fc6d35045062245d80224748ae522dd76f36"
checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20"
dependencies = [
"time-core",
]
[[package]]
name = "unicode-ident"
version = "1.0.7"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "775c11906edafc97bc378816b94585fbd9a054eabaf86fdd0ced94af449efab7"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "winapi"
version = "0.3.9"
name = "utf8parse"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a"
[[package]]
name = "windows-sys"
version = "0.45.0"
version = "0.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
@ -305,48 +273,42 @@ dependencies = [
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c9864e83243fdec7fc9c5444389dcbbfd258f745e7853198f365e3c4968a608"
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c8b1b673ffc16c47a9ff48570a9d85e25d265735c503681332589af6253c6c7"
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
[[package]]
name = "windows_i686_gnu"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de3887528ad530ba7bdbb1faa8275ec7a1155a45ffa57c37993960277145d640"
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
[[package]]
name = "windows_i686_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf4d1122317eddd6ff351aa852118a2418ad4214e6613a50e0191f7004372605"
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1040f221285e17ebccbc2591ffdc2d44ee1f9186324dd3e84e99ac68d699c45"
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "628bfdf232daa22b0d64fdb62b09fcc36bb01f05a3939e20ab73aaf9470d0463"
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.1"
version = "0.48.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "447660ad36a13288b1db4d4248e857b510e8c3a225c822ba4fb748c0aafecffd"
[[package]]
name = "yansi"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec"
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"


@ -22,6 +22,6 @@ path = "../.."
default-features = false
[dependencies]
leon = { version = "0.0.1", default-features = false }
leon = { version = "2.0.1", default-features = false }
snapbox = "0.4.8"
time = { version = "0.3.20", features = ["formatting", "macros"] }
time = { version = "0.3.30", features = ["formatting", "macros"] }


@ -77,7 +77,7 @@ impl Info {
#[cfg(feature = "git")]
git: GitInfo::gather()
.map_err(|e| {
println!("cargo:warning=git info gathering failed: {}", e);
println!("cargo:warning=git info gathering failed: {e}");
})
.ok(),
#[cfg(not(feature = "git"))]
@ -150,7 +150,8 @@ pub struct GitInfo {
#[cfg(feature = "git")]
impl GitInfo {
fn gather() -> Result<Self, String> {
let (path, _) = gix::discover::upwards(".").err_string()?;
use std::path::Path;
let (path, _) = gix::discover::upwards(Path::new(".")).err_string()?;
let repo = gix::discover(path).err_string()?;
let head = repo.head_commit().err_string()?;
let time = head.time().err_string()?;


@ -107,7 +107,7 @@ pub fn gather_to(filename: &str, structname: &str, public: bool) {
"
), format!("{crate_version} ({git_shorthash} {git_date}) {crate_feature_string}\ncommit-hash: {git_hash}\ncommit-date: {git_date}\nbuild-date: {build_date}\nrelease: {crate_version}\nfeatures: {crate_feature_list}"))
} else {
("".to_string(), format!("{crate_version} ({build_date}) {crate_feature_string}\nbuild-date: {build_date}\nrelease: {crate_version}\nfeatures: {crate_feature_list}"))
(String::new(), format!("{crate_version} ({build_date}) {crate_feature_string}\nbuild-date: {build_date}\nrelease: {crate_version}\nfeatures: {crate_feature_list}"))
};
#[cfg(feature = "std")]


@ -20,31 +20,28 @@ name = "watchexec"
path = "src/main.rs"
[dependencies]
argfile = "0.1.5"
chrono = "0.4.23"
clap_complete = "4.1.4"
clap_complete_nushell = "4.3.1"
clap_mangen = "0.2.9"
argfile = "0.1.6"
chrono = "0.4.31"
clap_complete = "4.4.4"
clap_complete_nushell = "4.4.2"
clap_mangen = "0.2.15"
clearscreen = "2.0.1"
dirs = "5.0.0"
futures = "0.3.17"
futures = "0.3.29"
humantime = "2.1.0"
is-terminal = "0.4.4"
notify-rust = "4.5.2"
serde_json = "1.0.94"
tempfile = "3.4.0"
tracing = "0.1.26"
which = "4.4.0"
[dependencies.command-group]
version = "2.1.0"
features = ["with-tokio"]
notify-rust = "4.9.0"
serde_json = "1.0.107"
tempfile = "3.8.1"
tracing = "0.1.40"
which = "5.0.0"
[dependencies.console-subscriber]
version = "0.1.0"
version = "0.2.0"
optional = true
[dependencies.clap]
version = "4.1.8"
version = "4.4.7"
features = ["cargo", "derive", "env", "wrap_help"]
[dependencies.ignore-files]
@ -55,6 +52,10 @@ path = "../ignore-files"
version = "5.3.0"
features = ["fancy"]
[dependencies.pid1]
version = "0.1.1"
optional = true
[dependencies.project-origins]
version = "1.2.0"
path = "../project-origins"
@ -77,7 +78,7 @@ version = "1.2.0"
path = "../filterer/globset"
[dependencies.tokio]
version = "1.24.2"
version = "1.33.0"
features = [
"fs",
"io-std",
@ -98,22 +99,24 @@ features = [
]
[target.'cfg(target_env = "musl")'.dependencies]
mimalloc = "0.1.26"
[target.'cfg(target_os = "linux")'.dependencies]
shadow-rs = "0.22.0"
[target.'cfg(target_os = "linux")'.build-dependencies]
shadow-rs = "0.22.0"
mimalloc = "0.1.39"
[build-dependencies]
embed-resource = "2.1.1"
embed-resource = "2.4.0"
[build-dependencies.bosion]
version = "1.0.1"
path = "../bosion"
[features]
default = ["pid1"]
## Enables PID1 handling.
pid1 = ["dep:pid1"]
## Enables logging for PID1 handling.
pid1-withlog = ["pid1"]
## For debugging only: enables the Tokio Console.
dev-console = ["console-subscriber"]


@ -46,7 +46,8 @@ include!(env!("BOSION_PATH"));
author,
version,
long_version = Bosion::LONG_VERSION,
after_help = "Use @argfile as first argument to load arguments from the file 'argfile' (one argument per line) which will be inserted in place of the @argfile (further arguments on the CLI will override or add onto those in the file).",
after_help = "Want more detail? Try the long '--help' flag!",
after_long_help = "Use @argfile as first argument to load arguments from the file 'argfile' (one argument per line) which will be inserted in place of the @argfile (further arguments on the CLI will override or add onto those in the file).\n\nDidn't expect this much output? Use the short '-h' flag to get short help.",
hide_possible_values = true,
)]
#[cfg_attr(debug_assertions, command(before_help = "⚠ DEBUG BUILD ⚠"))]
@ -103,6 +104,9 @@ pub struct Args {
/// for '--project-origin' for more information.
///
/// This option can be specified multiple times to watch multiple files or directories.
///
/// The special value '/dev/null', provided as the only path watched, will cause Watchexec to
/// not watch any paths. Other event sources (like signals or key events) may still be used.
#[arg(
short = 'w',
long = "watch",


@ -1,5 +1,479 @@
mod init;
mod runtime;
use std::{
borrow::Cow,
collections::HashMap,
env::current_dir,
ffi::{OsStr, OsString},
fs::File,
path::Path,
process::Stdio,
sync::Arc,
};
pub use init::init;
pub use runtime::runtime;
use clearscreen::ClearScreen;
use miette::{miette, IntoDiagnostic, Report, Result};
use notify_rust::Notification;
use tokio::{process::Command as TokioCommand, time::sleep};
use tracing::{debug, debug_span, error};
use watchexec::{
command::{Command, Program, Shell, SpawnOptions},
error::RuntimeError,
job::{CommandState, Job},
sources::fs::Watcher,
Config, ErrorHook, Id,
};
use watchexec_events::{Event, Keyboard, ProcessEnd, Tag};
use watchexec_signals::Signal;
use crate::state::State;
use crate::{
args::{Args, ClearMode, EmitEvents, OnBusyUpdate},
state::RotatingTempFile,
};
pub fn make_config(args: &Args, state: &State) -> Result<Config> {
let _span = debug_span!("args-runtime").entered();
let config = Config::default();
config.on_error(|err: ErrorHook| {
if let RuntimeError::IoError {
about: "waiting on process group",
..
} = err.error
{
// "No child processes" and such
// these are often spurious, so condemn them to -v only
error!("{}", err.error);
return;
}
if cfg!(debug_assertions) {
eprintln!("[[{:?}]]", err.error);
}
eprintln!("[[Error (not fatal)]]\n{}", Report::new(err.error));
});
config.pathset(if args.paths.is_empty() {
vec![current_dir().into_diagnostic()?]
} else if args.paths.len() == 1
&& args
.paths
.first()
.map_or(false, |p| p == Path::new("/dev/null"))
{
// special case: /dev/null means "don't start the fs event source"
Vec::new()
} else {
args.paths.clone()
});
config.throttle(args.debounce.0);
config.keyboard_events(args.stdin_quit);
if let Some(interval) = args.poll {
config.file_watcher(Watcher::Poll(interval.0));
}
let clear = args.screen_clear;
let delay_run = args.delay_run.map(|ts| ts.0);
let on_busy = args.on_busy_update;
let signal = args.signal;
let stop_signal = args.stop_signal;
let stop_timeout = args.stop_timeout.0;
let once = args.once;
let notif = args.notify;
let print_events = args.print_events;
let emit_events_to = args.emit_events_to;
let emit_file = state.emit_file.clone();
let workdir = Arc::new(args.workdir.clone());
let mut add_envs = HashMap::new();
for pair in &args.env {
if let Some((k, v)) = pair.split_once('=') {
add_envs.insert(k.to_owned(), OsString::from(v));
} else {
return Err(miette!("{pair} is not in key=value format"));
}
}
debug!(
?add_envs,
"additional environment variables to add to command"
);
let id = Id::default();
let command = interpret_command_args(args)?;
config.on_action_async(move |mut action| {
let add_envs = add_envs.clone();
let command = command.clone();
let emit_file = emit_file.clone();
let workdir = workdir.clone();
Box::new(async move {
let add_envs = add_envs.clone();
let command = command.clone();
let emit_file = emit_file.clone();
let workdir = workdir.clone();
let job = action.get_or_create_job(id, move || command.clone());
let events = action.events.clone();
job.set_spawn_hook(move |command, _| {
let add_envs = add_envs.clone();
let emit_file = emit_file.clone();
let events = events.clone();
if let Some(ref workdir) = workdir.as_ref() {
debug!(?workdir, "set command workdir");
command.current_dir(workdir);
}
emit_events_to_command(command, events, emit_file, emit_events_to, add_envs);
});
let show_events = || {
if print_events {
for (n, event) in action.events.iter().enumerate() {
eprintln!("[EVENT {n}] {event}");
}
}
};
if once {
show_events();
if let Some(delay) = delay_run {
job.run_async(move |_| {
Box::new(async move {
sleep(delay).await;
})
});
}
// this blocks the event loop, but also this is a debug feature so i don't care
job.start().await;
job.to_wait().await;
action.quit();
return action;
}
let is_keyboard_eof = action
.events
.iter()
.any(|e| e.tags.contains(&Tag::Keyboard(Keyboard::Eof)));
if is_keyboard_eof {
show_events();
action.quit();
return action;
}
let signals: Vec<Signal> = action.signals().collect();
// if we got a terminate or interrupt signal, quit
if signals.contains(&Signal::Terminate) || signals.contains(&Signal::Interrupt) {
show_events();
action.quit();
return action;
}
// pass all other signals on
for signal in signals {
job.signal(signal);
}
// clear the screen before printing events
if let Some(mode) = clear {
match mode {
ClearMode::Clear => {
clearscreen::clear().ok();
}
ClearMode::Reset => {
for cs in [
ClearScreen::WindowsCooked,
ClearScreen::WindowsVt,
ClearScreen::VtLeaveAlt,
ClearScreen::VtWellDone,
ClearScreen::default(),
] {
cs.clear().ok();
}
}
}
}
show_events();
if let Some(delay) = delay_run {
job.run_async(move |_| {
Box::new(async move {
sleep(delay).await;
})
});
}
job.run_async({
let job = job.clone();
move |context| {
let job = job.clone();
let is_running = matches!(context.current, CommandState::Running { .. });
Box::new(async move {
let innerjob = job.clone();
if is_running {
match on_busy {
OnBusyUpdate::DoNothing => {}
OnBusyUpdate::Signal => {
job.signal(if cfg!(windows) {
Signal::ForceStop
} else {
stop_signal.or(signal).unwrap_or(Signal::Terminate)
});
}
OnBusyUpdate::Restart if cfg!(windows) => {
job.restart();
job.run(move |context| {
setup_process(
innerjob.clone(),
context.command.clone(),
notif,
)
});
}
OnBusyUpdate::Restart => {
job.restart_with_signal(
stop_signal.unwrap_or(Signal::Terminate),
stop_timeout,
);
job.run(move |context| {
setup_process(
innerjob.clone(),
context.command.clone(),
notif,
)
});
}
OnBusyUpdate::Queue => {
let job = job.clone();
tokio::spawn(async move {
job.to_wait().await;
job.start();
job.run(move |context| {
setup_process(
innerjob.clone(),
context.command.clone(),
notif,
)
});
});
}
}
} else {
job.start();
job.run(move |context| {
setup_process(innerjob.clone(), context.command.clone(), notif)
});
}
})
}
});
action
})
});
Ok(config)
}
fn interpret_command_args(args: &Args) -> Result<Arc<Command>> {
let mut cmd = args.command.clone();
if cmd.is_empty() {
panic!("(clap) Bug: command is not present");
}
let shell = match if args.no_shell || args.no_shell_long {
None
} else {
args.shell.as_deref().or(Some("default"))
} {
Some("") => return Err(RuntimeError::CommandShellEmptyShell).into_diagnostic(),
Some("none") | None => None,
#[cfg(windows)]
Some("default") | Some("cmd") | Some("cmd.exe") | Some("CMD") | Some("CMD.EXE") => {
Some(Shell::cmd())
}
#[cfg(not(windows))]
Some("default") => Some(Shell::new("sh")),
#[cfg(windows)]
Some("powershell") => Some(Shell::new(available_powershell())),
Some(other) => {
let sh = other.split_ascii_whitespace().collect::<Vec<_>>();
// UNWRAP: checked by Some("")
#[allow(clippy::unwrap_used)]
let (shprog, shopts) = sh.split_first().unwrap();
Some(Shell {
prog: shprog.into(),
options: shopts.iter().map(|s| (*s).to_string()).collect(),
program_option: Some(Cow::Borrowed(OsStr::new("-c"))),
})
}
};
let program = if let Some(shell) = shell {
Program::Shell {
shell,
command: cmd.join(" "),
args: Vec::new(),
}
} else {
Program::Exec {
prog: cmd.remove(0).into(),
args: cmd,
}
};
Ok(Arc::new(Command {
program,
options: SpawnOptions {
grouped: !args.no_process_group,
..Default::default()
},
}))
}
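For illustration, a hedged reconstruction (not part of the commit) of what interpret_command_args above builds for a plain `watchexec -- make test` invocation on Unix with default options, using only the types imported at the top of this file:

use watchexec::command::{Command, Program, Shell, SpawnOptions};

// Hedged reconstruction, not the commit's code: the default path takes the
// Some("default") branch (Shell::new("sh") on non-Windows), joins the argv
// into a single shell command string, and spawns it as a process group.
fn example_command() -> Command {
    Command {
        program: Program::Shell {
            shell: Shell::new("sh"),
            command: "make test".to_string(),
            args: Vec::new(),
        },
        options: SpawnOptions {
            grouped: true, // default, since --no-process-group was not passed
            ..Default::default()
        },
    }
}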
#[cfg(windows)]
fn available_powershell() -> String {
todo!("figure out if powershell.exe is available, and use that, otherwise use pwsh.exe")
}
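available_powershell is left as a todo!() in this commit. A minimal sketch of one way it could be filled in, assuming the `which` crate (already a CLI dependency in the Cargo.toml above) is acceptable for probing PATH — illustrative only, not the commit's implementation:

#[cfg(windows)]
fn available_powershell() -> String {
    // Hypothetical sketch: prefer Windows PowerShell when present on PATH,
    // otherwise fall back to PowerShell Core.
    if which::which("powershell.exe").is_ok() {
        "powershell.exe".to_string()
    } else {
        "pwsh.exe".to_string()
    }
}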
fn setup_process(job: Job, command: Arc<Command>, notif: bool) {
if notif {
Notification::new()
.summary("Watchexec: change detected")
.body(&format!("Running {command}"))
.show()
.map_or_else(
|err| {
eprintln!("[[Failed to send desktop notification: {err}]]");
},
drop,
);
}
tokio::spawn(async move {
job.to_wait().await;
job.run(move |context| end_of_process(context.current, notif));
});
}
fn end_of_process(state: &CommandState, notif: bool) {
let CommandState::Finished {
status,
started,
finished,
} = state
else {
return;
};
let duration = *finished - *started;
let msg = match status {
ProcessEnd::ExitError(code) => {
format!("Command exited with {code}, lasted {duration:?}")
}
ProcessEnd::ExitSignal(sig) => {
format!("Command killed by {sig:?}, lasted {duration:?}")
}
ProcessEnd::ExitStop(sig) => {
format!("Command stopped by {sig:?}, lasted {duration:?}")
}
ProcessEnd::Continued => format!("Command continued, lasted {duration:?}"),
ProcessEnd::Exception(ex) => {
format!("Command ended by exception {ex:#x}, lasted {duration:?}")
}
ProcessEnd::Success => format!("Command was successful, lasted {duration:?}"),
};
if notif {
Notification::new()
.summary("Watchexec: command ended")
.body(&msg)
.show()
.map_or_else(
|err| {
eprintln!("[[Failed to send desktop notification: {err}]]");
},
drop,
);
}
}
fn emit_events_to_command(
command: &mut TokioCommand,
events: Arc<[Event]>,
emit_file: RotatingTempFile,
emit_events_to: EmitEvents,
mut add_envs: HashMap<String, OsString>,
) {
use crate::emits::*;
let mut stdin = None;
match emit_events_to {
EmitEvents::Environment => {
add_envs.extend(emits_to_environment(&events));
}
EmitEvents::Stdin => match emits_to_file(&emit_file, &events)
.and_then(|path| File::open(path).into_diagnostic())
{
Ok(file) => {
stdin.replace(Stdio::from(file));
}
Err(err) => {
error!("Failed to write events to stdin, continuing without it: {err}");
}
},
EmitEvents::File => match emits_to_file(&emit_file, &events) {
Ok(path) => {
add_envs.insert("WATCHEXEC_EVENTS_FILE".into(), path.into());
}
Err(err) => {
error!("Failed to write WATCHEXEC_EVENTS_FILE, continuing without it: {err}");
}
},
EmitEvents::JsonStdin => match emits_to_json_file(&emit_file, &events)
.and_then(|path| File::open(path).into_diagnostic())
{
Ok(file) => {
stdin.replace(Stdio::from(file));
}
Err(err) => {
error!("Failed to write events to stdin, continuing without it: {err}");
}
},
EmitEvents::JsonFile => match emits_to_json_file(&emit_file, &events) {
Ok(path) => {
add_envs.insert("WATCHEXEC_EVENTS_FILE".into(), path.into());
}
Err(err) => {
error!("Failed to write WATCHEXEC_EVENTS_FILE, continuing without it: {err}");
}
},
EmitEvents::None => {}
}
for (k, v) in add_envs {
debug!(?k, ?v, "inserting environment variable");
command.env(k, v);
}
if let Some(stdin) = stdin {
debug!("set command stdin");
command.stdin(stdin);
}
}


@ -1,52 +0,0 @@
use std::convert::Infallible;
use miette::Report;
use tracing::error;
use watchexec::{
config::InitConfig,
error::{FsWatcherError, RuntimeError},
handler::SyncFnHandler,
ErrorHook,
};
use crate::args::Args;
pub fn init(_args: &Args) -> InitConfig {
let mut config = InitConfig::default();
config.on_error(SyncFnHandler::from(
|err: ErrorHook| -> std::result::Result<(), Infallible> {
if let RuntimeError::IoError {
about: "waiting on process group",
..
} = err.error
{
// "No child processes" and such
// these are often spurious, so condemn them to -v only
error!("{}", err.error);
return Ok(());
}
if let RuntimeError::FsWatcher {
err:
FsWatcherError::Create { .. }
| FsWatcherError::TooManyWatches { .. }
| FsWatcherError::TooManyHandles { .. },
..
} = err.error
{
err.elevate();
return Ok(());
}
if cfg!(debug_assertions) {
eprintln!("[[{:?}]]", err.error);
}
eprintln!("[[Error (not fatal)]]\n{}", Report::new(err.error));
Ok(())
},
));
config
}


@ -1,373 +0,0 @@
use std::{
collections::HashMap, convert::Infallible, env::current_dir, ffi::OsString, fs::File,
process::Stdio,
};
use miette::{miette, IntoDiagnostic, Result};
use notify_rust::Notification;
use tracing::{debug, debug_span, error};
use watchexec::{
action::{Action, Outcome, PostSpawn, PreSpawn},
command::{Command, Shell},
config::RuntimeConfig,
error::RuntimeError,
fs::Watcher,
handler::SyncFnHandler,
};
use watchexec_events::{Event, Keyboard, ProcessEnd, Tag};
use watchexec_signals::Signal;
use crate::args::{Args, ClearMode, EmitEvents, OnBusyUpdate};
use crate::state::State;
pub fn runtime(args: &Args, state: &State) -> Result<RuntimeConfig> {
let _span = debug_span!("args-runtime").entered();
let mut config = RuntimeConfig::default();
config.command(interpret_command_args(args)?);
config.pathset(if args.paths.is_empty() {
vec![current_dir().into_diagnostic()?]
} else {
args.paths.clone()
});
config.action_throttle(args.debounce.0);
config.command_grouped(!args.no_process_group);
config.keyboard_emit_eof(args.stdin_quit);
if let Some(interval) = args.poll {
config.file_watcher(Watcher::Poll(interval.0));
}
let clear = args.screen_clear;
let notif = args.notify;
let on_busy = args.on_busy_update;
let signal = args.signal;
let stop_signal = args.stop_signal;
let stop_timeout = args.stop_timeout.0;
let print_events = args.print_events;
let once = args.once;
let delay_run = args.delay_run.map(|ts| ts.0);
config.on_action(move |action: Action| {
let fut = async { Ok::<(), Infallible>(()) };
if print_events {
for (n, event) in action.events.iter().enumerate() {
eprintln!("[EVENT {n}] {event}");
}
}
if once {
action.outcome(Outcome::both(
if let Some(delay) = &delay_run {
Outcome::both(Outcome::Sleep(*delay), Outcome::Start)
} else {
Outcome::Start
},
Outcome::wait(Outcome::Exit),
));
return fut;
}
let signals: Vec<Signal> = action.events.iter().flat_map(Event::signals).collect();
let has_paths = action.events.iter().flat_map(Event::paths).next().is_some();
if signals.contains(&Signal::Terminate) {
action.outcome(Outcome::both(Outcome::Stop, Outcome::Exit));
return fut;
}
if signals.contains(&Signal::Interrupt) {
action.outcome(Outcome::both(Outcome::Stop, Outcome::Exit));
return fut;
}
let is_keyboard_eof = action
.events
.iter()
.any(|e| e.tags.contains(&Tag::Keyboard(Keyboard::Eof)));
if is_keyboard_eof {
action.outcome(Outcome::both(Outcome::Stop, Outcome::Exit));
return fut;
}
if !has_paths {
if !signals.is_empty() {
let mut out = Outcome::DoNothing;
for sig in signals {
out = Outcome::both(out, Outcome::Signal(sig));
}
action.outcome(out);
return fut;
}
let completion = action.events.iter().flat_map(Event::completions).next();
if let Some(status) = completion {
let (msg, printit) = match status {
Some(ProcessEnd::ExitError(code)) => {
(format!("Command exited with {code}"), true)
}
Some(ProcessEnd::ExitSignal(sig)) => {
(format!("Command killed by {sig:?}"), true)
}
Some(ProcessEnd::ExitStop(sig)) => {
(format!("Command stopped by {sig:?}"), true)
}
Some(ProcessEnd::Continued) => ("Command continued".to_string(), true),
Some(ProcessEnd::Exception(ex)) => {
(format!("Command ended by exception {ex:#x}"), true)
}
Some(ProcessEnd::Success) => ("Command was successful".to_string(), false),
None => ("Command completed".to_string(), false),
};
if printit {
eprintln!("[[{msg}]]");
}
if notif {
Notification::new()
.summary("Watchexec: command ended")
.body(&msg)
.show()
.map_or_else(
|err| {
eprintln!("[[Failed to send desktop notification: {err}]]");
},
drop,
);
}
action.outcome(Outcome::DoNothing);
return fut;
}
}
let start = if let Some(mode) = clear {
Outcome::both(
match mode {
ClearMode::Clear => Outcome::Clear,
ClearMode::Reset => Outcome::Reset,
},
Outcome::Start,
)
} else {
Outcome::Start
};
let start = if let Some(delay) = &delay_run {
Outcome::both(Outcome::Sleep(*delay), start)
} else {
start
};
let when_idle = start.clone();
let when_running = match on_busy {
OnBusyUpdate::Restart if cfg!(windows) => Outcome::both(Outcome::Stop, start),
OnBusyUpdate::Restart => Outcome::both(
Outcome::both(
Outcome::Signal(stop_signal.unwrap_or(Signal::Terminate)),
Outcome::wait_timeout(stop_timeout, Outcome::Stop),
),
start,
),
OnBusyUpdate::Signal if cfg!(windows) => Outcome::Stop,
OnBusyUpdate::Signal => {
Outcome::Signal(stop_signal.or(signal).unwrap_or(Signal::Terminate))
}
OnBusyUpdate::Queue => Outcome::wait(start),
OnBusyUpdate::DoNothing => Outcome::DoNothing,
};
action.outcome(Outcome::if_running(when_running, when_idle));
fut
});
let mut add_envs = HashMap::new();
// TODO: move to args?
for pair in &args.env {
if let Some((k, v)) = pair.split_once('=') {
add_envs.insert(k.to_owned(), OsString::from(v));
} else {
return Err(miette!("{pair} is not in key=value format"));
}
}
debug!(
?add_envs,
"additional environment variables to add to command"
);
let workdir = args.workdir.clone();
let emit_events_to = args.emit_events_to;
let emit_file = state.emit_file.clone();
config.on_pre_spawn(move |prespawn: PreSpawn| {
use crate::emits::*;
let workdir = workdir.clone();
let mut add_envs = add_envs.clone();
let mut stdin = None;
match emit_events_to {
EmitEvents::Environment => {
add_envs.extend(emits_to_environment(&prespawn.events));
}
EmitEvents::Stdin => match emits_to_file(&emit_file, &prespawn.events)
.and_then(|path| File::open(path).into_diagnostic())
{
Ok(file) => {
stdin.replace(Stdio::from(file));
}
Err(err) => {
error!("Failed to write events to stdin, continuing without it: {err}");
}
},
EmitEvents::File => match emits_to_file(&emit_file, &prespawn.events) {
Ok(path) => {
add_envs.insert("WATCHEXEC_EVENTS_FILE".into(), path.into());
}
Err(err) => {
error!("Failed to write WATCHEXEC_EVENTS_FILE, continuing without it: {err}");
}
},
EmitEvents::JsonStdin => match emits_to_json_file(&emit_file, &prespawn.events)
.and_then(|path| File::open(path).into_diagnostic())
{
Ok(file) => {
stdin.replace(Stdio::from(file));
}
Err(err) => {
error!("Failed to write events to stdin, continuing without it: {err}");
}
},
EmitEvents::JsonFile => match emits_to_json_file(&emit_file, &prespawn.events) {
Ok(path) => {
add_envs.insert("WATCHEXEC_EVENTS_FILE".into(), path.into());
}
Err(err) => {
error!("Failed to write WATCHEXEC_EVENTS_FILE, continuing without it: {err}");
}
},
EmitEvents::None => {}
}
async move {
if !add_envs.is_empty() || workdir.is_some() || stdin.is_some() {
if let Some(mut command) = prespawn.command().await {
for (k, v) in add_envs {
debug!(?k, ?v, "inserting environment variable");
command.env(k, v);
}
if let Some(ref workdir) = workdir {
debug!(?workdir, "set command workdir");
command.current_dir(workdir);
}
if let Some(stdin) = stdin {
debug!("set command stdin");
command.stdin(stdin);
}
}
}
Ok::<(), Infallible>(())
}
});
config.on_post_spawn(SyncFnHandler::from(move |postspawn: PostSpawn| {
if notif {
Notification::new()
.summary("Watchexec: change detected")
.body(&format!("Running {}", postspawn.command))
.show()
.map_or_else(
|err| {
eprintln!("[[Failed to send desktop notification: {err}]]");
},
drop,
);
}
Ok::<(), Infallible>(())
}));
Ok(config)
}
fn interpret_command_args(args: &Args) -> Result<Command> {
let mut cmd = args.command.clone();
if cmd.is_empty() {
panic!("(clap) Bug: command is not present");
}
Ok(if args.no_shell || args.no_shell_long {
Command::Exec {
prog: cmd.remove(0),
args: cmd,
}
} else {
let (shell, shopts) = if let Some(s) = &args.shell {
if s.is_empty() {
return Err(RuntimeError::CommandShellEmptyShell).into_diagnostic();
} else if s.eq_ignore_ascii_case("powershell") {
(Shell::Powershell, Vec::new())
} else if s.eq_ignore_ascii_case("none") {
return Ok(Command::Exec {
prog: cmd.remove(0),
args: cmd,
});
} else if s.eq_ignore_ascii_case("cmd") {
(cmd_shell(s.into()), Vec::new())
} else {
let sh = s.split_ascii_whitespace().collect::<Vec<_>>();
// UNWRAP: checked by first if branch
#[allow(clippy::unwrap_used)]
let (shprog, shopts) = sh.split_first().unwrap();
(
Shell::Unix((*shprog).to_string()),
shopts.iter().map(|s| (*s).to_string()).collect(),
)
}
} else {
(default_shell(), Vec::new())
};
Command::Shell {
shell,
args: shopts,
command: cmd.join(" "),
}
})
}
// until 2.0, then Powershell
#[cfg(windows)]
fn default_shell() -> Shell {
Shell::Cmd
}
#[cfg(not(windows))]
fn default_shell() -> Shell {
Shell::Unix("sh".to_string())
}
// because Shell::Cmd is only on windows
#[cfg(windows)]
fn cmd_shell(_: String) -> Shell {
Shell::Cmd
}
#[cfg(not(windows))]
fn cmd_shell(s: String) -> Shell {
Shell::Unix(s)
}


@ -7,13 +7,10 @@ use std::{
use miette::{IntoDiagnostic, Result};
use tokio::io::{AsyncBufReadExt, BufReader};
use tracing::{info, trace, trace_span};
use watchexec::{
error::RuntimeError,
event::{
filekind::{FileEventKind, ModifyKind},
Event, Priority, Tag,
},
filter::Filterer,
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{
filekind::{FileEventKind, ModifyKind},
Event, Priority, Tag,
};
use watchexec_filterer_globset::GlobsetFilterer;

View File

@ -7,15 +7,12 @@ use args::{Args, ShellCompletion};
use clap::CommandFactory;
use clap_complete::{Generator, Shell};
use clap_mangen::Man;
use command_group::AsyncCommandGroup;
use is_terminal::IsTerminal;
use miette::{IntoDiagnostic, Result};
use tokio::{fs::metadata, io::AsyncWriteExt, process::Command};
use tracing::{debug, info, warn};
use watchexec::{
event::{Event, Priority},
Watchexec,
};
use watchexec::Watchexec;
use watchexec_events::{Event, Priority};
pub mod args;
mod config;
@ -102,14 +99,12 @@ async fn init() -> Result<Args> {
async fn run_watchexec(args: Args) -> Result<()> {
info!(version=%env!("CARGO_PKG_VERSION"), "constructing Watchexec from CLI");
let init = config::init(&args);
let state = state::State::new()?;
let mut runtime = config::runtime(&args, &state)?;
runtime.filterer(filterer::globset(&args).await?);
let config = config::make_config(&args, &state)?;
config.filterer(filterer::globset(&args).await?);
info!("initialising Watchexec runtime");
let wx = Watchexec::new(init, runtime)?;
let wx = Watchexec::with_config(config)?;
if !args.postpone {
debug!("kicking off with empty event");
@ -137,12 +132,10 @@ async fn run_manpage(_args: Args) -> Result<()> {
.stdin(Stdio::piped())
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.group()
.kill_on_drop(true)
.spawn()
.into_diagnostic()?;
child
.inner()
.stdin
.as_mut()
.unwrap()

View File

@ -1,14 +1,19 @@
use miette::IntoDiagnostic;
#[cfg(target_env = "musl")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
#[tokio::main]
async fn main() -> miette::Result<()> {
watchexec_cli::run().await?;
fn main() -> miette::Result<()> {
#[cfg(feature = "pid1")]
pid1::Pid1Settings::new()
.enable_log(cfg!(feature = "pid1-withlog"))
.launch()
.into_diagnostic()?;
if std::process::id() == 1 {
std::process::exit(0);
}
Ok(())
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async { watchexec_cli::run().await })
}

View File

@ -2,6 +2,8 @@
## Next (YYYY-MM-DD)
- Add `ProcessEnd::into_exitstatus` testing-only utility method.
## v1.0.0 (2023-03-18)
- Split off new `watchexec-events` crate (this one), to have a lightweight library that can parse

View File

@ -15,11 +15,11 @@ rust-version = "1.61.0"
edition = "2021"
[dependencies.notify]
version = "5.0.0"
version = "6.0.0"
optional = true
[dependencies.serde]
version = "1.0.152"
version = "1.0.183"
optional = true
features = ["derive"]
@ -29,13 +29,13 @@ path = "../signals"
default-features = false
[target.'cfg(unix)'.dependencies.nix]
version = "0.26.2"
version = "0.27.1"
features = ["signal"]
[dev-dependencies]
watchexec-events = { version = "*", features = ["serde"], path = "." }
snapbox = "0.4.10"
serde_json = "1.0.94"
snapbox = "0.4.11"
serde_json = "1.0.107"
[features]
default = ["notify"]

View File

@ -53,7 +53,7 @@ pub enum Tag {
/// The event is about a signal being delivered to the main process.
Signal(Signal),
/// The event is about the subprocess ending.
/// The event is about a subprocess ending.
ProcessCompletion(Option<ProcessEnd>),
#[cfg(feature = "serde")]

View File

@ -88,3 +88,45 @@ impl From<ExitStatus> for ProcessEnd {
}
}
}
impl ProcessEnd {
/// Convert a `ProcessEnd` to an `ExitStatus`.
///
/// This is a testing function only! **It will panic** if the `ProcessEnd` is not representable
/// as an `ExitStatus` on Unix. This is also not guaranteed to be accurate, as the waitpid()
/// status union is platform-specific. Exit codes and signals are implemented, other variants
/// are not.
#[cfg(unix)]
pub fn into_exitstatus(self) -> ExitStatus {
use std::os::unix::process::ExitStatusExt;
match self {
Self::Success => ExitStatus::from_raw(0),
Self::ExitError(code) => ExitStatus::from_raw((code.get() as u8 as i32) << 8),
Self::ExitSignal(signal) => {
ExitStatus::from_raw(signal.to_nix().map_or(0, |sig| sig as i32))
}
Self::Continued => ExitStatus::from_raw(0xffff),
_ => unimplemented!(),
}
}
/// Convert a `ProcessEnd` to an `ExitStatus`.
///
/// This is a testing function only! **It will panic** if the `ProcessEnd` is not representable
/// as an `ExitStatus` on Windows.
#[cfg(windows)]
pub fn into_exitstatus(self) -> ExitStatus {
use std::os::windows::process::ExitStatusExt;
match self {
Self::Success => ExitStatus::from_raw(0),
Self::ExitError(code) => ExitStatus::from_raw(code.get().try_into().unwrap()),
_ => unimplemented!(),
}
}
/// Unimplemented on this platform.
#[cfg(not(any(unix, windows)))]
pub fn into_exitstatus(self) -> ExitStatus {
unimplemented!()
}
}

View File

@ -17,7 +17,7 @@ edition = "2021"
[dependencies]
ignore = "0.4.18"
tracing = "0.1.26"
tracing = "0.1.40"
[dependencies.ignore-files]
version = "1.3.1"
@ -27,6 +27,10 @@ path = "../../ignore-files"
version = "2.3.0"
path = "../../lib"
[dependencies.watchexec-events]
version = "1.0.0"
path = "../../events"
[dependencies.watchexec-filterer-ignore]
version = "1.2.1"
path = "../ignore"
@ -39,7 +43,7 @@ version = "1.2.0"
path = "../../project-origins"
[dev-dependencies.tokio]
version = "1.24.2"
version = "1.33.0"
features = [
"fs",
"io-std",

View File

@ -16,11 +16,8 @@ use std::{
use ignore::gitignore::{Gitignore, GitignoreBuilder};
use ignore_files::{Error, IgnoreFile, IgnoreFilter};
use tracing::{debug, trace, trace_span};
use watchexec::{
error::RuntimeError,
event::{Event, FileType, Priority},
filter::Filterer,
};
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{Event, FileType, Priority};
use watchexec_filterer_ignore::IgnoreFilterer;
/// A simple filterer in the style of the watchexec v1.17 filter.

View File

@ -374,10 +374,8 @@ async fn extensions_fail_extensionless() {
#[tokio::test]
async fn multipath_allow_on_any_one_pass() {
use watchexec::{
event::{Event, FileType, Tag},
filter::Filterer,
};
use watchexec::filter::Filterer;
use watchexec_events::{Event, FileType, Tag};
let filterer = filt(&[], &[], &["py"]).await;
let origin = tokio::fs::canonicalize(".").await.unwrap();
@ -442,10 +440,8 @@ async fn leading_single_glob_file() {
#[tokio::test]
async fn nonpath_event_passes() {
use watchexec::{
event::{Event, Source, Tag},
filter::Filterer,
};
use watchexec::filter::Filterer;
use watchexec_events::{Event, Source, Tag};
let filterer = filt(&[], &[], &["py"]).await;

View File

@ -5,11 +5,8 @@ use std::{
use ignore_files::IgnoreFile;
use project_origins::ProjectType;
use watchexec::{
error::RuntimeError,
event::{Event, FileType, Priority, Tag},
filter::Filterer,
};
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{Event, FileType, Priority, Tag};
use watchexec_filterer_globset::GlobsetFilterer;
use watchexec_filterer_ignore::IgnoreFilterer;
@ -17,7 +14,7 @@ pub mod globset {
pub use super::globset_filt as filt;
pub use super::Applies;
pub use super::PathHarness;
pub use watchexec::event::Priority;
pub use watchexec_events::Priority;
}
pub trait PathHarness: Filterer {

View File

@ -17,7 +17,7 @@ edition = "2021"
[dependencies]
ignore = "0.4.18"
tracing = "0.1.26"
tracing = "0.1.40"
dunce = "1.0.4"
[dependencies.ignore-files]
@ -28,6 +28,10 @@ path = "../../ignore-files"
version = "2.3.0"
path = "../../lib"
[dependencies.watchexec-events]
version = "1.0.0"
path = "../../events"
[dependencies.watchexec-signals]
version = "1.0.0"
path = "../../signals"
@ -40,7 +44,7 @@ version = "1.2.0"
path = "../../project-origins"
[dev-dependencies.tokio]
version = "1.24.2"
version = "1.33.0"
features = [
"fs",
"io-std",

View File

@ -14,12 +14,8 @@
use ignore::Match;
use ignore_files::IgnoreFilter;
use tracing::{trace, trace_span};
use watchexec::{
error::RuntimeError,
event::{Event, FileType, Priority},
filter::Filterer,
};
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{Event, FileType, Priority};
/// A Watchexec [`Filterer`] implementation for [`IgnoreFilter`].
#[derive(Clone, Debug)]

View File

@ -235,7 +235,7 @@ async fn scopes() {
#[cfg(not(windows))]
filterer.file_does_pass("/local.b");
// FIXME flaky
//filterer.file_doesnt_pass("tests/local.c");
// filterer.file_doesnt_pass("tests/local.c");
filterer.file_does_pass("sublocal.a");
// #[cfg(windows)] FIXME should work

View File

@ -2,10 +2,9 @@ use std::path::{Path, PathBuf};
use ignore_files::{IgnoreFile, IgnoreFilter};
use project_origins::ProjectType;
use watchexec::{
error::RuntimeError,
event::{filekind::FileEventKind, Event, FileType, Priority, ProcessEnd, Source, Tag},
filter::Filterer,
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{
filekind::FileEventKind, Event, FileType, Priority, ProcessEnd, Source, Tag,
};
use watchexec_filterer_ignore::IgnoreFilterer;
use watchexec_signals::Signal;
@ -15,7 +14,7 @@ pub mod ignore {
pub use super::ignore_filt as filt;
pub use super::Applies;
pub use super::PathHarness;
pub use watchexec::event::Priority;
pub use watchexec_events::Priority;
}
pub trait PathHarness: Filterer {

View File

@ -31,7 +31,7 @@ version = "1.3.1"
path = "../../ignore-files"
[dependencies.tokio]
version = "1.24.2"
version = "1.32.0"
features = [
"fs",
]
@ -40,6 +40,10 @@ features = [
version = "2.3.0"
path = "../../lib"
[dependencies.watchexec-events]
version = "1.0.0"
path = "../../events"
[dependencies.watchexec-filterer-ignore]
version = "1.2.1"
path = "../ignore"
@ -56,7 +60,7 @@ version = "1.2.0"
path = "../../project-origins"
[dev-dependencies.tokio]
version = "1.24.2"
version = "1.32.0"
features = [
"fs",
"io-std",

View File

@ -12,11 +12,9 @@ use crate::{Filter, Matcher};
/// Errors emitted by the `TaggedFilterer`.
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum TaggedFiltererError {
/// Generic I/O error, with some context.
#[error("io({about}): {err}")]
#[diagnostic(code(watchexec::filter::io_error))]
IoError {
/// What it was about.
about: &'static str,
@ -28,7 +26,6 @@ pub enum TaggedFiltererError {
/// Error received when a tagged filter cannot be parsed.
#[error("cannot parse filter `{src}`: {err:?}")]
#[diagnostic(code(watchexec::filter::tagged::parse))]
Parse {
/// The source of the filter.
#[source_code]
@ -40,7 +37,6 @@ pub enum TaggedFiltererError {
/// Error received when a filter cannot be added or removed from a tagged filter list.
#[error("cannot {action} filter: {err:?}")]
#[diagnostic(code(watchexec::filter::tagged::filter_change))]
FilterChange {
/// The action that was attempted.
action: &'static str,
@ -52,22 +48,18 @@ pub enum TaggedFiltererError {
/// Error received when a glob cannot be parsed.
#[error("cannot parse glob: {0}")]
#[diagnostic(code(watchexec::filter::tagged::glob_parse))]
GlobParse(#[source] ignore::Error),
/// Error received when a compiled globset cannot be changed.
#[error("cannot change compiled globset: {0:?}")]
#[diagnostic(code(watchexec::filter::tagged::globset_change))]
GlobsetChange(#[source] SendError<Option<Gitignore>>),
/// Error received about the internal ignore filterer.
#[error("ignore filterer: {0}")]
#[diagnostic(code(watchexec::filter::tagged::ignore))]
Ignore(#[source] ignore_files::Error),
/// Error received when a new ignore filterer cannot be swapped in.
#[error("cannot swap in new ignore filterer: {0:?}")]
#[diagnostic(code(watchexec::filter::tagged::ignore_swap))]
IgnoreSwap(#[source] SendError<IgnoreFilterer>),
}

View File

@ -6,7 +6,7 @@ use regex::Regex;
use tokio::fs::canonicalize;
use tracing::{trace, warn};
use unicase::UniCase;
use watchexec::event::Tag;
use watchexec_events::Tag;
use crate::TaggedFiltererError;

View File

@ -10,11 +10,8 @@ use ignore::{
use ignore_files::{IgnoreFile, IgnoreFilter};
use tokio::fs::canonicalize;
use tracing::{debug, trace, trace_span};
use watchexec::{
error::RuntimeError,
event::{Event, FileType, Priority, ProcessEnd, Tag},
filter::Filterer,
};
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{Event, FileType, Priority, ProcessEnd, Tag};
use watchexec_filterer_ignore::IgnoreFilterer;
use watchexec_signals::Signal;

View File

@ -1,4 +1,4 @@
use watchexec::event::{filekind::*, ProcessEnd, Source};
use watchexec_events::{filekind::*, ProcessEnd, Source};
use watchexec_signals::Signal;
mod helpers;

View File

@ -9,10 +9,9 @@ use std::{
use ignore_files::{IgnoreFile, IgnoreFilter};
use project_origins::ProjectType;
use tokio::fs::canonicalize;
use watchexec::{
error::RuntimeError,
event::{filekind::FileEventKind, Event, FileType, Priority, ProcessEnd, Source, Tag},
filter::Filterer,
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{
filekind::FileEventKind, Event, FileType, Priority, ProcessEnd, Source, Tag,
};
use watchexec_filterer_ignore::IgnoreFilterer;
use watchexec_filterer_tagged::{Filter, FilterFile, Matcher, Op, Pattern, TaggedFilterer};
@ -26,7 +25,7 @@ pub mod tagged {
pub use super::PathHarness;
pub use super::TaggedHarness;
pub use super::{filter, glob_filter, notglob_filter};
pub use watchexec::event::Priority;
pub use watchexec_events::Priority;
}
pub mod tagged_ff {

View File

@ -1,6 +1,6 @@
use std::num::{NonZeroI32, NonZeroI64};
use watchexec::event::{filekind::*, ProcessEnd, Source};
use watchexec_events::{filekind::*, ProcessEnd, Source};
use watchexec_filterer_tagged::TaggedFilterer;
use watchexec_signals::Signal;

View File

@ -15,16 +15,23 @@ rust-version = "1.58.0"
edition = "2021"
[dependencies]
futures = "0.3.21"
gix-config = "0.25.1"
futures = "0.3.29"
gix-config = "0.31.0"
ignore = "0.4.18"
miette = "5.3.0"
thiserror = "1.0.31"
tokio = { version = "1.24.2", default-features = false, features = ["fs", "macros", "rt"] }
tracing = "0.1.35"
thiserror = "1.0.50"
tracing = "0.1.40"
radix_trie = "0.2.1"
dunce = "1.0.4"
[dependencies.tokio]
version = "1.33.0"
default-features = false
features = [
"fs",
"macros",
"rt",
]
[dependencies.project-origins]
version = "1.2.0"

View File

@ -51,7 +51,7 @@ pub async fn from_origin(path: impl AsRef<Path> + Send) -> (Vec<IgnoreFile>, Vec
match find_file(base.join(".git/config")).await {
Err(err) => errors.push(err),
Ok(None) => {}
Ok(Some(path)) => match path.parent().map(File::from_git_dir) {
Ok(Some(path)) => match path.parent().map(|path| File::from_git_dir(path.into())) {
None => errors.push(Error::new(
ErrorKind::Other,
"unreachable: .git/config must have a parent",

View File

@ -10,7 +10,6 @@ pub enum Error {
///
/// [`IgnoreFile`]: crate::IgnoreFile
#[error("cannot read ignore '{file}': {err}")]
#[diagnostic(code(ignore_file::read))]
Read {
/// The path to the erroring ignore file.
file: PathBuf,
@ -22,7 +21,6 @@ pub enum Error {
/// Error received when parsing a glob fails.
#[error("cannot parse glob from ignore '{file:?}': {err}")]
#[diagnostic(code(ignore_file::glob))]
Glob {
/// The path to the erroring ignore file.
file: Option<PathBuf>,
@ -35,7 +33,6 @@ pub enum Error {
/// Multiple related [`Error`](enum@Error)s.
#[error("multiple: {0:?}")]
#[diagnostic(code(ignore_file::set))]
Multi(#[related] Vec<Error>),
/// Error received when trying to canonicalize a path

View File

@ -2,6 +2,148 @@
## Next (YYYY-MM-DD)
### General
- The crate is now oriented around `Watchexec` as the core experience, rather than providing a kitchen sink of components for building your own tool from the pieces; that improves the cohesion of the whole and simplifies many patterns.
- Deprecated items (mostly leftover from splitting out the `watchexec_events` and `watchexec_signals` crates) are removed.
- Watchexec can now supervise multiple commands at once. See [Action](#Action) below, the [Action docs](https://docs.rs/watchexec/latest/watchexec/action/struct.Action.html), and the [Supervisor docs](https://docs.rs/watchexec-supervisor) for more.
- Because of this new feature, the old behaviour where multiple commands could be set under a single supervisor is removed.
- Watchexec's supervisor was split up into its own crate, [`watchexec-supervisor`](https://docs.rs/watchexec-supervisor).
- Running as PID1 (e.g. in Docker) is now fully handled, with support from the [`pid1`](https://www.fpcomplete.com/blog/announcing-pid1-crate-for-easier-rust-docker-images/) crate.
- Tokio requirement is now 1.33.
- Notify was upgraded to 6.0.
- Nix was upgraded to 0.27.
### `Watchexec`
- `Watchexec::new()` now takes the `on_action` handler. As this is the most important handler to define, and Watchexec is not functional without one, this enforces providing it up front (see the sketch after this list).
- `Watchexec::with_config()` lets one provide a config up front; otherwise, the default values are used.
- `Watchexec::default()` is mostly used to avoid boilerplate in doc comment examples, and panics on initialisation errors.
- `Watchexec::reconfigure()` is removed. Use the public `config` field instead to access the "live" `Arc<Config>` (see below).
- Completion events are no longer emitted. They still exist in the `Event` enum, but they're not generated by Watchexec itself; use `Job#to_wait` instead. Of course, you can still insert them as synthetic events if you want.
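A minimal sketch of the new construction flow (the handler body here is only illustrative, and error handling follows the same `miette` style as the examples):
```rust
use miette::{IntoDiagnostic, Result};
use watchexec::Watchexec;

#[tokio::main]
async fn main() -> Result<()> {
    // handler first: a Watchexec instance is not functional without one
    let wx = Watchexec::new(|mut action| {
        if action.signals().next().is_some() {
            action.quit();
        }
        action
    })?;

    // `Watchexec::with_config(config)` is the alternative when options should be
    // set before construction; afterwards, the public `config` field is the way in
    wx.config.pathset(["."]);

    let _ = wx.main().await.into_diagnostic()?;
    Ok(())
}
```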
### Config
- `InitConfig` and `RuntimeConfig` have been unified into a single `Config` struct.
- Instead of module-specific `WorkingData` structures, all of the config is now flat in the same `Config`. That makes it easier to work with as all that's needed is to pass an `Arc<Config>` around, but it does mean the event sources are no longer independent.
- Instead of using `tokio::sync::watch` for some values, `HandlerLock` for handlers, and so on, everything now uses a new `Changeable` type, specialised to `ChangeableFn` for closures and `ChangeableFilterer` for the filterer.
- There's now a `signal_change()` method which must be called after changes to the config; this is taken care of when using the methods on `Config`. This is required for the few places in Watchexec which need active reconfiguration rather than reading config values just-in-time.
- The above means that instead of using `Watchexec::reconfigure()` and keeping a clone of the config around, an `Arc<Config>` is now "live": changes applied to it affect the Watchexec instance directly (see the sketch after this list).
- `command` / `commands` are removed from config. Instead use the Action handler API for creating new supervised commands.
- `command_grouped` is removed from config. That's now an option set on `Command`.
- `action_throttle` is renamed to `throttle` and now defaults to `50ms`, which is the default in Watchexec CLI.
- `keyboard_emit_eof` is renamed to `keyboard_events`.
- `pre_spawn_handler` is removed. Use `Job#set_spawn_hook` instead.
- `post_spawn_handler` is removed. Use `Job#run` instead.
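As a rough sketch of working with the live config (`wx` as constructed above; `throttle` is assumed to take a `Duration`, matching the rename noted in this list):
```rust
use std::time::Duration;

// `wx.config` is a live `Arc<Config>`: its setters call `signal_change()`
// internally, so changes apply to the running instance without `reconfigure()`
let config = wx.config.clone();
config.pathset(["src", "Cargo.toml"]);
config.throttle(Duration::from_millis(100));

// handlers are `Changeable` too, so the action handler can be swapped at runtime
config.on_action(|mut action| {
    // new behaviour goes here
    action
});
```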
### Command
The structure has been reworked to be simpler and more extensible. Instead of a Command _enum_, there's now a Command _struct_, which holds a single `Program` and behaviour-altering options. `Shell` has also been redone, with less special-casing.
If you had:
```rust
Command::Exec {
prog: "date".into(),
args: vec!["+%s".into()],
}
```
You should now write:
```rust
Command {
program: Program::Exec {
prog: "date".into(),
args: vec!["+%s".into()],
},
options: Default::default(),
}
```
The new `Program::Shell` field `args: Vec<String>` lets you pass (trailing) arguments to the shell invocation:
```rust
Program::Shell {
shell: Shell::new("sh"),
command: "ls".into(),
args: vec!["--".into(), "movies".into()],
}
```
is equivalent to:
```console
$ sh -c "ls" -- movies
```
- The old `args` field of `Command::Shell` is now the `options` field of `Shell`.
- `Shell` has a new field `program_option: Option<Cow<OsStr>>`, which is the option used to pass the command to the shell: for most shells it's `-c`, and for `CMD.EXE` it's `/C`. This makes it fully customisable (including its absence!) if you want to use unusual shells or non-shell programs as shells.
- The special-cased `Shell::Powershell` is removed.
- On Windows, arguments are specified with [`raw_arg`](https://doc.rust-lang.org/stable/std/os/windows/process/trait.CommandExt.html#tymethod.raw_arg) instead of `arg` to avoid quoting issues.
- `Command` can no longer take a list of programs. That was always quite a hack; now that multiple supervised commands are possible, that's how multiple programs should be handled.
- The top-level Watchexec `command_grouped` option is now Command-level, so you can start both grouped and non-grouped programs (see the sketch after this list).
- There's a new `reset_sigmask` option to control whether commands should have their signal masks reset on Unix. By default the signal mask is inherited.
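For example, a sketch of the per-command `grouped` option mentioned above, mirroring the `SpawnOptions` usage in the repository examples:
```rust
use std::sync::Arc;
use watchexec::command::{Command, Program, SpawnOptions};

// process-group behaviour is now chosen per command, not globally
let grouped_run = Arc::new(Command {
    program: Program::Exec {
        prog: "cargo".into(),
        args: vec!["run".into()],
    },
    options: SpawnOptions {
        grouped: true,
        ..Default::default()
    },
});
```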
### Errors
- `RuntimeError::NoCommands`, `RuntimeError::Handler`, `RuntimeError::HandlerLockHeld`, and `CriticalError::MissingHandler` are removed as the relevant types/structures don't exist anymore.
- `RuntimeError::CommandShellEmptyCommand` and `RuntimeError::CommandShellEmptyShell` are removed; you can construct `Shell` with an empty shell program and `Program::Shell` with an empty command. These will at best do nothing, but they won't error early through Watchexec.
- `RuntimeError::ClearScreen` is removed, as clearing the screen is now done by the consumer of Watchexec, not Watchexec itself.
- Watchexec will now panic if locks are poisoned; we can't recover from that.
- The filesystem watcher's "too many files", "too many handles", and other initialisation errors are removed as `RuntimeError`s and are now `CriticalError`s. Treating these as runtime, nominally recoverable errors instead of end-the-world failures was one of the most common pitfalls of using the library; though recovery _is_ technically possible, it's better approached in other ways.
- The `on_error` handler is now sync only and no longer returns a `Result`; as such there's no longer the weird logic of "if the `on_error` handler errors, it will call itself on the error once, then crash".
- If you were doing async work in `on_error`, you should instead use non-async calls (like `try_send()` for Tokio channels); a sketch follows this list. The error handler is expected to return as fast as possible and _not_ do blocking work if it can at all avoid it; this was always the case, but is now documented more explicitly.
- Error diagnostic codes are removed.
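A sketch of that channel-forwarding advice (`wx` as in the sketches above; the exact type passed to `on_error` isn't restated here, so the closure only assumes it is `Debug`-printable):
```rust
use tokio::sync::mpsc;

let (tx, mut rx) = mpsc::channel::<String>(64);
wx.config.on_error(move |err| {
    // sync context: use try_send, never a blocking or awaiting send
    let _ = tx.try_send(format!("{err:?}"));
});
tokio::spawn(async move {
    while let Some(report) = rx.recv().await {
        eprintln!("watchexec error: {report}");
    }
});
```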
### Action
The process supervision system is entirely reworked. Instead of "applying `Outcome`s", there's now a `Job` type which is a single supervised command, provided by the separate [`watchexec-supervisor`](https://docs.rs/watchexec-supervisor) crate. The Action handler itself can only create new jobs and list existing ones, and interaction with commands is done through the `Job` type.
The controls available on `Job` are now modeled on "real" supervisors like systemd, and are both more and less powerful than the old `Outcome` system. This can be seen clearly in how a "restart" is specified. Previously, this was an `Outcome` combinator:
```rust
Outcome::if_running(
Outcome::both(Outcome::stop(), Outcome::start()),
Outcome::start(),
)
```
Now, it's a discrete method:
```rust
job.restart();
```
Previously, a graceful stop was a mess:
```rust
Outcome::if_running(
Outcome::both(
Outcome::both(
Outcome::signal(Signal::Terminate),
Outcome::wait_timeout(Duration::from_secs(30)),
),
Outcome::both(Outcome::stop(), Outcome::start()),
),
Outcome::DoNothing,
)
```
Now, it's again a discrete method:
```rust
job.stop_with_signal(Signal::Terminate, Duration::from_secs(30));
```
The `stop()` and `start()` methods also do nothing if the process is already stopped or started, respectively, so you don't need to check the status of the job before calling them. The `try_restart()` method is available to do a restart only if the job is running, with the `try_restart_with_signal()` variant for graceful restarts.
Further, all of these methods are non-blocking sync (and take `&self`), but they return a `Ticket`: a future which resolves when the control has been processed. A ticket can be dropped without affecting the job if you don't care about it, or used to perform more advanced flow control. The special `to_wait()` method returns a detached, cloneable "wait()" future, which resolves when the process exits, without needing to hold on to the `Job` or a reference to it at all.
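A small sketch of these controls on a `job` handle (`try_restart_with_signal` is assumed to take the same signal-and-grace arguments as `stop_with_signal`):
```rust
// controls are non-blocking sync and return a `Ticket` future
drop(job.start());   // dropping the ticket does not cancel the control
job.restart().await; // awaiting it resolves once the control has been processed

// graceful restart, only if the job is currently running
job.try_restart_with_signal(Signal::Terminate, Duration::from_secs(10));

// `to_wait()` resolves when the process exits, without holding on to the `Job`
job.to_wait().await;
```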
See the [`restart_run_on_successful_build` example](./examples/restart_run_on_successful_build.rs) which starts a `cargo build`, waits for it to end, and then (re)starts `cargo run` if the build exited successfully.
Finally: `Outcome::Clear` and `Outcome::Reset` are gone, and there's no equivalent on `Job`: that's because these are screen control actions, not job control. You should use the [clearscreen](https://docs.rs/clearscreen) crate directly in your action handler, in conjunction with job control, to achieve the desired effect.
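For instance, a minimal sketch of "clear the screen, then restart" inside an action handler on a `wx` instance as above, using the [clearscreen](https://docs.rs/clearscreen) crate (a separate dependency) directly:
```rust
wx.config.on_action(move |mut action| {
    if action.paths().next().is_some() {
        // screen control now belongs to the consumer, not to Watchexec
        let _ = clearscreen::clear();
        for (_, job) in action.list_jobs() {
            job.restart();
        }
    }
    action
});
```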
## v2.3.0 (2023-03-22)
- New: `Outcome::Race` and `Outcome::race()` ([#548](https://github.com/watchexec/watchexec/pull/548))

View File

@ -17,17 +17,17 @@ edition = "2021"
[dependencies]
async-priority-channel = "0.1.0"
async-recursion = "1.0.0"
async-recursion = "1.0.5"
atomic-take = "1.0.0"
clearscreen = "2.0.1"
futures = "0.3.16"
futures = "0.3.29"
miette = "5.3.0"
notify = "6.0.0"
once_cell = "1.8.0"
thiserror = "1.0.26"
thiserror = "1.0.44"
normalize-path = "0.2.0"
[dependencies.command-group]
version = "2.1.0"
version = "5.0.1"
features = ["with-tokio"]
[dependencies.watchexec-events]
@ -38,19 +38,20 @@ path = "../events"
version = "1.0.0"
path = "../signals"
[dependencies.watchexec-supervisor]
version = "0.1.0"
path = "../supervisor"
[dependencies.ignore-files]
version = "1.3.1"
path = "../ignore-files"
[dependencies.notify]
version = "5.0.0"
[dependencies.project-origins]
version = "1.2.0"
path = "../project-origins"
[dependencies.tokio]
version = "1.24.2"
version = "1.33.0"
features = [
"fs",
"io-std",
@ -62,12 +63,13 @@ features = [
]
[dependencies.tracing]
version = "0.1.26"
version = "0.1.40"
features = ["log"]
[target.'cfg(unix)'.dependencies.nix]
version = "0.26.2"
version = "0.27.1"
features = ["signal"]
[dev-dependencies]
tracing-subscriber = "0.3.6"
[dev-dependencies.tracing-subscriber]
version = "0.3.6"
features = ["env-filter"]

View File

@ -15,97 +15,178 @@ _The library which powers [Watchexec CLI](https://watchexec.github.io) and other
[license]: ../../LICENSE
## Quick start
## Examples
Here's a complete example showing some of the library's features:
```rust ,no_run
use miette::{IntoDiagnostic, Result};
use watchexec::{
Watchexec,
action::{Action, Outcome},
config::{InitConfig, RuntimeConfig},
handler::{Handler as _, PrintDebug},
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use watchexec::{
command::{Command, Program, Shell},
job::CommandState,
Watchexec,
};
use watchexec_events::{Event, Priority};
use watchexec_signals::Signal;
#[tokio::main]
async fn main() -> Result<()> {
let mut init = InitConfig::default();
init.on_error(PrintDebug(std::io::stderr()));
// this is okay to start with, but Watchexec logs a LOT of data,
// even at error level. you will quickly want to filter it down.
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.init();
let mut runtime = RuntimeConfig::default();
runtime.pathset(["watchexec.conf"]);
// initialise Watchexec with a simple initial action handler
let job = Arc::new(Mutex::new(None));
let wx = Watchexec::new({
let outerjob = job.clone();
move |mut action| {
let (_, job) = action.create_job(Arc::new(Command {
program: Program::Shell {
shell: Shell::new("bash"),
command: "
echo 'Hello world'
trap 'echo Not quitting yet!' TERM
read
"
.into(),
args: Vec::new(),
},
options: Default::default(),
}));
let conf = YourConfigFormat::load_from_file("watchexec.conf").await.into_diagnostic()?;
conf.apply(&mut runtime);
// store the job outside this closure too
*outerjob.lock().unwrap() = Some(job.clone());
let we = Watchexec::new(init, runtime.clone())?;
let w = we.clone();
let c = runtime.clone();
runtime.on_action(move |action: Action| {
let mut c = c.clone();
let w = w.clone();
async move {
for event in action.events.iter() {
if event.paths().any(|(p, _)| p.ends_with("/watchexec.conf")) {
let conf = YourConfigFormat::load_from_file("watchexec.conf").await?;
conf.apply(&mut c);
let _ = w.reconfigure(c.clone());
// tada! self-reconfiguring watchexec on config file change!
break;
// block SIGINT
#[cfg(unix)]
job.set_spawn_hook(|cmd, _| {
use nix::sys::signal::{sigprocmask, SigSet, SigmaskHow, Signal};
unsafe {
cmd.pre_exec(|| {
let mut newset = SigSet::empty();
newset.add(Signal::SIGINT);
sigprocmask(SigmaskHow::SIG_BLOCK, Some(&newset), None)?;
Ok(())
});
}
}
});
action.outcome(Outcome::if_running(
Outcome::DoNothing,
Outcome::both(Outcome::Clear, Outcome::Start),
));
// start the command
job.start();
Ok(())
action
}
})?;
// (not normally required! ignore this when implementing)
as std::result::Result<_, MietteStub>
// start the engine
let main = wx.main();
// send an event to start
wx.send_event(Event::default(), Priority::Urgent)
.await
.unwrap();
// ^ this will cause the action handler we've defined above to run,
// creating and starting our little bash program, and storing it in the mutex
// spin until we've got the job
while job.lock().unwrap().is_none() {
tokio::task::yield_now().await;
}
// watch the job and restart it when it exits
let job = job.lock().unwrap().clone().unwrap();
let auto_restart = tokio::spawn(async move {
loop {
job.to_wait().await;
job.run(|context| {
if let CommandState::Finished {
status,
started,
finished,
} = context.current
{
let duration = *finished - *started;
eprintln!("[Program stopped with {status:?}; ran for {duration:?}]")
}
})
.await;
eprintln!("[Restarting...]");
job.start().await;
}
});
we.reconfigure(runtime);
we.main().await.into_diagnostic()?;
// now we change what the action does:
let auto_restart_abort = auto_restart.abort_handle();
wx.config.on_action(move |mut action| {
// if we get Ctrl-C on the Watchexec instance, we quit
if action.signals().any(|sig| sig == Signal::Interrupt) {
eprintln!("[Quitting...]");
auto_restart_abort.abort();
action.quit_gracefully(Signal::ForceStop, Duration::ZERO);
return action;
}
// if the action was triggered by file events, gracefully stop the program
if action.paths().next().is_some() {
// watchexec can manage ("supervise") more than one program;
// here we only have one but we don't know its Id so we grab it out of the iterator
if let Some(job) = action.list_jobs().next().map(|(_, job)| job.clone()) {
eprintln!("[Asking program to stop...]");
job.stop_with_signal(Signal::Terminate, Duration::from_secs(5));
}
}
action
});
// and watch all files in the current directory:
wx.config.pathset(["."]);
// then keep running until Watchexec quits!
let _ = main.await.into_diagnostic()?;
auto_restart.abort();
Ok(())
}
// ignore this! it's stuff to make the above code get checked by cargo doc tests!
struct YourConfigFormat; impl YourConfigFormat { async fn load_from_file(_: &str) -> std::result::Result<Self, MietteStub> { Ok(Self) } fn apply(&self, _: &mut RuntimeConfig) {} } use miette::Diagnostic; use thiserror::Error; #[derive(Debug, Error, Diagnostic)] #[error("stub")] struct MietteStub;
```
Other examples:
- [Only Commands](./examples/only_commands.rs): skip watching files, only use the supervisor.
- [Only Events](./examples/only_events.rs): never start any processes, only print events.
- [Restart `cargo run` only when `cargo build` succeeds](./examples/restart_run_on_successful_build.rs)
## Kitchen sink
The library also exposes a number of components which are available to make your own tool, or to
make anything else you may want:
Though not its primary use case, the library exposes most of its relatively standalone components,
available to make other tools that are not Watchexec-shaped:
- **[Command handling](https://docs.rs/watchexec/2/watchexec/command/index.html)**, to
build a command with an arbitrary shell, deal with grouped and ungrouped processes the same way,
and supervise a process while also listening for & acting on interventions such as sending signals.
- **Event sources**: [Filesystem](https://docs.rs/watchexec/3/watchexec/sources/fs/index.html),
[Signals](https://docs.rs/watchexec/3/watchexec/sources/signal/index.html),
[Keyboard](https://docs.rs/watchexec/3/watchexec/sources/keyboard/index.html).
- **Event sources**: [Filesystem](https://docs.rs/watchexec/2/watchexec/fs/index.html),
[Signals](https://docs.rs/watchexec/2/watchexec/signal/index.html),
[Keyboard](https://docs.rs/watchexec/2/watchexec/keyboard/index.html),
(more to come).
- Finding **[a common prefix](https://docs.rs/watchexec/2/watchexec/paths/fn.common_prefix.html)**
- Finding **[a common prefix](https://docs.rs/watchexec/3/watchexec/paths/fn.common_prefix.html)**
of a set of paths.
- A **[Changeable](https://docs.rs/watchexec/3/watchexec/changeable/index.html)** type, which
powers the "live" configuration system.
- And [more][docs]!
Filterers are split into their own crates, so they can be evolved independently:
- The **[Globset](https://docs.rs/watchexec-filterer-globset) filterer** implements the default
Watchexec filter, and mimics the pre-1.18 behaviour as much as possible.
Watchexec CLI filtering, based on the regex crate's ignore mechanisms.
- The **[Tagged](https://docs.rs/watchexec-filterer-tagged) filterer** is an experiment in creating
a more powerful filtering solution, which can operate on every part of events, not just their
paths.
- ~~The **[Tagged](https://docs.rs/watchexec-filterer-tagged) filterer**~~ was an experiment in
creating a more powerful filtering solution, which could operate on every part of events, not
just their paths, using a custom syntax. It is no longer maintained.
- The **[Ignore](https://docs.rs/watchexec-filterer-ignore) filterer** implements ignore-file
semantics, and especially supports _trees_ of ignore files. It is used as a subfilterer in both
@ -113,6 +194,9 @@ Filterers are split into their own crates, so they can be evolved independently:
There are also separate, standalone crates used to build Watchexec which you can tap into:
- **[Supervisor](https://docs.rs/watchexec-supervisor)** is Watchexec's process supervisor and
command abstraction.
- **[ClearScreen](https://docs.rs/clearscreen)** makes clearing the terminal screen in a
cross-platform way easy by default, and provides advanced options to fit your usecase.

View File

@ -1,74 +0,0 @@
use std::time::Duration;
use miette::{IntoDiagnostic, Result};
use watchexec::{
action::{Action, Outcome},
command::Command,
config::{InitConfig, RuntimeConfig},
error::ReconfigError,
event::Event,
fs::Watcher,
ErrorHook, Watchexec,
};
use watchexec_signals::Signal;
// Run with: `env RUST_LOG=debug cargo run --example print_out`
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let mut init = InitConfig::default();
init.on_error(|err: ErrorHook| async move {
eprintln!("Watchexec Runtime Error: {}", err.error);
Ok::<(), std::convert::Infallible>(())
});
let mut runtime = RuntimeConfig::default();
runtime.pathset(["src", "dontexist", "examples"]);
runtime.command(Command::Exec {
prog: "date".into(),
args: Vec::new(),
});
let wx = Watchexec::new(init, runtime.clone())?;
let w = wx.clone();
let config = runtime.clone();
runtime.on_action(move |action: Action| {
let mut config = config.clone();
let w = w.clone();
async move {
eprintln!("Watchexec Action: {action:?}");
let sigs = action
.events
.iter()
.flat_map(Event::signals)
.collect::<Vec<_>>();
if sigs.iter().any(|sig| sig == &Signal::Interrupt) {
action.outcome(Outcome::Exit);
} else if sigs.iter().any(|sig| sig == &Signal::User1) {
eprintln!("Switching to native for funsies");
config.file_watcher(Watcher::Native);
w.reconfigure(config)?;
} else if sigs.iter().any(|sig| sig == &Signal::User2) {
eprintln!("Switching to polling for funsies");
config.file_watcher(Watcher::Poll(Duration::from_millis(50)));
w.reconfigure(config)?;
} else if action.events.iter().flat_map(Event::paths).next().is_some() {
action.outcome(Outcome::if_running(
Outcome::both(Outcome::Stop, Outcome::Start),
Outcome::Start,
));
}
Ok::<(), ReconfigError>(())
}
});
wx.reconfigure(runtime)?;
wx.main().await.into_diagnostic()??;
Ok(())
}

View File

@ -1,52 +0,0 @@
use std::time::Duration;
use async_priority_channel as priority;
use miette::{IntoDiagnostic, Result};
use tokio::{
sync::{mpsc, watch},
time::sleep,
};
use watchexec::{
event::{Event, Priority},
fs,
};
// Run with: `env RUST_LOG=debug cargo run --example fs`,
// then touch some files within the first 15 seconds, and afterwards.
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let (ev_s, ev_r) = priority::bounded::<Event, Priority>(1024);
let (er_s, mut er_r) = mpsc::channel(64);
let (wd_s, wd_r) = watch::channel(fs::WorkingData::default());
let mut wkd = fs::WorkingData::default();
wkd.pathset = vec![".".into()];
wd_s.send(wkd.clone()).into_diagnostic()?;
tokio::spawn(async move {
while let Ok((event, priority)) = ev_r.recv().await {
tracing::info!("event ({priority:?}): {event:?}");
}
});
tokio::spawn(async move {
while let Some(error) = er_r.recv().await {
tracing::error!("error: {error}");
}
});
let wd_sh = tokio::spawn(async move {
sleep(Duration::from_secs(15)).await;
wkd.pathset = Vec::new();
tracing::info!("turning off fs watcher without stopping it");
wd_s.send(wkd).unwrap();
wd_s
});
fs::worker(wd_r, er_s, ev_s).await?;
wd_sh.await.into_diagnostic()?;
Ok(())
}

View File

@ -0,0 +1,55 @@
use std::{
sync::Arc,
time::{Duration, Instant},
};
use miette::{IntoDiagnostic, Result};
use tokio::time::sleep;
use watchexec::{
command::{Command, Program},
Watchexec,
};
use watchexec_events::{Event, Priority};
#[tokio::main]
async fn main() -> Result<()> {
let wx = Watchexec::new(|mut action| {
// you don't HAVE to respond to filesystem events:
// here, we start a command every five seconds, unless we get a signal and quit
if action.signals().next().is_some() {
eprintln!("[Quitting...]");
action.quit();
} else {
let (_, job) = action.create_job(Arc::new(Command {
program: Program::Exec {
prog: "echo".into(),
args: vec![
"Hello world!".into(),
format!("Current time: {:?}", Instant::now()),
"Press Ctrl+C to quit".into(),
],
},
options: Default::default(),
}));
job.start();
}
action
})?;
tokio::spawn({
let wx = wx.clone();
async move {
loop {
sleep(Duration::from_secs(5)).await;
wx.send_event(Event::default(), Priority::Urgent)
.await
.unwrap();
}
}
});
let _ = wx.main().await.into_diagnostic()?;
Ok(())
}

View File

@ -0,0 +1,30 @@
use miette::{IntoDiagnostic, Result};
use watchexec::Watchexec;
#[tokio::main]
async fn main() -> Result<()> {
let wx = Watchexec::new(|mut action| {
// you don't HAVE to spawn jobs:
// here, we just print out the events as they come in
for event in action.events.iter() {
eprintln!("{event:?}");
}
// quit when we get a signal
if action.signals().next().is_some() {
eprintln!("[Quitting...]");
action.quit();
}
action
})?;
// start the engine
let main = wx.main();
// and watch all files in the current directory:
wx.config.pathset(["."]);
let _ = main.await.into_diagnostic()?;
Ok(())
}

View File

@ -1,64 +1,138 @@
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use miette::{IntoDiagnostic, Result};
use watchexec::{
action::{Action, Outcome},
config::{InitConfig, RuntimeConfig},
handler::PrintDebug,
command::{Command, Program, Shell},
job::CommandState,
Watchexec,
};
use watchexec_events::{Event, Priority};
use watchexec_signals::Signal;
#[tokio::main]
async fn main() -> Result<()> {
let mut init = InitConfig::default();
init.on_error(PrintDebug(std::io::stderr()));
// this is okay to start with, but Watchexec logs a LOT of data,
// even at error level. you will quickly want to filter it down.
tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.init();
let mut runtime = RuntimeConfig::default();
runtime.pathset(["watchexec.conf"]);
// initialise Watchexec with a simple initial action handler
let job = Arc::new(Mutex::new(None));
let wx = Watchexec::new({
let outerjob = job.clone();
move |mut action| {
let (_, job) = action.create_job(Arc::new(Command {
program: Program::Shell {
shell: Shell::new("bash"),
command: "
echo 'Hello world'
trap 'echo Not quitting yet!' TERM
read
"
.into(),
args: Vec::new(),
},
options: Default::default(),
}));
let conf = YourConfigFormat::load_from_file("watchexec.conf")
.await
.into_diagnostic()?;
conf.apply(&mut runtime);
// store the job outside this closure too
*outerjob.lock().unwrap() = Some(job.clone());
let we = Watchexec::new(init, runtime.clone())?;
let w = we.clone();
let c = runtime.clone();
runtime.on_action(move |action: Action| {
let mut c = c.clone();
let w = w.clone();
async move {
for event in action.events.iter() {
if event.paths().any(|(p, _)| p.ends_with("/watchexec.conf")) {
let conf = YourConfigFormat::load_from_file("watchexec.conf").await?;
conf.apply(&mut c);
let _ = w.reconfigure(c.clone());
// tada! self-reconfiguring watchexec on config file change!
break;
// block SIGINT
#[cfg(unix)]
job.set_spawn_hook(|cmd, _| {
use nix::sys::signal::{sigprocmask, SigSet, SigmaskHow, Signal};
unsafe {
cmd.pre_exec(|| {
let mut newset = SigSet::empty();
newset.add(Signal::SIGINT);
sigprocmask(SigmaskHow::SIG_BLOCK, Some(&newset), None)?;
Ok(())
});
}
}
});
action.outcome(Outcome::if_running(
Outcome::DoNothing,
Outcome::both(Outcome::Clear, Outcome::Start),
));
// start the command
job.start();
Ok::<(), std::io::Error>(())
action
}
})?;
// start the engine
let main = wx.main();
// send an event to start
wx.send_event(Event::default(), Priority::Urgent)
.await
.unwrap();
// ^ this will cause the action handler we've defined above to run,
// creating and starting our little bash program, and storing it in the mutex
// spin until we've got the job
while job.lock().unwrap().is_none() {
tokio::task::yield_now().await;
}
// watch the job and restart it when it exits
let job = job.lock().unwrap().clone().unwrap();
let auto_restart = tokio::spawn(async move {
loop {
job.to_wait().await;
job.run(|context| {
if let CommandState::Finished {
status,
started,
finished,
} = context.current
{
let duration = *finished - *started;
eprintln!("[Program stopped with {status:?}; ran for {duration:?}]")
}
})
.await;
eprintln!("[Restarting...]");
job.start().await;
}
});
let _ = we.main().await.into_diagnostic()?;
// now we change what the action does:
let auto_restart_abort = auto_restart.abort_handle();
wx.config.on_action(move |mut action| {
// if we get Ctrl-C on the Watchexec instance, we quit
if action.signals().any(|sig| sig == Signal::Interrupt) {
eprintln!("[Quitting...]");
auto_restart_abort.abort();
action.quit_gracefully(Signal::ForceStop, Duration::ZERO);
return action;
}
// if the action was triggered by file events, gracefully stop the program
if action.paths().next().is_some() {
// watchexec can manage ("supervise") more than one program;
// here we only have one but we don't know its Id so we grab it out of the iterator
if let Some(job) = action.list_jobs().next().map(|(_, job)| job.clone()) {
eprintln!("[Asking program to stop...]");
job.stop_with_signal(Signal::Terminate, Duration::from_secs(5));
}
// we could also use `action.get_or_create_job` initially and store its Id to use here,
// see the CHANGELOG.md for an example under "3.0.0 > Action".
}
action
});
// and watch all files in the current directory:
wx.config.pathset(["."]);
// then keep running until Watchexec quits!
let _ = main.await.into_diagnostic()?;
auto_restart.abort();
Ok(())
}
struct YourConfigFormat;
impl YourConfigFormat {
async fn load_from_file(_path: impl AsRef<std::path::Path>) -> std::io::Result<Self> {
Ok(Self)
}
fn apply(&self, _config: &mut RuntimeConfig) {
// ...
}
}

View File

@ -0,0 +1,84 @@
use std::sync::Arc;
use miette::{IntoDiagnostic, Result};
use watchexec::{
command::{Command, Program, SpawnOptions},
job::CommandState,
Id, Watchexec,
};
use watchexec_events::{Event, Priority, ProcessEnd};
use watchexec_signals::Signal;
#[tokio::main]
async fn main() -> Result<()> {
let build_id = Id::default();
let run_id = Id::default();
let wx = Watchexec::new_async(move |mut action| {
Box::new(async move {
if action.signals().any(|sig| sig == Signal::Interrupt) {
eprintln!("[Quitting...]");
action.quit();
return action;
}
let build = action.get_or_create_job(build_id, || {
Arc::new(Command {
program: Program::Exec {
prog: "cargo".into(),
args: vec!["build".into()],
},
options: Default::default(),
})
});
let run = action.get_or_create_job(run_id, || {
Arc::new(Command {
program: Program::Exec {
prog: "cargo".into(),
args: vec!["run".into()],
},
options: SpawnOptions {
grouped: true,
..Default::default()
},
})
});
if action.paths().next().is_some()
|| action.events.iter().any(|event| event.tags.is_empty())
{
build.restart().await;
}
build.to_wait().await;
build
.run(move |context| {
if let CommandState::Finished {
status: ProcessEnd::Success,
..
} = context.current
{
run.restart();
}
})
.await;
action
})
})?;
// start the engine
let main = wx.main();
// send an event to start
wx.send_event(Event::default(), Priority::Urgent)
.await
.unwrap();
// and watch all files in cli src
wx.config.pathset(["crates/cli/src"]);
// then keep running until Watchexec quits!
let _ = main.await.into_diagnostic()?;
Ok(())
}

View File

@ -1,42 +0,0 @@
use std::process::exit;
use async_priority_channel as priority;
use miette::Result;
use tokio::sync::mpsc;
use watchexec::{
event::{Event, Priority, Tag},
signal,
};
use watchexec_signals::Signal;
// Run with: `env RUST_LOG=debug cargo run --example signal`,
// then issue some signals to the printed PID, or hit e.g. Ctrl-C.
// Send a SIGTERM (unix) or Ctrl-Break (windows) to exit.
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let (ev_s, ev_r) = priority::bounded::<Event, Priority>(1024);
let (er_s, mut er_r) = mpsc::channel(64);
tokio::spawn(async move {
while let Ok((event, priority)) = ev_r.recv().await {
tracing::info!("event {priority:?}: {event:?}");
if event.tags.contains(&Tag::Signal(Signal::Terminate)) {
exit(0);
}
}
});
tokio::spawn(async move {
while let Some(error) = er_r.recv().await {
tracing::error!("error: {error}");
}
});
tracing::info!("PID is {}", std::process::id());
signal::worker(er_s.clone(), ev_s.clone()).await?;
Ok(())
}

View File

@ -1,14 +1,15 @@
//! Processor responsible for receiving events, filtering them, and scheduling actions in response.
#[doc(inline)]
pub use outcome::Outcome;
pub use handler::Handler as ActionHandler;
#[doc(inline)]
pub use quit::QuitManner;
#[doc(inline)]
pub use r#return::ActionReturn;
#[doc(inline)]
pub use worker::worker;
#[doc(inline)]
pub use workingdata::*;
mod outcome;
mod outcome_worker;
mod process_holder;
mod handler;
mod quit;
mod r#return;
mod worker;
mod workingdata;

View File

@ -0,0 +1,161 @@
use std::{collections::HashMap, path::Path, sync::Arc, time::Duration};
use tokio::task::JoinHandle;
use watchexec_events::{Event, FileType, ProcessEnd};
use watchexec_signals::Signal;
use watchexec_supervisor::{
command::Command,
job::{start_job, Job},
};
use crate::id::Id;
use super::QuitManner;
/// The environment given to the action handler.
///
/// The action handler is the heart of a Watchexec program. Within, you decide what happens when an
/// event successfully passes all filters. Watchexec maintains a set of Supervised [`Job`]s, which
/// are assigned a unique [`Id`] for lightweight reference. In this action handler, you should
/// add commands to be supervised with `create_job()`, or find an already-supervised job with
/// `get_job()` or `list_jobs()`. You can interact with jobs directly via their handles, and can
/// even store clones of the handles for later use outside the action handler.
///
/// The action handler is also given the [`Event`]s which triggered the action. These are expected
/// to be the way to determine what to do with a job. However, in some applications you might not
/// care about them, and that's fine too: for example, you can build a Watchexec which only does
/// process supervision, and is triggered entirely by synthetic events. Conversely, you are also not
/// obligated to use the job handles: you can build a Watchexec which only does something with the
/// events, and never actually starts any processes.
///
/// There are some important considerations to keep in mind when writing an action handler:
///
/// 1. The action handler is called with the supervisor set _as of when the handler was called_.
/// This is particularly important when multiple action handlers might be running at the same
/// time: they might have incomplete views of the supervisor set.
///
/// 2. The way the action handler communicates with the Watchexec handler is through the return
/// value of the handler. That is, when you add a job with `create_job()`, the job is not added
/// to the Watchexec instance's supervisor set until the action handler returns. Similarly, when
/// using `quit()`, the quit action is not performed until the action handler returns and the
/// Watchexec instance is able to see it.
///
/// 3. The action handler blocks the action main loop. This means that if you have a long-running
/// action handler, the Watchexec instance will not be able to process events until the handler
/// returns. That will cause events to accumulate and then get dropped once the channel reaches
/// capacity, which will impact your ability to receive signals (such as a Ctrl-C), and may spew
/// [`EventChannelTrySend` errors](crate::error::RuntimeError::EventChannelTrySend).
///
/// If you want to do something long-running, you should either ignore that error and accept
/// that events may be dropped, or preferably spawn a task to do it and return from the action
/// handler as soon as possible.
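///
/// For example, a minimal sketch (`expensive_work` is a placeholder for your own async fn):
///
/// ```no_compile
/// // hand the slow work to a task and return the action promptly
/// tokio::spawn(async move { expensive_work().await });
/// ```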
#[derive(Debug)]
pub struct Handler {
/// The collected events which triggered the action.
pub events: Arc<[Event]>,
extant: HashMap<Id, Job>,
pub(crate) new: HashMap<Id, (Job, JoinHandle<()>)>,
pub(crate) quit: Option<QuitManner>,
}
impl Handler {
pub(crate) fn new(events: Arc<[Event]>, jobs: HashMap<Id, Job>) -> Self {
Self {
events,
extant: jobs,
new: HashMap::new(),
quit: None,
}
}
/// Create a new job and return its handle.
///
/// This starts the [`Job`] immediately, and stores a copy of its handle and [`Id`] in this
/// `Action` (and thus in the Watchexec instance, when the action handler returns).
pub fn create_job(&mut self, command: Arc<Command>) -> (Id, Job) {
let id = Id::default();
let (job, task) = start_job(command);
self.new.insert(id, (job.clone(), task));
(id, job)
}
// exposing this is dangerous as it allows duplicate IDs which may leak jobs
fn create_job_with_id(&mut self, id: Id, command: Arc<Command>) -> Job {
let (job, task) = start_job(command);
self.new.insert(id, (job.clone(), task));
job
}
/// Get an existing job or create a new one given an Id.
///
/// This starts the [`Job`] immediately if one with the Id doesn't exist, and stores a copy of
/// its handle and [`Id`] in this `Action` (and thus in the Watchexec instance, when the action
/// handler returns).
pub fn get_or_create_job(&mut self, id: Id, command: impl Fn() -> Arc<Command>) -> Job {
self.get_job(id)
.unwrap_or_else(|| self.create_job_with_id(id, command()))
}
/// Get a job given its Id.
///
/// This returns a job handle, if it existed when this handler was called.
pub fn get_job(&self, id: Id) -> Option<Job> {
self.extant.get(&id).cloned()
}
/// List all jobs currently supervised by Watchexec.
///
/// This returns an iterator over all jobs, in no particular order, as of when this handler was
/// called.
pub fn list_jobs(&self) -> impl Iterator<Item = (Id, Job)> + '_ {
self.extant.iter().map(|(id, job)| (*id, job.clone()))
}
/// Shut down the Watchexec instance immediately.
///
/// This will kill and drop all jobs without waiting on processes, then quit.
///
/// Use `graceful_quit()` to wait for processes to finish before quitting.
///
/// The quit is initiated once the action handler returns, not when this method is called.
pub fn quit(&mut self) {
self.quit = Some(QuitManner::Abort);
}
/// Shut down the Watchexec instance gracefully.
///
/// This will send graceful stops to all jobs, wait on them to finish, then reap them and quit.
///
/// Use `quit()` to quit more abruptly.
///
/// If you want to wait for all other actions to finish and for jobs to get cleaned up, but not
/// gracefully delay for processes, you can do:
///
/// ```no_compile
/// action.quit_gracefully(Signal::ForceStop, Duration::ZERO);
/// ```
///
/// The quit is initiated once the action handler returns, not when this method is called.
pub fn quit_gracefully(&mut self, signal: Signal, grace: Duration) {
self.quit = Some(QuitManner::Graceful { signal, grace });
}
/// Convenience to get all signals in the event set.
pub fn signals(&self) -> impl Iterator<Item = Signal> + '_ {
self.events.iter().flat_map(Event::signals)
}
/// Convenience to get all paths in the event set.
///
/// An action contains a set of events, and some of those events might relate to watched
/// files, and each of *those* events may have one or more paths that were affected.
/// To hide this complexity, this method simply provides any and all paths in the event set,
/// along with the type of file at each path, if Watchexec knows it.
pub fn paths(&self) -> impl Iterator<Item = (&Path, Option<&FileType>)> + '_ {
self.events.iter().flat_map(Event::paths)
}
/// Convenience to get all process completions in the event set.
pub fn completions(&self) -> impl Iterator<Item = Option<ProcessEnd>> + '_ {
self.events.iter().flat_map(Event::completions)
}
}

View File

@ -1,156 +0,0 @@
use std::time::Duration;
use watchexec_signals::Signal;
/// The outcome to execute when an action is triggered.
///
/// Logic against the state of the command should be expressed using these variants, rather than
/// inside the action handler, as it ensures the state of the command is always the latest available
/// when the outcome is executed.
#[derive(Clone, Debug, PartialEq, Eq)]
#[non_exhaustive]
pub enum Outcome {
/// Stop processing this action silently.
DoNothing,
/// If the command is running, stop it.
///
/// This should be used with an `IfRunning`, and will warn if the command is not running.
Stop,
/// If the command isn't running, start it.
///
/// This should be used with an `IfRunning`, and will warn if the command is running.
Start,
/// Wait for command completion.
///
/// Does nothing if the command isn't running.
Wait,
/// Sleep for some duration.
Sleep(Duration),
/// Send this signal to the command.
///
/// This does not wait for the command to complete.
Signal(Signal),
/// Clear the (terminal) screen.
Clear,
/// Reset the (terminal) screen.
///
/// This invokes (in order): [`WindowsCooked`][clearscreen::ClearScreen::WindowsCooked],
/// [`WindowsVt`][clearscreen::ClearScreen::WindowsVt],
/// [`VtLeaveAlt`][clearscreen::ClearScreen::VtLeaveAlt],
/// [`VtWellDone`][clearscreen::ClearScreen::VtWellDone],
/// and [the default clear][clearscreen::ClearScreen::default()].
Reset,
/// Exit watchexec.
Exit,
/// When command is running, do the first, otherwise the second.
IfRunning(Box<Outcome>, Box<Outcome>),
/// Do both outcomes in order.
Both(Box<Outcome>, Box<Outcome>),
/// Race both outcomes: run both at once, and when one finishes, cancel the other.
Race(Box<Outcome>, Box<Outcome>),
}
impl Default for Outcome {
fn default() -> Self {
Self::DoNothing
}
}
impl Outcome {
/// Convenience function to create an outcome conditional on the state of the subprocess.
#[must_use]
pub fn if_running(then: Self, otherwise: Self) -> Self {
Self::IfRunning(Box::new(then), Box::new(otherwise))
}
/// Convenience function to create a sequence of outcomes.
#[must_use]
pub fn both(one: Self, two: Self) -> Self {
Self::Both(Box::new(one), Box::new(two))
}
/// Pattern that creates a sequence of outcomes from an iterator.
#[must_use]
pub fn sequence(mut outcomes: impl Iterator<Item = Self>) -> Self {
let mut seq = outcomes.next().unwrap_or(Self::DoNothing);
for outcome in outcomes {
seq = Self::both(seq, outcome);
}
seq
}
/// Convenience function to create a race of outcomes.
#[must_use]
pub fn race(one: Self, two: Self) -> Self {
Self::Race(Box::new(one), Box::new(two))
}
/// Pattern that waits for the subprocess to complete before executing the outcome.
#[must_use]
pub fn wait(and_then: Self) -> Self {
Self::both(Self::Wait, and_then)
}
/// Pattern that waits for the subprocess to complete with a timeout.
#[must_use]
pub fn wait_timeout(timeout: Duration, and_then: Self) -> Self {
Self::both(Self::race(Self::Sleep(timeout), Self::Wait), and_then)
}
/// Resolves the outcome given the current state of the subprocess.
#[must_use]
pub fn resolve(self, is_running: bool) -> Self {
match (is_running, self) {
(true, Self::IfRunning(then, _)) => then.resolve(true),
(false, Self::IfRunning(_, otherwise)) => otherwise.resolve(false),
(ir, Self::Both(one, two)) => Self::both(one.resolve(ir), two.resolve(ir)),
(_, other) => other,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn simple_if_running() {
assert_eq!(
Outcome::if_running(Outcome::Stop, Outcome::Start).resolve(true),
Outcome::Stop
);
assert_eq!(
Outcome::if_running(Outcome::Stop, Outcome::Start).resolve(false),
Outcome::Start
);
}
#[test]
fn simple_passthrough() {
assert_eq!(Outcome::Wait.resolve(true), Outcome::Wait);
assert_eq!(Outcome::Clear.resolve(false), Outcome::Clear);
}
#[test]
fn nested_if_runnings() {
assert_eq!(
Outcome::both(
Outcome::if_running(Outcome::Stop, Outcome::Start),
Outcome::if_running(Outcome::Wait, Outcome::Exit)
)
.resolve(true),
Outcome::Both(Box::new(Outcome::Stop), Box::new(Outcome::Wait))
);
}
}
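
A minimal sketch of composing these combinators (assuming `Outcome` from this module is in scope): the classic restart behaviour is "stop if running, then start", and `resolve()` collapses the conditional given the current process state.

fn restart() -> Outcome {
    // If the command is running, stop it and then start it again; otherwise just start it.
    Outcome::if_running(
        Outcome::both(Outcome::Stop, Outcome::Start),
        Outcome::Start,
    )
}

fn resolve_examples() {
    // No process running: the conditional collapses to a plain Start.
    assert_eq!(restart().resolve(false), Outcome::Start);
    // Process running: it becomes Stop followed by Start.
    assert_eq!(
        restart().resolve(true),
        Outcome::both(Outcome::Stop, Outcome::Start)
    );
}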

View File

@ -1,209 +0,0 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use async_priority_channel as priority;
use clearscreen::ClearScreen;
use futures::{
future::{select, Either},
Future,
};
use tokio::{
spawn,
sync::{mpsc, watch::Receiver},
time::sleep,
};
use tracing::{debug, error, info, trace, warn};
use crate::{
command::Supervisor,
error::RuntimeError,
event::{Event, Priority},
};
use super::{process_holder::ProcessHolder, Outcome, WorkingData};
#[derive(Clone)]
pub struct OutcomeWorker {
events: Arc<[Event]>,
working: Receiver<WorkingData>,
process: ProcessHolder,
gen: usize,
gencheck: Arc<AtomicUsize>,
errors_c: mpsc::Sender<RuntimeError>,
events_c: priority::Sender<Event, Priority>,
}
impl OutcomeWorker {
pub fn newgen() -> Arc<AtomicUsize> {
Default::default()
}
pub fn spawn(
outcome: Outcome,
events: Arc<[Event]>,
working: Receiver<WorkingData>,
process: ProcessHolder,
gencheck: Arc<AtomicUsize>,
errors_c: mpsc::Sender<RuntimeError>,
events_c: priority::Sender<Event, Priority>,
) {
let gen = gencheck.fetch_add(1, Ordering::SeqCst).wrapping_add(1);
let this = Self {
events,
working,
process,
gen,
gencheck,
errors_c,
events_c,
};
debug!(?outcome, %gen, "spawning outcome worker");
spawn(async move {
let errors_c = this.errors_c.clone();
match this.apply(outcome.clone()).await {
Err(err) => {
if matches!(err, RuntimeError::Exit) {
info!(%gen, "propagating graceful exit");
} else {
error!(?err, %gen, "outcome applier errored");
}
if let Err(err) = errors_c.send(err).await {
error!(?err, %gen, "failed to send an error, something is terribly wrong");
}
}
Ok(_) => {
debug!(?outcome, %gen, "outcome worker finished");
}
}
});
}
async fn check_gen<O>(&self, f: impl Future<Output = O> + Send) -> Option<O> {
// TODO: use a select and a notifier of some kind so it cancels tasks
if self.gencheck.load(Ordering::SeqCst) != self.gen {
warn!(when=%"pre", gen=%self.gen, "outcome worker was cycled, aborting");
return None;
}
let o = f.await;
if self.gencheck.load(Ordering::SeqCst) != self.gen {
warn!(when=%"post", gen=%self.gen, "outcome worker was cycled, aborting");
return None;
}
Some(o)
}
#[async_recursion::async_recursion]
async fn apply(&self, outcome: Outcome) -> Result<(), RuntimeError> {
macro_rules! notry {
($e:expr) => {
match self.check_gen($e).await {
None => return Ok(()),
Some(o) => o,
}
};
}
match (notry!(self.process.is_some()), outcome) {
(_, Outcome::DoNothing) => {}
(_, Outcome::Exit) => {
return Err(RuntimeError::Exit);
}
(true, Outcome::Stop) => {
notry!(self.process.kill());
notry!(self.process.wait())?;
notry!(self.process.drop_inner());
}
(false, o @ (Outcome::Stop | Outcome::Wait | Outcome::Signal(_))) => {
debug!(outcome=?o, "meaningless without a process, not doing anything");
}
(_, Outcome::Start) => {
let (cmds, grouped, pre_spawn_handler, post_spawn_handler) = {
let wrk = self.working.borrow();
(
wrk.commands.clone(),
wrk.grouped,
wrk.pre_spawn_handler.clone(),
wrk.post_spawn_handler.clone(),
)
};
if cmds.is_empty() {
warn!("tried to start commands without anything to run");
} else {
trace!("spawning supervisor for command");
let sup = Supervisor::spawn(
self.errors_c.clone(),
self.events_c.clone(),
cmds,
grouped,
self.events.clone(),
pre_spawn_handler,
post_spawn_handler,
)?;
notry!(self.process.replace(sup));
}
}
(true, Outcome::Signal(sig)) => {
notry!(self.process.signal(sig));
}
(true, Outcome::Wait) => {
notry!(self.process.wait())?;
}
(_, Outcome::Sleep(time)) => {
trace!(?time, "sleeping");
notry!(sleep(time));
trace!(?time, "done sleeping");
}
(_, Outcome::Clear) => {
clearscreen::clear()?;
}
(_, Outcome::Reset) => {
for cs in [
ClearScreen::WindowsCooked,
ClearScreen::WindowsVt,
ClearScreen::VtLeaveAlt,
ClearScreen::VtWellDone,
ClearScreen::default(),
] {
cs.clear()?;
}
}
(true, Outcome::IfRunning(then, _)) => {
notry!(self.apply(*then))?;
}
(false, Outcome::IfRunning(_, otherwise)) => {
notry!(self.apply(*otherwise))?;
}
(_, Outcome::Both(one, two)) => {
if let Err(err) = notry!(self.apply(*one)) {
debug!(
"first outcome failed, sending an error but proceeding to the second anyway"
);
notry!(self.errors_c.send(err)).ok();
}
notry!(self.apply(*two))?;
}
(_, Outcome::Race(one, two)) => {
if let Either::Left((Err(err), _)) | Either::Right((Err(err), _)) =
select(self.apply(*one), self.apply(*two)).await
{
return Err(err);
}
}
}
Ok(())
}
}
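
The generation check above can be sketched on its own: every spawn bumps a shared counter and remembers the new value, so an older worker notices it has been superseded and bails out. A hedged illustration (names are illustrative, not part of this crate):

use std::sync::{
    atomic::{AtomicUsize, Ordering},
    Arc,
};

struct GenWorker {
    gen: usize,
    gencheck: Arc<AtomicUsize>,
}

impl GenWorker {
    fn spawn(gencheck: Arc<AtomicUsize>) -> Self {
        // fetch_add returns the previous value, so this worker's generation is the new one.
        let gen = gencheck.fetch_add(1, Ordering::SeqCst).wrapping_add(1);
        Self { gen, gencheck }
    }

    fn still_current(&self) -> bool {
        // A later spawn has bumped the shared counter, telling this worker to abort.
        self.gencheck.load(Ordering::SeqCst) == self.gen
    }
}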

View File

@ -1,71 +0,0 @@
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::trace;
use watchexec_signals::Signal;
use crate::{command::Supervisor, error::RuntimeError};
#[derive(Clone, Debug, Default)]
pub struct ProcessHolder(Arc<RwLock<Option<Supervisor>>>);
impl ProcessHolder {
pub async fn is_running(&self) -> bool {
self.0
.read()
.await
.as_ref()
.map_or(false, Supervisor::is_running)
}
pub async fn is_some(&self) -> bool {
self.0.read().await.is_some()
}
pub async fn drop_inner(&self) {
trace!("dropping supervisor");
self.0.write().await.take();
trace!("dropped supervisor");
}
pub async fn replace(&self, new: Supervisor) {
trace!("replacing supervisor");
if let Some(_old) = self.0.write().await.replace(new) {
trace!("replaced supervisor");
// TODO: figure out what to do with old
} else {
trace!("not replaced: no supervisor");
}
}
pub async fn signal(&self, sig: Signal) {
if let Some(p) = self.0.read().await.as_ref() {
trace!("signaling supervisor");
p.signal(sig).await;
trace!("signaled supervisor");
} else {
trace!("not signaling: no supervisor");
}
}
pub async fn kill(&self) {
if let Some(p) = self.0.read().await.as_ref() {
trace!("killing supervisor");
p.kill().await;
trace!("killed supervisor");
} else {
trace!("not killing: no supervisor");
}
}
pub async fn wait(&self) -> Result<(), RuntimeError> {
if let Some(p) = self.0.read().await.as_ref() {
trace!("waiting on supervisor");
p.wait().await?;
trace!("waited on supervisor");
} else {
trace!("not waiting: no supervisor");
}
Ok(())
}
}

View File

@ -0,0 +1,17 @@
use std::time::Duration;
use watchexec_signals::Signal;
/// How the Watchexec instance should quit.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum QuitManner {
/// Kill all processes and drop all jobs, then quit.
Abort,
/// Gracefully stop all jobs, then quit.
Graceful {
/// Signal to send immediately
signal: Signal,
/// Time to wait before forceful termination
grace: Duration,
},
}
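
For illustration, a graceful quit could be built like this (the signal and grace period are arbitrary example values, reusing the imports at the top of this file and assuming `Signal::Terminate` as defined by watchexec-signals):

fn example_quit() -> QuitManner {
    QuitManner::Graceful {
        // Ask jobs to stop with a polite terminate signal first...
        signal: Signal::Terminate,
        // ...then allow five seconds before forceful termination.
        grace: Duration::from_secs(5),
    }
}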

View File

@ -0,0 +1,18 @@
use std::future::Future;
use super::ActionHandler;
/// The return type of an action.
///
/// This is the type returned by the raw action handler, used internally or when setting the action
/// handler directly via the field on [`Config`](crate::Config). It is not used when setting the
/// action handler via [`Config::on_action`](crate::Config::on_action) and
/// [`Config::on_action_async`](crate::Config::on_action_async) as that takes care of wrapping the
/// return type from the specialised signature on these methods.
pub enum ActionReturn {
/// The action handler is synchronous and here's its return value.
Sync(ActionHandler),
/// The action handler is asynchronous: this is the future that will resolve to its return value.
Async(Box<dyn Future<Output = ActionHandler> + Send + Sync>),
}
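
A hedged sketch of installing a raw handler (the module paths for `Config` and `ActionHandler` are assumptions based on this diff); the `on_action` and `on_action_async` methods shown later do this wrapping for you:

use watchexec::{
    action::{ActionHandler, ActionReturn},
    config::Config,
};

fn install_sync_handler(config: &Config) {
    // `action_handler` is a ChangeableFn<ActionHandler, ActionReturn>, so `replace`
    // swaps in a new raw handler; a synchronous handler wraps its result in Sync.
    config.action_handler.replace(|action: ActionHandler| {
        // Inspect `action` and drive jobs here, then hand it back.
        ActionReturn::Sync(action)
    });
}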

View File

@ -1,54 +1,131 @@
use std::{
collections::HashMap,
mem::take,
sync::Arc,
time::{Duration, Instant},
};
use async_priority_channel as priority;
use tokio::{
sync::{
mpsc,
watch::{self},
},
time::timeout,
};
use tracing::{debug, info, trace};
use tokio::{sync::mpsc, time::timeout};
use tracing::{debug, trace};
use watchexec_events::{Event, Priority};
use watchexec_supervisor::job::Job;
use super::{handler::Handler, quit::QuitManner};
use crate::{
action::ActionReturn,
error::{CriticalError, RuntimeError},
event::{Event, Priority},
handler::rte,
filter::Filterer,
id::Id,
late_join_set::LateJoinSet,
Config,
};
use super::{outcome_worker::OutcomeWorker, process_holder::ProcessHolder, Action, WorkingData};
/// The main worker of a Watchexec process.
///
/// This is the main loop of the process. It receives events from the event channel, filters them,
/// debounces them, obtains the desired outcome of an actioned event, calls the appropriate handlers
/// and schedules processes as needed.
pub async fn worker(
working: watch::Receiver<WorkingData>,
config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events_tx: priority::Sender<Event, Priority>,
events: priority::Receiver<Event, Priority>,
) -> Result<(), CriticalError> {
let mut last = Instant::now();
let mut set = Vec::new();
let process = ProcessHolder::default();
let outcome_gen = OutcomeWorker::newgen();
let mut jobtasks = LateJoinSet::default();
let mut jobs = HashMap::<Id, Job>::new();
loop {
if events.is_closed() {
trace!("events channel closed, stopping");
break;
while let Some(mut set) = throttle_collect(
config.clone(),
events.clone(),
errors.clone(),
Instant::now(),
)
.await?
{
let events: Arc<[Event]> = Arc::from(take(&mut set).into_boxed_slice());
trace!("preparing action handler");
let action = Handler::new(events.clone(), jobs.clone());
debug!("running action handler");
let action = match config.action_handler.call(action) {
ActionReturn::Sync(action) => action,
ActionReturn::Async(action) => Box::into_pin(action).await,
};
debug!("take control of new tasks");
for (id, (job, task)) in action.new {
trace!(?id, "taking control of new task");
jobtasks.insert(task);
jobs.insert(id, job);
}
if let Some(manner) = action.quit {
debug!(?manner, "quitting worker");
match manner {
QuitManner::Abort => break,
QuitManner::Graceful { signal, grace } => {
debug!(?signal, ?grace, "quitting worker gracefully");
let mut tasks = LateJoinSet::default();
for (id, job) in jobs.drain() {
trace!(?id, "quitting job");
tasks.spawn(async move {
job.stop_with_signal(signal, grace);
job.delete().await;
});
}
debug!("waiting for graceful shutdown tasks");
tasks.join_all().await;
debug!("waiting for job tasks to end");
jobtasks.join_all().await;
break;
}
}
}
let gc: Vec<Id> = jobs
.iter()
.filter_map(|(id, job)| {
if job.is_dead() {
trace!(?id, "job is dead, gc'ing");
Some(*id)
} else {
None
}
})
.collect();
if !gc.is_empty() {
debug!("garbage collect old tasks");
for id in gc {
jobs.remove(&id);
}
}
debug!("action handler finished");
}
debug!("action worker finished");
Ok(())
}
pub async fn throttle_collect(
config: Arc<Config>,
events: priority::Receiver<Event, Priority>,
errors: mpsc::Sender<RuntimeError>,
mut last: Instant,
) -> Result<Option<Vec<Event>>, CriticalError> {
if events.is_closed() {
trace!("events channel closed, stopping");
return Ok(None);
}
let mut set: Vec<Event> = vec![];
loop {
let maxtime = if set.is_empty() {
trace!("nothing in set, waiting forever for next event");
Duration::from_secs(u64::MAX)
} else {
working.borrow().throttle.saturating_sub(last.elapsed())
config.throttle.get().saturating_sub(last.elapsed())
};
if maxtime.is_zero() {
@ -64,7 +141,7 @@ pub async fn worker(
let maybe_event = timeout(maxtime, events.recv()).await;
if events.is_closed() {
trace!("events channel closed during timeout, stopping");
break;
return Ok(None);
}
match maybe_event {
@ -72,7 +149,7 @@ pub async fn worker(
trace!("timed out, cycling");
continue;
}
Ok(Err(_empty)) => break,
Ok(Err(_empty)) => return Ok(None),
Ok(Ok((event, priority))) => {
trace!(?event, ?priority, "got event");
@ -81,7 +158,7 @@ pub async fn worker(
} else if event.is_empty() {
trace!("empty event, by-passing filters");
} else {
let filtered = working.borrow().filterer.check_event(&event, priority);
let filtered = config.filterer.check_event(&event, priority);
match filtered {
Err(err) => {
trace!(%err, "filter errored on event");
@ -109,7 +186,7 @@ pub async fn worker(
trace!("urgent event, by-passing throttle");
} else {
let elapsed = last.elapsed();
if elapsed < working.borrow().throttle {
if elapsed < config.throttle.get() {
trace!(?elapsed, "still within throttle window, cycling");
continue;
}
@ -118,49 +195,6 @@ pub async fn worker(
}
}
trace!("out of throttle, starting action process");
last = Instant::now();
#[allow(clippy::iter_with_drain)]
let events = Arc::from(take(&mut set).into_boxed_slice());
let action = Action::new(Arc::clone(&events));
info!(?action, "action constructed");
debug!("running action handler");
let action_handler = {
let wrk = working.borrow();
wrk.action_handler.clone()
};
let outcome = action.outcome.clone();
let err = action_handler
.call(action)
.await
.map_err(|e| rte("action worker", e.as_ref()));
if let Err(err) = err {
errors.send(err).await?;
debug!("action handler errored, skipping");
continue;
}
let outcome = outcome.get().cloned().unwrap_or_default();
debug!(?outcome, "action handler finished");
let outcome = outcome.resolve(process.is_running().await);
info!(?outcome, "outcome resolved");
OutcomeWorker::spawn(
outcome,
events,
working.clone(),
process.clone(),
outcome_gen.clone(),
errors.clone(),
events_tx.clone(),
);
debug!("action process done");
return Ok(Some(set));
}
debug!("action worker finished");
Ok(())
}
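
The throttling above is trailing-edge: once the set is non-empty, the loop only waits out whatever remains of the throttle window since the last action. A small sketch of that arithmetic, using the same `saturating_sub` pattern:

use std::time::{Duration, Instant};

// How much longer to wait for more events before firing the action.
fn remaining_window(throttle: Duration, last_action: Instant, set_is_empty: bool) -> Duration {
    if set_is_empty {
        // Nothing pending yet: wait (effectively) forever for the next event.
        Duration::from_secs(u64::MAX)
    } else {
        // Events pending: only wait out the rest of the throttle window.
        throttle.saturating_sub(last_action.elapsed())
    }
}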

View File

@ -1,217 +0,0 @@
use std::{
fmt,
sync::{Arc, Weak},
time::Duration,
};
use once_cell::sync::OnceCell;
use tokio::{
process::Command as TokioCommand,
sync::{Mutex, OwnedMutexGuard},
};
use crate::{command::Command, event::Event, filter::Filterer, handler::HandlerLock};
use super::Outcome;
/// The configuration of the [action][crate::action] worker.
///
/// This is marked non-exhaustive so new configuration can be added without breaking.
#[derive(Clone)]
#[non_exhaustive]
pub struct WorkingData {
/// How long to wait for events to build up before executing an action.
///
/// This is sometimes called "debouncing." We debounce on the trailing edge: an action is
/// triggered only after that amount of time has passed since the first event in the cycle. The
/// action is called with all the collected events in the cycle.
pub throttle: Duration,
/// The main handler to define: what to do when an action is triggered.
///
/// This handler is called with the [`Action`] environment, which has a certain way of returning
/// the desired outcome, check out the [`Action::outcome()`] method. The handler checks for the
/// outcome as soon as the handler returns, which means that if the handler returns before the
/// outcome is set, you'll get unexpected results. For this reason, it's a bad idea to use ex. a
/// channel as the handler.
///
/// If this handler is not provided, it defaults to a no-op, which does absolutely nothing, not
/// even quit. Hence, you really need to provide a handler.
///
/// It is possible to change the handler or any other configuration inside the previous handler.
/// It's useful to know that the handlers are updated from this working data before any of them
/// run in any given cycle, so changing the pre-spawn and post-spawn handlers from this handler
/// will not affect the running action.
pub action_handler: HandlerLock<Action>,
/// A handler triggered before a command is spawned.
///
/// This handler is called with the [`PreSpawn`] environment, which provides mutable access to
/// the [`Command`](TokioCommand) which is about to be run. See the notes on the
/// [`PreSpawn::command()`] method for important information on what you can do with it.
///
/// Returning an error from the handler will stop the action from processing further, and issue
/// a [`RuntimeError`][crate::error::RuntimeError] to the error channel.
pub pre_spawn_handler: HandlerLock<PreSpawn>,
/// A handler triggered immediately after a command is spawned.
///
/// This handler is called with the [`PostSpawn`] environment, which provides details on the
/// spawned command, including its PID.
///
/// Returning an error from the handler will drop the [`Child`][tokio::process::Child], which
/// will terminate the command without triggering any of the normal Watchexec behaviour, and
/// issue a [`RuntimeError`][crate::error::RuntimeError] to the error channel.
pub post_spawn_handler: HandlerLock<PostSpawn>,
/// Commands to execute.
///
/// These will be run in order, and an error will stop early.
pub commands: Vec<Command>,
/// Whether to use process groups (on Unix) or job control (on Windows) to run the command.
///
/// This makes use of [command_group] under the hood.
///
/// If you want to know whether a spawned command was run in a process group, you should use
/// the value in [`PostSpawn`] instead of reading this one, as it may have changed in the
/// meantime.
pub grouped: bool,
/// The filterer implementation to use when filtering events.
///
/// The default is a no-op, which will always pass every event.
pub filterer: Arc<dyn Filterer>,
}
impl fmt::Debug for WorkingData {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("WorkingData")
.field("throttle", &self.throttle)
.field("commands", &self.commands)
.field("grouped", &self.grouped)
.field("filterer", &self.filterer)
.finish_non_exhaustive()
}
}
impl Default for WorkingData {
fn default() -> Self {
Self {
throttle: Duration::from_millis(50),
action_handler: Default::default(),
pre_spawn_handler: Default::default(),
post_spawn_handler: Default::default(),
commands: Vec::new(),
grouped: true,
filterer: Arc::new(()),
}
}
}
/// The environment given to the action handler.
///
/// This deliberately does not implement Clone to make it hard to move it out of the handler, which
/// you should not do.
///
/// The [`Action::outcome()`] method is the only way to set the outcome of the action, and it _must_
/// be called before the handler returns.
#[derive(Debug)]
pub struct Action {
/// The collected events which triggered the action.
pub events: Arc<[Event]>,
pub(super) outcome: Arc<OnceCell<Outcome>>,
}
impl Action {
pub(super) fn new(events: Arc<[Event]>) -> Self {
Self {
events,
outcome: Default::default(),
}
}
/// Set the action's outcome.
///
/// This takes `self` and `Action` is not `Clone`, so it's only possible to call it once.
/// Regardless, if you _do_ manage to call it twice, it will do nothing beyond the first call.
///
/// See the [`Action`] documentation about handlers to learn why it's a bad idea to clone or
/// send it elsewhere, and what kind of handlers you cannot use.
pub fn outcome(self, outcome: Outcome) {
self.outcome.set(outcome).ok();
}
}
/// The environment given to the pre-spawn handler.
///
/// This deliberately does not implement Clone to make it hard to move it out of the handler, which
/// you should not do.
///
/// The [`PreSpawn::command()`] method is the only way to mutate the command, and the mutex guard it
/// returns _must_ be dropped before the handler returns.
#[derive(Debug)]
#[non_exhaustive]
pub struct PreSpawn {
/// The command which is about to be spawned.
pub command: Command,
/// The collected events which triggered the action this command issues from.
pub events: Arc<[Event]>,
to_spawn_w: Weak<Mutex<TokioCommand>>,
}
impl PreSpawn {
pub(crate) fn new(
command: Command,
to_spawn: TokioCommand,
events: Arc<[Event]>,
) -> (Self, Arc<Mutex<TokioCommand>>) {
let arc = Arc::new(Mutex::new(to_spawn));
(
Self {
command,
events,
to_spawn_w: Arc::downgrade(&arc),
},
arc.clone(),
)
}
/// Get write access to the command that will be spawned.
///
/// Keeping the lock alive beyond the end of the handler may cause the command to be cancelled,
/// but note no guarantees are made on this behaviour. Just don't do it. See the [`Action`]
/// documentation about handlers for more.
///
/// This will always return `Some()` under normal circumstances.
pub async fn command(&self) -> Option<OwnedMutexGuard<TokioCommand>> {
if let Some(arc) = self.to_spawn_w.upgrade() {
Some(arc.lock_owned().await)
} else {
None
}
}
}
/// The environment given to the post-spawn handler.
///
/// This is Clone, as there's nothing (except returning an error) that can be done to the command
/// now that it's spawned, as far as Watchexec is concerned. Nevertheless, you should return from
/// this handler quickly, to avoid holding up anything else.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct PostSpawn {
/// The command the process was spawned with.
pub command: Command,
/// The collected events which triggered the action the command issues from.
pub events: Arc<[Event]>,
/// The process ID or the process group ID.
pub id: u32,
/// Whether the command was run in a process group.
pub grouped: bool,
}
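
A hedged sketch of the handler contract described above (the pre-v3 API being removed in this commit): the outcome must be set before the handler returns, once per batch of events.

fn handle(action: Action) {
    // Restart on any batch of events: stop the command if running, then start it.
    action.outcome(Outcome::if_running(
        Outcome::both(Outcome::Stop, Outcome::Start),
        Outcome::Start,
    ));
}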

View File

@ -0,0 +1,122 @@
//! Changeable values.
use std::{
any::type_name,
fmt,
sync::{Arc, RwLock},
};
/// A shareable value that doesn't keep a lock when it is read.
///
/// This is essentially an `Arc<RwLock<T: Clone>>`, with only two ways to use it:
/// - replace the value, which obtains a write lock
/// - get a clone of that value, which obtains a read lock
///
/// but importantly because you get a clone of the value, the read lock is not held after the
/// `get()` method returns.
///
/// See [`ChangeableFn`] for a specialised variant which holds an [`Fn`].
#[derive(Clone)]
pub struct Changeable<T>(Arc<RwLock<T>>);
impl<T> Changeable<T>
where
T: Clone + Send,
{
/// Create a new Changeable.
///
/// If `T: Default`, prefer using `::default()`.
#[must_use]
pub fn new(value: T) -> Self {
Self(Arc::new(RwLock::new(value)))
}
/// Replace the value with a new one.
///
/// Panics if the lock was poisoned.
pub fn replace(&self, new: T) {
*(self.0.write().expect("changeable lock poisoned")) = new;
}
/// Get a clone of the value.
///
/// Panics if the lock was poisoned.
#[must_use]
pub fn get(&self) -> T {
self.0.read().expect("handler lock poisoned").clone()
}
}
impl<T> Default for Changeable<T>
where
T: Clone + Send + Default,
{
fn default() -> Self {
Self::new(T::default())
}
}
// TODO: with specialisation, write a better impl when T: Debug
impl<T> fmt::Debug for Changeable<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Changeable")
.field("inner type", &type_name::<T>())
.finish_non_exhaustive()
}
}
/// A shareable `Fn` that doesn't hold a lock when it is called.
///
/// This is a specialisation of [`Changeable`] for the `Fn` usecase.
///
/// As this is for Watchexec, only `Fn`s with a single argument and return value are supported
/// here; it's simple enough to make your own if you want more.
pub struct ChangeableFn<T, U>(Changeable<Arc<dyn (Fn(T) -> U) + Send + Sync>>);
impl<T, U> ChangeableFn<T, U>
where
T: Send,
U: Send,
{
pub(crate) fn new(f: impl (Fn(T) -> U) + Send + Sync + 'static) -> Self {
Self(Changeable::new(Arc::new(f)))
}
/// Replace the fn with a new one.
///
/// Panics if the lock was poisoned.
pub fn replace(&self, new: impl (Fn(T) -> U) + Send + Sync + 'static) {
self.0.replace(Arc::new(new));
}
/// Call the fn.
///
/// Panics if the lock was poisoned.
pub fn call(&self, data: T) -> U {
(self.0.get())(data)
}
}
// the derive adds a T: Clone bound
impl<T, U> Clone for ChangeableFn<T, U> {
fn clone(&self) -> Self {
Self(Changeable::clone(&self.0))
}
}
impl<T, U> Default for ChangeableFn<T, U>
where
T: Send,
U: Send + Default,
{
fn default() -> Self {
Self::new(|_| U::default())
}
}
impl<T, U> fmt::Debug for ChangeableFn<T, U> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChangeableFn")
.field("payload type", &type_name::<T>())
.field("return type", &type_name::<U>())
.finish_non_exhaustive()
}
}
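
A small usage sketch of `Changeable` on its own (the value type only needs to be `Clone + Send`):

use std::time::Duration;

fn example() {
    let throttle = Changeable::new(Duration::from_millis(50));

    // `get` returns a clone; the read lock is released before it returns.
    assert_eq!(throttle.get(), Duration::from_millis(50));

    // Clones share the same underlying value, so a `replace` is visible everywhere.
    let shared = throttle.clone();
    shared.replace(Duration::from_millis(100));
    assert_eq!(throttle.get(), Duration::from_millis(100));
}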

View File

@ -1,157 +0,0 @@
//! Command construction, configuration, and tracking.
use std::fmt;
use tokio::process::Command as TokioCommand;
use tracing::trace;
use crate::error::RuntimeError;
#[doc(inline)]
pub use process::Process;
#[doc(inline)]
pub use supervisor::Supervisor;
mod process;
mod supervisor;
#[cfg(test)]
mod tests;
/// A command to execute.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Command {
/// A raw command which will be executed as-is.
Exec {
/// The program to run.
prog: String,
/// The arguments to pass.
args: Vec<String>,
},
/// A shelled command line.
Shell {
/// The shell to run.
shell: Shell,
/// Additional options or arguments to pass to the shell.
///
/// These will be inserted before the `-c` (or equivalent) option immediately preceding the
/// command line string.
args: Vec<String>,
/// The command line to pass to the shell.
command: String,
},
}
/// Shell to use to run shelled commands.
///
/// `Cmd` and `Powershell` are special-cased because they have different calling conventions. Also
/// `Cmd` is only available on Windows, while `Powershell` is also available on unices (provided the
/// end-user has it installed, of course).
///
/// There is no default implemented: as consumer of this library you are encouraged to set your own
/// default as makes sense in your application / for your platform.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Shell {
/// Use the given string as a unix shell invocation.
///
/// This is invoked with `-c` followed by the command.
Unix(String),
/// Use the Windows CMD.EXE shell.
///
/// This is `cmd.exe` invoked with `/C` followed by the command.
#[cfg(windows)]
Cmd,
/// Use Powershell, on Windows or elsewhere.
///
/// This is `powershell.exe` invoked with `-Command` followed by the command on Windows.
/// On unices, it is equivalent to `Unix("pwsh")`.
Powershell,
}
impl Command {
/// Obtain a [`tokio::process::Command`] from a [`Command`].
///
/// Behaves as described in the [`Command`] and [`Shell`] documentation.
///
/// # Errors
///
/// - Errors if the `command` of a `Command::Shell` is empty.
/// - Errors if the `shell` of a `Shell::Unix(shell)` is empty.
pub fn to_spawnable(&self) -> Result<TokioCommand, RuntimeError> {
trace!(cmd=?self, "constructing command");
match self {
Self::Exec { prog, args } => {
let mut c = TokioCommand::new(prog);
c.args(args);
Ok(c)
}
Self::Shell {
shell,
args,
command,
} => {
if command.is_empty() {
return Err(RuntimeError::CommandShellEmptyCommand);
}
let (shcmd, shcliopt) = match shell {
#[cfg(windows)]
Shell::Cmd => {
use std::os::windows::process::CommandExt as _;
use std::process::Command as StdCommand;
// TODO this is a workaround until TokioCommand has a raw_arg method. See tokio-rs/tokio#5810.
let mut std_command = StdCommand::new("cmd.exe");
std_command.args(args).arg("/C").raw_arg(command);
return Ok(TokioCommand::from(std_command));
}
#[cfg(windows)]
Shell::Powershell => ("powershell.exe", "-Command"),
#[cfg(not(windows))]
Shell::Powershell => ("pwsh", "-c"),
Shell::Unix(cmd) => {
if cmd.is_empty() {
return Err(RuntimeError::CommandShellEmptyShell);
}
(cmd.as_str(), "-c")
}
};
let mut c = TokioCommand::new(shcmd);
c.args(args);
c.arg(shcliopt).arg(command);
Ok(c)
}
}
}
}
impl fmt::Display for Command {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Exec { prog, args } => {
write!(f, "{prog}")?;
for arg in args {
write!(f, " {arg}")?;
}
Ok(())
}
Self::Shell { command, .. } => {
write!(f, "{command}")
}
}
}
}
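
For illustration, the `Display` impl above renders an approximate command line, which is handy for logging (values are arbitrary):

fn display_examples() {
    let exec = Command::Exec {
        prog: "cargo".into(),
        args: vec!["check".into(), "--workspace".into()],
    };
    // Exec commands print the program followed by each argument.
    assert_eq!(exec.to_string(), "cargo check --workspace");

    let shelled = Command::Shell {
        shell: Shell::Unix("sh".into()),
        args: Vec::new(),
        command: "cargo check && cargo test".into(),
    };
    // Shelled commands print only the command line itself.
    assert_eq!(shelled.to_string(), "cargo check && cargo test");
}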

View File

@ -1,148 +0,0 @@
use std::process::ExitStatus;
use command_group::AsyncGroupChild;
use tokio::process::Child;
use tracing::{debug, trace};
use crate::error::RuntimeError;
/// Low-level wrapper around a process child, be it grouped or ungrouped.
#[derive(Debug)]
pub enum Process {
/// The initial state of the process, before it's spawned.
None,
/// A grouped process that's been spawned.
Grouped(AsyncGroupChild),
/// An ungrouped process that's been spawned.
Ungrouped(Child),
/// The cached exit status of the process.
Done(ExitStatus),
}
impl Default for Process {
/// Returns [`Process::None`].
fn default() -> Self {
Self::None
}
}
impl Process {
/// Sends a Unix signal to the process.
///
/// Does nothing if the process is not running.
#[cfg(unix)]
pub fn signal(&mut self, sig: command_group::Signal) -> Result<(), RuntimeError> {
use command_group::UnixChildExt;
match self {
Self::None | Self::Done(_) => Ok(()),
Self::Grouped(c) => {
debug!(signal=%sig, pgid=?c.id(), "sending signal to process group");
c.signal(sig)
}
Self::Ungrouped(c) => {
debug!(signal=%sig, pid=?c.id(), "sending signal to process");
c.signal(sig)
}
}
.map_err(RuntimeError::Process)
}
/// Kills the process.
///
/// Does nothing if the process is not running.
///
/// Note that this has different behaviour for grouped and ungrouped processes due to Tokio's
/// API: it waits on ungrouped processes, but not on grouped ones.
pub async fn kill(&mut self) -> Result<(), RuntimeError> {
match self {
Self::None | Self::Done(_) => Ok(()),
Self::Grouped(c) => {
debug!(pgid=?c.id(), "killing process group");
c.kill()
}
Self::Ungrouped(c) => {
debug!(pid=?c.id(), "killing process");
c.kill().await
}
}
.map_err(RuntimeError::Process)
}
/// Checks the status of the process.
///
/// Returns `true` if the process is still running.
///
/// This takes `&mut self` as it transitions the [`Process`] state to [`Process::Done`] if it
/// finds the process has ended, such that it will cache the exit status. Otherwise that status
/// would be lost.
///
/// Does nothing and returns `false` immediately if the `Process` is `Done` or `None`.
pub fn is_running(&mut self) -> Result<bool, RuntimeError> {
match self {
Self::None | Self::Done(_) => Ok(false),
Self::Grouped(c) => c.try_wait().map(|status| {
trace!("try-waiting on process group");
if let Some(status) = status {
trace!(?status, "converting to ::Done");
*self = Self::Done(status);
// the process has exited, so it is no longer running
false
} else {
true
}
}),
Self::Ungrouped(c) => c.try_wait().map(|status| {
trace!("try-waiting on process");
if let Some(status) = status {
trace!(?status, "converting to ::Done");
*self = Self::Done(status);
// the process has exited, so it is no longer running
false
} else {
true
}
}),
}
.map_err(RuntimeError::Process)
}
/// Waits for the process to exit, and returns its exit status.
///
/// This takes `&mut self` as it transitions the [`Process`] state to [`Process::Done`] if it
/// finds the process has ended, such that it will cache the exit status.
///
/// This makes it possible to call `wait` on a process multiple times, without losing the exit
/// status.
///
/// Returns immediately with the cached exit status if the `Process` is `Done`, and with `None`
/// if the `Process` is `None`.
pub async fn wait(&mut self) -> Result<Option<ExitStatus>, RuntimeError> {
match self {
Self::None => Ok(None),
Self::Done(status) => Ok(Some(*status)),
Self::Grouped(c) => {
trace!("waiting on process group");
let status = c.wait().await.map_err(|err| RuntimeError::IoError {
about: "waiting on process group",
err,
})?;
trace!(?status, "converting to ::Done");
*self = Self::Done(status);
Ok(Some(status))
}
Self::Ungrouped(c) => {
trace!("waiting on process");
let status = c.wait().await.map_err(|err| RuntimeError::IoError {
about: "waiting on process (ungrouped)",
err,
})?;
trace!(?status, "converting to ::Done");
*self = Self::Done(status);
Ok(Some(status))
}
}
.map_err(RuntimeError::Process)
}
}

View File

@ -1,369 +0,0 @@
use std::sync::Arc;
use async_priority_channel as priority;
use command_group::AsyncCommandGroup;
use tokio::{
select, spawn,
sync::{
mpsc::{self, Sender},
watch,
},
};
use tracing::{debug, debug_span, error, info, trace, Span};
use watchexec_signals::Signal;
use crate::{
action::{PostSpawn, PreSpawn},
command::Command,
error::RuntimeError,
event::{Event, Priority, Source, Tag},
handler::{rte, HandlerLock},
};
use super::Process;
#[derive(Clone, Copy, Debug)]
enum Intervention {
Kill,
Signal(Signal),
}
/// A task which supervises a sequence of processes.
///
/// This spawns processes from a vec of [`Command`]s in order and waits for each to complete while
/// handling interventions to itself: orders to terminate, or to send a signal to the current
/// process. It also issues a [`Tag::ProcessCompletion`] event as soon as each command completes.
#[derive(Debug)]
pub struct Supervisor {
intervene: Sender<Intervention>,
ongoing: watch::Receiver<bool>,
}
impl Supervisor {
/// Spawns the command set, the supervision task, and returns a new control object.
pub fn spawn(
errors: Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
mut commands: Vec<Command>,
grouped: bool,
actioned_events: Arc<[Event]>,
pre_spawn_handler: HandlerLock<PreSpawn>,
post_spawn_handler: HandlerLock<PostSpawn>,
) -> Result<Self, RuntimeError> {
// get commands in reverse order so pop() returns the next to run
commands.reverse();
let next = commands.pop().ok_or(RuntimeError::NoCommands)?;
let (notify, waiter) = watch::channel(true);
let (int_s, int_r) = mpsc::channel(8);
spawn(async move {
let span = debug_span!("supervisor");
let mut next = next;
let mut commands = commands;
let mut int = int_r;
loop {
let (mut process, pid) = match spawn_process(
span.clone(),
next,
grouped,
actioned_events.clone(),
pre_spawn_handler.clone(),
post_spawn_handler.clone(),
)
.await
{
Ok(pp) => pp,
Err(err) => {
let _enter = span.enter();
error!(%err, "while spawning process");
errors.send(err).await.ok();
trace!("marking process as done");
notify
.send(false)
.unwrap_or_else(|e| trace!(%e, "error sending process complete"));
trace!("closing supervisor task early");
return;
}
};
span.in_scope(|| debug!(?process, ?pid, "spawned process"));
loop {
select! {
p = process.wait() => {
match p {
Ok(_) => break, // deal with it below
Err(err) => {
let _enter = span.enter();
error!(%err, "while waiting on process");
errors.try_send(err).ok();
trace!("marking process as done");
notify.send(false).unwrap_or_else(|e| trace!(%e, "error sending process complete"));
trace!("closing supervisor task early");
return;
}
}
},
Some(int) = int.recv() => {
match int {
Intervention::Kill => {
if let Err(err) = process.kill().await {
let _enter = span.enter();
error!(%err, "while killing process");
errors.try_send(err).ok();
trace!("continuing to watch command");
}
}
#[cfg(unix)]
Intervention::Signal(sig) => {
let _enter = span.enter();
if let Some(sig) = sig.to_nix() {
if let Err(err) = process.signal(sig) {
error!(%err, "while sending signal to process");
errors.try_send(err).ok();
trace!("continuing to watch command");
}
} else {
let err = RuntimeError::UnsupportedSignal(sig);
error!(%err, "while sending signal to process");
errors.try_send(err).ok();
trace!("continuing to watch command");
}
}
#[cfg(windows)]
Intervention::Signal(sig) => {
let _enter = span.enter();
// https://github.com/watchexec/watchexec/issues/219
let err = RuntimeError::UnsupportedSignal(sig);
error!(%err, "while sending signal to process");
errors.try_send(err).ok();
trace!("continuing to watch command");
}
}
}
else => break,
}
}
span.in_scope(|| trace!("got out of loop, waiting once more"));
match process.wait().await {
Err(err) => {
let _enter = span.enter();
error!(%err, "while waiting on process");
errors.try_send(err).ok();
}
Ok(status) => {
let event = span.in_scope(|| {
let event = Event {
tags: vec![
Tag::Source(Source::Internal),
Tag::ProcessCompletion(status.map(Into::into)),
],
metadata: Default::default(),
};
debug!(?event, "creating synthetic process completion event");
event
});
if let Err(err) = events.send(event, Priority::Low).await {
let _enter = span.enter();
error!(%err, "while sending process completion event");
errors
.try_send(RuntimeError::EventChannelSend {
ctx: "command supervisor",
err,
})
.ok();
}
}
}
let _enter = span.enter();
if let Some(cmd) = commands.pop() {
debug!(?cmd, "queuing up next command");
next = cmd;
} else {
debug!("no more commands to supervise");
break;
}
}
let _enter = span.enter();
trace!("marking process as done");
notify
.send(false)
.unwrap_or_else(|e| trace!(%e, "error sending process complete"));
trace!("closing supervisor task");
});
Ok(Self {
ongoing: waiter,
intervene: int_s,
})
}
/// Issues a signal to the process.
///
/// On Windows, this currently only supports [`Signal::ForceStop`].
///
/// While this is async, it returns once the signal intervention has been sent internally, not
/// when the signal has been delivered.
pub async fn signal(&self, signal: Signal) {
if cfg!(windows) {
if signal == Signal::ForceStop {
self.intervene.send(Intervention::Kill).await.ok();
}
// else: https://github.com/watchexec/watchexec/issues/219
} else {
trace!(?signal, "sending signal intervention");
self.intervene.send(Intervention::Signal(signal)).await.ok();
}
// only errors on channel closed, and that only happens if the process is dead
}
/// Stops the process.
///
/// While this is async, it returns once the kill intervention has been sent internally, not
/// when the process has actually stopped.
pub async fn kill(&self) {
trace!("sending kill intervention");
self.intervene.send(Intervention::Kill).await.ok();
// only errors on channel closed, and that only happens if the process is dead
}
/// Returns true if the supervisor is still running.
///
/// This is almost always equivalent to whether the _process_ is still running, but may not be
/// 100% in sync.
pub fn is_running(&self) -> bool {
let ongoing = *self.ongoing.borrow();
trace!(?ongoing, "supervisor state");
ongoing
}
/// Returns only when the supervisor completes.
///
/// This is almost always equivalent to waiting for the _process_ to complete, but may not be
/// 100% in sync.
pub async fn wait(&self) -> Result<(), RuntimeError> {
if !*self.ongoing.borrow() {
trace!("supervisor already completed (pre)");
return Ok(());
}
debug!("waiting on supervisor completion");
let mut ongoing = self.ongoing.clone();
// never completes if ongoing is marked false in between the previous check and now!
// TODO: select with something that sleeps a bit and rechecks the ongoing
ongoing
.changed()
.await
.map_err(|err| RuntimeError::InternalSupervisor(err.to_string()))?;
debug!("supervisor completed");
Ok(())
}
}
async fn spawn_process(
span: Span,
command: Command,
grouped: bool,
actioned_events: Arc<[Event]>,
pre_spawn_handler: HandlerLock<PreSpawn>,
post_spawn_handler: HandlerLock<PostSpawn>,
) -> Result<(Process, u32), RuntimeError> {
let (pre_spawn, spawnable) = span.in_scope::<_, Result<_, RuntimeError>>(|| {
debug!(%grouped, ?command, "preparing command");
#[cfg_attr(windows, allow(unused_mut))]
let mut spawnable = command.to_spawnable()?;
// Required from Rust 1.66:
// https://github.com/rust-lang/rust/pull/101077
//
// We do that before the pre-spawn so that hook can be used to set a different mask if wanted.
#[cfg(unix)]
{
use nix::sys::signal::{sigprocmask, SigSet, SigmaskHow, Signal};
unsafe {
spawnable.pre_exec(|| {
let mut oldset = SigSet::empty();
let mut newset = SigSet::all();
newset.remove(Signal::SIGHUP); // leave SIGHUP alone so nohup works
debug!(unblocking=?newset, "resetting process sigmask");
sigprocmask(SigmaskHow::SIG_UNBLOCK, Some(&newset), Some(&mut oldset))?;
debug!(?oldset, "sigmask reset");
Ok(())
});
}
}
debug!("running pre-spawn handler");
Ok(PreSpawn::new(
command.clone(),
spawnable,
actioned_events.clone(),
))
})?;
pre_spawn_handler
.call(pre_spawn)
.await
.map_err(|e| rte("action pre-spawn", e.as_ref()))?;
let (proc, id, post_spawn) = span.in_scope::<_, Result<_, RuntimeError>>(|| {
let mut spawnable = Arc::try_unwrap(spawnable)
.map_err(|_| RuntimeError::HandlerLockHeld("pre-spawn"))?
.into_inner();
info!(command=?spawnable, "spawning command");
let (proc, id) = if grouped {
let proc = spawnable
.group()
.kill_on_drop(true)
.spawn()
.map_err(|err| RuntimeError::IoError {
about: "spawning process group",
err,
})?;
let id = proc.id().ok_or(RuntimeError::ProcessDeadOnArrival)?;
info!(pgid=%id, "process group spawned");
(Process::Grouped(proc), id)
} else {
let proc =
spawnable
.kill_on_drop(true)
.spawn()
.map_err(|err| RuntimeError::IoError {
about: "spawning process (ungrouped)",
err,
})?;
let id = proc.id().ok_or(RuntimeError::ProcessDeadOnArrival)?;
info!(pid=%id, "process spawned");
(Process::Ungrouped(proc), id)
};
debug!("running post-spawn handler");
Ok((
proc,
id,
PostSpawn {
command: command.clone(),
events: actioned_events.clone(),
id,
grouped,
},
))
})?;
post_spawn_handler
.call(post_spawn)
.await
.map_err(|e| rte("action post-spawn", e.as_ref()))?;
Ok((proc, id))
}
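
A hedged sketch of driving an already-spawned supervisor (construction is elided, as it needs the error and event channels plus the spawn handlers wired by the worker; `Signal::Terminate` is assumed from watchexec-signals):

async fn stop_gracefully(sup: &Supervisor) -> Result<(), RuntimeError> {
    // Politely ask the current process to stop...
    sup.signal(Signal::Terminate).await;
    // ...then wait for the supervised command set to finish.
    sup.wait().await
}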

View File

@ -1,128 +0,0 @@
use super::{Command, Shell};
use command_group::AsyncCommandGroup;
#[tokio::test]
#[cfg(unix)]
async fn unix_shell_none() -> Result<(), std::io::Error> {
assert!(Command::Exec {
prog: "echo".into(),
args: vec!["hi".into()]
}
.to_spawnable()
.expect("echo directly")
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(unix)]
async fn unix_shell_sh() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Unix("sh".into()),
args: Vec::new(),
command: "echo hi".into()
}
.to_spawnable()
.expect("echo with sh")
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(unix)]
async fn unix_shell_alternate() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Unix("bash".into()),
args: Vec::new(),
command: "echo hi".into()
}
.to_spawnable()
.expect("echo with bash")
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(unix)]
async fn unix_shell_alternate_shopts() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Unix("bash".into()),
args: vec!["-o".into(), "errexit".into()],
command: "echo hi".into()
}
.to_spawnable()
.expect("echo with shopts")
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(windows)]
async fn windows_shell_none() -> Result<(), std::io::Error> {
assert!(Command::Exec {
prog: "echo".into(),
args: vec!["hi".into()]
}
.to_spawnable()
.unwrap()
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(windows)]
async fn windows_shell_cmd() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Cmd,
args: Vec::new(),
command: r#""echo" hi"#.into()
}
.to_spawnable()
.unwrap()
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(windows)]
async fn windows_shell_powershell() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Powershell,
args: Vec::new(),
command: "echo hi".into()
}
.to_spawnable()
.unwrap()
.group_status()
.await?
.success());
Ok(())
}
#[tokio::test]
#[cfg(windows)]
async fn windows_shell_unix_style_powershell() -> Result<(), std::io::Error> {
assert!(Command::Shell {
shell: Shell::Unix("powershell.exe".into()),
args: Vec::new(),
command: "echo hi".into()
}
.to_spawnable()
.unwrap()
.group_status()
.await?
.success());
Ok(())
}

View File

@ -1,226 +1,300 @@
//! Configuration and builders for [`crate::Watchexec`].
use std::{fmt, path::Path, sync::Arc, time::Duration};
use std::{future::Future, path::Path, pin::pin, sync::Arc, time::Duration};
use tracing::debug;
use tokio::sync::Notify;
use tracing::{debug, trace};
use crate::{
action::{Action, PostSpawn, PreSpawn},
command::Command,
filter::Filterer,
fs::Watcher,
handler::{Handler, HandlerLock},
action::{ActionHandler, ActionReturn},
changeable::{Changeable, ChangeableFn},
filter::{ChangeableFilterer, Filterer},
sources::fs::{WatchedPath, Watcher},
ErrorHook,
};
/// Runtime configuration for [`Watchexec`][crate::Watchexec].
/// Configuration for [`Watchexec`][crate::Watchexec].
///
/// This is used both when constructing the instance (as initial configuration) and to reconfigure
/// it at runtime via [`Watchexec::reconfigure()`][crate::Watchexec::reconfigure()].
/// Almost every field is a [`Changeable`], such that its value can be changed from a `&self`.
///
/// Use [`RuntimeConfig::default()`] to build a new one, or modify an existing one. This struct is
/// marked non-exhaustive such that new options may be added without breaking change. You can make
/// changes through the fields directly, or use the convenience (chainable!) methods instead.
/// Fields are public for advanced use, but in most cases changes should be made through the
/// methods provided: not only are they more convenient, each calls `debug!` on the new value,
/// providing a quick insight into what your application sets.
///
/// Another advantage of using the convenience methods is that each one contains a call to the
/// [`debug!`] macro, providing insight into what config your application sets for "free".
///
/// You should see the detailed documentation on [`fs::WorkingData`][crate::fs::WorkingData] and
/// [`action::WorkingData`][crate::action::WorkingData] for important information and particulars
/// about each field, especially the handlers.
#[derive(Clone, Debug, Default)]
/// The methods also set the "change signal" of the Config: this notifies the parts of Watchexec
/// that need it that they should re-read the config. If you modify values through the fields
/// directly, call `signal_change()` yourself. That said, forgetting to signal rarely delays a
/// change: most parts of Watchexec take a "just-in-time" approach and read a config item
/// immediately before it's needed, every time it's needed, so they don't listen for the change
/// signal at all.
#[derive(Clone, Debug)]
#[non_exhaustive]
pub struct RuntimeConfig {
/// Working data for the filesystem event source.
pub struct Config {
/// This is set by the change methods whenever they're called, and notifies Watchexec that it
/// should read the configuration again.
pub(crate) change_signal: Arc<Notify>,
/// The main handler to define: what to do when an action is triggered.
///
/// This notably includes the path set to be watched.
pub fs: crate::fs::WorkingData,
/// Working data for keyboard event sources.
pub keyboard: crate::keyboard::WorkingData,
/// Working data for the action processing.
/// This handler is called with the [`Action`] environment, look at its doc for more detail.
///
/// This is the task responsible for scheduling the actions in response to events, applying the
/// filtering, etc.
pub action: crate::action::WorkingData,
}
impl RuntimeConfig {
/// Set the pathset to be watched.
pub fn pathset<I, P>(&mut self, pathset: I) -> &mut Self
where
I: IntoIterator<Item = P>,
P: AsRef<Path>,
{
self.fs.pathset = pathset.into_iter().map(|p| p.as_ref().into()).collect();
debug!(pathset=?self.fs.pathset, "RuntimeConfig: pathset");
self
}
/// Set the file watcher type to use.
pub fn file_watcher(&mut self, watcher: Watcher) -> &mut Self {
debug!(?watcher, "RuntimeConfig: watcher");
self.fs.watcher = watcher;
self
}
/// Enable monitoring of 'end of file' from stdin
pub fn keyboard_emit_eof(&mut self, enable: bool) -> &mut Self {
self.keyboard.eof = enable;
self
}
/// Set the action throttle.
pub fn action_throttle(&mut self, throttle: impl Into<Duration>) -> &mut Self {
self.action.throttle = throttle.into();
debug!(throttle=?self.action.throttle, "RuntimeConfig: throttle");
self
}
/// Toggle whether to use process groups or not.
pub fn command_grouped(&mut self, grouped: bool) -> &mut Self {
debug!(?grouped, "RuntimeConfig: command_grouped");
self.action.grouped = grouped;
self
}
/// Set a single command to run on action.
/// If this handler is not provided, or does nothing, Watchexec in turn will do nothing, not
/// even quit. Hence, you really need to provide a handler. This is enforced when using
/// [`Watchexec::new()`], but not when using [`Watchexec::default()`].
///
/// This is a convenience for `.commands(vec![Command...])`.
pub fn command(&mut self, command: Command) -> &mut Self {
debug!(?command, "RuntimeConfig: command");
self.action.commands = vec![command];
self
}
/// It is possible to change this handler, or any other configuration, from inside the handler itself.
/// This and other handlers are fetched "just in time" when needed, so changes to handlers can
/// appear instant, or may lag a little depending on lock contention, but a handler being called
/// does not hold its lock. A handler changing while it's being called doesn't affect the run of
/// a previous version of the handler: it will neither be stopped nor retried with the new code.
///
/// It is important for this handler to return quickly: avoid performing blocking work in it.
/// This is true for all handlers, but especially for this one, as it will block the event loop
/// and you'll find that the internal event queues quickly fill up and it all grinds to a halt.
/// Spawn threads or tasks, or use channels or other async primitives to communicate with your
/// expensive code.
pub action_handler: ChangeableFn<ActionHandler, ActionReturn>,
/// Set the commands to run on action.
pub fn commands(&mut self, commands: impl Into<Vec<Command>>) -> &mut Self {
self.action.commands = commands.into();
debug!(commands=?self.action.commands, "RuntimeConfig: commands");
self
}
/// Set the filterer implementation to use.
pub fn filterer(&mut self, filterer: Arc<dyn Filterer>) -> &mut Self {
debug!(?filterer, "RuntimeConfig: filterer");
self.action.filterer = filterer;
self
}
/// Set the action handler.
pub fn on_action(&mut self, handler: impl Handler<Action> + Send + 'static) -> &mut Self {
debug!("RuntimeConfig: on_action");
self.action.action_handler = HandlerLock::new(Box::new(handler));
self
}
/// Set the pre-spawn handler.
pub fn on_pre_spawn(&mut self, handler: impl Handler<PreSpawn> + Send + 'static) -> &mut Self {
debug!("RuntimeConfig: on_pre_spawn");
self.action.pre_spawn_handler = HandlerLock::new(Box::new(handler));
self
}
/// Set the post-spawn handler.
pub fn on_post_spawn(
&mut self,
handler: impl Handler<PostSpawn> + Send + 'static,
) -> &mut Self {
debug!("RuntimeConfig: on_post_spawn");
self.action.post_spawn_handler = HandlerLock::new(Box::new(handler));
self
}
}
/// Initialisation configuration for [`Watchexec`][crate::Watchexec].
///
/// This is used only for constructing the instance.
///
/// Use [`InitConfig::default()`] to build a new one, and the inherent methods to change values.
/// This struct is marked non-exhaustive such that new options may be added without breaking change.
#[non_exhaustive]
pub struct InitConfig {
/// Runtime error handler.
///
/// This is run on every runtime error that occurs within watchexec. By default the placeholder
/// `()` handler is used, which discards all errors.
///
/// If the handler errors, [_that_ error][crate::error::RuntimeError::Handler] is immediately
/// given to the handler. If this second handler call errors as well, its error is ignored.
///
/// Also see the [`ErrorHook`] documentation for returning critical errors from this handler.
/// This is run on every runtime error that occurs within Watchexec. The default handler
/// is a no-op.
///
/// # Examples
///
/// Set the error handler:
///
/// ```
/// # use std::convert::Infallible;
/// # use watchexec::{config::InitConfig, ErrorHook};
/// let mut init = InitConfig::default();
/// init.on_error(|err: ErrorHook| async move {
/// # use watchexec::{config::Config, ErrorHook};
/// let mut config = Config::default();
/// config.on_error(|err: ErrorHook| {
/// tracing::error!("{}", err.error);
/// Ok::<(), Infallible>(())
/// });
/// ```
pub error_handler: Box<dyn Handler<ErrorHook> + Send>,
///
/// Output a critical error (which will terminate Watchexec):
///
/// ```
/// # use watchexec::{config::Config, ErrorHook, error::{CriticalError, RuntimeError}};
/// let mut config = Config::default();
/// config.on_error(|err: ErrorHook| {
/// tracing::error!("{}", err.error);
///
/// if matches!(err.error, RuntimeError::FsWatcher { .. }) {
/// err.critical(CriticalError::External("fs watcher failed".into()));
/// }
/// });
/// ```
///
/// Elevate a runtime error to critical (will preserve the error information):
///
/// ```
/// # use watchexec::{config::Config, ErrorHook, error::RuntimeError};
/// let mut config = Config::default();
/// config.on_error(|err: ErrorHook| {
/// tracing::error!("{}", err.error);
///
/// if matches!(err.error, RuntimeError::FsWatcher { .. }) {
/// err.elevate();
/// }
/// });
/// ```
///
/// It is important for this to return quickly: avoid performing blocking work. Locking and
/// writing to stdio is fine, but waiting on the network is a bad idea. Of course, an
/// asynchronous log writer or separate UI thread is always a better idea than `println!`, if you
/// have that ability.
pub error_handler: ChangeableFn<ErrorHook, ()>,
/// Internal: the buffer size of the channel which carries runtime errors.
/// The set of filesystem paths to be watched.
///
/// If this is non-empty, the filesystem event source is started and configured to provide
/// events for these paths. If it becomes empty, the filesystem event source is shut down.
pub pathset: Changeable<Vec<WatchedPath>>,
/// The kind of filesystem watcher to be used.
pub file_watcher: Changeable<Watcher>,
/// Watch stdin and emit events when input comes in over the keyboard.
///
/// If this is true, the keyboard event source is started and configured to report when input
/// is received on stdin. If it becomes false, the keyboard event source is shut down and stdin
/// may flow to commands again.
///
/// Currently only EOF is watched for and emitted.
pub keyboard_events: Changeable<bool>,
/// How long to wait for events to build up before executing an action.
///
/// This is sometimes called "debouncing." We debounce on the trailing edge: an action is
/// triggered only after that amount of time has passed since the first event in the cycle. The
/// action is called with all the collected events in the cycle.
///
/// Default is 50ms.
pub throttle: Changeable<Duration>,
/// The filterer implementation to use when filtering events.
///
/// The default is a no-op, which will always pass every event.
pub filterer: ChangeableFilterer,
/// The buffer size of the channel which carries runtime errors.
///
/// The default (64) is usually fine. If you expect a much larger throughput of runtime errors,
/// or if your `error_handler` is slow, adjusting this value may help.
///
/// This is unchangeable at runtime and must be set before Watchexec instantiation.
pub error_channel_size: usize,
/// Internal: the buffer size of the channel which carries events.
/// The buffer size of the channel which carries events.
///
/// The default (1024) is usually fine. If you expect a much larger throughput of events,
/// The default (4096) is usually fine. If you expect a much larger throughput of events,
/// adjusting this value may help.
///
/// This is unchangeable at runtime and must be set before Watchexec instantiation.
pub event_channel_size: usize,
}
impl Default for InitConfig {
impl Default for Config {
fn default() -> Self {
Self {
error_handler: Box::new(()) as _,
change_signal: Default::default(),
action_handler: ChangeableFn::new(ActionReturn::Sync),
error_handler: Default::default(),
pathset: Default::default(),
file_watcher: Default::default(),
keyboard_events: Default::default(),
throttle: Changeable::new(Duration::from_millis(50)),
filterer: Default::default(),
error_channel_size: 64,
event_channel_size: 1024,
event_channel_size: 4096,
}
}
}
impl InitConfig {
impl Config {
/// Signal that the configuration has changed.
///
/// This is called automatically by all other methods here, so most of the time calling this
/// isn't needed, but it can be useful for some advanced uses.
pub fn signal_change(&self) -> &Self {
self.change_signal.notify_waiters();
self
}
/// Watch the config for a change, but run once first.
///
/// This returns a Stream where the first value is available immediately, and then every
/// subsequent one is from a change signal for this Config.
#[must_use]
pub(crate) fn watch(&self) -> ConfigWatched {
ConfigWatched::new(self.change_signal.clone())
}
/// Set the pathset to be watched.
pub fn pathset<I, P>(&self, pathset: I) -> &Self
where
I: IntoIterator<Item = P>,
P: AsRef<Path>,
{
let pathset = pathset.into_iter().map(|p| p.as_ref().into()).collect();
debug!(?pathset, "Config: pathset");
self.pathset.replace(pathset);
self.signal_change()
}
/// Set the file watcher type to use.
pub fn file_watcher(&self, watcher: Watcher) -> &Self {
debug!(?watcher, "Config: file watcher");
self.file_watcher.replace(watcher);
self.signal_change()
}
/// Enable keyboard/stdin event source.
pub fn keyboard_events(&self, enable: bool) -> &Self {
debug!(?enable, "Config: keyboard");
self.keyboard_events.replace(enable);
self.signal_change()
}
/// Set the throttle.
pub fn throttle(&self, throttle: impl Into<Duration>) -> &Self {
let throttle = throttle.into();
debug!(?throttle, "Config: throttle");
self.throttle.replace(throttle);
self.signal_change()
}
/// Set the filterer implementation to use.
pub fn filterer(&self, filterer: impl Filterer + Send + Sync + 'static) -> &Self {
debug!(?filterer, "Config: filterer");
self.filterer.replace(filterer);
self.signal_change()
}
/// Set the runtime error handler.
///
/// See the [documentation on the field](InitConfig#structfield.error_handler) for more details.
pub fn on_error(&mut self, handler: impl Handler<ErrorHook> + Send + 'static) -> &mut Self {
debug!("InitConfig: on_error");
self.error_handler = Box::new(handler) as _;
self
pub fn on_error(&self, handler: impl Fn(ErrorHook) + Send + Sync + 'static) -> &Self {
debug!("Config: on_error");
self.error_handler.replace(handler);
self.signal_change()
}
/// Set the buffer size of the channel which carries runtime errors.
///
/// See the [documentation on the field](InitConfig#structfield.error_channel_size) for more details.
pub fn error_channel_size(&mut self, size: usize) -> &mut Self {
debug!(?size, "InitConfig: error_channel_size");
self.error_channel_size = size;
self
/// Set the action handler.
pub fn on_action(
&self,
handler: impl (Fn(ActionHandler) -> ActionHandler) + Send + Sync + 'static,
) -> &Self {
debug!("Config: on_action");
self.action_handler
.replace(move |action| ActionReturn::Sync(handler(action)));
self.signal_change()
}
/// Set the buffer size of the channel which carries events.
///
/// See the [documentation on the field](InitConfig#structfield.event_channel_size) for more details.
pub fn event_channel_size(&mut self, size: usize) -> &mut Self {
debug!(?size, "InitConfig: event_channel_size");
self.event_channel_size = size;
self
/// Set the action handler to a future-returning closure.
pub fn on_action_async(
&self,
handler: impl (Fn(ActionHandler) -> Box<dyn Future<Output = ActionHandler> + Send + Sync>)
+ Send
+ Sync
+ 'static,
) -> &Self {
debug!("Config: on_action_async");
self.action_handler
.replace(move |action| ActionReturn::Async(handler(action)));
self.signal_change()
}
}
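Putting the setters together, as a minimal sketch assuming the `Watchexec` type and `sources::fs::Watcher` from elsewhere in this diff (the `configure_live` helper is made up): all of them can be called on the shared `Config` while the runtime is live, and each call stores the new value then wakes the workers through `signal_change()`.

```
use std::{sync::Arc, time::Duration};
use watchexec::{error::CriticalError, sources::fs::Watcher, Watchexec};

// must run inside a Tokio runtime, since Watchexec::new spawns its main task
fn configure_live() -> Result<Arc<Watchexec>, CriticalError> {
    let wx = Watchexec::new(|action| action)?;
    wx.config.pathset(["src", "Cargo.toml"]);
    wx.config.file_watcher(Watcher::Native);
    wx.config.keyboard_events(true);
    wx.config.throttle(Duration::from_millis(250));
    Ok(wx)
}
```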
impl fmt::Debug for InitConfig {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("InitConfig")
.field("error_channel_size", &self.error_channel_size)
.field("event_channel_size", &self.event_channel_size)
.finish_non_exhaustive()
#[derive(Debug)]
pub(crate) struct ConfigWatched {
first_run: bool,
notify: Arc<Notify>,
}
impl ConfigWatched {
fn new(notify: Arc<Notify>) -> Self {
let notified = notify.notified();
pin!(notified).as_mut().enable();
Self {
first_run: true,
notify,
}
}
pub async fn next(&mut self) {
let notified = self.notify.notified();
let mut notified = pin!(notified);
notified.as_mut().enable();
if self.first_run {
trace!("ConfigWatched: first run");
self.first_run = false;
} else {
trace!(?notified, "ConfigWatched: waiting for change");
// there's a bit of a gotcha where any config changes made after a Notified resolves
// but before a new one is issued will not be caught. not sure how to fix that yet.
notified.await;
}
}
}
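The gotcha mentioned in the comment above comes from the create-then-enable dance with Tokio's `Notify`. A general sketch of that pattern (plain Tokio, not watchexec-specific; `wait_for_change` is a made-up name): the `Notified` future has to exist, and be enabled, before the window in which notifications should count.

```
use std::pin::pin;
use tokio::sync::Notify;

async fn wait_for_change(notify: &Notify) {
    // create and enable the future up front: notify_waiters() calls made
    // after this point (but before the await below) are not lost
    let notified = notify.notified();
    let mut notified = pin!(notified);
    notified.as_mut().enable();

    // ... other work can happen here ...

    notified.await;
}
```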

View File

@ -1,19 +1,17 @@
use miette::Diagnostic;
use thiserror::Error;
use tokio::{sync::mpsc, task::JoinError};
use watchexec_events::{Event, Priority};
use crate::event::{Event, Priority};
use super::RuntimeError;
use super::{FsWatcherError, RuntimeError};
use crate::sources::fs::Watcher;
/// Errors which are not recoverable and stop watchexec execution.
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum CriticalError {
/// Pseudo-error used to signal a graceful exit.
#[error("this should never be printed (exit)")]
#[diagnostic(code(watchexec::runtime::exit))]
Exit,
/// For custom critical errors.
@ -21,16 +19,12 @@ pub enum CriticalError {
/// This should be used for errors by external code which are not covered by the other error
/// types; watchexec-internal errors should never use this.
#[error("external(critical): {0}")]
#[diagnostic(code(watchexec::critical::external))]
External(#[from] Box<dyn std::error::Error + Send + Sync>),
/// For elevated runtime errors.
///
/// This should be used for runtime errors elevated to critical. This currently does not happen
/// in watchexec, but it is possible in the future. This variant is useful with the `on_error`
/// runtime error handler; see [`ErrorHook`](crate::ErrorHook).
/// This is used for runtime errors elevated to critical.
#[error("a runtime error is too serious for the process to continue")]
#[diagnostic(code(watchexec::critical::elevated_runtime), help("{help:?}"))]
Elevated {
/// The runtime error to be elevated.
#[source]
@ -42,7 +36,6 @@ pub enum CriticalError {
/// A critical I/O error occurred.
#[error("io({about}): {err}")]
#[diagnostic(code(watchexec::critical::io_error))]
IoError {
/// What it was about.
about: &'static str,
@ -54,23 +47,26 @@ pub enum CriticalError {
/// Error received when a runtime error cannot be sent to the errors channel.
#[error("cannot send internal runtime error: {0}")]
#[diagnostic(code(watchexec::critical::error_channel_send))]
ErrorChannelSend(#[from] mpsc::error::SendError<RuntimeError>),
/// Error received when an event cannot be sent to the events channel.
#[error("cannot send event to internal channel: {0}")]
#[diagnostic(code(watchexec::critical::event_channel_send))]
EventChannelSend(#[from] async_priority_channel::SendError<(Event, Priority)>),
/// Error received when joining the main watchexec task.
#[error("main task join: {0}")]
#[diagnostic(code(watchexec::critical::main_task_join))]
MainTaskJoin(#[source] JoinError),
/// Error received when a handler is missing on initialisation.
/// Error received when the filesystem watcher can't initialise.
///
/// This is a **bug** and should be reported.
#[error("internal: missing handler on init")]
#[diagnostic(code(watchexec::critical::internal::missing_handler))]
MissingHandler,
/// In theory this is recoverable but in practice it's generally not, so we treat it as critical.
#[error("fs: cannot initialise {kind:?} watcher")]
FsWatcherInit {
/// The kind of watcher.
kind: Watcher,
/// The error which occurred.
#[source]
err: FsWatcherError,
},
}

View File

@ -1,40 +1,32 @@
use miette::Diagnostic;
use thiserror::Error;
use watchexec_events::{Event, Priority};
use watchexec_signals::Signal;
use crate::{
event::{Event, Priority},
fs::Watcher,
};
use crate::sources::fs::Watcher;
/// Errors which _may_ be recoverable, transient, or only affect a part of the operation, and should
/// be reported to the user and/or acted upon programmatically, but will not outright stop watchexec.
///
/// Some errors that are classified here are spurious and may be ignored; in general you should not
/// use the convenience print handlers for handling these errors beyond prototyping. For example,
/// Some errors that are classified here are spurious and may be ignored. For example,
/// "waiting on process" errors should not be printed to the user by default:
///
/// ```
/// # use std::convert::Infallible;
/// # use tracing::error;
/// # use watchexec::{config::InitConfig, ErrorHook, error::RuntimeError, handler::SyncFnHandler};
/// # let mut config = InitConfig::default();
/// config.on_error(SyncFnHandler::from(
/// |err: ErrorHook| -> std::result::Result<(), Infallible> {
/// if let RuntimeError::IoError {
/// about: "waiting on process group",
/// ..
/// } = err.error
/// {
/// error!("{}", err.error);
/// return Ok(());
/// }
/// # use watchexec::{Config, ErrorHook, error::RuntimeError};
/// # let mut config = Config::default();
/// config.on_error(|err: ErrorHook| {
/// if let RuntimeError::IoError {
/// about: "waiting on process group",
/// ..
/// } = err.error
/// {
/// error!("{}", err.error);
/// return;
/// }
///
/// // ...
///
/// Ok(())
/// },
/// ));
/// // ...
/// });
/// ```
///
/// On the other hand, some errors may not be fatal to this library's understanding, but will be to
@ -42,36 +34,28 @@ use crate::{
/// to [`CriticalError`](super::CriticalError)s:
///
/// ```
/// # use std::convert::Infallible;
/// # use watchexec::{config::InitConfig, ErrorHook, error::{RuntimeError, FsWatcherError}, handler::SyncFnHandler};
/// # let mut config = InitConfig::default();
/// config.on_error(SyncFnHandler::from(
/// |err: ErrorHook| -> std::result::Result<(), Infallible> {
/// if let RuntimeError::FsWatcher {
/// err:
/// FsWatcherError::Create { .. }
/// | FsWatcherError::TooManyWatches { .. }
/// | FsWatcherError::TooManyHandles { .. },
/// ..
/// } = err.error
/// {
/// err.elevate();
/// return Ok(());
/// }
/// # use watchexec::{Config, ErrorHook, error::{RuntimeError, FsWatcherError}};
/// # let mut config = Config::default();
/// config.on_error(|err: ErrorHook| {
/// if let RuntimeError::FsWatcher {
/// err:
/// FsWatcherError::Create { .. }
/// | FsWatcherError::TooManyWatches { .. }
/// | FsWatcherError::TooManyHandles { .. },
/// ..
/// } = err.error {
/// err.elevate();
/// return;
/// }
///
/// // ...
///
/// Ok(())
/// },
/// ));
/// // ...
/// });
/// ```
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum RuntimeError {
/// Pseudo-error used to signal a graceful exit.
#[error("this should never be printed (exit)")]
#[diagnostic(code(watchexec::runtime::exit))]
Exit,
/// For custom runtime errors.
@ -79,12 +63,10 @@ pub enum RuntimeError {
/// This should be used for errors by external code which are not covered by the other error
/// types; watchexec-internal errors should never use this.
#[error("external(runtime): {0}")]
#[diagnostic(code(watchexec::runtime::external))]
External(#[from] Box<dyn std::error::Error + Send + Sync>),
/// Generic I/O error, with some context.
#[error("io({about}): {err}")]
#[diagnostic(code(watchexec::runtime::io_error))]
IoError {
/// What it was about.
about: &'static str,
@ -96,7 +78,6 @@ pub enum RuntimeError {
/// Events from the filesystem watcher event source.
#[error("{kind:?} fs watcher error")]
#[diagnostic(code(watchexec::runtime::fs_watcher))]
FsWatcher {
/// The kind of watcher that failed to instantiate.
kind: Watcher,
@ -108,7 +89,6 @@ pub enum RuntimeError {
/// Events from the keyboard event source
#[error("keyboard watcher error")]
#[diagnostic(code(watchexec::runtime::keyboard_watcher))]
KeyboardWatcher {
/// The underlying error.
#[source]
@ -117,12 +97,10 @@ pub enum RuntimeError {
/// Opaque internal error from a command supervisor.
#[error("internal: command supervisor: {0}")]
#[diagnostic(code(watchexec::runtime::internal_supervisor))]
InternalSupervisor(String),
/// Error received when an event cannot be sent to the event channel.
#[error("cannot send event from {ctx}: {err}")]
#[diagnostic(code(watchexec::runtime::event_channel_send))]
EventChannelSend {
/// The context in which this error happened.
///
@ -136,7 +114,6 @@ pub enum RuntimeError {
/// Error received when an event cannot be sent to the event channel.
#[error("cannot send event from {ctx}: {err}")]
#[diagnostic(code(watchexec::runtime::event_channel_try_send))]
EventChannelTrySend {
/// The context in which this error happened.
///
@ -152,7 +129,6 @@ pub enum RuntimeError {
///
/// The error is completely opaque, having been flattened into a string at the error point.
#[error("handler error while {ctx}: {err}")]
#[diagnostic(code(watchexec::runtime::handler))]
Handler {
/// The context in which this error happened.
///
@ -165,17 +141,14 @@ pub enum RuntimeError {
/// Error received when a [`Handler`][crate::handler::Handler] which has been passed a lock has kept that lock open after the handler has completed.
#[error("{0} handler returned while holding a lock alive")]
#[diagnostic(code(watchexec::runtime::handler_lock_held))]
HandlerLockHeld(&'static str),
/// Error received when operating on a process.
#[error("when operating on process: {0}")]
#[diagnostic(code(watchexec::runtime::process))]
Process(#[source] std::io::Error),
/// Error received when a process did not start correctly, or finished before we could even tell.
#[error("process was dead on arrival")]
#[diagnostic(code(watchexec::runtime::process_doa))]
ProcessDeadOnArrival,
/// Error received when a [`Signal`] is unsupported
@ -183,38 +156,28 @@ pub enum RuntimeError {
/// This may happen if the signal is not supported on the current platform, or if Watchexec
/// doesn't support sending the signal.
#[error("unsupported signal: {0:?}")]
#[diagnostic(code(watchexec::runtime::unsupported_signal))]
UnsupportedSignal(Signal),
/// Error received when there are no commands to run.
///
/// This is generally a programmer error and should be caught earlier.
#[error("no commands to run")]
#[diagnostic(code(watchexec::runtime::no_commands))]
NoCommands,
/// Error received when trying to render a [`Command::Shell`](crate::command::Command) that has no `command`
///
/// This is generally a programmer error and should be caught earlier.
#[error("empty shelled command")]
#[diagnostic(code(watchexec::runtime::command_shell::empty_command))]
CommandShellEmptyCommand,
/// Error received when trying to render a [`Shell::Unix`](crate::command::Shell) with an empty shell
///
/// This is generally a programmer error and should be caught earlier.
#[error("empty shell program")]
#[diagnostic(code(watchexec::runtime::command_shell::empty_shell))]
CommandShellEmptyShell,
/// Error received when clearing the screen.
#[error("clear screen: {0}")]
#[diagnostic(code(watchexec::runtime::clearscreen))]
Clearscreen(#[from] clearscreen::Error),
/// Error received from the [`ignore-files`](ignore_files) crate.
#[error("ignore files: {0}")]
#[diagnostic(code(watchexec::runtime::ignore_files))]
IgnoreFiles(
#[diagnostic_source]
#[from]
@ -223,7 +186,6 @@ pub enum RuntimeError {
/// Error emitted by a [`Filterer`](crate::filter::Filterer).
#[error("{kind} filterer: {err}")]
#[diagnostic(code(watchexec::runtime::filterer))]
Filterer {
/// The kind of filterer that failed.
///

View File

@ -2,58 +2,22 @@ use std::path::PathBuf;
use miette::Diagnostic;
use thiserror::Error;
use tokio::sync::watch;
use crate::{action, fs, keyboard};
// compatibility re-export
#[deprecated(
note = "use the `watchexec_signals` crate directly instead",
since = "2.2.0"
)]
pub use watchexec_signals::SignalParseError;
/// Errors occurring from reconfigs.
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum ReconfigError {
/// Error received when the action processor cannot be updated.
#[error("reconfig: action watch: {0}")]
#[diagnostic(code(watchexec::reconfig::action_watch))]
ActionWatch(#[from] watch::error::SendError<action::WorkingData>),
/// Error received when the fs event source cannot be updated.
#[error("reconfig: fs watch: {0}")]
#[diagnostic(code(watchexec::reconfig::fs_watch))]
FsWatch(#[from] watch::error::SendError<fs::WorkingData>),
/// Error received when the keyboard event source cannot be updated.
#[error("reconfig: keyboard watch: {0}")]
#[diagnostic(code(watchexec::reconfig::keyboard_watch))]
KeyboardWatch(#[from] watch::error::SendError<keyboard::WorkingData>),
}
/// Errors emitted by the filesystem watcher.
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum FsWatcherError {
/// Error received when creating a filesystem watcher fails.
///
/// Also see `TooManyWatches` and `TooManyHandles`.
#[error("failed to instantiate")]
#[diagnostic(
code(watchexec::fs_watcher::create),
help("perhaps retry with the poll watcher")
)]
#[diagnostic(help("perhaps retry with the poll watcher"))]
Create(#[source] notify::Error),
/// Error received when creating or updating a filesystem watcher fails because there are too many watches.
///
/// This is the OS error 28 on Linux.
#[error("failed to instantiate: too many watches")]
#[diagnostic(code(watchexec::fs_watcher::too_many_watches))]
#[cfg_attr(target_os = "linux", diagnostic(help("you will want to increase your inotify.max_user_watches, see inotify(7) and https://watchexec.github.io/docs/inotify-limits.html")))]
#[cfg_attr(
not(target_os = "linux"),
@ -65,7 +29,6 @@ pub enum FsWatcherError {
///
/// This is the OS error 24 on Linux. It may also occur when the limit for inotify instances is reached.
#[error("failed to instantiate: too many handles")]
#[diagnostic(code(watchexec::fs_watcher::too_many_handles))]
#[cfg_attr(target_os = "linux", diagnostic(help("you will want to increase your `nofile` limit, see pam_limits(8); or increase your inotify.max_user_instances, see inotify(7) and https://watchexec.github.io/docs/inotify-limits.html")))]
#[cfg_attr(
not(target_os = "linux"),
@ -75,12 +38,10 @@ pub enum FsWatcherError {
/// Error received when reading a filesystem event fails.
#[error("received an event that we could not read")]
#[diagnostic(code(watchexec::fs_watcher::event))]
Event(#[source] notify::Error),
/// Error received when adding to the pathset for the filesystem watcher fails.
#[error("while adding {path:?}")]
#[diagnostic(code(watchexec::fs_watcher::path_add))]
PathAdd {
/// The path that was attempted to be added.
path: PathBuf,
@ -92,7 +53,6 @@ pub enum FsWatcherError {
/// Error received when removing from the pathset for the filesystem watcher fails.
#[error("while removing {path:?}")]
#[diagnostic(code(watchexec::fs_watcher::path_remove))]
PathRemove {
/// The path that was attempted to be removed.
path: PathBuf,
@ -106,10 +66,8 @@ pub enum FsWatcherError {
/// Errors emitted by the keyboard watcher.
#[derive(Debug, Diagnostic, Error)]
#[non_exhaustive]
#[diagnostic(url(docsrs))]
pub enum KeyboardWatcherError {
/// Error received when shutting down stdin watcher fails.
#[error("failed to shut down stdin watcher")]
#[diagnostic(code(watchexec::keyboard_watcher))]
StdinShutdown,
}

View File

@ -1,11 +1,10 @@
//! The `Filterer` trait for event filtering.
use std::sync::Arc;
use std::{fmt, sync::Arc};
use crate::{
error::RuntimeError,
event::{Event, Priority},
};
use watchexec_events::{Event, Priority};
use crate::{changeable::Changeable, error::RuntimeError};
/// An interface for filtering events.
pub trait Filterer: std::fmt::Debug + Send + Sync {
@ -33,3 +32,43 @@ impl<T: Filterer> Filterer for Arc<T> {
Self::as_ref(self).check_event(event, priority)
}
}
/// A shareable `Filterer` that doesn't hold a lock when it is called.
///
/// This is a specialisation of [`Changeable`] for `Filterer`.
pub struct ChangeableFilterer(Changeable<Arc<dyn Filterer>>);
impl ChangeableFilterer {
/// Replace the filterer with a new one.
///
/// Panics if the lock was poisoned.
pub fn replace(&self, new: impl Filterer + Send + Sync + 'static) {
self.0.replace(Arc::new(new));
}
}
impl Filterer for ChangeableFilterer {
fn check_event(&self, event: &Event, priority: Priority) -> Result<bool, RuntimeError> {
Arc::as_ref(&self.0.get()).check_event(event, priority)
}
}
// the derive adds a T: Clone bound
impl Clone for ChangeableFilterer {
fn clone(&self) -> Self {
Self(Changeable::clone(&self.0))
}
}
impl Default for ChangeableFilterer {
fn default() -> Self {
Self(Changeable::new(Arc::new(())))
}
}
impl fmt::Debug for ChangeableFilterer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ChangeableFilterer")
.field("filterer", &format!("{:?}", self.0.get()))
.finish_non_exhaustive()
}
}
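As a sketch of the other side of this API (the `NoLockfile` type is made up for illustration), a custom `Filterer` is just a `check_event` implementation; it can be swapped into a running instance through `Config::filterer`, which goes through `ChangeableFilterer::replace` under the hood.

```
use watchexec::{error::RuntimeError, filter::Filterer};
use watchexec_events::{Event, Priority};

/// Drops any event that touches a Cargo.lock file.
#[derive(Debug)]
struct NoLockfile;

impl Filterer for NoLockfile {
    fn check_event(&self, event: &Event, _priority: Priority) -> Result<bool, RuntimeError> {
        // events carrying no paths (signals, keyboard) pass through unchanged,
        // since any() over an empty iterator is false
        Ok(!event.paths().any(|(path, _)| path.ends_with("Cargo.lock")))
    }
}

// later, on a live Config:
// config.filterer(NoLockfile);
```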

View File

@ -1,264 +0,0 @@
//! Trait and implementations for hook handlers.
//!
//! You can implement the trait yourself, or use any of the provided implementations:
//! - for closures,
//! - for std and tokio channels,
//! - for printing to writers, in `Debug` and `Display` (where supported) modes (generally used for
//! debugging and testing, as they don't allow any other output customisation),
//! - for `()`, as placeholder.
//!
//! The implementation for [`FnMut`] only supports fns that return a [`Future`]. Unfortunately
//! it's not possible to provide an implementation for fns that don't return a `Future` as well,
//! so to call sync code you must either provide an async handler, or use the [`SyncFnHandler`]
//! wrapper.
//!
//! # Examples
//!
//! In each example `on_data` is the following function:
//!
//! ```
//! # use watchexec::handler::Handler;
//! fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! ```
//!
//! Async closure:
//!
//! ```
//! use tokio::io::{AsyncWriteExt, stdout};
//! # use watchexec::handler::Handler;
//! # fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! on_data(|data: Vec<u8>| async move {
//! stdout().write_all(&data).await
//! });
//! ```
//!
//! Sync code in async closure:
//!
//! ```
//! use std::io::{Write, stdout};
//! # use watchexec::handler::Handler;
//! # fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! on_data(|data: Vec<u8>| async move {
//! stdout().write_all(&data)
//! });
//! ```
//!
//! Sync closure with wrapper:
//!
//! ```
//! use std::io::{Write, stdout};
//! # use watchexec::handler::{Handler, SyncFnHandler};
//! # fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! on_data(SyncFnHandler::from(|data: Vec<u8>| {
//! stdout().write_all(&data)
//! }));
//! ```
//!
//! Std channel:
//!
//! ```
//! use std::sync::mpsc;
//! # use watchexec::handler::Handler;
//! # fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! let (s, r) = mpsc::channel();
//! on_data(s);
//! ```
//!
//! Tokio channel:
//!
//! ```
//! use tokio::sync::mpsc;
//! # use watchexec::handler::Handler;
//! # fn on_data<T: Handler<Vec<u8>>>(_: T) {}
//! let (s, r) = mpsc::channel(123);
//! on_data(s);
//! ```
//!
//! Printing to console:
//!
//! ```
//! use std::io::{Write, stderr, stdout};
//! # use watchexec::handler::{Handler, PrintDebug, PrintDisplay};
//! # fn on_data<T: Handler<String>>(_: T) {}
//! on_data(PrintDebug(stdout()));
//! on_data(PrintDisplay(stderr()));
//! ```
use std::{error::Error, future::Future, io::Write, marker::PhantomData, sync::Arc};
use tokio::{runtime::Handle, sync::Mutex, task::block_in_place};
use crate::error::RuntimeError;
/// A callable that can be used to hook into watchexec.
pub trait Handler<T> {
/// Call the handler with the given data.
fn handle(&mut self, _data: T) -> Result<(), Box<dyn Error>>;
}
/// A shareable wrapper for a [`Handler`].
///
/// Internally this is a Tokio [`Mutex`].
pub struct HandlerLock<T>(Arc<Mutex<Box<dyn Handler<T> + Send>>>);
impl<T> HandlerLock<T>
where
T: Send,
{
/// Wrap a [`Handler`] into a lock.
#[must_use]
pub fn new(handler: Box<dyn Handler<T> + Send>) -> Self {
Self(Arc::new(Mutex::new(handler)))
}
/// Replace the handler with a new one.
pub async fn replace(&self, new: Box<dyn Handler<T> + Send>) {
let mut handler = self.0.lock().await;
*handler = new;
}
/// Call the handler.
pub async fn call(&self, data: T) -> Result<(), Box<dyn Error>> {
let mut handler = self.0.lock().await;
handler.handle(data)
}
}
impl<T> Clone for HandlerLock<T> {
fn clone(&self) -> Self {
Self(Arc::clone(&self.0))
}
}
impl<T> Default for HandlerLock<T>
where
T: Send,
{
fn default() -> Self {
Self::new(Box::new(()))
}
}
pub(crate) fn rte(ctx: &'static str, err: &dyn Error) -> RuntimeError {
RuntimeError::Handler {
ctx,
err: err.to_string(),
}
}
/// Wrapper for [`Handler`]s that are non-future [`FnMut`]s.
///
/// Construct using [`Into::into`]:
///
/// ```
/// # use watchexec::handler::{Handler as _, SyncFnHandler};
/// # let f: SyncFnHandler<(), std::io::Error, _> =
/// (|data| { dbg!(data); Ok(()) }).into()
/// # ;
/// ```
///
/// or [`From::from`]:
///
/// ```
/// # use watchexec::handler::{Handler as _, SyncFnHandler};
/// # let f: SyncFnHandler<(), std::io::Error, _> =
/// SyncFnHandler::from(|data| { dbg!(data); Ok(()) });
/// ```
pub struct SyncFnHandler<T, E, F>
where
E: Error + 'static,
F: FnMut(T) -> Result<(), E> + Send + 'static,
{
inner: F,
_t: PhantomData<T>,
_e: PhantomData<E>,
}
impl<T, E, F> From<F> for SyncFnHandler<T, E, F>
where
E: Error + 'static,
F: FnMut(T) -> Result<(), E> + Send + 'static,
{
fn from(inner: F) -> Self {
Self {
inner,
_t: PhantomData,
_e: PhantomData,
}
}
}
impl<T, E, F> Handler<T> for SyncFnHandler<T, E, F>
where
E: Error + 'static,
F: FnMut(T) -> Result<(), E> + Send + 'static,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
(self.inner)(data).map_err(|e| Box::new(e) as _)
}
}
impl<F, U, T, E> Handler<T> for F
where
E: Error + 'static,
F: FnMut(T) -> U + Send + 'static,
U: Future<Output = Result<(), E>>,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
// this will always be called within watchexec context, which runs within tokio
block_in_place(|| {
Handle::current()
.block_on((self)(data))
.map_err(|e| Box::new(e) as _)
})
}
}
impl<T> Handler<T> for () {
fn handle(&mut self, _data: T) -> Result<(), Box<dyn Error>> {
Ok::<(), std::convert::Infallible>(()).map_err(|e| Box::new(e) as _)
}
}
impl<T> Handler<T> for std::sync::mpsc::Sender<T>
where
T: Send + 'static,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
self.send(data).map_err(|e| Box::new(e) as _)
}
}
impl<T> Handler<T> for tokio::sync::mpsc::Sender<T>
where
T: std::fmt::Debug + 'static,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
self.try_send(data).map_err(|e| Box::new(e) as _)
}
}
/// A handler implementation to print to any [`Write`]r (e.g. stdout) in `Debug` format.
pub struct PrintDebug<W: Write>(pub W);
impl<T, W> Handler<T> for PrintDebug<W>
where
T: std::fmt::Debug,
W: Write,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
writeln!(self.0, "{data:?}").map_err(|e| Box::new(e) as _)
}
}
/// A handler implementation to print to any [`Write`]r (e.g. stdout) in `Display` format.
pub struct PrintDisplay<W: Write>(pub W);
impl<T, W> Handler<T> for PrintDisplay<W>
where
T: std::fmt::Display,
W: Write,
{
fn handle(&mut self, data: T) -> Result<(), Box<dyn Error>> {
writeln!(self.0, "{data}").map_err(|e| Box::new(e) as _)
}
}

67
crates/lib/src/id.rs Normal file
View File

@ -0,0 +1,67 @@
use std::{cell::Cell, num::NonZeroU64};
/// Unique opaque identifier.
#[must_use]
#[derive(Debug, Hash, PartialEq, Eq, Clone, Copy)]
pub struct Id {
thread: NonZeroU64,
counter: u64,
}
thread_local! {
static COUNTER: Cell<u64> = const { Cell::new(0) };
}
impl Default for Id {
fn default() -> Self {
let counter = COUNTER.get();
COUNTER.set(counter.wrapping_add(1));
Self {
thread: threadid(),
counter,
}
}
}
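A small sketch of the guarantee this gives (the test name is made up): successive `Id`s on one thread share the thread component but get distinct counters, so they never compare equal.

```
#[test]
fn ids_are_unique_per_thread() {
    use watchexec::Id;

    let a = Id::default();
    let b = Id::default();
    assert_ne!(a, b); // same thread, different counter
}
```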
fn threadid() -> NonZeroU64 {
use std::hash::{Hash, Hasher};
struct Extractor {
id: u64,
}
impl Hasher for Extractor {
fn finish(&self) -> u64 {
self.id
}
fn write(&mut self, _bytes: &[u8]) {}
fn write_u64(&mut self, n: u64) {
self.id = n;
}
}
let mut ex = Extractor { id: 0 };
std::thread::current().id().hash(&mut ex);
// SAFETY: guaranteed to be > 0
// safeguarded by the max(1), but this is already guaranteed by the thread id being a NonZeroU64
// internally; as that guarantee is not stable, we do make sure, just to be on the safe side.
unsafe { NonZeroU64::new_unchecked(ex.finish().max(1)) }
}
// Replace with this when the thread_id_value feature is stable
// fn threadid() -> NonZeroU64 {
// std::thread::current().id().as_u64()
// }
#[test]
fn test_threadid() {
let top = threadid();
std::thread::spawn(move || {
assert_ne!(top, threadid());
})
.join()
.expect("thread failed");
}

View File

@ -0,0 +1,105 @@
use std::future::Future;
use futures::{stream::FuturesUnordered, StreamExt};
use tokio::task::{JoinError, JoinHandle};
/// A collection of tasks spawned on a Tokio runtime.
///
/// This is conceptually a variant of Tokio's [`JoinSet`](tokio::task::JoinSet) which can attach
/// tasks after they've been spawned.
///
/// # Examples
///
/// Spawn multiple tasks and wait for them.
///
/// ```no_compile
/// use crate::late_join_set::LateJoinSet;
///
/// #[tokio::main]
/// async fn main() {
/// let mut set = LateJoinSet::default();
///
/// for i in 0..10 {
/// set.spawn(async move { println!("{i}"); });
/// }
///
/// let mut seen = [false; 10];
/// while let Some(res) = set.join_next().await {
/// let idx = res.unwrap();
/// seen[idx] = true;
/// }
///
/// for i in 0..10 {
/// assert!(seen[i]);
/// }
/// }
/// ```
///
/// Attach a task to a set after it's been spawned.
///
/// ```no_compile
/// use crate::late_join_set::LateJoinSet;
///
/// #[tokio::main]
/// async fn main() {
/// let mut set = LateJoinSet::default();
///
/// let handle = tokio::spawn(async move { println!("Hello, world!"); });
/// set.insert(handle);
/// set.abort_all();
/// }
/// ```
#[derive(Debug, Default)]
pub struct LateJoinSet {
tasks: FuturesUnordered<JoinHandle<()>>,
}
impl LateJoinSet {
/// Spawn the provided task on the `LateJoinSet`.
///
/// The provided future will start running in the background immediately when this method is
/// called, even if you don't await anything on this `LateJoinSet`.
///
/// # Panics
///
/// This method panics if called outside of a Tokio runtime.
#[track_caller]
pub fn spawn(&self, task: impl Future<Output = ()> + Send + 'static) {
self.insert(tokio::spawn(task));
}
/// Insert an already-spawned task into the [`LateJoinSet`].
pub fn insert(&self, task: JoinHandle<()>) {
self.tasks.push(task);
}
/// Waits until one of the tasks in the set completes.
///
/// Returns `None` if the set is empty.
pub async fn join_next(&mut self) -> Option<Result<(), JoinError>> {
self.tasks.next().await
}
/// Waits until all the tasks in the set complete.
///
/// Ignores any panics in the tasks shutting down.
pub async fn join_all(&mut self) {
while self.join_next().await.is_some() {}
self.tasks.clear();
}
/// Aborts all tasks on this `LateJoinSet`.
///
/// This does not remove the tasks from the `LateJoinSet`. To wait for the tasks to complete
/// cancellation, use `join_all` or call `join_next` in a loop until the `LateJoinSet` is empty.
pub fn abort_all(&self) {
self.tasks.iter().for_each(|jh| jh.abort());
}
}
impl Drop for LateJoinSet {
fn drop(&mut self) {
self.abort_all();
self.tasks.clear();
}
}

View File

@ -1,93 +1,54 @@
//! Watchexec: a library for utilities and programs which respond to events;
//! file changes, human interaction, and more.
//! Watchexec: a library for utilities and programs which respond to (file, signal, etc) events
//! primarily by launching or managing other programs.
//!
//! Also see the CLI tool: <https://watchexec.github.io/>
//!
//! This library is powered by [Tokio](https://tokio.rs).
//!
//! The main way to use this crate involves constructing a [`Watchexec`] around an
//! [`InitConfig`][config::InitConfig] and a [`RuntimeConfig`][config::RuntimeConfig], then running
//! it. [`Handler`][handler::Handler]s are used to hook into watchexec at various points. The
//! runtime config can be changed at any time with the [`Watchexec::reconfigure()`] method.
//! The main way to use this crate involves constructing a [`Watchexec`] around a [`Config`], then
//! running it. [`Handler`][handler::Handler]s are used to hook into Watchexec at various points.
//! The config can be changed at any time with the [`Watchexec::reconfigure()`] method.
//!
//! It's recommended to use the [miette] erroring library in applications, but all errors implement
//! [`std::error::Error`] so your favourite error handling library can of course be used.
//!
//! ```no_run
//! use miette::{IntoDiagnostic, Result};
//! use watchexec::{
//! Watchexec,
//! action::{Action, Outcome},
//! config::{InitConfig, RuntimeConfig},
//! handler::{Handler as _, PrintDebug},
//! };
//! use watchexec_signals::Signal;
//! use watchexec::Watchexec;
//!
//! #[tokio::main]
//! async fn main() -> Result<()> {
//! let mut init = InitConfig::default();
//! init.on_error(PrintDebug(std::io::stderr()));
//!
//! let mut runtime = RuntimeConfig::default();
//! runtime.pathset(["watchexec.conf"]);
//!
//! let conf = YourConfigFormat::load_from_file("watchexec.conf").await?;
//! conf.apply(&mut runtime);
//!
//! let we = Watchexec::new(init, runtime.clone())?;
//! let w = we.clone();
//!
//! let c = runtime.clone();
//! runtime.on_action(move |action: Action| {
//! let mut c = c.clone();
//! let w = w.clone();
//! async move {
//! for event in action.events.iter() {
//! if event.paths().any(|(p, _)| p.ends_with("/watchexec.conf")) {
//! let conf = YourConfigFormat::load_from_file("watchexec.conf").await?;
//!
//! conf.apply(&mut c);
//! w.reconfigure(c.clone());
//! // tada! self-reconfiguring watchexec on config file change!
//!
//! break;
//! }
//! }
//!
//! action.outcome(Outcome::if_running(
//! Outcome::DoNothing,
//! Outcome::both(Outcome::Clear, Outcome::Start),
//! ));
//!
//! Ok(())
//! # as std::result::Result<_, MietteStub>
//! let wx = Watchexec::new(|mut action| {
//! // print any events
//! for event in action.events.iter() {
//! eprintln!("EVENT: {event:?}");
//! }
//! });
//!
//! we.reconfigure(runtime);
//! we.main().await.into_diagnostic()?;
//! // if Ctrl-C is received, quit
//! if action.signals().any(|sig| sig == Signal::Interrupt) {
//! action.quit();
//! }
//!
//! action
//! })?;
//!
//! // watch the current directory
//! wx.config.pathset(["."]);
//!
//! wx.main().await.into_diagnostic()?;
//! Ok(())
//! }
//! # struct YourConfigFormat;
//! # impl YourConfigFormat {
//! # async fn load_from_file(_: &str) -> std::result::Result<Self, MietteStub> { Ok(Self) }
//! # fn apply(&self, _: &mut RuntimeConfig) { }
//! # }
//! # use miette::Diagnostic;
//! # use thiserror::Error;
//! # #[derive(Debug, Error, Diagnostic)]
//! # #[error("stub")]
//! # struct MietteStub;
//! ```
//!
//! Alternatively, one can use the modules exposed by the crate and the external crates such as
//! Alternatively, you can use the modules exposed by the crate and the external crates such as
//! [ClearScreen][clearscreen] and [Command Group][command_group] to build something more advanced,
//! at the cost of reimplementing the glue code. See the examples folder for some basic/demo tools
//! written with the individual modules.
//! at the cost of reimplementing the glue code.
//!
//! Note that the library generates a _lot_ of debug messaging with [tracing]. You should not enable
//! printing even error log messages for this crate unless it's for debugging. Instead, make use of
//! the [`InitConfig::on_error()`][config::InitConfig::on_error()] method to define a handler for
//! errors occurring at runtime that are _meant_ for you to handle (by printing out or otherwise).
//! Note that the library generates a _lot_ of debug messaging with [tracing]. **You should not
//! enable printing even `error`-level log messages for this crate unless it's for debugging.**
//! Instead, make use of the [`Config::on_error()`] method to define a handler for errors
//! occurring at runtime that are _meant_ for you to handle (by printing out or otherwise).
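A minimal sketch of what that looks like with the v3 API shown in this diff (the `log_errors` helper is made up; `ErrorHook` is re-exported at the crate root): the handler receives each runtime error and decides what, if anything, to surface.

```
fn log_errors(wx: &watchexec::Watchexec) {
    // print runtime errors ourselves instead of relying on tracing output
    wx.config.on_error(|err: watchexec::ErrorHook| {
        eprintln!("[watchexec] runtime error: {}", err.error);
    });
}
```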
#![doc(html_favicon_url = "https://watchexec.github.io/logo:watchexec.svg")]
#![doc(html_logo_url = "https://watchexec.github.io/logo:watchexec.svg")]
@ -96,29 +57,31 @@
// the toolkit to make your own
pub mod action;
pub mod command;
pub mod error;
pub mod filter;
pub mod fs;
pub mod keyboard;
pub mod paths;
pub mod signal;
pub mod sources;
// the core experience
pub mod changeable;
pub mod config;
pub mod handler;
mod id;
mod late_join_set;
mod watchexec;
// compatibility
#[deprecated(
note = "use the `watchexec-events` crate directly instead",
since = "2.2.0"
)]
pub use watchexec_events as event;
#[doc(inline)]
pub use crate::watchexec::{ErrorHook, Watchexec};
pub use crate::{
id::Id,
watchexec::{ErrorHook, Watchexec},
};
#[doc(no_inline)]
pub use crate::config::Config;
#[doc(no_inline)]
pub use watchexec_supervisor::{command, job};
#[cfg(debug_assertions)]
#[doc(hidden)]
pub mod readme_doc_check {
#[doc = include_str!("../README.md")]

View File

@ -6,7 +6,7 @@ use std::{
path::{Path, PathBuf},
};
use crate::event::{Event, FileType, Tag};
use watchexec_events::{Event, FileType, Tag};
/// The separator for paths used in environment variables.
#[cfg(unix)]

View File

@ -0,0 +1,5 @@
//! Sources of events.
pub mod fs;
pub mod keyboard;
pub mod signal;

View File

@ -5,48 +5,46 @@ use std::{
fs::metadata,
mem::take,
path::{Path, PathBuf},
sync::Arc,
time::Duration,
};
use async_priority_channel as priority;
use normalize_path::NormalizePath;
use notify::{Config, Watcher as _};
use tokio::sync::{mpsc, watch};
use tokio::sync::mpsc;
use tracing::{debug, error, trace};
use watchexec_events::{Event, Priority, Source, Tag};
use crate::{
error::{CriticalError, FsWatcherError, RuntimeError},
event::{Event, Priority, Source, Tag},
Config,
};
/// What kind of filesystem watcher to use.
///
/// For now only native and poll watchers are supported. In the future there may be additional
/// watchers available on some platforms.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[non_exhaustive]
pub enum Watcher {
/// The Notify-recommended watcher on the platform.
///
/// For platforms Notify supports, that's a [native implementation][notify::RecommendedWatcher],
/// for others it's polling with a default interval.
#[default]
Native,
/// Notify's [poll watcher][notify::PollWatcher] with a custom interval.
Poll(Duration),
}
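For example, a sketch of selecting the poll watcher explicitly (the `prefer_polling` helper and two-second interval are purely illustrative), which can help on network filesystems where native events don't arrive:

```
use std::time::Duration;
use watchexec::{sources::fs::Watcher, Config};

fn prefer_polling(config: &Config) {
    // fall back to scanning the pathset every two seconds
    config.file_watcher(Watcher::Poll(Duration::from_secs(2)));
}
```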
impl Default for Watcher {
fn default() -> Self {
Self::Native
}
}
impl Watcher {
fn create(
self,
f: impl notify::EventHandler,
) -> Result<Box<dyn notify::Watcher + Send>, RuntimeError> {
) -> Result<Box<dyn notify::Watcher + Send>, CriticalError> {
use notify::{Config, Watcher as _};
match self {
Self::Native => {
notify::RecommendedWatcher::new(f, Config::default()).map(|w| Box::new(w) as _)
@ -56,7 +54,7 @@ impl Watcher {
.map(|w| Box::new(w) as _)
}
}
.map_err(|err| RuntimeError::FsWatcher {
.map_err(|err| CriticalError::FsWatcherInit {
kind: self,
err: if cfg!(target_os = "linux")
&& (matches!(err.kind, notify::ErrorKind::MaxFilesWatch)
@ -74,19 +72,6 @@ impl Watcher {
}
}
/// The configuration of the [fs][self] worker.
///
/// This is marked non-exhaustive so new configuration can be added without breaking.
#[derive(Clone, Debug, Default)]
#[non_exhaustive]
pub struct WorkingData {
/// The set of paths to be watched.
pub pathset: Vec<WatchedPath>,
/// The kind of watcher to be used.
pub watcher: Watcher,
}
/// A path to watch.
///
/// This is currently only a wrapper around a [`PathBuf`], but may be augmented in the future.
@ -128,9 +113,7 @@ impl AsRef<Path> for WatchedPath {
/// While you can run several, you should only have one.
///
/// This only does a bare minimum of setup; to actually start the work, you need to set a non-empty
/// pathset on the [`WorkingData`] with the [`watch`] channel, and send a notification. Take care
/// _not_ to drop the watch sender: this will cause the worker to stop gracefully, which may not be
/// what was expected.
/// pathset in the [`Config`].
///
/// Note that the paths emitted by the watcher are normalised. No guarantee is made about the
/// implementation or output of that normalisation (it may change without notice).
@ -141,25 +124,23 @@ impl AsRef<Path> for WatchedPath {
///
/// ```no_run
/// use async_priority_channel as priority;
/// use tokio::sync::{mpsc, watch};
/// use watchexec::fs::{worker, WorkingData};
/// use tokio::sync::mpsc;
/// use watchexec::{Config, sources::fs::worker};
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let (ev_s, _) = priority::bounded(1024);
/// let (er_s, _) = mpsc::channel(64);
/// let (wd_s, wd_r) = watch::channel(WorkingData::default());
///
/// let mut wkd = WorkingData::default();
/// wkd.pathset = vec![".".into()];
/// wd_s.send(wkd)?;
/// let config = Config::default();
/// config.pathset(["."]);
///
/// worker(wd_r, er_s, ev_s).await?;
/// worker(config.into(), er_s, ev_s).await?;
/// Ok(())
/// }
/// ```
pub async fn worker(
mut working: watch::Receiver<WorkingData>,
config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
) -> Result<(), CriticalError> {
@ -169,95 +150,102 @@ pub async fn worker(
let mut watcher = None;
let mut pathset = HashSet::new();
while working.changed().await.is_ok() {
// In separate scope so we drop the working read lock as early as we can
let (new_watcher, to_watch, to_drop) = {
let data = working.borrow();
trace!(?data, "filesystem worker got a working data change");
let mut config_watch = config.watch();
loop {
config_watch.next().await;
trace!("filesystem worker got a config change");
if data.pathset.is_empty() {
trace!("no more watched paths, dropping watcher");
watcher.take();
pathset.drain();
continue;
}
if watcher.is_none() || watcher_type != data.watcher {
pathset.drain();
(Some(data.watcher), data.pathset.clone(), Vec::new())
} else {
let mut to_watch = Vec::with_capacity(data.pathset.len());
let mut to_drop = Vec::with_capacity(pathset.len());
for path in &data.pathset {
if !pathset.contains(path) {
to_watch.push(path.clone());
}
if config.pathset.get().is_empty() {
trace!(
"{}",
if pathset.is_empty() {
"no watched paths, no watcher needed"
} else {
"no more watched paths, dropping watcher"
}
);
watcher.take();
pathset.clear();
continue;
}
for path in &pathset {
if !data.pathset.contains(path) {
to_drop.push(path.clone());
}
}
// now we know the watcher should be alive, so let's start it if it's not already:
(None, to_watch, to_drop)
}
};
if let Some(kind) = new_watcher {
debug!(?kind, "creating new watcher");
let config_watcher = config.file_watcher.get();
if watcher.is_none() || watcher_type != config_watcher {
debug!(kind=?config_watcher, "creating new watcher");
let n_errors = errors.clone();
let n_events = events.clone();
match kind.create(move |nev: Result<notify::Event, notify::Error>| {
trace!(event = ?nev, "receiving possible event from watcher");
if let Err(e) = process_event(nev, kind, &n_events) {
n_errors.try_send(e).ok();
watcher_type = config_watcher;
watcher = config_watcher
.create(move |nev: Result<notify::Event, notify::Error>| {
trace!(event = ?nev, "receiving possible event from watcher");
if let Err(e) = process_event(nev, config_watcher, &n_events) {
n_errors.try_send(e).ok();
}
})
.map(Some)?;
}
// now let's calculate which paths we should add to the watch, and which we should drop:
let config_pathset = config.pathset.get();
let (to_watch, to_drop) = if pathset.is_empty() {
// if the current pathset is empty, we can take a shortcut
(config_pathset, Vec::new())
} else {
let mut to_watch = Vec::with_capacity(config_pathset.len());
let mut to_drop = Vec::with_capacity(pathset.len());
for path in &pathset {
if !config_pathset.contains(path) {
to_drop.push(path.clone()); // try dropping the clone?
}
}) {
Ok(w) => {
watcher = Some(w);
watcher_type = kind;
}
for path in config_pathset {
if !pathset.contains(&path) {
to_watch.push(path);
}
Err(e) => {
}
(to_watch, to_drop)
};
// now apply it to the watcher
let Some(watcher) = watcher.as_mut() else {
panic!("BUG: watcher should exist at this point");
};
debug!(?to_watch, ?to_drop, "applying changes to the watcher");
for path in to_drop {
trace!(?path, "removing path from the watcher");
if let Err(err) = watcher.unwatch(path.as_ref()) {
error!(?err, "notify unwatch() error");
for e in notify_multi_path_errors(watcher_type, path, err, true) {
errors.send(e).await?;
}
} else {
pathset.remove(&path);
}
}
if let Some(w) = watcher.as_mut() {
debug!(?to_watch, ?to_drop, "applying changes to the watcher");
for path in to_drop {
trace!(?path, "removing path from the watcher");
if let Err(err) = w.unwatch(path.as_ref()) {
error!(?err, "notify unwatch() error");
for e in notify_multi_path_errors(watcher_type, path, err, true) {
errors.send(e).await?;
}
} else {
pathset.remove(&path);
}
}
for path in to_watch {
trace!(?path, "adding path to the watcher");
if let Err(err) = w.watch(path.as_ref(), notify::RecursiveMode::Recursive) {
error!(?err, "notify watch() error");
for e in notify_multi_path_errors(watcher_type, path, err, false) {
errors.send(e).await?;
}
// TODO: unwatch and re-watch manually while ignoring all the erroring paths
// See https://github.com/watchexec/watchexec/issues/218
} else {
pathset.insert(path);
for path in to_watch {
trace!(?path, "adding path to the watcher");
if let Err(err) = watcher.watch(path.as_ref(), notify::RecursiveMode::Recursive) {
error!(?err, "notify watch() error");
for e in notify_multi_path_errors(watcher_type, path, err, false) {
errors.send(e).await?;
}
// TODO: unwatch and re-watch manually while ignoring all the erroring paths
// See https://github.com/watchexec/watchexec/issues/218
} else {
pathset.insert(path);
}
}
}
debug!("ending file watcher");
Ok(())
}
fn notify_multi_path_errors(

View File

@ -1,68 +1,56 @@
//! Event source for keyboard input and related events
use std::sync::Arc;
use async_priority_channel as priority;
use tokio::{
io::AsyncReadExt,
sync::{mpsc, oneshot, watch},
select, spawn,
sync::{mpsc, oneshot},
};
use tracing::trace;
pub use watchexec_events::Keyboard;
use watchexec_events::{Event, Keyboard, Priority, Source, Tag};
use crate::{
error::{CriticalError, KeyboardWatcherError, RuntimeError},
event::{Event, Priority, Source, Tag},
error::{CriticalError, RuntimeError},
Config,
};
/// The configuration of the [keyboard][self] worker.
///
/// This is marked non-exhaustive so new configuration can be added without breaking.
#[derive(Debug, Clone, Default)]
#[non_exhaustive]
pub struct WorkingData {
/// Whether or not to watch for 'end of file' on stdin
pub eof: bool,
}
/// Launch the keyboard event worker.
///
/// While you can run several, you should only have one.
///
/// Sends keyboard events to the provided 'events' channel.
pub async fn worker(
mut working: watch::Receiver<WorkingData>,
config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
) -> Result<(), CriticalError> {
let mut send_close = None;
while working.changed().await.is_ok() {
let watch_for_eof = { working.borrow().eof };
match (watch_for_eof, &send_close) {
// If we want to watch stdin and we're not already watching it then spawn a task to watch it
let mut config_watch = config.watch();
loop {
config_watch.next().await;
match (config.keyboard_events.get(), &send_close) {
// if we want to watch stdin and we're not already watching it then spawn a task to watch it
(true, None) => {
let (close_s, close_r) = tokio::sync::oneshot::channel::<()>();
let (close_s, close_r) = oneshot::channel::<()>();
send_close = Some(close_s);
tokio::spawn(watch_stdin(errors.clone(), events.clone(), close_r));
spawn(watch_stdin(errors.clone(), events.clone(), close_r));
}
// If we don't want to watch stdin but we are already watching it then send a close signal to end the
// watching
// if we don't want to watch stdin but we are already watching it then send a close signal to end
// the watching
(false, Some(_)) => {
// Repeat match using 'take'
if let Some(close_s) = send_close.take() {
if close_s.send(()).is_err() {
errors
.send(RuntimeError::KeyboardWatcher {
err: KeyboardWatcherError::StdinShutdown,
})
.await?;
}
}
// ignore the send error: if the channel is closed, the watch task is already gone
send_close
.take()
.expect("unreachable due to match")
.send(())
.ok();
}
// Otherwise no action is required
// otherwise no action is required
_ => {}
}
}
Ok(())
}
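As a sketch of the consuming side (the `quit_on_stdin_eof` helper is made up; `Tag::Keyboard` and `Keyboard::Eof` come from the `watchexec-events` crate), enabling this source pairs with checking for the EOF event in the action handler:

```
use watchexec::Watchexec;
use watchexec_events::{Keyboard, Tag};

fn quit_on_stdin_eof(wx: &Watchexec) {
    // enable the stdin watcher...
    wx.config.keyboard_events(true);

    // ...and quit when EOF arrives as an event
    wx.config.on_action(|mut action| {
        let eof = action
            .events
            .iter()
            .flat_map(|event| event.tags.iter())
            .any(|tag| matches!(tag, Tag::Keyboard(Keyboard::Eof)));
        if eof {
            action.quit();
        }
        action
    });
}
```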
async fn watch_stdin(
@ -73,7 +61,7 @@ async fn watch_stdin(
let mut stdin = tokio::io::stdin();
let mut buffer = [0; 10];
loop {
tokio::select! {
select! {
result = stdin.read(&mut buffer[..]) => {
// Read from stdin and if we've read 0 bytes then we assume stdin has received an 'eof' so
// we send that event into the system and break out of the loop as 'eof' means that there will

View File

@ -1,41 +1,22 @@
//! Event source for signals / notifications sent to the main process.
use std::sync::Arc;
use async_priority_channel as priority;
use tokio::{select, sync::mpsc};
use tracing::{debug, trace};
use watchexec_events::{Event, Priority, Source, Tag};
use watchexec_signals::Signal;
use crate::{
error::{CriticalError, RuntimeError},
event::{Event, Priority, Source, Tag},
Config,
};
/// Compatibility shim for the old `watchexec::signal::process` module.
pub mod process {
#[deprecated(
note = "use the `watchexec-signals` crate directly instead",
since = "2.2.0"
)]
pub use watchexec_signals::Signal as SubSignal;
}
/// Compatibility shim for the old `watchexec::signal::source` module.
pub mod source {
#[deprecated(
note = "use `watchexec::signal::worker` directly instead",
since = "2.2.0"
)]
pub use super::worker;
#[deprecated(
note = "use the `watchexec-signals` crate directly instead",
since = "2.2.0"
)]
pub use watchexec_signals::Signal as MainSignal;
}
/// Launch the signal event worker.
///
/// While you _can_ run several, you **must** only have one. This may be enforced later.
/// While you _could_ run several (it won't panic), you **must** only have one (for correctness).
/// This may be enforced later.
///
/// # Examples
///
@ -44,26 +25,28 @@ pub mod source {
/// ```no_run
/// use tokio::sync::mpsc;
/// use async_priority_channel as priority;
/// use watchexec::signal::source::worker;
/// use watchexec::sources::signal::worker;
///
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
/// let (ev_s, _) = priority::bounded(1024);
/// let (er_s, _) = mpsc::channel(64);
///
/// worker(er_s, ev_s).await?;
/// worker(Default::default(), er_s, ev_s).await?;
/// Ok(())
/// }
/// ```
pub async fn worker(
config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
) -> Result<(), CriticalError> {
imp_worker(errors, events).await
imp_worker(config, errors, events).await
}
#[cfg(unix)]
async fn imp_worker(
_config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
) -> Result<(), CriticalError> {
@ -72,12 +55,12 @@ async fn imp_worker(
debug!("launching unix signal worker");
macro_rules! listen {
($sig:ident) => {{
trace!(kind=%stringify!($sig), "listening for unix signal");
signal(SignalKind::$sig()).map_err(|err| CriticalError::IoError {
about: concat!("setting ", stringify!($sig), " signal listener"), err
})?
}}
($sig:ident) => {{
trace!(kind=%stringify!($sig), "listening for unix signal");
signal(SignalKind::$sig()).map_err(|err| CriticalError::IoError {
about: concat!("setting ", stringify!($sig), " signal listener"), err
})?
}}
}
let mut s_hangup = listen!(hangup);
@ -104,6 +87,7 @@ async fn imp_worker(
#[cfg(windows)]
async fn imp_worker(
_config: Arc<Config>,
errors: mpsc::Sender<RuntimeError>,
events: priority::Sender<Event, Priority>,
) -> Result<(), CriticalError> {
@ -112,12 +96,12 @@ async fn imp_worker(
debug!("launching windows signal worker");
macro_rules! listen {
($sig:ident) => {{
trace!(kind=%stringify!($sig), "listening for windows process notification");
$sig().map_err(|err| CriticalError::IoError {
about: concat!("setting ", stringify!($sig), " signal listener"), err
})?
}}
($sig:ident) => {{
trace!(kind=%stringify!($sig), "listening for windows process notification");
$sig().map_err(|err| CriticalError::IoError {
about: concat!("setting ", stringify!($sig), " signal listener"), err
})?
}}
}
let mut sigint = listen!(ctrl_c);

View File

@ -1,33 +1,24 @@
use std::{
fmt,
future::Future,
mem::{replace, take},
ops::{Deref, DerefMut},
pin::Pin,
sync::Arc,
task::{Context, Poll},
};
use std::{fmt, future::Future, sync::Arc};
use async_priority_channel as priority;
use atomic_take::AtomicTake;
use futures::TryFutureExt;
use miette::Diagnostic;
use once_cell::sync::OnceCell;
use tokio::{
spawn,
sync::{mpsc, watch, Notify},
task::JoinHandle,
try_join,
sync::{mpsc, Notify},
task::{JoinHandle, JoinSet},
};
use tracing::{debug, error, trace};
use watchexec_events::{Event, Priority};
use crate::{
action,
config::{InitConfig, RuntimeConfig},
error::{CriticalError, ReconfigError, RuntimeError},
event::{Event, Priority},
fs,
handler::{rte, Handler},
keyboard, signal,
action::{self, ActionHandler},
changeable::ChangeableFn,
error::{CriticalError, RuntimeError},
sources::{fs, keyboard, signal},
Config,
};
/// The main watchexec runtime.
@ -38,14 +29,63 @@ use crate::{
/// error hook, and provides an interface to change the runtime configuration during the runtime,
/// inject synthetic events, and wait for graceful shutdown.
pub struct Watchexec {
handle: Arc<AtomicTake<JoinHandle<Result<(), CriticalError>>>>,
/// The configuration of this Watchexec instance.
///
/// Configuration can be changed at any time using the provided methods on [`Config`].
///
/// Treat this field as readonly: replacing it with a different instance of `Config` will not do
/// anything except potentially lose you access to the actual Watchexec config. In normal use
/// you'll have obtained `Watchexec` behind an `Arc` so that won't be an issue.
///
/// # Examples
///
/// Change the action handler:
///
/// ```no_run
/// # use watchexec::Watchexec;
/// let wx = Watchexec::default();
/// wx.config.on_action(|mut action| {
/// if action.signals().next().is_some() {
/// action.quit();
/// }
///
/// action
/// });
/// ```
///
/// Set paths to be watched:
///
/// ```no_run
/// # use watchexec::Watchexec;
/// let wx = Watchexec::new(|mut action| {
/// if action.signals().next().is_some() {
/// action.quit();
/// } else {
/// for event in action.events.iter() {
/// println!("{event:?}");
/// }
/// }
///
/// action
/// }).unwrap();
///
/// wx.config.pathset(["."]);
/// ```
pub config: Arc<Config>,
start_lock: Arc<Notify>,
action_watch: watch::Sender<action::WorkingData>,
fs_watch: watch::Sender<fs::WorkingData>,
keyboard_watch: watch::Sender<keyboard::WorkingData>,
event_input: priority::Sender<Event, Priority>,
handle: Arc<AtomicTake<JoinHandle<Result<(), CriticalError>>>>,
}
impl Default for Watchexec {
/// Instantiate with default config.
///
/// Note that this will panic if the constructor errors.
///
/// Prefer calling `new()` instead.
fn default() -> Self {
Self::with_config(Default::default()).expect("Use Watchexec::new() to avoid this panic")
}
}
impl fmt::Debug for Watchexec {
@ -55,109 +95,106 @@ impl fmt::Debug for Watchexec {
}
impl Watchexec {
/// Instantiates a new `Watchexec` runtime from configuration.
/// Instantiates a new `Watchexec` runtime given an initial action handler.
///
/// Returns an [`Arc`] for convenience; use [`try_unwrap`][Arc::try_unwrap()] to get the value
/// directly if needed.
///
/// Note that `RuntimeConfig` is not a "live" or "shared" instance: if using reconfiguration,
/// you'll usually pass a `clone()` of your `RuntimeConfig` instance to this function; changes
/// made to the instance you _keep_ will not automatically be used by Watchexec, you need to
/// call [`reconfigure()`](Watchexec::reconfigure) with your updated config to apply the changes.
/// directly if needed, or use `with_config`.
///
/// Look at the [`Config`] documentation for more on the required action handler.
/// Watchexec will subscribe to most signals sent to the process it runs in and send them, as
/// [`Event`]s, to the action handler. At minimum, you should check for interrupt/ctrl-c events
/// and return an [`Outcome::Exit`], otherwise hitting ctrl-c will do nothing.
///
/// [`Outcome::Exit`]: crate::action::Outcome::Exit
/// and call `action.quit()` in your handler, otherwise hitting ctrl-c will do nothing.
pub fn new(
mut init: InitConfig,
mut runtime: RuntimeConfig,
action_handler: impl (Fn(ActionHandler) -> ActionHandler) + Send + Sync + 'static,
) -> Result<Arc<Self>, CriticalError> {
debug!(?init, ?runtime, pid=%std::process::id(), version=%env!("CARGO_PKG_VERSION"), "initialising");
let config = Config::default();
config.on_action(action_handler);
Self::with_config(config).map(Arc::new)
}
let (ev_s, ev_r) = priority::bounded(init.event_channel_size);
let (ac_s, ac_r) = watch::channel(take(&mut runtime.action));
let (fs_s, fs_r) = watch::channel(fs::WorkingData::default());
let (keyboard_s, keyboard_r) = watch::channel(keyboard::WorkingData::default());
/// Instantiates a new `Watchexec` runtime given an initial async action handler.
///
/// This is the same as [`new`](fn@Self::new) except the action handler is async.
pub fn new_async(
action_handler: impl (Fn(ActionHandler) -> Box<dyn Future<Output = ActionHandler> + Send + Sync>)
+ Send
+ Sync
+ 'static,
) -> Result<Arc<Self>, CriticalError> {
let config = Config::default();
config.on_action_async(action_handler);
Self::with_config(config).map(Arc::new)
}
let event_input = ev_s.clone();
/// Instantiates a new `Watchexec` runtime with a config.
///
/// This is generally not needed: the config can be changed after instantiation (before and
/// after _starting_ Watchexec with `main()`). The only time this should be used is to set the
/// "unchangeable" configuration items for internal details like buffer sizes for queues, or to
/// obtain a `Self` that isn't wrapped in an `Arc`, unlike `new()`.
pub fn with_config(config: Config) -> Result<Self, CriticalError> {
debug!(?config, pid=%std::process::id(), version=%env!("CARGO_PKG_VERSION"), "initialising");
let config = Arc::new(config);
let outer_config = config.clone();
// TODO: figure out how to do this (aka start the fs work) after the main task start lock
trace!("sending initial config to fs worker");
fs_s.send(take(&mut runtime.fs))
.expect("cannot send to just-created fs watch (bug)");
trace!("sending initial config to keyboard worker");
keyboard_s
.send(take(&mut runtime.keyboard))
.expect("cannot send to just-created keyboard watch (bug)");
trace!("creating main task");
let notify = Arc::new(Notify::new());
let start_lock = notify.clone();
let (ev_s, ev_r) = priority::bounded(config.event_channel_size);
let event_input = ev_s.clone();
trace!("creating main task");
let handle = spawn(async move {
trace!("waiting for start lock");
notify.notified().await;
debug!("starting main task");
let (er_s, er_r) = mpsc::channel(init.error_channel_size);
let (er_s, er_r) = mpsc::channel(config.error_channel_size);
let eh = replace(&mut init.error_handler, Box::new(()) as _);
let mut tasks = JoinSet::new();
let action = SubTask::spawn(
"action",
action::worker(ac_r, er_s.clone(), ev_s.clone(), ev_r),
tasks.spawn(action::worker(config.clone(), er_s.clone(), ev_r).map_ok(|_| "action"));
tasks.spawn(fs::worker(config.clone(), er_s.clone(), ev_s.clone()).map_ok(|_| "fs"));
tasks.spawn(
signal::worker(config.clone(), er_s.clone(), ev_s.clone()).map_ok(|_| "signal"),
);
let fs = SubTask::spawn("fs", fs::worker(fs_r, er_s.clone(), ev_s.clone()));
let signal =
SubTask::spawn("signal", signal::source::worker(er_s.clone(), ev_s.clone()));
let keyboard = SubTask::spawn(
"keyboard",
keyboard::worker(keyboard_r, er_s.clone(), ev_s.clone()),
tasks.spawn(
keyboard::worker(config.clone(), er_s.clone(), ev_s.clone()).map_ok(|_| "keyboard"),
);
tasks.spawn(error_hook(er_r, config.error_handler.clone()).map_ok(|_| "error"));
let error_hook = SubTask::spawn("error_hook", error_hook(er_r, eh));
// Use Tokio TaskSet when that lands
try_join!(action, error_hook, fs, signal, keyboard)
.map(drop)
.or_else(|e| {
// Close event channel to signal worker task to stop
ev_s.close();
if matches!(e, CriticalError::Exit) {
trace!("got graceful exit request via critical error, erasing the error");
Ok(())
} else {
Err(e)
while let Some(Ok(res)) = tasks.join_next().await {
match res {
Ok("action") => {
debug!("action worker exited, ending watchexec");
break;
}
})
.map(|_| {
debug!("main task graceful exit");
})
Ok(task) => {
debug!(task, "worker exited");
}
Err(CriticalError::Exit) => {
trace!("got graceful exit request via critical error, erasing the error");
// Close event channel to signal worker task to stop
ev_s.close();
}
Err(e) => {
return Err(e);
}
}
}
debug!("main task graceful exit");
tasks.shutdown().await;
Ok(())
});
trace!("done with setup");
Ok(Arc::new(Self {
handle: Arc::new(AtomicTake::new(handle)),
Ok(Self {
config: outer_config,
start_lock,
action_watch: ac_s,
fs_watch: fs_s,
keyboard_watch: keyboard_s,
event_input,
}))
}
/// Applies a new [`RuntimeConfig`] to the runtime.
pub fn reconfigure(&self, config: RuntimeConfig) -> Result<(), ReconfigError> {
debug!(?config, "reconfiguring");
self.action_watch.send(config.action)?;
self.fs_watch.send(config.fs)?;
self.keyboard_watch.send(config.keyboard)?;
Ok(())
handle: Arc::new(AtomicTake::new(handle)),
})
}
/// Inputs an [`Event`] directly.
@ -190,7 +227,7 @@ impl Watchexec {
async fn error_hook(
mut errors: mpsc::Receiver<RuntimeError>,
mut handler: Box<dyn Handler<ErrorHook> + Send>,
handler: ChangeableFn<ErrorHook, ()>,
) -> Result<(), CriticalError> {
while let Some(err) = errors.recv().await {
if matches!(err, RuntimeError::Exit) {
@ -199,20 +236,10 @@ async fn error_hook(
}
error!(%err, "runtime error");
let hook = ErrorHook::new(err);
let crit = hook.critical.clone();
if let Err(err) = handler.handle(hook) {
error!(%err, "error while handling error");
let rehook = ErrorHook::new(rte("error hook", err.as_ref()));
let recrit = rehook.critical.clone();
handler.handle(rehook).unwrap_or_else(|err| {
error!(%err, "error while handling error of handling error");
});
ErrorHook::handle_crit(recrit, "error handler error handler")?;
} else {
ErrorHook::handle_crit(crit, "error handler")?;
}
let payload = ErrorHook::new(err);
let crit = payload.critical.clone();
handler.call(payload);
ErrorHook::handle_crit(crit)?;
}
Ok(())
@ -242,19 +269,16 @@ impl ErrorHook {
}
}
fn handle_crit(
crit: Arc<OnceCell<CriticalError>>,
name: &'static str,
) -> Result<(), CriticalError> {
fn handle_crit(crit: Arc<OnceCell<CriticalError>>) -> Result<(), CriticalError> {
match Arc::try_unwrap(crit) {
Err(err) => {
error!(?err, "{name} hook has an outstanding ref");
error!(?err, "error handler hook has an outstanding ref");
Ok(())
}
Ok(crit) => crit.into_inner().map_or_else(
|| Ok(()),
|crit| {
debug!(%crit, "{name} output a critical error");
debug!(%crit, "error handler output a critical error");
Err(crit)
},
),
@ -282,62 +306,3 @@ impl ErrorHook {
.ok();
}
}
#[derive(Debug)]
struct SubTask {
name: &'static str,
handle: JoinHandle<Result<(), CriticalError>>,
}
impl SubTask {
pub fn spawn(
name: &'static str,
task: impl Future<Output = Result<(), CriticalError>> + Send + 'static,
) -> Self {
debug!(subtask=%name, "spawning subtask");
Self {
name,
handle: spawn(task),
}
}
}
impl Drop for SubTask {
fn drop(&mut self) {
debug!(subtask=%self.name, "aborting subtask");
self.handle.abort();
}
}
impl Deref for SubTask {
type Target = JoinHandle<Result<(), CriticalError>>;
fn deref(&self) -> &Self::Target {
&self.handle
}
}
impl DerefMut for SubTask {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.handle
}
}
impl Future for SubTask {
type Output = Result<(), CriticalError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let subtask = self.name;
match Pin::new(&mut Pin::into_inner(self).handle).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(join_res) => {
debug!(%subtask, "finishing subtask");
Poll::Ready(
join_res
.map_err(CriticalError::MainTaskJoin)
.and_then(|x| x),
)
}
}
}
}

View File

@ -1,10 +1,8 @@
use std::{collections::HashMap, ffi::OsString, path::MAIN_SEPARATOR};
use notify::event::CreateKind;
use watchexec::{
event::{filekind::*, Event, Tag},
paths::summarise_events_to_env,
};
use watchexec::paths::summarise_events_to_env;
use watchexec_events::{filekind::*, Event, Tag};
#[cfg(unix)]
const ENV_SEP: &str = ":";

View File

@ -2,24 +2,16 @@ use std::time::Duration;
use miette::Result;
use tokio::time::sleep;
use watchexec::{
config::{InitConfig, RuntimeConfig},
ErrorHook, Watchexec,
};
use watchexec::{ErrorHook, Watchexec};
#[tokio::main]
async fn main() -> Result<()> {
tracing_subscriber::fmt::init();
let mut init = InitConfig::default();
init.on_error(|err: ErrorHook| async move {
let wx = Watchexec::default();
wx.config.on_error(|err: ErrorHook| {
eprintln!("Watchexec Runtime Error: {}", err.error);
Ok::<(), std::convert::Infallible>(())
});
let runtime = RuntimeConfig::default();
let wx = Watchexec::new(init, runtime)?;
wx.main();
// TODO: induce an error here

View File

@ -15,8 +15,8 @@ rust-version = "1.58.0"
edition = "2021"
[dependencies]
futures = "0.3.21"
tokio = { version = "1.24.2", features = ["fs"] }
futures = "0.3.29"
tokio = { version = "1.33.0", features = ["fs"] }
tokio-stream = { version = "0.1.9", features = ["fs"] }
[dev-dependencies]

View File

@ -19,16 +19,16 @@ version = "5.3.0"
optional = true
[dependencies.thiserror]
version = "1.0.26"
version = "1.0.50"
optional = true
[dependencies.serde]
version = "1.0.152"
version = "1.0.183"
optional = true
features = ["derive"]
[target.'cfg(unix)'.dependencies.nix]
version = "0.26.2"
version = "0.27.1"
features = ["signal"]
[features]

View File

@ -0,0 +1,5 @@
# Changelog
## Next (YYYY-MM-DD)
- Initial release as a separate crate.

View File

@ -0,0 +1,45 @@
[package]
name = "watchexec-supervisor"
version = "0.1.0"
authors = ["Félix Saparelli <felix@passcod.name>"]
license = "Apache-2.0 OR MIT"
description = "Watchexec's process supervisor component"
keywords = ["process", "command", "supervisor", "watchexec"]
documentation = "https://docs.rs/watchexec-supervisor"
repository = "https://github.com/watchexec/watchexec"
readme = "README.md"
rust-version = "1.58.0"
edition = "2021"
[dependencies]
futures = "0.3.29"
tracing = "0.1.40"
[dependencies.command-group]
version = "5.0.1"
features = ["with-tokio"]
[dependencies.tokio]
version = "1.33.0"
default-features = false
features = ["macros", "process", "rt", "sync", "time"]
[dependencies.watchexec-events]
version = "1.0.0"
default-features = false
path = "../events"
[dependencies.watchexec-signals]
version = "1.0.0"
default-features = false
path = "../signals"
[target.'cfg(unix)'.dependencies.nix]
version = "0.27.1"
features = ["signal"]
[dev-dependencies]
boxcar = "0.2.4"

View File

@ -0,0 +1,15 @@
[![Crates.io page](https://badgen.net/crates/v/watchexec-supervisor)](https://crates.io/crates/watchexec-supervisor)
[![API Docs](https://docs.rs/watchexec-supervisor/badge.svg)][docs]
[![Crate license: Apache 2.0](https://badgen.net/badge/license/Apache%202.0)][license]
[![CI status](https://github.com/watchexec/watchexec/actions/workflows/check.yml/badge.svg)](https://github.com/watchexec/watchexec/actions/workflows/check.yml)
# Supervisor
_Watchexec's process supervisor._
- **[API documentation][docs]**.
- Licensed under [Apache 2.0][license].
- Status: maintained.
[docs]: https://docs.rs/watchexec-supervisor
[license]: ../../LICENSE

View File

@ -0,0 +1,10 @@
pre-release-commit-message = "release: supervisor v{{version}}"
tag-prefix = "supervisor-"
tag-message = "watchexec-supervisor {{version}}"
[[pre-release-replacements]]
file = "CHANGELOG.md"
search = "^## Next.*$"
replace = "## Next (YYYY-MM-DD)\n\n## v{{version}} ({{date}})"
prerelease = true
max = 1

View File

@ -0,0 +1,72 @@
//! Command construction and configuration.
#[doc(inline)]
pub use self::{program::Program, shell::Shell};
mod conversions;
mod program;
mod shell;
/// A command to execute.
///
/// # Example
///
/// ```
/// # use watchexec_supervisor::command::{Command, Program};
/// Command {
/// program: Program::Exec {
/// prog: "make".into(),
/// args: vec!["check".into()],
/// },
/// options: Default::default(),
/// };
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Command {
/// Program to execute for this command.
pub program: Program,
/// Options for spawning the program.
pub options: SpawnOptions,
}
/// Options set when constructing or spawning a command.
///
/// It's recommended to use the [`Default`] implementation for this struct, and only set the options
/// you need to change, so your code stays compatible as new options are added in future.
///
/// # Examples
///
/// ```
/// # use watchexec_supervisor::command::{Command, Program, SpawnOptions};
/// Command {
/// program: Program::Exec {
/// prog: "make".into(),
/// args: vec!["check".into()],
/// },
/// options: SpawnOptions {
/// grouped: true,
/// ..Default::default()
/// },
/// };
/// ```
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
pub struct SpawnOptions {
/// Run the program in a new process group.
///
/// This will use either Unix [process groups] or Windows [Job Objects] via the
/// [`command-group`](command_group) crate.
///
/// [process groups]: https://en.wikipedia.org/wiki/Process_group
/// [Job Objects]: https://en.wikipedia.org/wiki/Object_Manager_(Windows)
pub grouped: bool,
/// Reset the signal mask of the process before we spawn it.
///
/// By default, the signal mask of the process is inherited from the parent process. This means
/// that if the parent process has blocked any signals, the child process will also block those
/// signals. This can cause problems if the child process is expecting to receive those signals.
///
/// This is only supported on Unix systems.
pub reset_sigmask: bool,
}
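Following the struct's own advice above, a hedged sketch that sets only the option it needs and defaults the rest (the program name is made up for illustration):

use watchexec_supervisor::command::{Command, Program, SpawnOptions};

fn main() {
    let _cmd = Command {
        program: Program::Exec {
            prog: "my-server".into(), // hypothetical program, for illustration only
            args: vec!["--port".into(), "8080".into()],
        },
        options: SpawnOptions {
            reset_sigmask: true, // clear the inherited signal mask before exec (Unix only)
            ..Default::default()
        },
    };
}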

View File

@ -0,0 +1,100 @@
use std::fmt;
use tokio::process::Command as TokioCommand;
use tracing::trace;
use super::{Command, Program};
impl Command {
/// Obtain a [`tokio::process::Command`].
pub fn to_spawnable(&self) -> TokioCommand {
trace!(program=?self.program, "constructing command");
#[cfg_attr(not(unix), allow(unused_mut))]
let mut cmd = match &self.program {
Program::Exec { prog, args, .. } => {
let mut c = TokioCommand::new(prog);
c.args(args);
c
}
Program::Shell {
shell,
args,
command,
} => {
let mut c = TokioCommand::new(shell.prog.clone());
// Avoid quoting issues on Windows by using raw_arg everywhere
#[cfg(windows)]
{
for opt in &shell.options {
c.raw_arg(opt);
}
if let Some(progopt) = &shell.program_option {
c.raw_arg(progopt);
}
c.raw_arg(command);
for arg in args {
c.raw_arg(arg);
}
}
#[cfg(not(windows))]
{
c.args(shell.options.clone());
if let Some(progopt) = &shell.program_option {
c.arg(progopt);
}
c.arg(command);
for arg in args {
c.arg(arg);
}
}
c
}
};
#[cfg(unix)]
if self.options.reset_sigmask {
use nix::sys::signal::{sigprocmask, SigSet, SigmaskHow};
unsafe {
cmd.pre_exec(|| {
let mut oldset = SigSet::empty();
let newset = SigSet::all();
trace!(unblocking=?newset, "resetting process sigmask");
sigprocmask(SigmaskHow::SIG_UNBLOCK, Some(&newset), Some(&mut oldset))?;
trace!(?oldset, "sigmask reset");
Ok(())
});
}
}
cmd
}
}
impl fmt::Display for Program {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Exec { prog, args, .. } => {
write!(f, "{}", prog.display())?;
for arg in args {
write!(f, " {arg}")?;
}
Ok(())
}
Self::Shell { command, .. } => {
write!(f, "{command}")
}
}
}
}
impl fmt::Display for Command {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.program)
}
}
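A hedged usage sketch for `to_spawnable()`: convert the supervisor's `Command` into a `tokio::process::Command` and run it once directly, outside the supervisor (the `Job` API further down is the supported path for supervised runs). This assumes `to_spawnable` is part of the crate's public surface.

use watchexec_supervisor::command::{Command, Program};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let cmd = Command {
        program: Program::Exec {
            prog: "echo".into(),
            args: vec!["hello".into()],
        },
        options: Default::default(),
    };

    // `to_spawnable()` only builds the tokio command; spawning it is up to the caller here.
    let status = cmd.to_spawnable().status().await?;
    println!("exited with {status}");
    Ok(())
}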

View File

@ -0,0 +1,37 @@
use std::path::PathBuf;
use super::Shell;
/// A single program call.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub enum Program {
/// A raw program call: the path or name of a program and its argument list.
Exec {
/// Path or name of the program.
prog: PathBuf,
/// The arguments to pass.
args: Vec<String>,
},
/// A shell program: a string which is to be executed by a shell.
///
/// (Tip: in general, a shell will handle its own job control, so there's no inherent need to
/// set `grouped: true` at the [`Command`](super::Command) level.)
Shell {
/// The shell to run.
shell: Shell,
/// The command line to pass to the shell.
command: String,
/// The arguments to pass to the shell invocation.
///
/// This may not be supported by all shells. Note that some shells require the use of `--`
/// for disambiguation: this is not handled by Watchexec, and will need to be the first
/// item in this vec if desired.
///
/// This appends the values within to the shell process invocation.
args: Vec<String>,
},
}
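For the `Shell` variant documented above, a hedged construction sketch passing extra arguments through the shell, with the leading `--` that some shells need for disambiguation:

use watchexec_supervisor::command::{Program, Shell};

fn main() {
    let _program = Program::Shell {
        shell: Shell::new("bash"),
        command: "cargo test".into(),
        // Appended to the shell invocation; `--` comes first, as noted in the field docs.
        args: vec!["--".into(), "--quiet".into()],
    };
}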

View File

@ -0,0 +1,40 @@
use std::{borrow::Cow, ffi::OsStr, path::PathBuf};
/// How to call the shell used to run shelled programs.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Shell {
/// Path or name of the shell.
pub prog: PathBuf,
/// Additional options or arguments to pass to the shell.
///
/// These will be inserted before the `program_option` immediately preceding the program string.
pub options: Vec<String>,
/// The syntax of the option which precedes the program string.
///
/// For most shells, this is `-c`. On Windows, CMD.EXE prefers `/C`. If this is `None`, then no
/// option is prepended; this may be useful for non-shell or non-standard shell programs.
pub program_option: Option<Cow<'static, OsStr>>,
}
impl Shell {
/// Shorthand for most shells, using the `-c` convention.
pub fn new(name: impl Into<PathBuf>) -> Self {
Self {
prog: name.into(),
options: Vec::new(),
program_option: Some(Cow::Borrowed(OsStr::new("-c"))),
}
}
#[cfg(windows)]
/// Shorthand for the CMD.EXE shell.
pub fn cmd() -> Self {
Self {
prog: "CMD.EXE".into(),
options: Vec::new(),
program_option: Some(Cow::Borrowed(OsStr::new("/C"))),
}
}
}
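Two hedged construction sketches based on the fields above: the usual `-c` convention via `Shell::new`, and a non-standard runner that takes the program string as a plain argument with no `program_option`. The runner name is made up for illustration.

use watchexec_supervisor::command::Shell;

fn main() {
    // Standard POSIX-style shell: `bash -c "<command>"`.
    let _bash = Shell::new("bash");

    // Hypothetical non-standard runner: `my-runner --quiet "<command>"`, no `-c`-style option.
    let _custom = Shell {
        prog: "my-runner".into(),
        options: vec!["--quiet".into()],
        program_option: None,
    };
}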

View File

@ -0,0 +1,16 @@
//! Error types.
use std::{
io::Error,
sync::{Arc, OnceLock},
};
/// Convenience type for a [`std::io::Error`] which can be shared across threads.
pub type SyncIoError = Arc<OnceLock<Error>>;
/// Make a [`SyncIoError`] from a [`std::io::Error`].
pub fn sync_io_error(err: Error) -> SyncIoError {
let lock = OnceLock::new();
lock.set(err).expect("unreachable: lock was just created");
Arc::new(lock)
}
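A hedged sketch of how `sync_io_error` might be consumed, assuming the module is exported as `errors`: wrap a spawn failure once, clone the handle across threads, and read it back with `OnceLock::get`.

use std::io::{Error, ErrorKind};
use watchexec_supervisor::errors::sync_io_error;

fn main() {
    let shared = sync_io_error(Error::new(ErrorKind::NotFound, "program not found"));

    // Clones share the same underlying error; `get()` reads it back without taking ownership.
    let for_logging = shared.clone();
    if let Some(err) = for_logging.get() {
        eprintln!("spawn failed: {err}");
    }
}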

View File

@ -0,0 +1,71 @@
//! A flag that can be raised to wake a task.
//!
//! Copied wholesale from <https://docs.rs/futures/latest/futures/task/struct.AtomicWaker.html>
//! unfortunately we're not aware of a crated version!
use std::{
pin::Pin,
sync::{
atomic::{AtomicBool, Ordering::Relaxed},
Arc,
},
};
use futures::{
future::Future,
task::{AtomicWaker, Context, Poll},
};
#[derive(Debug)]
struct Inner {
waker: AtomicWaker,
set: AtomicBool,
}
#[derive(Clone, Debug)]
pub struct Flag(Arc<Inner>);
impl Default for Flag {
fn default() -> Self {
Self::new(false)
}
}
impl Flag {
pub fn new(value: bool) -> Self {
Self(Arc::new(Inner {
waker: AtomicWaker::new(),
set: AtomicBool::new(value),
}))
}
pub fn raised(&self) -> bool {
self.0.set.load(Relaxed)
}
pub fn raise(&self) {
self.0.set.store(true, Relaxed);
self.0.waker.wake();
}
}
impl Future for Flag {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
// quick check to avoid registration if already done.
if self.0.set.load(Relaxed) {
return Poll::Ready(());
}
self.0.waker.register(cx.waker());
// Need to check condition **after** `register` to avoid a race
// condition that would result in lost notifications.
if self.0.set.load(Relaxed) {
Poll::Ready(())
} else {
Poll::Pending
}
}
}
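Since `Flag` is a crate-internal utility rather than public API, here is a hedged, test-style sketch of the intended usage: one task awaits the flag while another raises it to wake the waiter.

#[tokio::test]
async fn flag_wakes_waiter() {
    let flag = Flag::default();

    // The clone shares the same inner state, so awaiting it observes `raise()` below.
    let waiter = {
        let flag = flag.clone();
        tokio::spawn(async move { flag.await })
    };

    flag.raise(); // wakes the spawned task via the registered waker
    waiter.await.unwrap();
    assert!(flag.raised());
}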

View File

@ -0,0 +1,31 @@
//! Job supervision.
#[doc(inline)]
pub use self::{
job::Job,
messages::{Control, Ticket},
state::CommandState,
task::JobTaskContext,
};
#[cfg(test)]
pub(crate) use self::{
priority::Priority,
testchild::{TestChild, TestChildCall},
};
#[doc(inline)]
pub use task::start_job;
#[allow(clippy::module_inception)]
mod job;
mod messages;
mod priority;
mod state;
mod task;
#[cfg(test)]
mod testchild;
#[cfg(test)]
mod test;

View File

@ -0,0 +1,351 @@
use std::{future::Future, sync::Arc, time::Duration};
use tokio::process::Command as TokioCommand;
use watchexec_signals::Signal;
use crate::{command::Command, errors::SyncIoError, flag::Flag};
use super::{
messages::{Control, ControlMessage, Ticket},
priority::{Priority, PrioritySender},
JobTaskContext,
};
/// A handle to a job task spawned in the supervisor.
///
/// A job is a task which manages a [`Command`]. It is responsible for spawning the command's
/// program, for handling messages which control it, for managing the program's lifetime, and for
/// collecting its exit status and some timing information.
///
/// Most of the methods here queue [`Control`]s to the job task and return [`Ticket`]s. Controls
/// execute in order, except where noted. Tickets are futures which resolve when the corresponding
/// control has been run. Unlike most futures, tickets don't need to be polled for controls to make
/// progress; the future is only used to signal completion. Dropping a ticket will not drop the
/// control, so it's safe to do so if you don't care about when the control completes.
///
/// Note that controls are not guaranteed to run, for example if the job task stops or panics before a
/// control is processed. If a job task stops gracefully, all pending tickets will resolve
/// immediately. If a job task panics (outside of hooks, panics are bugs!), pending tickets will
/// never resolve.
///
/// This struct is cloneable (internally it is made of Arcs). Dropping the last instance of a Job
/// will close the job's control queue, which will cause the job task to stop gracefully. Note that
/// a task graceful stop is not the same as a graceful stop of the contained command; when the job
/// drops, the command will be dropped in turn, and forcefully terminated via `kill_on_drop`.
#[derive(Debug, Clone)]
pub struct Job {
pub(crate) command: Arc<Command>,
pub(crate) control_queue: PrioritySender,
/// Set to true when the command task has stopped gracefully.
pub(crate) gone: Flag,
}
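A hedged lifecycle sketch tying the above together, assuming `start_job` returns the `Job` handle plus the join handle of the job task (as the doc examples further down suggest): queue controls, await their tickets, then drop the handle to let the task stop gracefully.

use std::sync::Arc;
use watchexec_supervisor::command::{Command, Program};
use watchexec_supervisor::job::start_job;

#[tokio::main]
async fn main() {
    let (job, task) = start_job(Arc::new(Command {
        program: Program::Exec {
            prog: "/bin/date".into(),
            args: Vec::new(),
        },
        options: Default::default(),
    }));

    job.start().await;   // ticket resolves once the Start control has run
    job.to_wait().await; // resolves when the spawned program ends

    drop(job);           // last handle gone: the control queue closes, the task stops gracefully
    let _ = task.await;  // join handle of the job task (assumed return shape)
}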
impl Job {
/// The [`Command`] this job is managing.
pub fn command(&self) -> Arc<Command> {
self.command.clone()
}
/// If this job is dead.
pub fn is_dead(&self) -> bool {
self.gone.raised()
}
fn prepare_control(&self, control: Control) -> (Ticket, ControlMessage) {
let done = Flag::default();
(
Ticket {
job_gone: self.gone.clone(),
control_done: done.clone(),
},
ControlMessage { control, done },
)
}
pub(crate) fn send_controls<const N: usize>(
&self,
controls: [Control; N],
priority: Priority,
) -> Ticket {
if N == 0 || self.gone.raised() {
Ticket::cancelled()
} else if N == 1 {
let control = controls.into_iter().next().expect("UNWRAP: N > 0");
let (ticket, control) = self.prepare_control(control);
self.control_queue.send(control, priority);
ticket
} else {
let mut last_ticket = None;
for control in controls {
let (ticket, control) = self.prepare_control(control);
last_ticket = Some(ticket);
self.control_queue.send(control, priority);
}
last_ticket.expect("UNWRAP: N > 0")
}
}
/// Send a control message to the command.
///
/// All control messages are queued in the order they're sent and processed in order.
///
/// In general prefer using the other methods on this struct rather than sending [`Control`]s
/// directly.
pub fn control(&self, control: Control) -> Ticket {
self.send_controls([control], Priority::Normal)
}
/// Start the command if it's not running.
pub fn start(&self) -> Ticket {
self.control(Control::Start)
}
/// Stop the command if it's running and wait for completion.
///
/// If you don't want to wait for completion, use `signal(Signal::ForceStop)` instead.
pub fn stop(&self) -> Ticket {
self.control(Control::Stop)
}
/// Gracefully stop the command if it's running.
///
/// The command will be sent `signal` and then given `grace` time before being forcefully
/// terminated. If `grace` is zero, that still happens, but the command is terminated forcefully
/// on the next "tick" of the supervisor loop, which doesn't leave the process a lot of time to
/// do anything.
pub fn stop_with_signal(&self, signal: Signal, grace: Duration) -> Ticket {
if cfg!(unix) {
self.control(Control::GracefulStop { signal, grace })
} else {
self.stop()
}
}
/// Restart the command if it's running, or start it if it's not.
pub fn restart(&self) -> Ticket {
self.send_controls([Control::Stop, Control::Start], Priority::Normal)
}
/// Gracefully restart the command if it's running, or start it if it's not.
///
/// The command will be sent `signal` and then given `grace` time before being forcefully
/// terminated. If `grace` is zero, that still happens, but the command is terminated forcefully
/// on the next "tick" of the supervisor loop, which doesn't leave the process a lot of time to
/// do anything.
pub fn restart_with_signal(&self, signal: Signal, grace: Duration) -> Ticket {
if cfg!(unix) {
self.send_controls(
[Control::GracefulStop { signal, grace }, Control::Start],
Priority::Normal,
)
} else {
self.restart()
}
}
/// Restart the command if it's running, but don't start it if it's not.
pub fn try_restart(&self) -> Ticket {
self.control(Control::TryRestart)
}
/// Restart the command if it's running, but don't start it if it's not.
///
/// The command will be sent `signal` and then given `grace` time before being forcefully
/// terminated. If `grace` is zero, that still happens, but the command is terminated forcefully
/// on the next "tick" of the supervisor loop, which doesn't leave the process a lot of time to
/// do anything.
pub fn try_restart_with_signal(&self, signal: Signal, grace: Duration) -> Ticket {
if cfg!(unix) {
self.control(Control::TryGracefulRestart { signal, grace })
} else {
self.try_restart()
}
}
/// Send a signal to the command.
///
/// Sends a signal to the current program, if there is one. If there isn't, this is a no-op.
///
/// On Windows, this is a no-op for all signals but [`Signal::ForceStop`], which tries to stop
/// the command like a `stop()` would, but doesn't wait for completion. This is because Windows
/// doesn't have signals; in future [`Hangup`](Signal::Hangup), [`Interrupt`](Signal::Interrupt),
/// and [`Terminate`](Signal::Terminate) may be implemented using [GenerateConsoleCtrlEvent],
/// see [tracking issue #219](https://github.com/watchexec/watchexec/issues/219).
///
/// [GenerateConsoleCtrlEvent]: https://learn.microsoft.com/en-us/windows/console/generateconsolectrlevent
pub fn signal(&self, sig: Signal) -> Ticket {
self.control(Control::Signal(sig))
}
/// Stop the command, then mark it for garbage collection.
///
/// The underlying control messages are sent like normal, so they wait for all pending controls
/// to process. If you want to delete the command immediately, use `delete_now()`.
pub fn delete(&self) -> Ticket {
self.send_controls([Control::Stop, Control::Delete], Priority::Normal)
}
/// Stop the command immediately, then mark it for garbage collection.
///
/// The underlying control messages are sent with higher priority than normal, so they bypass
/// all others. If you want to delete after all current controls are processed, use `delete()`.
pub fn delete_now(&self) -> Ticket {
self.send_controls([Control::Stop, Control::Delete], Priority::Urgent)
}
/// Get a future which resolves when the command ends.
///
/// If the command is not running, the future resolves immediately.
///
/// The underlying control message is sent with higher priority than normal, so it targets the
/// actively running command, not the one that will be running after the rest of the controls
/// get done; note that this may still be racy if the command ends between the time the message is
/// sent and the time it's processed.
pub fn to_wait(&self) -> Ticket {
self.send_controls([Control::NextEnding], Priority::High)
}
/// Run an arbitrary function.
///
/// The function is given [`&JobTaskContext`](JobTaskContext), which contains the state of the
/// currently executing, next-to-start, or just-finished command, as well as the final state of
/// the _previous_ run of the command.
///
/// Technically, some operations can be done through a `&self` shared borrow on the running
/// command's [`ErasedChild`](command_group::tokio::ErasedChild), but this library recommends
/// against taking advantage of this: prefer using the methods here instead, so that the
/// supervisor can keep track of what's going on.
pub fn run(&self, fun: impl FnOnce(&JobTaskContext<'_>) + Send + Sync + 'static) -> Ticket {
self.control(Control::SyncFunc(Box::new(fun)))
}
/// Run an arbitrary function and await the returned future.
///
/// The function is given [`&JobTaskContext`](JobTaskContext), which contains the state of the
/// currently executing, next-to-start, or just-finished command, as well as the final state of
/// the _previous_ run of the command.
///
/// Technically, some operations can be done through a `&self` shared borrow on the running
/// command's [`ErasedChild`](command_group::tokio::ErasedChild), but this library recommends
/// against taking advantage of this: prefer using the methods here instead, so that the
/// supervisor can keep track of what's going on.
///
/// A gotcha when using this method is that the future returned by the function can live longer
/// than the `&JobTaskContext` it was given, so you can't bring the context into the async block
/// and instead must clone or copy the parts you need beforehand, in the sync portion.
///
/// For example, this won't compile:
///
/// ```compile_fail
/// # use std::sync::Arc;
/// # use tokio::sync::mpsc;
/// # use watchexec_supervisor::command::{Command, Program};
/// # use watchexec_supervisor::job::{CommandState, start_job};
/// #
/// # let (job, _task) = start_job(Arc::new(Command { program: Program::Exec { prog: "/bin/date".into(), args: Vec::new() }.into(), options: Default::default() }));
/// let (channel, receiver) = mpsc::channel(10);
/// job.run_async(|context| Box::new(async move {
/// if let CommandState::Finished { status, .. } = context.current {
/// channel.send(status).await.ok();
/// }
/// }));
/// ```
///
/// But this does:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use tokio::sync::mpsc;
/// # use watchexec_supervisor::command::{Command, Program};
/// # use watchexec_supervisor::job::{CommandState, start_job};
/// #
/// # let (job, _task) = start_job(Arc::new(Command { program: Program::Exec { prog: "/bin/date".into(), args: Vec::new() }.into(), options: Default::default() }));
/// let (channel, receiver) = mpsc::channel(10);
/// job.run_async(|context| {
/// let status = if let CommandState::Finished { status, .. } = context.current {
/// Some(*status)
/// } else {
/// None
/// };
///
/// Box::new(async move {
/// if let Some(status) = status {
/// channel.send(status).await.ok();
/// }
/// })
/// });
/// ```
pub fn run_async(
&self,
fun: impl (FnOnce(&JobTaskContext<'_>) -> Box<dyn Future<Output = ()> + Send + Sync>)
+ Send
+ Sync
+ 'static,
) -> Ticket {
self.control(Control::AsyncFunc(Box::new(fun)))
}
/// Set the spawn hook.
///
/// The hook will be called once per process spawned, before the process is spawned. It's given
/// a mutable reference to the [`tokio::process::Command`] and some context; it can modify the
/// command as it sees fit.
pub fn set_spawn_hook(
&self,
fun: impl Fn(&mut TokioCommand, &JobTaskContext<'_>) + Send + Sync + 'static,
) -> Ticket {
self.control(Control::SetSyncSpawnHook(Arc::new(fun)))
}
/// Set the spawn hook (async version).
///
/// The hook will be called once per process spawned, before the process is spawned. It's given
/// a mutable reference to the [`tokio::process::Command`] and some context; it can modify the
/// command as it sees fit.
///
/// A gotcha when using this method is that the future returned by the function can live longer
/// than the references it was given, so you can't bring the command or context into the async
/// block and instead must clone or copy the parts you need beforehand, in the sync portion. See
/// the documentation for [`run_async`](Job::run_async) for an example.
///
/// Fortunately, async spawn hooks should be exceedingly rare: there are very few things to do in
/// spawn hooks that can't be done in the simpler sync version.
pub fn set_spawn_async_hook(
&self,
fun: impl (Fn(&mut TokioCommand, &JobTaskContext<'_>) -> Box<dyn Future<Output = ()> + Send + Sync>)
+ Send
+ Sync
+ 'static,
) -> Ticket {
self.control(Control::SetAsyncSpawnHook(Arc::new(fun)))
}
/// Unset any spawn hook.
pub fn unset_spawn_hook(&self) -> Ticket {
self.control(Control::UnsetSpawnHook)
}
/// Set the error handler.
pub fn set_error_handler(&self, fun: impl Fn(SyncIoError) + Send + Sync + 'static) -> Ticket {
self.control(Control::SetSyncErrorHandler(Arc::new(fun)))
}
/// Set the error handler (async version).
pub fn set_async_error_handler(
&self,
fun: impl (Fn(SyncIoError) -> Box<dyn Future<Output = ()> + Send + Sync>)
+ Send
+ Sync
+ 'static,
) -> Ticket {
self.control(Control::SetAsyncErrorHandler(Arc::new(fun)))
}
/// Unset the error handler.
///
/// Errors will be silently ignored.
pub fn unset_error_handler(&self) -> Ticket {
self.control(Control::UnsetErrorHandler)
}
}
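The control methods above compose naturally; a hedged sketch, reusing a `job` handle obtained as in the earlier lifecycle example (the environment variable name is made up for illustration):

use std::time::Duration;
use watchexec_signals::Signal;

async fn configure(job: &watchexec_supervisor::job::Job) {
    // Log spawn failures instead of silently ignoring them.
    job.set_error_handler(|err| eprintln!("spawn error: {err:?}")).await;

    // Tweak the process before each spawn, e.g. to add an environment variable.
    job.set_spawn_hook(|command, _context| {
        command.env("RUN_BY", "watchexec-supervisor"); // hypothetical variable name
    })
    .await;

    job.start().await;

    // Ask politely with SIGTERM, then force-kill after five seconds if needed.
    job.stop_with_signal(Signal::Terminate, Duration::from_secs(5))
        .await;
}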

View File

@ -0,0 +1,148 @@
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use futures::{future::select, FutureExt};
use watchexec_signals::Signal;
use crate::flag::Flag;
use super::task::{
AsyncErrorHandler, AsyncFunc, AsyncSpawnHook, SyncErrorHandler, SyncFunc, SyncSpawnHook,
};
/// The underlying control message types for [`Job`](super::Job).
///
/// You may use [`Job::control()`](super::Job::control()) to send these messages directly, but in
/// general should prefer the higher-level methods on [`Job`](super::Job) itself.
pub enum Control {
/// For [`Job::start()`](super::Job::start()).
Start,
/// For [`Job::stop()`](super::Job::stop()).
Stop,
/// For [`Job::stop_with_signal()`](super::Job::stop_with_signal()).
GracefulStop {
/// Signal to send immediately
signal: Signal,
/// Time to wait before forceful termination
grace: Duration,
},
/// For [`Job::try_restart()`](super::Job::try_restart()).
TryRestart,
/// For [`Job::try_restart_with_signal()`](super::Job::try_restart_with_signal()).
TryGracefulRestart {
/// Signal to send immediately
signal: Signal,
/// Time to wait before forceful termination and restart
grace: Duration,
},
/// Internal implementation detail of [`Control::TryGracefulRestart`].
ContinueTryGracefulRestart,
/// For [`Job::signal()`](super::Job::signal()).
Signal(Signal),
/// For [`Job::delete()`](super::Job::delete()) and [`Job::delete_now()`](super::Job::delete_now()).
Delete,
/// For [`Job::to_wait()`](super::Job::to_wait()).
NextEnding,
/// For [`Job::run()`](super::Job::run()).
SyncFunc(SyncFunc),
/// For [`Job::run_async()`](super::Job::run_async()).
AsyncFunc(AsyncFunc),
/// For [`Job::set_spawn_hook()`](super::Job::set_spawn_hook()).
SetSyncSpawnHook(SyncSpawnHook),
/// For [`Job::set_spawn_async_hook()`](super::Job::set_spawn_async_hook()).
SetAsyncSpawnHook(AsyncSpawnHook),
/// For [`Job::unset_spawn_hook()`](super::Job::unset_spawn_hook()).
UnsetSpawnHook,
/// For [`Job::set_error_handler()`](super::Job::set_error_handler()).
SetSyncErrorHandler(SyncErrorHandler),
/// For [`Job::set_async_error_handler()`](super::Job::set_async_error_handler()).
SetAsyncErrorHandler(AsyncErrorHandler),
/// For [`Job::unset_error_handler()`](super::Job::unset_error_handler()).
UnsetErrorHandler,
}
impl std::fmt::Debug for Control {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Start => f.debug_struct("Start").finish(),
Self::Stop => f.debug_struct("Stop").finish(),
Self::GracefulStop { signal, grace } => f
.debug_struct("GracefulStop")
.field("signal", signal)
.field("grace", grace)
.finish(),
Self::TryRestart => f.debug_struct("TryRestart").finish(),
Self::TryGracefulRestart { signal, grace } => f
.debug_struct("TryGracefulRestart")
.field("signal", signal)
.field("grace", grace)
.finish(),
Self::ContinueTryGracefulRestart => {
f.debug_struct("ContinueTryGracefulRestart").finish()
}
Self::Signal(signal) => f.debug_struct("Signal").field("signal", signal).finish(),
Self::Delete => f.debug_struct("Delete").finish(),
Self::NextEnding => f.debug_struct("NextEnding").finish(),
Self::SyncFunc(_) => f.debug_struct("SyncFunc").finish_non_exhaustive(),
Self::AsyncFunc(_) => f.debug_struct("AsyncFunc").finish_non_exhaustive(),
Self::SetSyncSpawnHook(_) => f.debug_struct("SetSyncSpawnHook").finish_non_exhaustive(),
Self::SetAsyncSpawnHook(_) => {
f.debug_struct("SetSpawnAsyncHook").finish_non_exhaustive()
}
Self::UnsetSpawnHook => f.debug_struct("UnsetSpawnHook").finish(),
Self::SetSyncErrorHandler(_) => f
.debug_struct("SetSyncErrorHandler")
.finish_non_exhaustive(),
Self::SetAsyncErrorHandler(_) => f
.debug_struct("SetAsyncErrorHandler")
.finish_non_exhaustive(),
Self::UnsetErrorHandler => f.debug_struct("UnsetErrorHandler").finish(),
}
}
}
#[derive(Debug)]
pub(crate) struct ControlMessage {
pub control: Control,
pub done: Flag,
}
/// Lightweight future which resolves when the corresponding control has been run.
///
/// Unlike most futures, tickets don't need to be polled for controls to make progress; the future
/// is only used to signal completion. Dropping a ticket will not drop the control, so it's safe to
/// do so if you don't care about when the control completes.
///
/// Tickets can be cloned, and all clones will resolve at the same time.
#[derive(Debug, Clone)]
pub struct Ticket {
pub(crate) job_gone: Flag,
pub(crate) control_done: Flag,
}
impl Ticket {
pub(crate) fn cancelled() -> Self {
Self {
job_gone: Flag::new(true),
control_done: Flag::new(true),
}
}
}
impl Future for Ticket {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Pin::new(&mut select(self.job_gone.clone(), self.control_done.clone()).map(|_| ())).poll(cx)
}
}
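A hedged note on the cloning behaviour documented above: clones of a `Ticket` resolve together, so one control's completion can be awaited from several places.

async fn wait_twice(job: &watchexec_supervisor::job::Job) {
    let ticket = job.start();
    let mirror = ticket.clone(); // both resolve when the same Start control has run
    tokio::join!(ticket, mirror);
}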

Some files were not shown because too many files have changed in this diff.