diff --git a/Cargo.lock b/Cargo.lock
index 38ff0672..ed3c9c27 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4,29 +4,27 @@ version = 3

[[package]]
name = "addr2line"
-version = "0.17.0"
+version = "0.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b"
+checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97"
dependencies = [
 "gimli",
]

[[package]]
-name = "adler"
-version = "1.0.2"
+name = "addr2line"
+version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+checksum = "f4fa78e18c64fce05e902adecd7a5eed15a5e0a3439f7b0e169f0252214865e3"
+dependencies = [
+ "gimli",
+]

[[package]]
-name = "ahash"
-version = "0.7.6"
+name = "adler"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
-dependencies = [
- "getrandom",
- "once_cell",
- "version_check",
-]
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"

[[package]]
name = "ahash"
@@ -41,9 +39,9 @@ dependencies = [

[[package]]
name = "aho-corasick"
-version = "1.0.1"
+version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67fc08ce920c31afb70f013dcce1bfc3a3195de6a228474e45e1f145b36f8d04"
+checksum = "43f6cb1bf222025340178f382c426f13757b2960e89779dfcb319c32542a5a41"
dependencies = [
 "memchr",
]
@@ -63,11 +61,23 @@ dependencies = [
 "alloc-no-stdlib",
]

+[[package]]
+name = "allocator-api2"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9"
+
[[package]]
name = "ambient-authority"
-version = "0.0.1"
+version = "0.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec8ad6edb4840b78c5c3d88de606b22252d552b55f3a4699fbb10fc070ec3049"
+checksum = "e9d4ee0d472d1cd2e28c97dfa124b3d8d992e10eb0a035f33f5d12e3a177ba3b"
+
+[[package]]
+name = "android-tzdata"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"

[[package]]
name = "android_system_properties"
@@ -89,21 +99,21 @@ dependencies = [
 "anstyle-query",
 "anstyle-wincon",
 "colorchoice",
- "is-terminal 0.4.7",
+ "is-terminal",
 "utf8parse",
]

[[package]]
name = "anstyle"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41ed9a86bf92ae6580e0a31281f65a1b1d867c0cc68d5346e2ae128dddfa6a7d"
+checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd"

[[package]]
name = "anstyle-parse"
-version = "0.2.0"
+version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e765fd216e48e067936442276d1d57399e37bce53c264d6fefbe298080cb57ee"
+checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333"
dependencies = [
 "utf8parse",
]
@@ -149,10 +159,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6"

[[package]]
-name = "arrayvec"
-version = "0.7.2"
+name = "async-bincode"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
+checksum = "0688a53af69da2208017b6d68ea675f073fbbc2488e71cc9b40af48ad9404fc2"
+dependencies = [
+ "bincode",
+ "byteorder",
+ "bytes 1.4.0",
+ "futures-core",
+ "futures-sink",
+ "serde",
+ "tokio",
+]

[[package]]
name = "async-compression"
@@ -168,6 +187,26 @@ dependencies = [
 "tokio",
]

+[[package]]
+name = "async-io"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af"
+dependencies = [
+ "async-lock",
+ "autocfg",
+ "cfg-if",
+ "concurrent-queue",
+ "futures-lite",
+ "log",
+ "parking",
+ "polling",
+ "rustix 0.37.23",
+ "slab",
+ "socket2",
+ "waker-fn",
+]
+
[[package]]
name = "async-lock"
version = "2.7.0"
@@ -196,18 +235,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
name = "async-trait"
-version = "0.1.68"
+version = "0.1.71"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842"
+checksum = "a564d521dd56509c4c47480d00b80ee55f7e385ae48db5744c67ad50c92d2ebf"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -216,17 +255,6 @@ version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c59bdb34bc650a32731b31bd8f0829cc15d24a708ee31559e0bb34f2bc320cba"

-[[package]]
-name = "atty"
-version = "0.2.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
-dependencies = [
- "hermit-abi 0.1.19",
- "libc",
- "winapi",
-]
-
[[package]]
name = "autocfg"
version = "1.1.0"
@@ -256,7 +284,7 @@ dependencies = [
 "http",
 "hyper",
 "ring",
- "time 0.3.21",
+ "time 0.3.23",
 "tokio",
 "tower",
 "tracing",
@@ -426,7 +454,7 @@ dependencies = [
 "percent-encoding",
 "regex",
 "sha2",
- "time 0.3.21",
+ "time 0.3.23",
 "tracing",
]
@@ -566,7 +594,7 @@ dependencies = [
 "itoa",
 "num-integer",
 "ryu",
- "time 0.3.21",
+ "time 0.3.23",
]

[[package]]
@@ -643,6 +671,21 @@ dependencies = [
 "tower-service",
]

+[[package]]
+name = "backtrace"
+version = "0.3.68"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4319208da049c43661739c5fade2ba182f09d1dc2299b32298d3a31692b17e12"
+dependencies = [
+ "addr2line 0.20.0",
+ "cc",
+ "cfg-if",
+ "libc",
+ "miniz_oxide",
+ "object 0.31.1",
+ "rustc-demangle",
+]
+
[[package]]
name = "base64"
version = "0.13.1"
@@ -651,9 +694,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"

[[package]]
name = "base64"
-version = "0.21.1"
+version = "0.21.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f1e31e207a6b8fb791a38ea3105e6cb541f55e4d029902d3039a4ad07cc4105"
+checksum = "604178f6c5c21f02dc555784810edfb88d34ac2c73b2eae109655649ee73ce3d"

[[package]]
name = "base64-simd"
@@ -686,13 +729,13 @@ dependencies = [
 "lazy_static",
 "lazycell",
 "peeking_take_while",
- "prettyplease 0.2.6",
+ "prettyplease 0.2.10",
 "proc-macro2",
 "quote",
 "regex",
 "rustc-hash",
 "shlex",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -718,9 +761,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"

[[package]]
name = "bitflags"
-version = "2.3.1"
+version = "2.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84"
+checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"

[[package]]
name = "block-buffer"
@@ -795,6 +838,12 @@ version = "3.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"

+[[package]]
+name = "bytecount"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c676a478f63e9fa2dd5368a42f28bba0d6c560b775f38583c8bbaa7fcd67c9c"
+
[[package]]
name = "bytemuck"
version = "1.13.1"
@@ -812,7 +861,7 @@ checksum = "fdde5c9cd29ebd706ce1b35600920a33550e402fc998a2e53ad3b42c3c47a192"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -851,42 +900,53 @@ name = "bytesize"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38fcc2979eff34a4b84e1cf9a1e3da42a7d44b3b690a40cdcb23e3d556cfb2e5"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "camino"
+version = "1.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c530edf18f37068ac2d977409ed5cd50d53d73bc653c7647b48eb78976ac9ae2"
+dependencies = [
+ "serde",
+]

[[package]]
name = "cap-fs-ext"
-version = "0.26.1"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b0e103ce36d217d568903ad27b14ec2238ecb5d65bad2e756a8f3c0d651506e"
+checksum = "58bc48200a1a0fa6fba138b1802ad7def18ec1cdd92f7b2a04e21f1bd887f7b9"
dependencies = [
 "cap-primitives",
 "cap-std",
- "io-lifetimes 0.7.5",
- "windows-sys 0.36.1",
+ "io-lifetimes 1.0.11",
+ "windows-sys 0.48.0",
]

[[package]]
name = "cap-primitives"
-version = "0.26.1"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "af3f336aa91cce16033ed3c94ac91d98956c49b420e6d6cd0dd7d0e386a57085"
+checksum = "a4b6df5b295dca8d56f35560be8c391d59f0420f72e546997154e24e765e6451"
dependencies = [
 "ambient-authority",
 "fs-set-times",
 "io-extras",
- "io-lifetimes 0.7.5",
+ "io-lifetimes 1.0.11",
 "ipnet",
 "maybe-owned",
- "rustix 0.35.13",
- "winapi-util",
- "windows-sys 0.36.1",
- "winx",
+ "rustix 0.37.23",
+ "windows-sys 0.48.0",
+ "winx 0.35.1",
]

[[package]]
name = "cap-rand"
-version = "0.26.1"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d14b9606aa9550d34651bc481443203bc014237bdb992d201d2afa62d2ec6dea"
+checksum = "4d25555efacb0b5244cf1d35833d55d21abc916fff0eaad254b8e2453ea9b8ab"
dependencies = [
 "ambient-authority",
 "rand",
@@ -894,27 +954,48 @@ dependencies = [

[[package]]
name = "cap-std"
-version = "0.26.1"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c9d6e70b626eceac9d6fc790fe2d72cc3f2f7bc3c35f467690c54a526b0f56db"
+checksum = "3373a62accd150b4fcba056d4c5f3b552127f0ec86d3c8c102d60b978174a012"
dependencies = [
 "cap-primitives",
 "io-extras",
- "io-lifetimes 0.7.5",
- "ipnet",
- "rustix 0.35.13",
+ "io-lifetimes 1.0.11",
+ "rustix 0.37.23",
]

[[package]]
name = "cap-time-ext"
-version = "0.26.1"
+version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3a0524f7c4cff2ea547ae2b652bf7a348fd3e48f76556dc928d8b45ab2f1d50"
+checksum = "e95002993b7baee6b66c8950470e59e5226a23b3af39fc59c47fe416dd39821a"
dependencies = [
 "cap-primitives",
 "once_cell",
- "rustix 0.35.13",
- "winx",
+ "rustix 0.37.23",
+ "winx 0.35.1",
+]
+
+[[package]]
+name = "cargo-platform"
+version = "0.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cbdb825da8a5df079a43676dbe042702f1707b1109f713a01420fbb4cc71fa27"
+dependencies = [
+ "serde",
+]
+
+[[package]]
+name = "cargo_metadata"
+version = "0.14.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa"
+dependencies = [
+ "camino",
+ "cargo-platform",
+ "semver",
+ "serde",
+ "serde_json",
]

[[package]]
@@ -943,13 +1024,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

[[package]]
name = "chrono"
-version = "0.4.24"
+version = "0.4.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4e3c5919066adf22df73762e50cffcde3a758f2a848b113b586d1f86728b673b"
+checksum = "ec837a71355b28f6556dbd569b37b3f363091c0bd4b2e735674521b4c5fd9bc5"
dependencies = [
+ "android-tzdata",
 "iana-time-zone",
 "js-sys",
- "num-integer",
 "num-traits",
 "serde",
 "time 0.1.45",
@@ -970,9 +1051,9 @@ dependencies = [

[[package]]
name = "clap"
-version = "4.3.0"
+version = "4.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93aae7a4192245f70fe75dd9157fc7b4a5bf53e88d30bd4396f7d8f9284d5acc"
+checksum = "1640e5cc7fb47dbb8338fd471b105e7ed6c3cb2aeb00c2e067127ffd3764a05d"
dependencies = [
 "clap_builder",
 "clap_derive",
@@ -981,27 +1062,26 @@ dependencies = [

[[package]]
name = "clap_builder"
-version = "4.3.0"
+version = "4.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f423e341edefb78c9caba2d9c7f7687d0e72e89df3ce3394554754393ac3990"
+checksum = "98c59138d527eeaf9b53f35a77fcc1fad9d883116070c63d5de1c7dc7b00c72b"
dependencies = [
 "anstream",
 "anstyle",
- "bitflags 1.3.2",
 "clap_lex",
 "strsim",
]

[[package]]
name = "clap_derive"
-version = "4.3.0"
+version = "4.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "191d9573962933b4027f932c600cd252ce27a8ad5979418fe78e43c07996f27b"
+checksum = "b8cd2b2a819ad6eec39e8f1d6b53001af1e5469f8c177579cdaeb313115b825f"
dependencies = [
 "heck",
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -1010,12 +1090,48 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b"

+[[package]]
+name = "color-eyre"
+version = "0.6.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204"
+dependencies = [
+ "backtrace",
+ "color-spantrace",
+ "eyre",
+ "indenter",
+ "once_cell",
+ "owo-colors",
+ "tracing-error",
+]
+
+[[package]]
+name = "color-spantrace"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce"
+dependencies = [
+ "once_cell",
+ "owo-colors",
+ "tracing-core",
+ "tracing-error",
+]
+
[[package]]
name = "colorchoice"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7"

+[[package]]
+name = "concurrent-queue"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62ec6771ecfa0762d24683ee5a32ad78487a3d3afdc0fb8cae19d2c5deb50b7c"
+dependencies = [
+ "crossbeam-utils",
+]
+
[[package]]
name = "console"
version = "0.15.7"
@@ -1042,9 +1158,9 @@ dependencies = [

[[package]]
name = "console-subscriber"
-version = "0.1.9"
+version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57ab2224a0311582eb03adba4caaf18644f7b1f10a760803a803b9b605187fc7"
+checksum = "d4cf42660ac07fcebed809cfe561dd8730bcd35b075215e6479c516bcd0d11cb"
dependencies = [
 "console-api",
 "crossbeam-channel",
@@ -1079,7 +1195,7 @@ dependencies = [
 "log",
 "mime",
 "paste",
- "pin-project 1.1.0",
+ "pin-project 1.1.2",
 "serde",
 "serde_json",
 "tar",
@@ -1115,37 +1231,37 @@ dependencies = [

[[package]]
name = "cpufeatures"
-version = "0.2.7"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e4c1eaa2012c47becbbad2ab175484c2a84d1185b566fb2cc5b8707343dfe58"
+checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1"
dependencies = [
 "libc",
]

[[package]]
name = "cranelift-bforest"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b62c772976416112fa4484cbd688cb6fb35fd430005c1c586224fc014018abad"
+checksum = "182b82f78049f54d3aee5a19870d356ef754226665a695ce2fcdd5d55379718e"
dependencies = [
 "cranelift-entity",
]

[[package]]
name = "cranelift-codegen"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b40ed2dd13c2ac7e24f88a3090c68ad3414eb1d066a95f8f1f7b3b819cb4e46"
+checksum = "e7c027bf04ecae5b048d3554deb888061bc26f426afff47bf06d6ac933dce0a6"
dependencies = [
- "arrayvec",
 "bumpalo",
 "cranelift-bforest",
 "cranelift-codegen-meta",
 "cranelift-codegen-shared",
- "cranelift-egraph",
+ "cranelift-control",
 "cranelift-entity",
 "cranelift-isle",
 "gimli",
+ "hashbrown 0.13.2",
 "log",
 "regalloc2",
 "smallvec",
@@ -1154,47 +1270,42 @@ dependencies = [

[[package]]
name = "cranelift-codegen-meta"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bb927a8f1c27c34ee3759b6b0ffa528d2330405d5cc4511f0cab33fe2279f4b5"
+checksum = "649f70038235e4c81dba5680d7e5ae83e1081f567232425ab98b55b03afd9904"
dependencies = [
 "cranelift-codegen-shared",
]

[[package]]
name = "cranelift-codegen-shared"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "43dfa417b884a9ab488d95fd6b93b25e959321fe7bfd7a0a960ba5d7fb7ab927"
+checksum = "7a1d1c5ee2611c6a0bdc8d42d5d3dc5ce8bf53a8040561e26e88b9b21f966417"

[[package]]
-name = "cranelift-egraph"
-version = "0.90.1"
+name = "cranelift-control"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a66b39785efd8513d2cca967ede56d6cc57c8d7986a595c7c47d0c78de8dce"
+checksum = "da66a68b1f48da863d1d53209b8ddb1a6236411d2d72a280ffa8c2f734f7219e"
dependencies = [
- "cranelift-entity",
- "fxhash",
- "hashbrown 0.12.3",
- "indexmap",
- "log",
- "smallvec",
+ "arbitrary",
]

[[package]]
name = "cranelift-entity"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0637ffde963cb5d759bc4d454cfa364b6509e6c74cdaa21298add0ed9276f346"
+checksum = "9bd897422dbb66621fa558f4d9209875530c53e3c8f4b13b2849fbb667c431a6"
dependencies = [
 "serde",
]

[[package]]
name = "cranelift-frontend"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb72b8342685e850cb037350418f62cc4fc55d6c2eb9c7ca01b82f9f1a6f3d56"
+checksum = "05db883114c98cfcd6959f72278d2fec42e01ea6a6982cfe4f20e88eebe86653"
dependencies = [
 "cranelift-codegen",
 "log",
@@ -1204,15 +1315,15 @@ dependencies = [

[[package]]
name = "cranelift-isle"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "850579cb9e4b448f7c301f1e6e6cbad99abe3f1f1d878a4994cb66e33c6db8cd"
+checksum = "84559de86e2564152c87e299c8b2559f9107e9c6d274b24ebeb04fb0a5f4abf8"

[[package]]
name = "cranelift-native"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2d0a279e5bcba3e0466c734d8d8eb6bfc1ad29e95c37f3e4955b492b5616335e"
+checksum = "3f40b57f187f0fe1ffaf281df4adba2b4bc623a0f6651954da9f3c184be72761"
dependencies = [
 "cranelift-codegen",
 "libc",
@@ -1221,14 +1332,14 @@ dependencies = [

[[package]]
name = "cranelift-wasm"
-version = "0.90.1"
+version = "0.96.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6b8c5e7ffb754093fb89ec4bd4f9dbb9f1c955427299e334917d284745835c2"
+checksum = "f3eab6084cc789b9dd0b1316241efeb2968199fee709f4bb4fe0fb0923bb468b"
dependencies = [
 "cranelift-codegen",
 "cranelift-entity",
 "cranelift-frontend",
- "itertools",
+ "itertools 0.10.5",
 "log",
 "smallvec",
 "wasmparser",
@@ -1305,14 +1416,14 @@ dependencies = [

[[package]]
name = "crossbeam-epoch"
-version = "0.9.14"
+version = "0.9.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "46bd5f3f85273295a9d14aedfb86f6aadbff6d8f5295c4a9edb08e819dcf5695"
+checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7"
dependencies = [
 "autocfg",
 "cfg-if",
 "crossbeam-utils",
- "memoffset 0.8.0",
+ "memoffset 0.9.0",
 "scopeguard",
]
@@ -1328,9 +1439,9 @@ dependencies = [

[[package]]
name = "crossbeam-utils"
-version = "0.8.15"
+version = "0.8.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3c063cd8cc95f5c377ed0d4b49a4b21f632396ff690e8470c29b3359b346984b"
+checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294"
dependencies = [
 "cfg-if",
]
@@ -1351,15 +1462,24 @@ version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2e66c9d817f1720209181c316d28635c050fa304f9c79e47a520882661b7308"

+[[package]]
+name = "debugid"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d"
+dependencies = [
+ "uuid",
+]
+
[[package]]
name = "derive_arbitrary"
-version = "1.3.0"
+version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f3cdeb9ec472d588e539a818b2dee436825730da08ad0017c4b1a17676bdc8b7"
+checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 1.0.109",
+ "syn 2.0.25",
]

[[package]]
@@ -1414,6 +1534,15 @@ dependencies = [
 "winapi",
]

+[[package]]
+name = "doxygen-rs"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bff670ea0c9bbb8414e7efa6e23ebde2b8f520a7eef78273a3918cf1903e7505"
+dependencies = [
+ "phf",
+]
+
[[package]]
name = "either"
version = "1.8.1"
@@ -1464,22 +1593,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
dependencies = [
 "humantime",
- "is-terminal 0.4.7",
+ "is-terminal",
 "log",
 "regex",
 "termcolor",
]

[[package]]
-name = "errno"
-version = "0.2.8"
+name = "equivalent"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1"
-dependencies = [
- "errno-dragonfly",
- "libc",
- "winapi",
-]
+checksum = "88bffebc5d80432c9b140ee17875ff173a8ab62faad5b257da912bd2f6c1c0a1"

[[package]]
name = "errno"
@@ -1502,18 +1626,43 @@ dependencies = [
 "libc",
]

+[[package]]
+name = "error-chain"
+version = "0.12.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d2f06b9cac1506ece98fe3231e3cc9c4410ec3d5b1f24ae1c8946f0742cdefc"
+dependencies = [
+ "version_check",
+]
+
[[package]]
name = "event-listener"
version = "2.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0"

+[[package]]
+name = "eyre"
+version = "0.6.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb"
+dependencies = [
+ "indenter",
+ "once_cell",
+]
+
[[package]]
name = "fallible-iterator"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7"

+[[package]]
+name = "fallible-iterator"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649"
+
[[package]]
name = "fallible-streaming-iterator"
version = "0.1.9"
@@ -1529,6 +1678,17 @@ dependencies = [
 "instant",
]

+[[package]]
+name = "fd-lock"
+version = "4.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b0377f1edc77dbd1118507bc7a66e4ab64d2b90c66f90726dc801e73a8c68f9"
+dependencies = [
+ "cfg-if",
+ "rustix 0.38.3",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "file-per-thread-logger"
version = "0.1.6"
@@ -1575,7 +1735,7 @@ checksum = "2cd66269887534af4b0c3e3337404591daa8dc8b9b2b3db71f9523beb4bafb41"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -1601,22 +1761,22 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"

[[package]]
name = "form_urlencoded"
-version = "1.1.0"
+version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8"
+checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652"
dependencies = [
 "percent-encoding",
]

[[package]]
name = "fs-set-times"
-version = "0.17.1"
+version = "0.19.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a267b6a9304912e018610d53fe07115d8b530b160e85db4d2d3a59f3ddde1aec"
+checksum = "6d167b646a876ba8fda6b50ac645cfd96242553cbaf0ca4fccaa39afcbf0801f"
dependencies = [
- "io-lifetimes 0.7.5",
- "rustix 0.35.13",
- "windows-sys 0.36.1",
+ "io-lifetimes 1.0.11",
+ "rustix 0.38.3",
+ "windows-sys 0.48.0",
]

[[package]]
@@ -1667,6 +1827,21 @@ version = "0.3.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964"

+[[package]]
+name = "futures-lite"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce"
+dependencies = [
+ "fastrand",
+ "futures-core",
+ "futures-io",
+ "memchr",
+ "parking",
+ "pin-project-lite",
+ "waker-fn",
+]
+
[[package]]
name = "futures-macro"
version = "0.3.28"
@@ -1675,7 +1850,7 @@ checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -1729,6 +1904,19 @@ dependencies = [
 "byteorder",
]

+[[package]]
+name = "fxprof-processed-profile"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd"
+dependencies = [
+ "bitflags 2.3.3",
+ "debugid",
+ "fxhash",
+ "serde",
+ "serde_json",
+]
+
[[package]]
name = "generic-array"
version = "0.14.7"
@@ -1741,34 +1929,23 @@ dependencies = [

[[package]]
name = "getrandom"
-version = "0.2.9"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c85e1d9ab2eadba7e5040d4e09cbd6d072b76a557ad64e797c2cb9d4da21d7e4"
+checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
 "cfg-if",
 "libc",
 "wasi 0.11.0+wasi-snapshot-preview1",
]

-[[package]]
-name = "ghost"
-version = "0.1.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e77ac7b51b8e6313251737fcef4b1c01a2ea102bde68415b62c0ee9268fec357"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.16",
-]
-
[[package]]
name = "gimli"
-version = "0.26.2"
+version = "0.27.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d"
+checksum = "b6c80984affa11d98d1b88b66ac8853f143217b399d3c74116778ff8fdb4ed2e"
dependencies = [
- "fallible-iterator",
- "indexmap",
+ "fallible-iterator 0.2.0",
+ "indexmap 1.9.3",
 "stable_deref_trait",
]
@@ -1780,9 +1957,9 @@ checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b"

[[package]]
name = "h2"
-version = "0.3.19"
+version = "0.3.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d357c7ae988e7d2182f7d7871d0b963962420b0678b0997ce7de72001aeab782"
+checksum = "97ec8491ebaf99c8eaa73058b045fe58073cd6be7f596ac993ced0b0a0c01049"
dependencies = [
 "bytes 1.4.0",
 "fnv",
@@ -1790,7 +1967,7 @@ dependencies = [
 "futures-sink",
 "futures-util",
 "http",
- "indexmap",
+ "indexmap 1.9.3",
 "slab",
 "tokio",
 "tokio-util",
@@ -1802,9 +1979,6 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-dependencies = [
- "ahash 0.7.6",
-]

[[package]]
name = "hashbrown"
@@ -1812,16 +1986,26 @@ version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
dependencies = [
- "ahash 0.8.3",
+ "ahash",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.14.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a"
+dependencies = [
+ "ahash",
+ "allocator-api2",
]

[[package]]
name = "hashlink"
-version = "0.8.2"
+version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0761a1b9491c4f2e3d66aa0f62d0fba0af9a0e2852e4d48ea506632a4b56e6aa"
+checksum = "312f66718a2d7789ffef4f4b7b213138ed9f1eb3aa1d0d82fc99f88fb3ffd26f"
dependencies = [
- "hashbrown 0.13.2",
+ "hashbrown 0.14.0",
]

[[package]]
@@ -1844,28 +2028,46 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"

[[package]]
-name = "hermit-abi"
-version = "0.1.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+name = "heed"
+version = "0.20.0-alpha.3"
+source = "git+https://github.com/MarinPostma/heed.git?rev=2ae9a14#2ae9a14ce2270118e23f069ba6999212353d94aa"
dependencies = [
+ "bytemuck",
+ "byteorder",
+ "heed-traits",
+ "heed-types",
 "libc",
+ "lmdb-master-sys",
+ "once_cell",
+ "page_size",
+ "serde",
+ "synchronoise",
+ "url",
]

[[package]]
-name = "hermit-abi"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7"
+name = "heed-traits"
+version = "0.20.0-alpha.3"
+source = "git+https://github.com/MarinPostma/heed.git?rev=2ae9a14#2ae9a14ce2270118e23f069ba6999212353d94aa"
+
+[[package]]
+name = "heed-types"
+version = "0.20.0-alpha.3"
+source = "git+https://github.com/MarinPostma/heed.git?rev=2ae9a14#2ae9a14ce2270118e23f069ba6999212353d94aa"
dependencies = [
- "libc",
+ "bincode",
+ "bytemuck",
+ "byteorder",
+ "heed-traits",
+ "serde",
+ "serde_json",
]

[[package]]
name = "hermit-abi"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
+checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b"

[[package]]
name = "hex"
@@ -1930,9 +2132,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"

[[package]]
name = "hyper"
-version = "0.14.26"
+version = "0.14.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab302d72a6f11a3b910431ff93aae7e773078c769f0a3ef15fb9ec692ed147d4"
+checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468"
dependencies = [
 "bytes 1.4.0",
 "futures-channel",
@@ -1969,15 +2171,16 @@ dependencies = [

[[package]]
name = "hyper-rustls"
-version = "0.24.0"
+version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0646026eb1b3eea4cd9ba47912ea5ce9cc07713d105b1a14698f4e6433d348b7"
+checksum = "8d78e1e73ec14cf7375674f74d7dde185c8206fd9dea6fb6295e8a98098aaa97"
dependencies = [
+ "futures-util",
 "http",
 "hyper",
- "rustls 0.21.1",
+ "rustls 0.21.3",
 "tokio",
- "tokio-rustls 0.24.0",
+ "tokio-rustls 0.24.1",
]

[[package]]
@@ -2012,7 +2215,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "226df6fd0aece319a325419d770aa9d947defa60463f142cd82b329121f906a3"
dependencies = [
 "hyper",
- "pin-project 1.1.0",
+ "pin-project 1.1.2",
 "tokio",
 "tokio-tungstenite",
 "tungstenite",
@@ -2027,15 +2230,15 @@ dependencies = [
 "futures-util",
 "hex",
 "hyper",
- "pin-project 1.1.0",
+ "pin-project 1.1.2",
 "tokio",
]

[[package]]
name = "iana-time-zone"
-version = "0.1.56"
+version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0722cd7114b7de04316e7ea5456a0bbb20e4adb46fd27a3697adb812cff0f37c"
+checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613"
dependencies = [
 "android_system_properties",
 "core-foundation-sys",
@@ -2055,15 +2258,27 @@ dependencies = [
]

[[package]]
-name = "idna"
-version = "0.3.0"
+name = "id-arena"
+version = "2.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005"
+
+[[package]]
+name = "idna"
+version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6"
+checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c"
dependencies = [
 "unicode-bidi",
 "unicode-normalization",
]

+[[package]]
+name = "indenter"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
+
[[package]]
name = "indexmap"
version = "1.9.3"
@@ -2075,11 +2290,21 @@ dependencies = [
 "serde",
]

+[[package]]
+name = "indexmap"
+version = "2.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d"
+dependencies = [
+ "equivalent",
+ "hashbrown 0.14.0",
+]
+
[[package]]
name = "insta"
-version = "1.29.0"
+version = "1.30.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a28d25139df397cbca21408bb742cf6837e04cdbebf1b07b760caf971d6a972"
+checksum = "28491f7753051e5704d4d0ae7860d45fae3238d7d235bc4289dcd45c48d3cec3"
dependencies = [
 "console",
 "lazy_static",
@@ -2100,88 +2325,77 @@ dependencies = [

[[package]]
name = "inventory"
-version = "0.3.6"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0539b5de9241582ce6bd6b0ba7399313560151e58c9aaf8b74b711b1bdce644"
-dependencies = [
- "ghost",
-]
+checksum = "c38a87a1e0e2752433cd4b26019a469112a25fb43b30f5ee9b3b898925c5a0f9"

[[package]]
name = "io-extras"
-version = "0.15.0"
+version = "0.17.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a5d8c2ab5becd8720e30fd25f8fa5500d8dc3fceadd8378f05859bd7b46fc49"
+checksum = "fde93d48f0d9277f977a333eca8313695ddd5301dc96f7e02aeddcb0dd99096f"
dependencies = [
- "io-lifetimes 0.7.5",
- "windows-sys 0.36.1",
+ "io-lifetimes 1.0.11",
+ "windows-sys 0.48.0",
]

[[package]]
name = "io-lifetimes"
-version = "0.7.5"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074"
+checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
dependencies = [
+ "hermit-abi",
 "libc",
- "windows-sys 0.42.0",
+ "windows-sys 0.48.0",
]

[[package]]
name = "io-lifetimes"
-version = "1.0.11"
+version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2"
-dependencies = [
- "hermit-abi 0.3.1",
- "libc",
- "windows-sys 0.48.0",
-]
+checksum = "bffb4def18c48926ccac55c1223e02865ce1a821751a95920448662696e7472c"

[[package]]
name = "ipnet"
-version = "2.7.2"
+version = "2.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "12b6ee2129af8d4fb011108c73d99a1b83a85977f23b82460c0ae2e25bb4b57f"
+checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6"

[[package]]
name = "is-terminal"
-version = "0.3.0"
+version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0d508111813f9af3afd2f92758f77e4ed2cc9371b642112c6a48d22eb73105c5"
+checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b"
dependencies = [
- "hermit-abi 0.2.6",
- "io-lifetimes 0.7.5",
- "rustix 0.35.13",
- "windows-sys 0.36.1",
+ "hermit-abi",
+ "rustix 0.38.3",
+ "windows-sys 0.48.0",
]

[[package]]
-name = "is-terminal"
-version = "0.4.7"
+name = "itertools"
+version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adcf93614601c8129ddf72e2d5633df827ba6551541c6d8c59520a371475be1f"
+checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
- "hermit-abi 0.3.1",
- "io-lifetimes 1.0.11",
- "rustix 0.37.19",
- "windows-sys 0.48.0",
+ "either",
]

[[package]]
name = "itertools"
-version = "0.10.5"
+version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
dependencies = [
 "either",
]

[[package]]
name = "itoa"
-version = "1.0.6"
+version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "453ad9f582a441959e5f0d088b02ce04cfe8d51a8eaf077f12ac6d3e94164ca6"
+checksum = "62b02a5381cc465bd3041d84623d0fa3b66738b52b8e2fc3bab8ad63ab032f4a"

[[package]]
name = "ittapi"
@@ -2214,9 +2428,9 @@ dependencies = [

[[package]]
name = "js-sys"
-version = "0.3.63"
+version = "0.3.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f37a4a5928311ac501dee68b3c7613a1037d0edb30c8e5427bd832d55d1b790"
+checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a"
dependencies = [
 "wasm-bindgen",
]
@@ -2227,7 +2441,7 @@ version = "8.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378"
dependencies = [
- "base64 0.21.1",
+ "base64 0.21.2",
 "pem",
 "ring",
 "serde",
@@ -2235,6 +2449,15 @@ dependencies = [
 "simple_asn1",
]

+[[package]]
+name = "keccak"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940"
+dependencies = [
+ "cpufeatures",
+]
+
[[package]]
name = "lazy_static"
version = "1.4.0"
@@ -2255,9 +2478,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67"

[[package]]
name = "libc"
-version = "0.2.144"
+version = "0.2.147"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2b00cc1c228a6782d0f076e7b232802e0c5689d41bb5df366f2a6b6621cfdfe1"
+checksum = "b4668fb0ea861c1df094127ac5f1da3409a82116a4ba74fca2e58ef927159bb3"

[[package]]
name = "libloading"
@@ -2293,7 +2516,7 @@ checksum = "9c7b1c078b4d3d45ba0db91accc23dcb8d2761d67f819efd94293065597b7ac8"
dependencies = [
 "anyhow",
 "async-trait",
- "base64 0.21.1",
+ "base64 0.21.2",
 "num-traits",
 "reqwest",
 "serde_json",
@@ -2302,9 +2525,9 @@ dependencies = [

[[package]]
name = "libsql-wasmtime-bindings"
-version = "0.2.1"
+version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bcb56f5849df5085e99b7e1bea2e87ff3f93c4143d0922ab43682f904d9cbf59"
+checksum = "5c4794ff21e37f83839dad45f8c7977b071315f18705cf73badc9850b9fb6b6f"
dependencies = [
 "wasmtime",
 "wasmtime-wasi",
@@ -2322,6 +2545,84 @@ dependencies = [
 "vcpkg",
]

+[[package]]
+name = "libsqlx"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "arbitrary",
+ "async-trait",
+ "bytemuck",
+ "bytes 1.4.0",
+ "bytesize",
+ "crc",
+ "crossbeam",
+ "either",
+ "fallible-iterator 0.3.0",
+ "itertools 0.11.0",
+ "nix",
+ "once_cell",
+ "parking_lot",
+ "rand",
+ "regex",
+ "rusqlite",
+ "serde",
+ "serde_json",
+ "sqld-libsql-bindings",
+ "sqlite3-parser 0.9.0",
+ "tempfile",
+ "thiserror",
+ "tokio",
+ "tracing",
+ "uuid",
+]
+
+[[package]]
+name = "libsqlx-server"
+version = "0.1.0"
+dependencies = [
+ "async-bincode",
+ "async-trait",
+ "axum",
+ "base64 0.21.2",
+ "bincode",
+ "bytemuck",
+ "bytes 1.4.0",
+ "bytesize",
+ "chrono",
+ "clap",
+ "color-eyre",
+ "either",
+ "futures",
+ "heed",
+ "heed-types",
+ "hmac",
+ "humantime",
+ "hyper",
+ "itertools 0.11.0",
+ "libsqlx",
+ "moka",
+ "parking_lot",
+ "priority-queue",
+ "rand",
+ "regex",
+ "serde",
+ "serde_json",
+ "sha2",
+ "sha3",
+ "tempfile",
+ "thiserror",
+ "tokio",
+ "tokio-stream",
+ "tokio-util",
+ "toml 0.7.6",
+ "tracing",
+ "tracing-subscriber",
+ "turmoil",
+ "uuid",
+ "walkdir",
+]
+
[[package]]
name = "linked-hash-map"
version = "0.5.6"
@@ -2330,21 +2631,32 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"

[[package]]
name = "linux-raw-sys"
-version = "0.0.46"
+version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d"
+checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"

[[package]]
name = "linux-raw-sys"
-version = "0.3.8"
+version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519"
+checksum = "09fc20d2ca12cb9f044c93e3bd6d32d523e6e2ec3db4f7b2939cd99026ecd3f0"
+
+[[package]]
+name = "lmdb-master-sys"
+version = "0.1.0"
+source = "git+https://github.com/MarinPostma/heed.git?rev=2ae9a14#2ae9a14ce2270118e23f069ba6999212353d94aa"
+dependencies = [
+ "cc",
+ "doxygen-rs",
+ "libc",
+ "pkg-config",
+]

[[package]]
name = "lock_api"
-version = "0.4.9"
+version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
+checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16"
dependencies = [
 "autocfg",
 "scopeguard",
@@ -2352,12 +2664,9 @@ dependencies = [

[[package]]
name = "log"
-version = "0.4.17"
+version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
-dependencies = [
- "cfg-if",
-]
+checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"

[[package]]
name = "mach"
@@ -2368,6 +2677,15 @@ dependencies = [
 "libc",
]

+[[package]]
+name = "mach2"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "maplit"
version = "1.0.2"
@@ -2380,7 +2698,7 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558"
dependencies = [
- "regex-automata",
+ "regex-automata 0.1.10",
]

[[package]]
@@ -2416,7 +2734,7 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffc89ccdc6e10d6907450f753537ebc5c5d3460d2e4e62ea74bd571db62c0f9e"
dependencies = [
- "rustix 0.37.19",
+ "rustix 0.37.23",
]

[[package]]
@@ -2431,27 +2749,27 @@ dependencies = [

[[package]]
name = "memoffset"
-version = "0.6.5"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce"
+checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
dependencies = [
 "autocfg",
]

[[package]]
name = "memoffset"
-version = "0.7.1"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4"
+checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
dependencies = [
 "autocfg",
]

[[package]]
name = "memoffset"
-version = "0.8.0"
+version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d61c719bcfbcf5d62b3a09efa6088de8c54bc0bfcd3ea7ae39fcc186108b8de1"
+checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c"
dependencies = [
 "autocfg",
]
@@ -2488,14 +2806,38 @@ dependencies = [

[[package]]
name = "mio"
-version = "0.8.6"
+version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5b9d9a46eff5b4ff64b45a9e316a6d1e0bc719ef429cbec4dc630684212bfdf9"
+checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2"
dependencies = [
 "libc",
- "log",
 "wasi 0.11.0+wasi-snapshot-preview1",
- "windows-sys 0.45.0",
+ "windows-sys 0.48.0",
+]
+
+[[package]]
+name = "moka"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "206bf83f415b0579fd885fe0804eb828e727636657dc1bf73d80d2f1218e14a1"
+dependencies = [
+ "async-io",
+ "async-lock",
+ "crossbeam-channel",
+ "crossbeam-epoch",
+ "crossbeam-utils",
+ "futures-util",
+ "once_cell",
+ "parking_lot",
+ "quanta",
+ "rustc_version",
+ "scheduled-thread-pool",
+ "skeptic",
+ "smallvec",
+ "tagptr",
+ "thiserror",
+ "triomphe",
+ "uuid",
]

[[package]]
@@ -2589,11 +2931,20 @@ dependencies = [

[[package]]
name = "num_cpus"
-version = "1.15.0"
+version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b"
+checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "num_threads"
+version = "0.1.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
dependencies = [
- "hermit-abi 0.2.6",
 "libc",
]
@@ -2605,13 +2956,22 @@ checksum = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef"

[[package]]
name = "object"
-version = "0.29.0"
+version = "0.30.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53"
+checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385"
dependencies = [
 "crc32fast",
- "hashbrown 0.12.3",
- "indexmap",
+ "hashbrown 0.13.2",
+ "indexmap 1.9.3",
+ "memchr",
+]
+
+[[package]]
+name = "object"
+version = "0.31.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8bda667d9f2b5051b8833f59f3bf748b28ef54f850f4fcb389a252aa383866d1"
+dependencies = [
 "memchr",
]
@@ -2646,15 +3006,15 @@ dependencies = [

[[package]]
name = "once_cell"
-version = "1.17.1"
+version = "1.18.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
+checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"

[[package]]
name = "openssl"
-version = "0.10.52"
+version = "0.10.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "01b8574602df80f7b85fdfc5392fa884a4e3b3f4f35402c070ab34c3d3f78d56"
+checksum = "345df152bc43501c5eb9e4654ff05f794effb78d4efe3d53abc158baddc0703d"
dependencies = [
 "bitflags 1.3.2",
 "cfg-if",
@@ -2673,7 +3033,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
@@ -2684,9 +3044,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf"

[[package]]
name = "openssl-sys"
-version = "0.9.87"
+version = "0.9.90"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e17f59264b2809d77ae94f0e1ebabc434773f370d6ca667bd223ea10e06cc7e"
+checksum = "374533b0e45f3a7ced10fcaeccca020e66656bc03dac384f852e4e5a7a8104a6"
dependencies = [
 "cc",
 "libc",
@@ -2706,6 +3066,28 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39"

+[[package]]
+name = "owo-colors"
+version = "3.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"
+
+[[package]]
+name = "page_size"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b7663cbd190cfd818d08efa8497f6cd383076688c49a391ef7c0d03cd12b561"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "parking"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14f2252c834a40ed9bb5422029649578e63aa341ac401f74e719dd1afda8394e"
+
[[package]]
name = "parking_lot"
version = "0.12.1"
@@ -2718,22 +3100,22 @@ dependencies = [

[[package]]
name = "parking_lot_core"
-version = "0.9.7"
+version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9069cbb9f99e3a5083476ccb29ceb1de18b9118cafa53e90c9551235de2b9521"
+checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447"
dependencies = [
 "cfg-if",
 "libc",
- "redox_syscall 0.2.16",
+ "redox_syscall 0.3.5",
 "smallvec",
- "windows-sys 0.45.0",
+ "windows-targets 0.48.1",
]

[[package]]
name = "paste"
-version = "1.0.12"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
+checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35"

[[package]]
name = "peeking_take_while"
@@ -2752,9 +3134,9 @@ dependencies = [

[[package]]
name = "percent-encoding"
-version = "2.2.0"
+version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
+checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94"

[[package]]
name = "petgraph"
@@ -2763,23 +3145,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4dd7d28ee937e54fe3080c91faa1c3a46c06de6252988a7f4592ba2310ef22a4"
dependencies = [
 "fixedbitset",
- "indexmap",
+ "indexmap 1.9.3",
]

[[package]]
name = "phf"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c"
+checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc"
dependencies = [
+ "phf_macros",
 "phf_shared",
]

[[package]]
name = "phf_codegen"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a56ac890c5e3ca598bbdeaa99964edb5b0258a583a9eb6ef4e89fc85d9224770"
+checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a"
dependencies = [
 "phf_generator",
 "phf_shared",
@@ -2787,19 +3170,32 @@ dependencies = [

[[package]]
name = "phf_generator"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1181c94580fa345f50f19d738aaa39c0ed30a600d95cb2d3e23f94266f14fbf"
+checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0"
dependencies = [
 "phf_shared",
 "rand",
]

+[[package]]
+name = "phf_macros"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3444646e286606587e49f3bcf1679b8cef1dc2c5ecc29ddacaffc305180d464b"
+dependencies = [
+ "phf_generator",
+ "phf_shared",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.25",
+]
+
[[package]]
name = "phf_shared"
-version = "0.11.1"
+version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676"
+checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b"
dependencies = [
 "siphasher",
 "uncased",
@@ -2816,11 +3212,11 @@ dependencies = [

[[package]]
name = "pin-project"
-version = "1.1.0"
+version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c95a7476719eab1e366eaf73d0260af3021184f18177925b07f54b30089ceead"
+checksum = "030ad2bc4db10a8944cb0d837f158bdfec4d4a4873ab701a95046770d11f8842"
dependencies = [
- "pin-project-internal 1.1.0",
+ "pin-project-internal 1.1.2",
]

[[package]]
@@ -2836,20 +3232,20 @@ dependencies = [

[[package]]
name = "pin-project-internal"
-version = "1.1.0"
+version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "39407670928234ebc5e6e580247dd567ad73a3578460c5990f9503df207e8f07"
+checksum = "ec2e072ecce94ec471b13398d5402c188e76ac03cf74dd1a975161b23a3f6d9c"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
name = "pin-project-lite"
-version = "0.2.9"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116"
+checksum = "4c40d25201921e5ff0c862a505c6557ea88568a4e3ace775ab55e93f2f4f9d57"

[[package]]
name = "pin-utils"
@@ -2899,6 +3295,22 @@ dependencies = [
 "serde_json",
]

+[[package]]
+name = "polling"
+version = "2.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce"
+dependencies = [
+ "autocfg",
+ "bitflags 1.3.2",
+ "cfg-if",
+ "concurrent-queue",
+ "libc",
+ "log",
+ "pin-project-lite",
+ "windows-sys 0.48.0",
+]
+
[[package]]
name = "ppv-lite86"
version = "0.2.17"
@@ -2917,29 +3329,29 @@ dependencies = [

[[package]]
name = "prettyplease"
-version = "0.2.6"
+version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b69d39aab54d069e7f2fe8cb970493e7834601ca2d8c65fd7bbd183578080d1"
+checksum = "92139198957b410250d43fad93e630d956499a625c527eda65175c8680f83387"
dependencies = [
 "proc-macro2",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
name = "priority-queue"
-version = "1.3.1"
+version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5ca9c6be70d989d21a136eb86c2d83e4b328447fac4a88dace2143c179c86267"
+checksum = "fff39edfcaec0d64e8d0da38564fad195d2d51b680940295fcc307366e101e61"
dependencies = [
 "autocfg",
- "indexmap",
+ "indexmap 1.9.3",
]

[[package]]
name = "proc-macro2"
-version = "1.0.58"
+version = "1.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fa1fb82fc0c281dd9671101b66b771ebbe1eaf967b96ac8740dcba4b70005ca8"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
dependencies = [
 "unicode-ident",
]
@@ -2982,7 +3394,7 @@ checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270"
dependencies = [
 "bytes 1.4.0",
 "heck",
- "itertools",
+ "itertools 0.10.5",
 "lazy_static",
 "log",
 "multimap",
@@ -3003,7 +3415,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4"
dependencies = [
 "anyhow",
- "itertools",
+ "itertools 0.10.5",
 "proc-macro2",
 "quote",
 "syn 1.0.109",
@@ -3027,6 +3439,44 @@ dependencies = [
 "cc",
]

+[[package]]
+name = "pulldown-cmark"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ffade02495f22453cd593159ea2f59827aae7f53fa8323f756799b670881dcf8"
+dependencies = [
+ "bitflags 1.3.2",
+ "memchr",
+ "unicase",
+]
+
+[[package]]
+name = "pulldown-cmark"
+version = "0.9.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998"
+dependencies = [
+ "bitflags 1.3.2",
+ "memchr",
+ "unicase",
+]
+
+[[package]]
+name = "quanta"
+version = "0.11.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab"
+dependencies = [
+ "crossbeam-utils",
+ "libc",
+ "mach2",
+ "once_cell",
+ "raw-cpuid",
+ "wasi 0.11.0+wasi-snapshot-preview1",
+ "web-sys",
+ "winapi",
+]
+
[[package]]
name = "quick-error"
version = "1.2.3"
@@ -3035,9 +3485,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"

[[package]]
name = "quote"
-version = "1.0.27"
+version = "1.0.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8f4f29d145265ec1c483c7c654450edde0bfe043d3938d6972630663356d9500"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
dependencies = [
 "proc-macro2",
]
@@ -3072,6 +3522,16 @@ dependencies = [
 "getrandom",
]

+[[package]]
+name = "rand_distr"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31"
+dependencies = [
+ "num-traits",
+ "rand",
+]
+
[[package]]
name = "rand_xorshift"
version = "0.3.0"
@@ -3081,6 +3541,15 @@ dependencies = [
 "rand_core",
]

+[[package]]
+name = "raw-cpuid"
+version = "10.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332"
+dependencies = [
+ "bitflags 1.3.2",
+]
+
[[package]]
name = "rayon"
version = "1.7.0"
@@ -3143,25 +3612,27 @@ dependencies = [

[[package]]
name = "regalloc2"
-version = "0.4.2"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91b2eab54204ea0117fe9a060537e0b07a4e72f7c7d182361ecc346cab2240e5"
+checksum = "d4a52e724646c6c0800fc456ec43b4165d2f91fba88ceaca06d9e0b400023478"
dependencies = [
- "fxhash",
+ "hashbrown 0.13.2",
 "log",
+ "rustc-hash",
 "slice-group-by",
 "smallvec",
]

[[package]]
name = "regex"
-version = "1.8.2"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1a59b5d8e97dee33696bf13c5ba8ab85341c002922fba050069326b9c498974"
+checksum = "b2eae68fc220f7cf2532e4494aded17545fce192d59cd996e0fe7887f4ceb575"
dependencies = [
 "aho-corasick",
 "memchr",
- "regex-syntax 0.7.2",
+ "regex-automata 0.3.2",
+ "regex-syntax 0.7.3",
]

[[package]]
@@ -3173,6 +3644,17 @@ dependencies = [
 "regex-syntax 0.6.29",
]

+[[package]]
+name = "regex-automata"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "83d3daa6976cffb758ec878f108ba0e062a45b2d6ca3a2cca965338855476caf"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax 0.7.3",
+]
+
[[package]]
name = "regex-syntax"
version = "0.6.29"
@@ -3181,9 +3663,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"

[[package]]
name = "regex-syntax"
-version = "0.7.2"
+version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "436b050e76ed2903236f032a59761c1eb99e1b0aead2c257922771dab1fc8c78"
+checksum = "2ab07dc67230e4a4718e70fd5c20055a4334b121f1f9db8fe63ef39ce9b8c846"

[[package]]
name = "reqwest"
@@ -3191,7 +3673,7 @@ version = "0.11.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cde824a14b7c14f85caff81225f411faacc04a2013f41670f41443742b1c1c55"
dependencies = [
- "base64 0.21.1",
+ "base64 0.21.2",
 "bytes 1.4.0",
 "encoding_rs",
 "futures-core",
@@ -3200,7 +3682,7 @@ dependencies = [
 "http",
 "http-body",
 "hyper",
- "hyper-rustls 0.24.0",
+ "hyper-rustls 0.24.1",
 "hyper-tls",
 "ipnet",
 "js-sys",
@@ -3210,14 +3692,14 @@ dependencies = [
 "once_cell",
 "percent-encoding",
 "pin-project-lite",
- "rustls 0.21.1",
+ "rustls 0.21.3",
 "rustls-pemfile",
 "serde",
 "serde_json",
 "serde_urlencoded",
 "tokio",
 "tokio-native-tls",
- "tokio-rustls 0.24.0",
+ "tokio-rustls 0.24.1",
 "tower-service",
 "url",
 "wasm-bindgen",
@@ -3247,8 +3729,8 @@ name = "rusqlite"
version = "0.29.0"
source = "git+https://github.com/psarna/rusqlite?rev=d9a97c0f25#d9a97c0f25d48272c91d3f8d93d46cb405c39037"
dependencies = [
- "bitflags 2.3.1",
- "fallible-iterator",
+ "bitflags 2.3.3",
+ "fallible-iterator 0.2.0",
 "fallible-streaming-iterator",
 "hashlink",
 "libsqlite3-sys",
@@ -3278,31 +3760,30 @@ dependencies = [

[[package]]
name = "rustix"
-version = "0.35.13"
+version = "0.37.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9"
+checksum = "4d69718bf81c6127a49dc64e44a742e8bb9213c0ff8869a22c308f84c1d4ab06"
dependencies = [
 "bitflags 1.3.2",
- "errno 0.2.8",
- "io-lifetimes 0.7.5",
+ "errno",
+ "io-lifetimes 1.0.11",
 "itoa",
 "libc",
- "linux-raw-sys 0.0.46",
+ "linux-raw-sys 0.3.8",
 "once_cell",
- "windows-sys 0.42.0",
+ "windows-sys 0.48.0",
]

[[package]]
name = "rustix"
-version = "0.37.19"
+version = "0.38.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "acf8729d8542766f1b2cf77eb034d52f40d375bb8b615d0b147089946e16613d"
+checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4"
dependencies = [
- "bitflags 1.3.2",
- "errno 0.3.1",
- "io-lifetimes 1.0.11",
+ "bitflags 2.3.3",
+ "errno",
 "libc",
- "linux-raw-sys 0.3.8",
+ "linux-raw-sys 0.4.3",
 "windows-sys 0.48.0",
]
@@ -3320,9 +3801,9 @@ dependencies = [

[[package]]
name = "rustls"
-version = "0.21.1"
+version = "0.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c911ba11bc8433e811ce56fde130ccf32f5127cab0e0194e9c68c5a5b671791e"
+checksum = "b19faa85ecb5197342b54f987b142fb3e30d0c90da40f80ef4fa9a726e6676ed"
dependencies = [
 "log",
 "ring",
@@ -3332,9 +3813,9 @@ dependencies = [

[[package]]
name = "rustls-native-certs"
-version = "0.6.2"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50"
+checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00"
dependencies = [
 "openssl-probe",
 "rustls-pemfile",
@@ -3344,18 +3825,18 @@ dependencies = [

[[package]]
name = "rustls-pemfile"
-version = "1.0.2"
+version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d194b56d58803a43635bdc398cd17e383d6f71f9182b9a192c127ca42494a59b"
+checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2"
dependencies = [
- "base64 0.21.1",
+ "base64 0.21.2",
]

[[package]]
name = "rustls-webpki"
-version = "0.100.1"
+version = "0.101.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d6207cd5ed3d8dca7816f8f3725513a34609c0c765bf652b8c3cb4cfd87db46b"
+checksum = "15f36a6828982f422756984e47912a7a51dcbc2a197aa791158f8ca61cd8204e"
dependencies = [
 "ring",
 "untrusted",
@@ -3363,9 +3844,9 @@ dependencies = [

[[package]]
name = "rustversion"
-version = "1.0.12"
+version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4f3208ce4d8448b3f3e7d168a73f5e0c43a61e32930de3bceeccedb388b6bf06"
+checksum = "dc31bd9b61a32c31f9650d18add92aa83a49ba979c143eefd27fe7177b05bd5f"

[[package]]
name = "rusty-fork"
@@ -3381,19 +3862,43 @@ dependencies = [

[[package]]
name = "ryu"
-version = "1.0.13"
+version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f91339c0467de62360649f8d3e185ca8de4224ff281f66000de5eb2a77a79041"
+checksum = "fe232bdf6be8c8de797b22184ee71118d63780ea42ac85b61d1baa6d3b782ae9"
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]

[[package]]
name = "schannel"
-version = "0.1.21"
+version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
+checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88"
dependencies = [
- "windows-sys 0.42.0",
+ "windows-sys 0.48.0",
]

+[[package]]
+name = "scheduled-thread-pool"
+version = "0.2.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19"
+dependencies = [
+ "parking_lot",
+]
+
+[[package]]
+name = "scoped-tls"
+version = "1.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
+
[[package]]
name = "scopeguard"
version = "1.1.0"
@@ -3449,34 +3954,37 @@ name = "semver"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
+dependencies = [
+ "serde",
+]

[[package]]
name = "serde"
-version = "1.0.163"
+version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2113ab51b87a539ae008b5c6c02dc020ffa39afd2d83cffcb3f4eb2722cebec2"
+checksum = "30e27d1e4fd7659406c492fd6cfaf2066ba8773de45ca75e855590f856dc34a9"
dependencies = [
 "serde_derive",
]

[[package]]
name = "serde_derive"
-version = "1.0.163"
+version = "1.0.171"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c805777e3930c8883389c602315a24224bcc738b63905ef87cd1420353ea93e"
+checksum = "389894603bd18c46fa56231694f8d827779c0951a667087194cf9de94ed24682"
dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.16",
+ "syn 2.0.25",
]

[[package]]
name = "serde_json"
-version = "1.0.96"
+version = "1.0.100"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
+checksum = "0f1e14e89be7aa4c4b78bdbdc9eb5bf8517829a600ae8eaa39a6e1d960b5185c"
dependencies = [
- "indexmap",
+ "indexmap 2.0.0",
 "itoa",
 "ryu",
 "serde",
@@ -3484,9 +3992,19 @@ dependencies = [

[[package]]
name = "serde_path_to_error"
-version = "0.1.11"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8acc4422959dd87a76cb117c191dcbffc20467f06c9100b76721dab370f24d3a"
+dependencies = [
+ "itoa",
+ "serde",
+]
+
+[[package]]
+name = "serde_spanned"
+version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7f05c1d5476066defcdfacce1f52fc3cae3af1d3089727100c02ae92e5abbe0"
+checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186"
dependencies = [
 "serde",
]
@@ -3516,9 +4034,9 @@ dependencies = [

[[package]]
name = "sha2"
-version = "0.10.6"
+version = "0.10.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0"
+checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8"
dependencies = [
 "cfg-if",
 "cpufeatures",
@@ -3527,14 +4045,24 @@ dependencies = [

[[package]]
name = "sha256"
-version = "1.1.3"
+version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f9f8b5de2bac3a4ae28e9b611072a8e326d9b26c8189c0972d4c321fa684f1f"
+checksum = "08a975c1bc0941703000eaf232c4d8ce188d8d5408d6344b6b2c8c6262772828"
dependencies = [
 "hex",
 "sha2",
]

+[[package]]
+name = "sha3"
+version = "0.10.8"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
+dependencies = [
+ "digest",
+ "keccak",
+]
+
[[package]]
name = "sharded-slab"
version = "0.1.4"
@@ -3583,7 +4111,7 @@ dependencies = [
 "num-bigint",
 "num-traits",
 "thiserror",
- "time 0.3.21",
+ "time 0.3.23",
]

[[package]]
@@ -3592,6 +4120,21 @@ version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"

+[[package]]
+name = "skeptic"
+version = "0.13.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16d23b015676c90a0f01c197bfdc786c20342c73a0afdda9025adb0bc42940a8"
+dependencies = [
+ "bytecount",
+ "cargo_metadata",
+ "error-chain",
+ "glob",
+ "pulldown-cmark 0.9.3",
+ "tempfile",
+ "walkdir",
+]
+
[[package]]
name = "slab"
version = "0.4.8"
@@ -3609,9 +4152,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7"

[[package]]
name = "smallvec"
-version = "1.10.0"
+version = "1.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0"
+checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"

[[package]]
name = "socket2"
@@ -3635,7 +4178,7 @@ version = "0.1.0"
dependencies = [
 "anyhow",
 "bytes 1.4.0",
- "fallible-iterator",
+ "fallible-iterator 0.2.0",
"fn-error-context", "libsql-client", "scram", @@ -3657,7 +4200,7 @@ dependencies = [ "aws-config", "aws-sdk-s3", "axum", - "base64 0.21.1", + "base64 0.21.2", "bincode", "bottomless", "bytemuck", @@ -3669,13 +4212,13 @@ dependencies = [ "crossbeam", "enclose", "env_logger", - "fallible-iterator", + "fallible-iterator 0.2.0", "futures", "hmac", "hyper", "hyper-tungstenite", "insta", - "itertools", + "itertools 0.10.5", "jsonwebtoken", "libsql-client", "memmap", @@ -3696,7 +4239,7 @@ dependencies = [ "sha2", "sha256", "sqld-libsql-bindings", - "sqlite3-parser", + "sqlite3-parser 0.8.0", "tempfile", "thiserror", "tokio", @@ -3725,14 +4268,33 @@ dependencies = [ [[package]] name = "sqlite3-parser" -version = "0.8.0" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3995a6daa13c113217b6ad22154865fb06f9cb939bef398fd04f4a7aaaf5bd7" +dependencies = [ + "bitflags 2.3.3", + "cc", + "fallible-iterator 0.2.0", + "indexmap 1.9.3", + "log", + "memchr", + "phf", + "phf_codegen", + "phf_shared", + "smallvec", + "uncased", +] + +[[package]] +name = "sqlite3-parser" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3995a6daa13c113217b6ad22154865fb06f9cb939bef398fd04f4a7aaaf5bd7" +checksum = "db68d3f0682b50197a408d65a3246b7d6173399d1325cf0208fb3fdb66e3229f" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.3.3", "cc", - "fallible-iterator", - "indexmap", + "fallible-iterator 0.3.0", + "indexmap 1.9.3", "log", "memchr", "phf", @@ -3779,9 +4341,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.16" +version = "2.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6f671d4b5ffdb8eadec19c0ae67fe2639df8684bd7bc4b83d986b8db549cf01" +checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2" dependencies = [ "proc-macro2", "quote", @@ -3794,22 +4356,37 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "synchronoise" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" +dependencies = [ + "crossbeam-queue", +] + [[package]] name = "system-interface" -version = "0.23.0" +version = "0.25.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92adbaf536f5aff6986e1e62ba36cee72b1718c5153eee08b9e728ddde3f6029" +checksum = "10081a99cbecbc363d381b9503563785f0b02735fccbb0d4c1a2cb3d39f7e7fe" dependencies = [ - "atty", - "bitflags 1.3.2", + "bitflags 2.3.3", "cap-fs-ext", "cap-std", - "io-lifetimes 0.7.5", - "rustix 0.35.13", - "windows-sys 0.36.1", - "winx", + "fd-lock", + "io-lifetimes 2.0.2", + "rustix 0.38.3", + "windows-sys 0.48.0", + "winx 0.36.1", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "tar" version = "0.4.38" @@ -3823,21 +4400,22 @@ dependencies = [ [[package]] name = "target-lexicon" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd1ba337640d60c3e96bc6f0638a939b9c9a7f2c316a1598c279828b3d1dc8c5" +checksum = "1b1c7f239eb94671427157bd93b3694320f3668d4e1eff08c7285366fd777fac" [[package]] name = "tempfile" -version = "3.5.0" +version = "3.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9fbec84f381d5795b08656e4912bec604d162bff9291d6189a78f4c8ab87998" +checksum = "31c0432476357e58790aaa47a8efb0c5138f137343f3b5f23bd36a27e3b0a6d6" dependencies = [ + "autocfg", "cfg-if", "fastrand", "redox_syscall 0.3.5", - "rustix 0.37.19", - "windows-sys 0.45.0", + "rustix 0.37.23", + "windows-sys 0.48.0", ] [[package]] @@ -3863,22 +4441,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "a35fc5b8971143ca348fa6df4f024d4d55264f3468c71ad1c2f365b0a4d58c42" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "463fe12d7993d3b327787537ce8dd4dfa058de32fc2b195ef3cde03dc4771e8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.25", ] [[package]] @@ -3904,11 +4482,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.21" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3403384eaacbca9923fa06940178ac13e4edb725486d70e8e15881d0c836cc" +checksum = "59e399c068f43a5d116fedaf73b203fa4f9c519f17e2b34f63221d3792f81446" dependencies = [ "itoa", + "libc", + "num_threads", "serde", "time-core", "time-macros", @@ -3922,9 +4502,9 @@ checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" [[package]] name = "time-macros" -version = "0.2.9" +version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "372950940a5f07bf38dbe211d7283c9e6d7327df53794992d293e534c733d09b" +checksum = "96ba15a897f3c86766b757e5ac7221554c6750054d74d5b28844fce5fb36a6c4" dependencies = [ "time-core", ] @@ -3946,11 +4526,12 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.28.1" +version = "1.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aa32867d44e6f2ce3385e89dceb990188b8bb0fb25b0cf576647a6f98ac5105" +checksum = "532826ff75199d5833b9d2c5fe410f29235e25704ee5f0ef599fb51c21f4a4da" dependencies = [ "autocfg", + "backtrace", "bytes 1.4.0", "libc", "mio", @@ -3982,7 +4563,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.25", ] [[package]] @@ -4008,11 +4589,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0d409377ff5b1e3ca6437aa86c1eb7d40c134bfec254e44c830defa92669db5" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" dependencies = [ - "rustls 0.21.1", + "rustls 0.21.3", "tokio", ] @@ -4027,6 +4608,19 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-test" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53474327ae5e166530d17f2d956afcb4f8a004de581b3cae10f12006bc8163e3" +dependencies = [ + "async-stream", + "bytes 1.4.0", + "futures-core", + "tokio", + "tokio-stream", +] + [[package]] name = "tokio-tungstenite" version = "0.19.0" @@ -4062,6 +4656,40 @@ dependencies = [ "serde", ] +[[package]] +name = "toml" +version = 
"0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c17e963a819c331dcacd7ab957d80bc2b9a9c1e71c804826d2f283dd65306542" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c500344a19072298cd05a7224b3c0c629348b78692bf48466c5238656e315a78" +dependencies = [ + "indexmap 2.0.0", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + [[package]] name = "tonic" version = "0.8.3" @@ -4081,7 +4709,7 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.0", + "pin-project 1.1.2", "prost", "prost-derive", "rustls-pemfile", @@ -4104,7 +4732,7 @@ checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", - "base64 0.21.1", + "base64 0.21.2", "bytes 1.4.0", "futures-core", "futures-util", @@ -4114,7 +4742,7 @@ dependencies = [ "hyper", "hyper-timeout", "percent-encoding", - "pin-project 1.1.0", + "pin-project 1.1.2", "prost", "tokio", "tokio-stream", @@ -4145,8 +4773,8 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap", - "pin-project 1.1.0", + "indexmap 1.9.3", + "pin-project 1.1.2", "pin-project-lite", "rand", "slab", @@ -4206,13 +4834,13 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f57e3ca2a01450b1a921183a9c9cbfda207fd822cef4ccb00a65402cbba7a74" +checksum = "5f4f31f56159e98206da9efd823404b79b6ef3143b4a7ab76e67b1751b25a4ab" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.25", ] [[package]] @@ -4225,13 +4853,23 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-error" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber", +] + [[package]] name = "tracing-futures" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" dependencies = [ - "pin-project 1.1.0", + "pin-project 1.1.2", "tracing", ] @@ -4264,6 +4902,12 @@ dependencies = [ "tracing-log", ] +[[package]] +name = "triomphe" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" + [[package]] name = "try-lock" version = "0.2.4" @@ -4289,6 +4933,26 @@ dependencies = [ "utf-8", ] +[[package]] +name = "turmoil" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e72ab712288bd737d0abc60712e031fb48488bbce7810ac2135da067e916469" +dependencies = [ + "bytes 1.4.0", + "futures", + "indexmap 1.9.3", + "rand", + "rand_distr", + "scoped-tls", + "tokio", + "tokio-stream", + "tokio-test", + "tokio-util", + "tracing", + "tracing-subscriber", +] + [[package]] name = "typenum" version = "1.16.0" @@ -4310,6 +4974,15 @@ dependencies = [ "version_check", ] +[[package]] 
+name = "unicase" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.13" @@ -4318,9 +4991,9 @@ checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0" +checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73" [[package]] name = "unicode-normalization" @@ -4337,6 +5010,12 @@ version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" +[[package]] +name = "unicode-xid" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + [[package]] name = "untrusted" version = "0.7.1" @@ -4351,9 +5030,9 @@ checksum = "2fbfe96089af082b3c856f83bdd0b6866241377d9dbea803fb39481151e5742d" [[package]] name = "url" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" +checksum = "50bff7831e19200a85b17131d085c25d7811bc4e186efdaf54bbd132994a88cb" dependencies = [ "form_urlencoded", "idna", @@ -4380,9 +5059,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345444e32442451b267fc254ae85a209c64be56d2890e601a0c37ff0c3c5ecd2" +checksum = "d023da39d1fde5a8a3fe1f3e01ca9632ada0a63e9797de55a879d6e2236277be" dependencies = [ "atomic", "getrandom", @@ -4403,13 +5082,13 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" -version = "8.2.1" +version = "8.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b3c89c2c7e50f33e4d35527e5bf9c11d6d132226dbbd1753f0fbe9f19ef88c6" +checksum = "bbc5ad0d9d26b2c49a5ab7da76c3e79d3ee37e7821799f8223fcb8f2f391a2e7" dependencies = [ "anyhow", "rustversion", - "time 0.3.21", + "time 0.3.23", ] [[package]] @@ -4433,13 +5112,28 @@ dependencies = [ "libc", ] +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + +[[package]] +name = "walkdir" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36df944cda56c7d8d8b7496af378e6b16de9284591917d307c9b4d313c44e698" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ - "log", "try-lock", ] @@ -4457,9 +5151,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi-cap-std-sync" -version = "3.0.1" +version = "9.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecbeebb8985a5423f36f976b2f4a0b3c6ce38d7d9a7247e1ce07aa2880e4f29b" +checksum = "5d29c5da3b5cfc9212a7fa824224875cb67fb89d2a8392db655e4c59b8ab2ae7" dependencies = [ "anyhow", "async-trait", @@ -4469,40 +5163,41 @@ dependencies = [ "cap-time-ext", "fs-set-times", "io-extras", - "io-lifetimes 0.7.5", - "is-terminal 0.3.0", + "io-lifetimes 1.0.11", + "is-terminal", "once_cell", - "rustix 0.35.13", + "rustix 0.37.23", "system-interface", "tracing", "wasi-common", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasi-common" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e2171f3783fe6600ee24ff6c58ca1b329c55e458cc1622ecc1fd0427648607" +checksum = "f8bd905dcec1448664bf63d42d291cbae0feeea3ad41631817b8819e096d76bd" dependencies = [ "anyhow", "bitflags 1.3.2", "cap-rand", "cap-std", "io-extras", - "rustix 0.35.13", + "log", + "rustix 0.37.23", "thiserror", "tracing", "wasmtime", "wiggle", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasm-bindgen" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bba0e8cb82ba49ff4e229459ff22a191bbe9a1cb3a341610c9c33efc27ddf73" +checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -4510,24 +5205,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b04bc93f9d6bdee709f6bd2118f57dd6679cf1176a1af464fca3ab0d66d8fb" +checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.25", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d1985d03709c53167ce907ff394f5316aa22cb4e12761295c5dc57dacb6297e" +checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ "cfg-if", "js-sys", @@ -4537,9 +5232,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14d6b024f1a526bb0234f52840389927257beb670610081360e5a03c5df9c258" +checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4547,134 +5242,178 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e128beba882dd1eb6200e1dc92ae6c5dbaa4311aa7bb211ca035779e5efc39f8" +checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.16", + "syn 2.0.25", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.86" +version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9d5b4305409d1fc9482fee2d7f9bcbf24b3972bf59817ef757e23982242a93" +checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" [[package]] name = "wasm-encoder" -version = "0.28.0" +version = "0.29.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "83c94f464d50e31da425794a02da1a82d4b96a657dcb152a6664e8aa915be517" +checksum = "18c41dbd92eaebf3612a39be316540b8377c871cb9bde6b064af962984912881" dependencies = [ "leb128", ] [[package]] name = "wasmparser" -version = "0.93.0" +version = "0.103.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a4460aa3e271fa180b6a5d003e728f3963fb30e3ba0fa7c9634caa06049328" +checksum = "2c437373cac5ea84f1113d648d51f71751ffbe3d90c00ae67618cf20d0b5ee7b" dependencies = [ - "indexmap", + "indexmap 1.9.3", + "url", ] [[package]] name = "wasmtime" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d18265705b1c49218776577d9f301d79ab06888c7f4a32e2ed24e68a55738ce7" +checksum = "634357e8668774b24c80b210552f3f194e2342a065d6d83845ba22c5817d0770" dependencies = [ "anyhow", "async-trait", "bincode", + "bumpalo", "cfg-if", - "indexmap", + "fxprof-processed-profile", + "indexmap 1.9.3", "libc", "log", - "object", + "object 0.30.4", "once_cell", "paste", "psm", "rayon", "serde", + "serde_json", "target-lexicon", "wasmparser", "wasmtime-cache", + "wasmtime-component-macro", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-fiber", "wasmtime-jit", "wasmtime-runtime", "wat", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-asm-macros" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a201583f6c79b96e74dcce748fa44fb2958f474ef13c93f880ea4d3bed31ae4f" +checksum = "d33c73c24ce79b0483a3b091a9acf88871f4490b88998e8974b22236264d304c" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f37efc6945b08fcb634cffafc438dd299bac55a27c836954656c634d3e63c31" +checksum = "6107809b2d9f5b2fd3ddbaddb3bb92ff8048b62f4030debf1408119ffd38c6cb" dependencies = [ "anyhow", - "base64 0.13.1", + "base64 0.21.2", "bincode", "directories-next", "file-per-thread-logger", "log", - "rustix 0.35.13", + "rustix 0.37.23", "serde", "sha2", - "toml", - "windows-sys 0.36.1", + "toml 0.5.11", + "windows-sys 0.48.0", "zstd", ] +[[package]] +name = "wasmtime-component-macro" +version = "9.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ba489850d9c91c6c5b9e1696ee89e7a69d9796236a005f7e9131b6746e13b6" +dependencies = [ + "anyhow", + "proc-macro2", + "quote", + "syn 1.0.109", + "wasmtime-component-util", + "wasmtime-wit-bindgen", + "wit-parser", +] + +[[package]] +name = "wasmtime-component-util" +version = "9.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fa88f9e77d80f828c9d684741a9da649366c6d1cceb814755dd9cab7112d1d1" + [[package]] name = "wasmtime-cranelift" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe208297e045ea0ee6702be88772ea40f918d55fbd4163981a4699aff034b634" +checksum = "5800616a28ed6bd5e8b99ea45646c956d798ae030494ac0689bc3e45d3b689c1" dependencies = [ "anyhow", "cranelift-codegen", + "cranelift-control", "cranelift-entity", "cranelift-frontend", "cranelift-native", "cranelift-wasm", "gimli", "log", - "object", + "object 0.30.4", "target-lexicon", "thiserror", "wasmparser", + "wasmtime-cranelift-shared", + "wasmtime-environ", +] + +[[package]] +name = "wasmtime-cranelift-shared" +version = "9.0.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "27e4030b959ac5c5d6ee500078977e813f8768fa2b92fc12be01856cd0c76c55" +dependencies = [ + "anyhow", + "cranelift-codegen", + "cranelift-control", + "cranelift-native", + "gimli", + "object 0.30.4", + "target-lexicon", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "754b97f7441ac780a7fa738db5b9c23c1b70ef4abccd8ad205ada5669d196ba2" +checksum = "9ec815d01a8d38aceb7ed4678f9ba551ae6b8a568a63810ac3ad9293b0fd01c8" dependencies = [ "anyhow", "cranelift-entity", "gimli", - "indexmap", + "indexmap 1.9.3", "log", - "object", + "object 0.30.4", "serde", "target-lexicon", "thiserror", @@ -4684,24 +5423,24 @@ dependencies = [ [[package]] name = "wasmtime-fiber" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f54abc960b4a055ba16b942cbbd1da641e0ad44cc97a7608f3d43c069b120e" +checksum = "23c5127908fdf720614891ec741c13dd70c844e102caa393e2faca1ee68e9bfb" dependencies = [ "cc", "cfg-if", - "rustix 0.35.13", + "rustix 0.37.23", "wasmtime-asm-macros", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-jit" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32800cb6e29faabab7056593f70a4c00c65c75c365aaf05406933f2169d0c22f" +checksum = "2712eafe829778b426cad0e1769fef944898923dd29f0039e34e0d53ba72b234" dependencies = [ - "addr2line", + "addr2line 0.19.0", "anyhow", "bincode", "cfg-if", @@ -4709,71 +5448,69 @@ dependencies = [ "gimli", "ittapi", "log", - "object", + "object 0.30.4", "rustc-demangle", "serde", "target-lexicon", - "thiserror", "wasmtime-environ", "wasmtime-jit-debug", "wasmtime-jit-icache-coherence", "wasmtime-runtime", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-jit-debug" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe057012a0ba6cee3685af1e923d6e0a6cb9baf15fb3ffa4be3d7f712c7dec42" +checksum = "65fb78eacf4a6e47260d8ef8cc81ea8ddb91397b2e848b3fb01567adebfe89b5" dependencies = [ - "object", + "object 0.30.4", "once_cell", - "rustix 0.35.13", + "rustix 0.37.23", ] [[package]] name = "wasmtime-jit-icache-coherence" -version = "2.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6bbabb309c06cc238ee91b1455b748c45f0bdcab0dda2c2db85b0a1e69fcb66" +checksum = "d1364900b05f7d6008516121e8e62767ddb3e176bdf4c84dfa85da1734aeab79" dependencies = [ "cfg-if", "libc", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-runtime" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09a23b6e138e89594c0189162e524a29e217aec8f9a4e1959a34f74c64e8d17d" +checksum = "4a16ffe4de9ac9669175c0ea5c6c51ffc596dfb49320aaa6f6c57eff58cef069" dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap", + "indexmap 1.9.3", "libc", "log", "mach", "memfd", - "memoffset 0.6.5", + "memoffset 0.8.0", "paste", "rand", - "rustix 0.35.13", - "thiserror", + "rustix 0.37.23", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-fiber", "wasmtime-jit-debug", - "windows-sys 0.36.1", + "windows-sys 0.48.0", ] [[package]] name = "wasmtime-types" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"68ec7615fde8c79737f1345d81f0b18da83b3db929a87b4604f27c932246d1e2" +checksum = "19961c9a3b04d5e766875a5c467f6f5d693f508b3e81f8dc4a1444aa94f041c9" dependencies = [ "cranelift-entity", "serde", @@ -4783,17 +5520,29 @@ dependencies = [ [[package]] name = "wasmtime-wasi" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca539adf155dca1407aa3656e5661bf2364b1f3ebabc7f0a8bd62629d876acfa" +checksum = "21080ff62878f1d7c53d9571053dbe96552c0f982f9f29eac65ea89974fabfd7" dependencies = [ "anyhow", + "libc", "wasi-cap-std-sync", "wasi-common", "wasmtime", "wiggle", ] +[[package]] +name = "wasmtime-wit-bindgen" +version = "9.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "421f0d16cc5c612b35ae53a0be3d3124c72296f18e5be3468263c745d56d37ab" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + [[package]] name = "wast" version = "35.0.2" @@ -4805,9 +5554,9 @@ dependencies = [ [[package]] name = "wast" -version = "59.0.0" +version = "60.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38462178c91e3f990df95f12bf48abe36018e03550a58a65c53975f4e704fc35" +checksum = "bd06cc744b536e30387e72a48fdd492105b9c938bb4f415c39c616a7a0a697ad" dependencies = [ "leb128", "memchr", @@ -4817,18 +5566,18 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.65" +version = "1.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936a025be0417a94d6e9bf92bfdf9e06dbf63debf187b650d9c73a5add701f1" +checksum = "5abe520f0ab205366e9ac7d3e6b2fc71de44e32a2b58f2ec871b6b575bdcea3b" dependencies = [ - "wast 59.0.0", + "wast 60.0.0", ] [[package]] name = "web-sys" -version = "0.3.63" +version = "0.3.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bdd9ef4e984da1187bf8110c5cf5b845fbc87a23602cdf912386a76fcd3a7c2" +checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" dependencies = [ "js-sys", "wasm-bindgen", @@ -4866,9 +5615,9 @@ dependencies = [ [[package]] name = "wiggle" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2da09ca5b8bb9278a2123e8c36342166b9aaa55a0dbab18b231f46d6f6ab85bc" +checksum = "5b34e40b7b17a920d03449ca78b0319984379eed01a9a11c1def9c3d3832d85a" dependencies = [ "anyhow", "async-trait", @@ -4881,9 +5630,9 @@ dependencies = [ [[package]] name = "wiggle-generate" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba5796f53b429df7d44cfdaae8f6d9cd981d82aec3516561352ca9c5e73ee185" +checksum = "9eefda132eaa84fe5f15d23a55a912f8417385aee65d0141d78a3b65e46201ed" dependencies = [ "anyhow", "heck", @@ -4896,9 +5645,9 @@ dependencies = [ [[package]] name = "wiggle-macro" -version = "3.0.1" +version = "9.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b830eb7203d48942fb8bc8bb105f76e7d09c33a082d638e990e02143bb2facd" +checksum = "6ca1a344a0ba781e2a94b27be5bb78f23e43d52336bd663b810d49d7189ad334" dependencies = [ "proc-macro2", "quote", @@ -4943,35 +5692,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" dependencies = [ - "windows-targets 0.48.0", -] - -[[package]] -name = "windows-sys" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" -dependencies = [ - "windows_aarch64_msvc 0.36.1", - "windows_i686_gnu 0.36.1", - "windows_i686_msvc 0.36.1", - "windows_x86_64_gnu 0.36.1", - "windows_x86_64_msvc 0.36.1", -] - -[[package]] -name = "windows-sys" -version = "0.42.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", + "windows-targets 0.48.1", ] [[package]] @@ -4989,7 +5710,7 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets 0.48.0", + "windows-targets 0.48.1", ] [[package]] @@ -5009,9 +5730,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.48.0" +version = "0.48.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b1eb6f0cd7c80c79759c929114ef071b87354ce476d9d94271031c0497adfd5" +checksum = "05d4b17490f70499f20b9e791dcf6a299785ce8af4d709018206dc5b4953e95f" dependencies = [ "windows_aarch64_gnullvm 0.48.0", "windows_aarch64_msvc 0.48.0", @@ -5034,12 +5755,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91ae572e1b79dba883e0d315474df7305d12f569b400fcf90581b06062f7e1bc" -[[package]] -name = "windows_aarch64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" - [[package]] name = "windows_aarch64_msvc" version = "0.42.2" @@ -5052,12 +5767,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2ef27e0d7bdfcfc7b868b317c1d32c641a6fe4629c171b8928c7b08d98d7cf3" -[[package]] -name = "windows_i686_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" - [[package]] name = "windows_i686_gnu" version = "0.42.2" @@ -5070,12 +5779,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "622a1962a7db830d6fd0a69683c80a18fda201879f0f447f065a3b7467daa241" -[[package]] -name = "windows_i686_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" - [[package]] name = "windows_i686_msvc" version = "0.42.2" @@ -5088,12 +5791,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4542c6e364ce21bf45d69fdd2a8e455fa38d316158cfd43b3ac1c5b1b19f8e00" -[[package]] -name = "windows_x86_64_gnu" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" - [[package]] name = "windows_x86_64_gnu" version = "0.42.2" @@ -5118,12 +5815,6 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7896dbc1f41e08872e9d5e8f8baa8fdd2677f29468c4e156210174edc7f7b953" -[[package]] -name = "windows_x86_64_msvc" -version = "0.36.1" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" - [[package]] name = "windows_x86_64_msvc" version = "0.42.2" @@ -5136,6 +5827,15 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a515f5799fe4961cb532f983ce2b23082366b898e52ffbce459c86f67c8378a" +[[package]] +name = "winnow" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81a2094c43cc94775293eaa0e499fbc30048a6d824ac82c0351a8c0bf9112529" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.10.1" @@ -5147,13 +5847,38 @@ dependencies = [ [[package]] name = "winx" -version = "0.33.0" +version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7b01e010390eb263a4518c8cebf86cb67469d1511c00b749a47b64c39e8054d" +checksum = "1c52a121f0fbf9320d5f2a9a5d82f6cb7557eda5e8b47fc3e7f359ec866ae960" dependencies = [ "bitflags 1.3.2", - "io-lifetimes 0.7.5", - "windows-sys 0.36.1", + "io-lifetimes 1.0.11", + "windows-sys 0.48.0", +] + +[[package]] +name = "winx" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4857cedf8371f690bb6782a3e2b065c54d1b6661be068aaf3eac8b45e813fdf8" +dependencies = [ + "bitflags 2.3.3", + "windows-sys 0.48.0", +] + +[[package]] +name = "wit-parser" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca2581061573ef6d1754983d7a9b3ed5871ef859d52708ea9a0f5af32919172" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 1.9.3", + "log", + "pulldown-cmark 0.8.0", + "unicode-xid", + "url", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 846999ef..26e14f1e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,8 @@ members = [ "sqld", "sqld-libsql-bindings", "testing/end-to-end", + "libsqlx", + "libsqlx-server", ] [workspace.dependencies] diff --git a/libsqlx-server/Cargo.toml b/libsqlx-server/Cargo.toml new file mode 100644 index 00000000..243cb0ea --- /dev/null +++ b/libsqlx-server/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "libsqlx-server" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-bincode = { version = "0.7.1", features = ["tokio"] } +async-trait = "0.1.71" +axum = "0.6.18" +base64 = "0.21.2" +bincode = "1.3.3" +bytemuck = { version = "1.13.1", features = ["derive"] } +bytes = { version = "1.4.0", features = ["serde"] } +bytesize = { version = "1.2.0", features = ["serde"] } +chrono = { version = "0.4.26", features = ["serde"] } +clap = { version = "4.3.11", features = ["derive"] } +color-eyre = "0.6.2" +either = "1.8.1" +futures = "0.3.28" +# heed = { version = "0.20.0-alpha.3", features = ["serde-bincode", "sync-read-txn"] } +heed = { git = "https://github.com/MarinPostma/heed.git", rev = "2ae9a14", features = ["serde-bincode", "sync-read-txn"] } +heed-types = { git = "https://github.com/MarinPostma/heed.git", rev = "2ae9a14" } +# heed-types = "0.20.0-alpha.3" +hmac = "0.12.1" +humantime = "2.1.0" +hyper = { version = "0.14.27", features = ["h2", "server"] } +itertools = "0.11.0" +libsqlx = { version = "0.1.0", path = "../libsqlx", features = ["tokio"] } +moka = { version = "0.11.2", features = ["future"] } +parking_lot = "0.12.1" +priority-queue = "1.3.2" +rand = "0.8.5" +regex = "1.9.1" +serde = { version = "1.0.166", features = ["derive", "rc"] } +serde_json = "1.0.100" +sha2 = "0.10.7" +sha3 = "0.10.8" +tempfile = 
"3.6.0" +thiserror = "1.0.43" +tokio = { version = "1.29.1", features = ["full"] } +tokio-stream = "0.1.14" +tokio-util = "0.7.8" +toml = "0.7.6" +tracing = "0.1.37" +tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } +uuid = { version = "1.4.0", features = ["v4", "serde"] } + +[dev-dependencies] +turmoil = "0.5.5" +walkdir = "2.3.3" diff --git a/libsqlx-server/assets/test/simple-log b/libsqlx-server/assets/test/simple-log new file mode 100644 index 00000000..0cf5b053 Binary files /dev/null and b/libsqlx-server/assets/test/simple-log differ diff --git a/libsqlx-server/src/allocation/config.rs b/libsqlx-server/src/allocation/config.rs new file mode 100644 index 00000000..13de097d --- /dev/null +++ b/libsqlx-server/src/allocation/config.rs @@ -0,0 +1,35 @@ +use std::time::Duration; + +use serde::{Deserialize, Serialize}; + +use crate::linc::NodeId; + +/// Structural supertype of AllocConfig, used for checking the meta version. Subsequent version of +/// AllocConfig need to conform to this prototype. +#[derive(Debug, Serialize, Deserialize)] +struct ConfigVersion { + config_version: u32, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct AllocConfig { + pub max_conccurent_connection: u32, + pub db_name: String, + pub db_config: DbConfig, +} + +#[derive(Debug, Serialize, Deserialize)] +pub enum DbConfig { + Primary { + /// maximum size the replication log is allowed to grow, before it is compacted. + max_log_size: usize, + /// Interval at which to force compaction + replication_log_compact_interval: Option, + transaction_timeout_duration: Duration, + }, + Replica { + primary_node_id: NodeId, + proxy_request_timeout_duration: Duration, + transaction_timeout_duration: Duration, + }, +} diff --git a/libsqlx-server/src/allocation/mod.rs b/libsqlx-server/src/allocation/mod.rs new file mode 100644 index 00000000..20be7cc4 --- /dev/null +++ b/libsqlx-server/src/allocation/mod.rs @@ -0,0 +1,705 @@ +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::future::poll_fn; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{ready, Context, Poll}; +use std::time::{Duration, Instant}; + +use either::Either; +use libsqlx::libsql::LibsqlDatabase; +use libsqlx::program::Program; +use libsqlx::proxy::WriteProxyDatabase; +use libsqlx::result_builder::ResultBuilder; +use libsqlx::{Database as _, InjectableDatabase}; +use tokio::sync::{mpsc, oneshot}; +use tokio::task::{block_in_place, JoinSet}; +use tokio::time::Interval; + +use crate::allocation::primary::FrameStreamer; +use crate::allocation::timeout_notifier::timeout_monitor; +use crate::compactor::CompactionQueue; +use crate::error::Error; +use crate::hrana::proto::DescribeResult; +use crate::linc::bus::Dispatch; +use crate::linc::proto::{Frames, Message}; +use crate::linc::{Inbound, NodeId}; +use crate::meta::DatabaseId; +use crate::replica_commit_store::ReplicaCommitStore; + +use self::config::{AllocConfig, DbConfig}; +use self::primary::compactor::Compactor; +use self::primary::{PrimaryConnection, PrimaryDatabase, ProxyResponseBuilder}; +use self::replica::{ProxyDatabase, RemoteDb, ReplicaConnection, Replicator}; +use self::timeout_notifier::TimeoutMonitor; + +pub mod config; +mod primary; +mod replica; +mod timeout_notifier; + +/// Maximum number of frame a Frame message is allowed to contain +const FRAMES_MESSAGE_MAX_COUNT: usize = 5; +/// Maximum number of frames in the injector buffer +const MAX_INJECTOR_BUFFER_CAPACITY: usize = 32; + +pub enum ConnectionMessage { + 
diff --git a/libsqlx-server/src/allocation/mod.rs b/libsqlx-server/src/allocation/mod.rs
new file mode 100644
index 00000000..20be7cc4
--- /dev/null
+++ b/libsqlx-server/src/allocation/mod.rs
@@ -0,0 +1,705 @@
+use std::collections::hash_map::Entry;
+use std::collections::HashMap;
+use std::future::poll_fn;
+use std::path::PathBuf;
+use std::pin::Pin;
+use std::sync::Arc;
+use std::task::{ready, Context, Poll};
+use std::time::{Duration, Instant};
+
+use either::Either;
+use libsqlx::libsql::LibsqlDatabase;
+use libsqlx::program::Program;
+use libsqlx::proxy::WriteProxyDatabase;
+use libsqlx::result_builder::ResultBuilder;
+use libsqlx::{Database as _, InjectableDatabase};
+use tokio::sync::{mpsc, oneshot};
+use tokio::task::{block_in_place, JoinSet};
+use tokio::time::Interval;
+
+use crate::allocation::primary::FrameStreamer;
+use crate::allocation::timeout_notifier::timeout_monitor;
+use crate::compactor::CompactionQueue;
+use crate::error::Error;
+use crate::hrana::proto::DescribeResult;
+use crate::linc::bus::Dispatch;
+use crate::linc::proto::{Frames, Message};
+use crate::linc::{Inbound, NodeId};
+use crate::meta::DatabaseId;
+use crate::replica_commit_store::ReplicaCommitStore;
+
+use self::config::{AllocConfig, DbConfig};
+use self::primary::compactor::Compactor;
+use self::primary::{PrimaryConnection, PrimaryDatabase, ProxyResponseBuilder};
+use self::replica::{ProxyDatabase, RemoteDb, ReplicaConnection, Replicator};
+use self::timeout_notifier::TimeoutMonitor;
+
+pub mod config;
+mod primary;
+mod replica;
+mod timeout_notifier;
+
+/// Maximum number of frames a Frames message is allowed to contain
+const FRAMES_MESSAGE_MAX_COUNT: usize = 5;
+/// Maximum number of frames in the injector buffer
+const MAX_INJECTOR_BUFFER_CAPACITY: usize = 32;
+
+pub enum ConnectionMessage {
+    Execute {
+        pgm: Program,
+        builder: Box<dyn ResultBuilder>,
+    },
+    Describe,
+}
+
+pub enum AllocationMessage {
+    Connect {
+        ret: oneshot::Sender<crate::Result<ConnectionHandle>>,
+    },
+    Inbound(Inbound),
+}
+
+pub enum Database {
+    Primary {
+        db: PrimaryDatabase,
+        compact_interval: Option<Pin<Box<Interval>>>,
+        transaction_timeout_duration: Duration,
+    },
+    Replica {
+        db: ProxyDatabase,
+        injector_handle: mpsc::Sender<Frames>,
+        primary_id: NodeId,
+        last_received_frame_ts: Option<Instant>,
+        transaction_timeout_duration: Duration,
+    },
+}
+
+impl Database {
+    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+        if let Self::Primary {
+            compact_interval: Some(ref mut interval),
+            db,
+            ..
+        } = self
+        {
+            ready!(interval.poll_tick(cx));
+            tracing::debug!("attempting periodic log compaction");
+            let db = db.db.clone();
+            tokio::task::spawn_blocking(move || {
+                db.compact_log();
+            });
+            return Poll::Ready(());
+        }
+
+        Poll::Pending
+    }
+
+    fn txn_timeout_duration(&self) -> Duration {
+        match self {
+            Database::Primary {
+                transaction_timeout_duration,
+                ..
+            } => *transaction_timeout_duration,
+            Database::Replica {
+                transaction_timeout_duration,
+                ..
+            } => *transaction_timeout_duration,
+        }
+    }
+}
+
+impl Database {
+    pub fn from_config(
+        config: &AllocConfig,
+        path: PathBuf,
+        dispatcher: Arc<dyn Dispatch>,
+        compaction_queue: Arc<CompactionQueue>,
+        replica_commit_store: Arc<ReplicaCommitStore>,
+    ) -> crate::Result<Self> {
+        let database_id = DatabaseId::from_name(&config.db_name);
+
+        match config.db_config {
+            DbConfig::Primary {
+                max_log_size,
+                replication_log_compact_interval,
+                transaction_timeout_duration,
+            } => {
+                let (sender, receiver) = tokio::sync::watch::channel(None);
+                let db = LibsqlDatabase::new_primary(
+                    path,
+                    Compactor::new(
+                        max_log_size,
+                        replication_log_compact_interval,
+                        compaction_queue.clone(),
+                        database_id,
+                    ),
+                    false,
+                    Box::new(move |fno| {
+                        let _ = sender.send(Some(fno));
+                    }),
+                )?;
+
+                let compact_interval = replication_log_compact_interval.map(|d| {
+                    let mut i = tokio::time::interval(d / 2);
+                    i.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
+                    Box::pin(i)
+                });
+
+                Ok(Self::Primary {
+                    db: PrimaryDatabase {
+                        db: Arc::new(db),
+                        replica_streams: HashMap::new(),
+                        frame_notifier: receiver,
+                        snapshot_store: compaction_queue.snapshot_store.clone(),
+                    },
+                    compact_interval,
+                    transaction_timeout_duration,
+                })
+            }
+            DbConfig::Replica {
+                primary_node_id,
+                proxy_request_timeout_duration,
+                transaction_timeout_duration,
+            } => {
+                let next_frame_no =
+                    block_in_place(|| replica_commit_store.get_commit_index(database_id))?
+                        .map(|fno| fno + 1)
+                        .unwrap_or(0);
+
+                let commit_callback =
+                    Arc::new(move |fno| replica_commit_store.commit(database_id, fno).is_ok());
+
+                let rdb = LibsqlDatabase::new_replica(
+                    path,
+                    MAX_INJECTOR_BUFFER_CAPACITY,
+                    commit_callback,
+                )?;
+
+                let wdb = RemoteDb {
+                    proxy_request_timeout_duration,
+                };
+                let db = WriteProxyDatabase::new(rdb, wdb, Arc::new(|_| ()));
+                let injector = db.injector()?;
+                let (sender, receiver) = mpsc::channel(16);
+
+                let replicator = Replicator::new(
+                    dispatcher,
+                    next_frame_no,
+                    database_id,
+                    primary_node_id,
+                    injector,
+                    receiver,
+                );
+
+                tokio::spawn(replicator.run());
+
+                Ok(Self::Replica {
+                    db,
+                    injector_handle: sender,
+                    primary_id: primary_node_id,
+                    last_received_frame_ts: None,
+                    transaction_timeout_duration,
+                })
+            }
+        }
+    }
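+
+    // A note on the resume logic above, with an illustrative sketch (the
+    // surrounding code is hypothetical; the store and call are the ones used in
+    // from_config): a replica persists the index of the last frame it durably
+    // committed, and replication restarts at the frame right after it.
+    //
+    // let next_frame_no = match replica_commit_store.get_commit_index(database_id)? {
+    //     Some(fno) => fno + 1, // resume after the last committed frame
+    //     None => 0,            // fresh replica: start from the beginning of the log
+    // };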
+    fn connect(
+        &self,
+        connection_id: u32,
+        alloc: &Allocation,
+        on_txn_status_change_cb: impl Fn(bool) + Send + Sync + 'static,
+    ) -> crate::Result<impl ConnectionHandler> {
+        match self {
+            Database::Primary {
+                db: PrimaryDatabase { db, .. },
+                ..
+            } => {
+                let mut conn = db.connect()?;
+                conn.set_on_txn_status_change_cb(on_txn_status_change_cb);
+                Ok(Either::Right(PrimaryConnection { conn }))
+            }
+            Database::Replica { db, primary_id, .. } => {
+                let mut conn = db.connect()?;
+                conn.reader_mut()
+                    .set_on_txn_status_change_cb(on_txn_status_change_cb);
+                Ok(Either::Left(ReplicaConnection {
+                    conn,
+                    connection_id,
+                    next_req_id: 0,
+                    primary_node_id: *primary_id,
+                    database_id: DatabaseId::from_name(&alloc.db_name),
+                    dispatcher: alloc.dispatcher.clone(),
+                }))
+            }
+        }
+    }
+
+    pub fn is_primary(&self) -> bool {
+        matches!(self, Self::Primary { .. })
+    }
+}
+
+pub struct Allocation {
+    pub inbox: mpsc::Receiver<AllocationMessage>,
+    pub database: Database,
+    /// spawned connection futures, returning their connection id on completion.
+    pub connections_futs: JoinSet<(NodeId, u32)>,
+    pub next_conn_id: u32,
+    pub max_concurrent_connections: u32,
+    pub connections: HashMap<NodeId, HashMap<u32, ConnectionHandle>>,
+
+    /// handle to the message bus
+    pub dispatcher: Arc<dyn Dispatch>,
+    pub db_name: String,
+}
+
+#[derive(Clone)]
+pub struct ConnectionHandle {
+    messages: mpsc::Sender<ConnectionMessage>,
+    inbound: mpsc::Sender<Inbound>,
+}
+
+impl ConnectionHandle {
+    pub async fn execute(&self, pgm: Program, builder: Box<dyn ResultBuilder>) {
+        let msg = ConnectionMessage::Execute { pgm, builder };
+        if let Err(e) = self.messages.send(msg).await {
+            let ConnectionMessage::Execute { mut builder, .. } = e.0 else { unreachable!() };
+            builder.finnalize_error("connection closed".to_string());
+        }
+    }
+
+    pub async fn describe(&self, sql: String) -> crate::Result<DescribeResult> {
+        todo!()
+    }
+}
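+
+// Illustrative usage of ConnectionHandle (a sketch; the calling context is
+// assumed, but Program::seq and StepResultsBuilder are used the same way in
+// the tests at the bottom of this file):
+//
+// let (snd, rcv) = oneshot::channel();
+// let builder = StepResultsBuilder::new(snd);
+// handle
+//     .execute(Program::seq(&["insert into t values (42)"]), Box::new(builder))
+//     .await;
+// rcv.await.unwrap().unwrap(); // resolves once every step has run, or errors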
+impl Allocation {
+    pub async fn run(mut self) {
+        loop {
+            let fut = poll_fn(|cx| self.database.poll(cx));
+            tokio::select! {
+                _ = fut => (),
+                Some(msg) = self.inbox.recv() => {
+                    match msg {
+                        AllocationMessage::Connect { ret } => {
+                            let _ = ret.send(self.new_conn(None).await);
+                        }
+                        AllocationMessage::Inbound(msg) => {
+                            if let Err(e) = self.handle_inbound(msg).await {
+                                tracing::error!("allocation loop finished with error: {e}");
+                                return
+                            }
+                        }
+                    }
+                },
+                maybe_id = self.connections_futs.join_next(), if !self.connections_futs.is_empty() => {
+                    if let Some(Ok((node_id, conn_id))) = maybe_id {
+                        self.connections.get_mut(&node_id).map(|m| m.remove(&conn_id));
+                    }
+                },
+                else => break,
+            }
+        }
+    }
+
+    async fn handle_inbound(&mut self, msg: Inbound) -> crate::Result<()> {
+        debug_assert_eq!(
+            msg.enveloppe.database_id,
+            Some(DatabaseId::from_name(&self.db_name))
+        );
+
+        match msg.enveloppe.message {
+            Message::Handshake { .. } => unreachable!("handshake should have been caught earlier"),
+            Message::ReplicationHandshake { .. } => todo!(),
+            Message::ReplicationHandshakeResponse { .. } => todo!(),
+            Message::Replicate {
+                req_no,
+                next_frame_no,
+            } => match &mut self.database {
+                Database::Primary {
+                    db:
+                        PrimaryDatabase {
+                            db,
+                            replica_streams,
+                            frame_notifier,
+                            snapshot_store,
+                            ..
+                        },
+                    ..
+                } => {
+                    let streamer = FrameStreamer {
+                        logger: db.logger(),
+                        database_id: DatabaseId::from_name(&self.db_name),
+                        node_id: msg.from,
+                        next_frame_no,
+                        req_no,
+                        seq_no: 0,
+                        dipatcher: self.dispatcher.clone() as _,
+                        notifier: frame_notifier.clone(),
+                        buffer: Vec::new(),
+                        snapshot_store: snapshot_store.clone(),
+                    };
+
+                    match replica_streams.entry(msg.from) {
+                        Entry::Occupied(mut e) => {
+                            let (old_req_no, old_handle) = e.get_mut();
+                            // ignore req_no older than the current req_no
+                            if *old_req_no < req_no {
+                                let handle = tokio::spawn(streamer.run());
+                                let old_handle = std::mem::replace(old_handle, handle);
+                                *old_req_no = req_no;
+                                old_handle.abort();
+                            }
+                        }
+                        Entry::Vacant(e) => {
+                            let handle = tokio::spawn(streamer.run());
+                            // For some reason, yielding here is necessary for the task to start running
+                            tokio::task::yield_now().await;
+                            e.insert((req_no, handle));
+                        }
+                    }
+                }
+                Database::Replica { .. } => todo!("not a primary!"),
+            },
+            Message::Frames(frames) => {
+                if let Database::Replica {
+                    injector_handle,
+                    last_received_frame_ts,
+                    ..
+                } = &mut self.database
+                {
+                    *last_received_frame_ts = Some(Instant::now());
+                    if injector_handle.send(frames).await.is_err() {
+                        return Err(Error::InjectorExited);
+                    }
+                }
+            }
+            Message::ProxyRequest {
+                connection_id,
+                req_id,
+                program,
+            } => {
+                self.handle_proxy(msg.from, connection_id, req_id, program)
+                    .await
+            }
+            Message::ProxyResponse(ref r) => {
+                if let Some(conn) = self
+                    .connections
+                    .get(&self.dispatcher.node_id())
+                    .and_then(|m| m.get(&r.connection_id).cloned())
+                {
+                    if conn.inbound.send(msg).await.is_err() {
+                        tracing::error!("cannot process message: connection is closed");
+                    }
+                }
+            }
+            Message::CancelRequest { .. } => todo!(),
+            Message::CloseConnection { .. } => todo!(),
+            Message::Error(_) => todo!(),
+        }
+
+        Ok(())
+    }
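+
+    // The proxy round-trip handled above and below, summarized: a replica-side
+    // connection forwards a write to the primary as a ProxyRequest; handle_proxy
+    // executes it on the matching connection (creating one on demand), and the
+    // results come back as ProxyResponse messages routed to the originating
+    // connection's inbox. The request carries (fields as matched above):
+    //
+    // Message::ProxyRequest {
+    //     connection_id, // replica-side connection the request belongs to
+    //     req_id,        // request number, used to pair up the response
+    //     program,       // the SQL program to execute on the primary
+    // }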
+    async fn handle_proxy(
+        &mut self,
+        to: NodeId,
+        connection_id: u32,
+        req_id: u32,
+        program: Program,
+    ) {
+        let dispatcher = self.dispatcher.clone();
+        let database_id = DatabaseId::from_name(&self.db_name);
+        let mut builder =
+            ProxyResponseBuilder::new(dispatcher, database_id, to, req_id, connection_id);
+
+        if self.database.is_primary() {
+            match self
+                .connections
+                .get(&to)
+                .and_then(|m| m.get(&connection_id).cloned())
+            {
+                Some(handle) => {
+                    tokio::spawn(async move { handle.execute(program, Box::new(builder)).await });
+                }
+                None => match self.new_conn(Some((to, connection_id))).await {
+                    Ok(handle) => {
+                        tokio::spawn(
+                            async move { handle.execute(program, Box::new(builder)).await },
+                        );
+                    }
+                    Err(e) => builder.finnalize_error(format!("error creating connection: {e}")),
+                },
+            }
+        }
+    }
+
+    async fn new_conn(&mut self, remote: Option<(NodeId, u32)>) -> crate::Result<ConnectionHandle> {
+        let conn_id = self.next_conn_id();
+        let (timeout_monitor, notifier) = timeout_monitor();
+        let timeout = self.database.txn_timeout_duration();
+        let conn = block_in_place(|| {
+            self.database.connect(conn_id, self, move |is_txn| {
+                if is_txn {
+                    notifier.timeout_at(Instant::now() + timeout);
+                } else {
+                    notifier.disable();
+                }
+            })
+        })?;
+
+        let (messages_sender, messages_receiver) = mpsc::channel(1);
+        let (inbound_sender, inbound_receiver) = mpsc::channel(1);
+        let id = remote.unwrap_or((self.dispatcher.node_id(), conn_id));
+        let conn = Connection {
+            id,
+            conn,
+            messages: messages_receiver,
+            inbound: inbound_receiver,
+            last_txn_timedout: false,
+            timeout_monitor,
+        };
+
+        self.connections_futs.spawn(conn.run());
+
+        let handle = ConnectionHandle {
+            messages: messages_sender,
+            inbound: inbound_sender,
+        };
+
+        self.connections
+            .entry(id.0)
+            .or_insert_with(HashMap::new)
+            .insert(id.1, handle.clone());
+
+        Ok(handle)
+    }
+
+    fn next_conn_id(&mut self) -> u32 {
+        loop {
+            self.next_conn_id = self.next_conn_id.wrapping_add(1);
+            if self
+                .connections
+                .get(&self.dispatcher.node_id())
+                .and_then(|m| m.get(&self.next_conn_id))
+                .is_none()
+            {
+                return self.next_conn_id;
+            }
+        }
+    }
+}
+
+#[async_trait::async_trait]
+trait ConnectionHandler: Send {
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()>;
+    async fn handle_conn_message(&mut self, exec: ConnectionMessage);
+    async fn handle_inbound(&mut self, msg: Inbound);
+}
+
+#[async_trait::async_trait]
+impl<L, R> ConnectionHandler for Either<L, R>
+where
+    L: ConnectionHandler,
+    R: ConnectionHandler,
+{
+    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> {
+        match self {
+            Either::Left(l) => l.poll_ready(cx),
+            Either::Right(r) => r.poll_ready(cx),
+        }
+    }
+
+    async fn handle_conn_message(&mut self, msg: ConnectionMessage) {
+        match self {
+            Either::Left(l) => l.handle_conn_message(msg).await,
+            Either::Right(r) => r.handle_conn_message(msg).await,
+        }
+    }
+    async fn handle_inbound(&mut self, msg: Inbound) {
+        match self {
+            Either::Left(l) => l.handle_inbound(msg).await,
+            Either::Right(r) => r.handle_inbound(msg).await,
+        }
+    }
+}
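+
+// How the transaction timeout is enforced, in short (a summary of new_conn and
+// Connection::run below; the two-line sketch is illustrative): the callback
+// installed on the connection arms the monitor whenever a transaction opens
+// and disarms it when the transaction ends.
+//
+// notifier.timeout_at(Instant::now() + timeout); // txn opened: arm the timer
+// notifier.disable();                            // txn ended: disarm it
+//
+// Connection::run polls the monitor; once it fires, the next Execute on that
+// connection is failed with "transaction has timed out" instead of running.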
} => { + let _ = builder.finnalize_error("transaction has timed out".into()); + }, + ConnectionMessage::Describe => todo!(), + } + } else { + self.conn.handle_conn_message(msg).await; + } + }, + else => break, + } + } + + tracing::debug!("connection exited: {:?}", self.id); + + self.id + } +} + +#[cfg(test)] +mod test { + use std::time::Duration; + + use heed::EnvOpenOptions; + use libsqlx::result_builder::{ResultBuilder, StepResultsBuilder}; + use tempfile::tempdir; + use tokio::sync::Notify; + + use crate::allocation::replica::ReplicaConnection; + use crate::init_dirs; + use crate::linc::bus::Bus; + use crate::replica_commit_store::ReplicaCommitStore; + use crate::snapshot_store::SnapshotStore; + + use super::*; + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn proxy_request_timeout() { + let bus = Arc::new(Bus::new(0, |_, _| async {})); + let _queue = bus.connect(1); // pretend connection to node 1 + let tmp = tempfile::TempDir::new().unwrap(); + let read_db = + LibsqlDatabase::new_replica(tmp.path().to_path_buf(), 1, Arc::new(|_| ())).unwrap(); + let write_db = RemoteDb { + proxy_request_timeout_duration: Duration::from_millis(100), + }; + let db = WriteProxyDatabase::new(read_db, write_db, Arc::new(|_| ())); + let conn = db.connect().unwrap(); + let conn = ReplicaConnection { + conn, + connection_id: 0, + next_req_id: 0, + primary_node_id: 1, + database_id: DatabaseId::random(), + dispatcher: bus, + }; + + let (messages_sender, messages) = mpsc::channel(1); + let (_inbound_sender, inbound) = mpsc::channel(1); + let (timeout_monitor, _) = timeout_monitor(); + let connection = Connection { + id: (0, 0), + conn, + messages, + inbound, + timeout_monitor, + last_txn_timedout: false, + }; + + let handle = tokio::spawn(connection.run()); + + let notify = Arc::new(Notify::new()); + struct Builder(Arc); + impl ResultBuilder for Builder { + fn finnalize_error(&mut self, _e: String) { + self.0.notify_waiters() + } + } + + let builder = Box::new(Builder(notify.clone())); + let msg = ConnectionMessage::Execute { + pgm: Program::seq(&["create table test (c)"]), + builder, + }; + messages_sender.send(msg).await.unwrap(); + + notify.notified().await; + + handle.abort(); + } + + #[tokio::test(flavor = "multi_thread", worker_threads = 1)] + async fn txn_timeout() { + let bus = Arc::new(Bus::new(0, |_, _| async {})); + let tmp = tempdir().unwrap(); + init_dirs(tmp.path()).await.unwrap(); + let config = AllocConfig { + max_conccurent_connection: 10, + db_name: "test/db".to_owned(), + db_config: DbConfig::Primary { + max_log_size: 100000, + replication_log_compact_interval: None, + transaction_timeout_duration: Duration::from_millis(100), + }, + }; + let (_sender, inbox) = mpsc::channel(10); + let env = EnvOpenOptions::new() + .max_dbs(10) + .map_size(4096 * 100) + .open(tmp.path()) + .unwrap(); + let store = Arc::new(SnapshotStore::new(tmp.path().to_path_buf(), env.clone()).unwrap()); + let queue = + Arc::new(CompactionQueue::new(env.clone(), tmp.path().to_path_buf(), store).unwrap()); + let replica_commit_store = Arc::new(ReplicaCommitStore::new(env.clone())); + let mut alloc = Allocation { + inbox, + database: Database::from_config( + &config, + tmp.path().to_path_buf(), + bus.clone(), + queue, + replica_commit_store, + ) + .unwrap(), + connections_futs: JoinSet::new(), + next_conn_id: 0, + max_concurrent_connections: config.max_conccurent_connection, + hrana_server: Arc::new(hrana::http::Server::new(None)), + dispatcher: bus, + db_name: config.db_name, + connections: 
HashMap::new(), + }; + + let conn = alloc.new_conn(None).await.unwrap(); + tokio::spawn(alloc.run()); + + let (snd, rcv) = oneshot::channel(); + let builder = StepResultsBuilder::new(snd); + conn.execute(Program::seq(&["begin"]), Box::new(builder)) + .await; + rcv.await.unwrap().unwrap(); + + tokio::time::sleep(Duration::from_secs(1)).await; + + let (snd, rcv) = oneshot::channel(); + let builder = StepResultsBuilder::new(snd); + conn.execute(Program::seq(&["create table test (x)"]), Box::new(builder)) + .await; + assert!(rcv.await.unwrap().is_err()); + } +} diff --git a/libsqlx-server/src/allocation/primary/compactor.rs b/libsqlx-server/src/allocation/primary/compactor.rs new file mode 100644 index 00000000..3ee2aca4 --- /dev/null +++ b/libsqlx-server/src/allocation/primary/compactor.rs @@ -0,0 +1,68 @@ +use std::{ + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; + +use libsqlx::libsql::{LogCompactor, LogFile}; +use uuid::Uuid; + +use crate::{ + compactor::{CompactionJob, CompactionQueue}, + meta::DatabaseId, +}; + +pub struct Compactor { + max_log_size: usize, + last_compacted_at: Instant, + compact_interval: Option<Duration>, + queue: Arc<CompactionQueue>, + database_id: DatabaseId, +} + +impl Compactor { + pub fn new( + max_log_size: usize, + compact_interval: Option<Duration>, + queue: Arc<CompactionQueue>, + database_id: DatabaseId, + ) -> Self { + Self { + max_log_size, + last_compacted_at: Instant::now(), + compact_interval, + queue, + database_id, + } + } +} + +impl LogCompactor for Compactor { + fn should_compact(&self, log: &LogFile) -> bool { + let mut should_compact = false; + if let Some(compact_interval) = self.compact_interval { + should_compact |= self.last_compacted_at.elapsed() >= compact_interval + } + + should_compact |= log.size() >= self.max_log_size; + + should_compact + } + + fn compact( + &mut self, + log_id: Uuid, + ) -> Result<(), Box<dyn std::error::Error + Sync + Send + 'static>> { + self.last_compacted_at = Instant::now(); + self.queue.push(&CompactionJob { + database_id: self.database_id, + log_id, + })?; + + Ok(()) + } + + fn snapshot_dir(&self) -> PathBuf { + self.queue.snapshot_queue_dir() + } +} diff --git a/libsqlx-server/src/allocation/primary/mod.rs b/libsqlx-server/src/allocation/primary/mod.rs new file mode 100644 index 00000000..63e60e61 --- /dev/null +++ b/libsqlx-server/src/allocation/primary/mod.rs @@ -0,0 +1,354 @@ +use std::collections::HashMap; +use std::mem::size_of; +use std::sync::Arc; +use std::task::{Context, Poll}; +use std::time::Duration; + +use bytes::Bytes; +use libsqlx::libsql::{LibsqlDatabase, PrimaryType}; +use libsqlx::result_builder::{QueryResultBuilderError, ResultBuilder}; +use libsqlx::{Connection, Frame, FrameHeader, FrameNo, LogReadError, ReplicationLogger}; +use tokio::task::block_in_place; + +use crate::linc::bus::Dispatch; +use crate::linc::proto::{BuilderStep, Enveloppe, Frames, Message, StepError, Value}; +use crate::linc::{Inbound, NodeId, Outbound}; +use crate::meta::DatabaseId; +use crate::snapshot_store::SnapshotStore; + +use super::{ConnectionHandler, ConnectionMessage, FRAMES_MESSAGE_MAX_COUNT}; + +pub mod compactor; + +const MAX_STEP_BATCH_SIZE: usize = 100_000_000; // ~100MB + +pub struct PrimaryDatabase { + pub db: Arc<LibsqlDatabase<PrimaryType>>, + pub replica_streams: HashMap<NodeId, (u32, tokio::task::JoinHandle<()>)>, + pub frame_notifier: tokio::sync::watch::Receiver<Option<FrameNo>>, + pub snapshot_store: Arc<SnapshotStore>, +} + +pub struct ProxyResponseBuilder { + dispatcher: Arc<dyn Dispatch>, + buffer: Vec<BuilderStep>, + database_id: DatabaseId, + to: NodeId, + req_id: u32, + connection_id: u32, + next_seq_no: u32, +} + +impl ProxyResponseBuilder { + pub fn new( + dispatcher: Arc<dyn Dispatch>, + database_id: 
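`should_compact` above combines its two triggers with a logical OR: compaction fires when the optional wall-clock interval has elapsed since the last compaction, or when the log file exceeds `max_log_size`. The same decision, extracted as a hypothetical pure function (not part of this patch):

```rust
use std::time::Duration;

fn should_compact(
    elapsed_since_last: Duration,
    compact_interval: Option<Duration>,
    log_size: usize,
    max_log_size: usize,
) -> bool {
    let interval_elapsed = compact_interval
        .map(|i| elapsed_since_last >= i)
        .unwrap_or(false);
    interval_elapsed || log_size >= max_log_size
}

fn main() {
    // The size threshold alone can trigger compaction...
    assert!(should_compact(Duration::ZERO, None, 2_000, 1_000));
    // ...and so can the elapsed interval, even for a small log.
    assert!(should_compact(
        Duration::from_secs(60),
        Some(Duration::from_secs(30)),
        10,
        1_000
    ));
    // Neither condition met: no compaction.
    assert!(!should_compact(
        Duration::from_secs(1),
        Some(Duration::from_secs(30)),
        10,
        1_000
    ));
}
```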
DatabaseId, + to: NodeId, + req_id: u32, + connection_id: u32, + ) -> Self { + Self { + dispatcher, + buffer: Vec::new(), + database_id, + to, + req_id, + connection_id, + next_seq_no: 0, + } + } + + fn maybe_send(&mut self) -> crate::Result<()> { + // FIXME: this is stupid: compute current buffer size on the go instead + let size = self + .buffer + .iter() + .map(|s| match s { + BuilderStep::FinishStep(_, _) => 2 * 8, + BuilderStep::StepError(StepError(s)) => s.len(), + BuilderStep::ColsDesc(ref d) => d + .iter() + .map(|c| c.name.len() + c.decl_ty.as_ref().map(|t| t.len()).unwrap_or_default()) + .sum(), + BuilderStep::Finnalize { .. } => 9, + BuilderStep::AddRowValue(v) => match v { + crate::linc::proto::Value::Text(s) | crate::linc::proto::Value::Blob(s) => { + s.len() + } + _ => size_of::(), + }, + _ => 8, + }) + .sum::(); + + if size > MAX_STEP_BATCH_SIZE { + self.send()?; + } + + Ok(()) + } + + fn send(&mut self) -> crate::Result<()> { + let msg = Outbound { + to: self.to, + enveloppe: Enveloppe { + database_id: Some(self.database_id), + message: Message::ProxyResponse(crate::linc::proto::ProxyResponse { + connection_id: self.connection_id, + req_id: self.req_id, + row_steps: std::mem::take(&mut self.buffer), + seq_no: self.next_seq_no, + }), + }, + }; + + self.next_seq_no += 1; + tokio::runtime::Handle::current().block_on(self.dispatcher.dispatch(msg))?; + + Ok(()) + } +} + +impl ResultBuilder for ProxyResponseBuilder { + fn init( + &mut self, + _config: &libsqlx::result_builder::QueryBuilderConfig, + ) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::Init); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::BeginStep); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn finish_step( + &mut self, + affected_row_count: u64, + last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::FinishStep( + affected_row_count, + last_insert_rowid, + )); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn step_error(&mut self, error: libsqlx::error::Error) -> Result<(), QueryResultBuilderError> { + self.buffer + .push(BuilderStep::StepError(StepError(error.to_string()))); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn cols_description( + &mut self, + cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + self.buffer + .push(BuilderStep::ColsDesc(cols.map(Into::into).collect())); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::BeginRows); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::BeginRow); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn add_row_value( + &mut self, + v: libsqlx::result_builder::ValueRef, + ) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::AddRowValue(v.into())); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::FinishRow); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; 
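One way to address the FIXME in `maybe_send` above is to maintain the buffered payload size incrementally instead of re-scanning the whole buffer on every push. A sketch under that assumption; `SizedBuffer` and its methods are illustrative names, not part of the patch:

```rust
const MAX_STEP_BATCH_SIZE: usize = 100_000_000;

struct SizedBuffer<T> {
    items: Vec<T>,
    bytes: usize,
}

impl<T> SizedBuffer<T> {
    fn new() -> Self {
        Self { items: Vec::new(), bytes: 0 }
    }

    /// Push one step along with its approximate encoded size.
    /// Returns `true` when the caller should flush and send the batch.
    fn push(&mut self, item: T, approx_size: usize) -> bool {
        self.items.push(item);
        self.bytes += approx_size;
        self.bytes > MAX_STEP_BATCH_SIZE
    }

    /// Take the batch and reset the running size.
    fn drain(&mut self) -> Vec<T> {
        self.bytes = 0;
        std::mem::take(&mut self.items)
    }
}
```

Each step's size is computed once, at push time, so the cost of `maybe_send` no longer grows with the buffer length.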
+ Ok(()) + } + + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.buffer.push(BuilderStep::FinishRows); + self.maybe_send() + .map_err(QueryResultBuilderError::from_any)?; + Ok(()) + } + + fn finnalize( + &mut self, + is_txn: bool, + frame_no: Option<FrameNo>, + ) -> Result<bool, QueryResultBuilderError> { + self.buffer + .push(BuilderStep::Finnalize { is_txn, frame_no }); + self.send().map_err(QueryResultBuilderError::from_any)?; + Ok(true) + } +} + +pub struct FrameStreamer { + pub logger: Arc<ReplicationLogger>, + pub database_id: DatabaseId, + pub node_id: NodeId, + pub next_frame_no: FrameNo, + pub req_no: u32, + pub seq_no: u32, + pub dipatcher: Arc<dyn Dispatch>, + pub notifier: tokio::sync::watch::Receiver<Option<FrameNo>>, + pub buffer: Vec<Bytes>, + pub snapshot_store: Arc<SnapshotStore>, +} + +impl FrameStreamer { + pub async fn run(mut self) { + loop { + match block_in_place(|| self.logger.get_frame(self.next_frame_no)) { + Ok(frame) => { + if self.buffer.len() > FRAMES_MESSAGE_MAX_COUNT { + self.send_frames().await; + } + self.buffer.push(frame.bytes()); + self.next_frame_no += 1; + } + Err(LogReadError::Ahead) => { + tracing::debug!("frame {} not yet available", self.next_frame_no); + if !self.buffer.is_empty() { + self.send_frames().await; + } + if self + .notifier + .wait_for(|fno| fno.map(|f| f >= self.next_frame_no).unwrap_or(false)) + .await + .is_err() + { + break; + } + } + Err(LogReadError::Error(_)) => todo!("handle log read error"), + Err(LogReadError::SnapshotRequired) => { + if let Err(e) = self.send_snapshot().await { + tracing::error!("error sending snapshot: {e}"); + break; + } + } + } + } + } + + async fn send_snapshot(&mut self) -> crate::Result<()> { + tracing::debug!("sending frames from snapshot"); + loop { + match self + .snapshot_store + .locate_file(self.database_id, self.next_frame_no)? + { + Some(file) => { + let mut iter = file.frames_iter_from(self.next_frame_no).peekable(); + + while let Some(frame) = block_in_place(|| iter.next()) { + let frame = frame?; + // TODO: factorize in maybe_send + if self.buffer.len() > FRAMES_MESSAGE_MAX_COUNT { + self.send_frames().await?; + } + let size_after = iter + .peek() + .is_none() + .then_some(file.header.size_after) + .unwrap_or(0); + let frame = Frame::from_parts( + &FrameHeader { + frame_no: frame.header().frame_no, + page_no: frame.header().page_no, + size_after, + }, + frame.page(), + ); + self.next_frame_no = frame.header().frame_no + 1; + self.buffer.push(frame.bytes()); + + tokio::task::yield_now().await; + } + + break; + } + None => { + // snapshot is not ready yet, wait a bit + // FIXME: notify when snapshot becomes ready instead of using loop + tokio::time::sleep(Duration::from_millis(100)).await; + } + } + } + + Ok(()) + } + + async fn send_frames(&mut self) -> crate::Result<()> { + let frames = std::mem::take(&mut self.buffer); + let outbound = Outbound { + to: self.node_id, + enveloppe: Enveloppe { + database_id: Some(self.database_id), + message: Message::Frames(Frames { + req_no: self.req_no, + seq_no: self.seq_no, + frames, + }), + }, + }; + self.seq_no += 1; + self.dipatcher.dispatch(outbound).await?; + + Ok(()) + } +} + +pub struct PrimaryConnection { + pub conn: libsqlx::libsql::LibsqlConnection<PrimaryType>, +} + +#[async_trait::async_trait] +impl ConnectionHandler for PrimaryConnection { + fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<()> { + Poll::Ready(()) + } + + async fn handle_conn_message(&mut self, msg: ConnectionMessage) { + match msg { + ConnectionMessage::Execute { pgm, builder } => { + block_in_place(|| self.conn.execute_program(&pgm, builder)) + } + 
ConnectionMessage::Describe => { + todo!() + } + } + } + + async fn handle_inbound(&mut self, _msg: Inbound) { + tracing::debug!("primary connection received message, ignoring.") + } +} diff --git a/libsqlx-server/src/allocation/replica.rs b/libsqlx-server/src/allocation/replica.rs new file mode 100644 index 00000000..3d6e81e0 --- /dev/null +++ b/libsqlx-server/src/allocation/replica.rs @@ -0,0 +1,357 @@ +use std::ops::Deref; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{ready, Context, Poll}; +use std::time::Duration; + +use futures::Future; +use libsqlx::libsql::{LibsqlConnection, LibsqlDatabase, ReplicaType}; +use libsqlx::program::Program; +use libsqlx::proxy::{WriteProxyConnection, WriteProxyDatabase}; +use libsqlx::result_builder::{Column, QueryBuilderConfig, ResultBuilder}; +use libsqlx::{Connection, DescribeResponse, Frame, FrameNo, Injector}; +use parking_lot::Mutex; +use tokio::sync::mpsc; +use tokio::task::block_in_place; +use tokio::time::{timeout, Sleep}; + +use crate::linc::bus::Dispatch; +use crate::linc::proto::{BuilderStep, Enveloppe, Frames, Message, ProxyResponse}; +use crate::linc::{Inbound, NodeId, Outbound}; +use crate::meta::DatabaseId; + +use super::{ConnectionHandler, ConnectionMessage}; + +type ProxyConnection = WriteProxyConnection<LibsqlConnection<ReplicaType>, RemoteConn>; +pub type ProxyDatabase = WriteProxyDatabase<LibsqlDatabase<ReplicaType>, RemoteDb>; + +pub struct RemoteDb { + pub proxy_request_timeout_duration: Duration, +} + +#[derive(Clone)] +pub struct RemoteConn { + inner: Arc<RemoteConnInner>, +} + +struct Request { + id: Option<u32>, + builder: Box<dyn ResultBuilder>, + pgm: Option<Program>, + next_seq_no: u32, + timeout: Pin<Box<Sleep>>, +} + +pub struct RemoteConnInner { + current_req: Mutex<Option<Request>>, + request_timeout_duration: Duration, +} + +impl Deref for RemoteConn { + type Target = RemoteConnInner; + + fn deref(&self) -> &Self::Target { + self.inner.as_ref() + } +} + +impl libsqlx::Connection for RemoteConn { + fn execute_program( + &mut self, + program: &libsqlx::program::Program, + builder: Box<dyn ResultBuilder>, + ) { + // When we need to proxy a query, we place it in the current request slot. When we are + // back in an async context, we'll send it to the primary, and asynchronously drive the + // builder. + let mut lock = self.inner.current_req.lock(); + *lock = match *lock { + Some(_) => unreachable!("concurrent request on the same connection!"), + None => Some(Request { + id: None, + builder, + pgm: Some(program.clone()), + next_seq_no: 0, + timeout: Box::pin(tokio::time::sleep(self.inner.request_timeout_duration)), + }), + }; + } + + fn describe(&self, _sql: String) -> libsqlx::Result<DescribeResponse> { + unreachable!("Describe request should not be proxied") + } +} + +impl libsqlx::Database for RemoteDb { + type Connection = RemoteConn; + + fn connect(&self) -> Result<Self::Connection, libsqlx::error::Error> { + Ok(RemoteConn { + inner: Arc::new(RemoteConnInner { + current_req: Default::default(), + request_timeout_duration: self.proxy_request_timeout_duration, + }), + }) + } +} + +pub struct Replicator { + dispatcher: Arc<dyn Dispatch>, + req_id: u32, + next_frame_no: FrameNo, + next_seq: u32, + database_id: DatabaseId, + primary_node_id: NodeId, + injector: Box<dyn Injector + Send + 'static>, + receiver: mpsc::Receiver<Frames>, +} + +impl Replicator { + pub fn new( + dispatcher: Arc<dyn Dispatch>, + next_frame_no: FrameNo, + database_id: DatabaseId, + primary_node_id: NodeId, + injector: Box<dyn Injector + Send + 'static>, + receiver: mpsc::Receiver<Frames>, + ) -> Self { + Self { + dispatcher, + req_id: 0, + next_frame_no, + next_seq: 0, + database_id, + primary_node_id, + injector, + receiver, + } + } + + pub async fn run(mut self) { + macro_rules! 
ok_or_log { + ($e:expr) => { + if let Err(e) = $e { + tracing::warn!("failed to start replication process: {e}"); + } + }; + } + + ok_or_log!(self.query_replicate().await); + loop { + match timeout(Duration::from_secs(5), self.receiver.recv()).await { + Ok(Some(Frames { + req_no: req_id, + seq_no: seq, + frames, + })) => { + // ignore frames from a previous call to Replicate + if req_id != self.req_id { + tracing::debug!(req_id, self.req_id, "wrong req_id"); + continue; + } + if seq != self.next_seq { + // this is not the batch of frame we were expecting, drop what we have, and + // ask again from last checkpoint + tracing::debug!(seq, self.next_seq, "wrong seq"); + ok_or_log!(self.query_replicate().await); + continue; + }; + self.next_seq += 1; + + tracing::debug!("injecting {} frames", frames.len()); + + for bytes in frames { + let inject = || -> crate::Result<()> { + let frame = Frame::try_from_bytes(bytes)?; + block_in_place(|| { + if let Some(last_committed) = self.injector.inject(frame).unwrap() { + tracing::debug!(last_committed); + self.next_frame_no = last_committed + 1; + } + Ok(()) + }) + }; + + if let Err(e) = inject() { + tracing::error!("error injecting frames: {e}"); + ok_or_log!(self.query_replicate().await); + break; + } + } + } + // no news from primary for the past 5 secs, send a request again + Err(_) => ok_or_log!(self.query_replicate().await), + Ok(None) => break, + } + } + } + + async fn query_replicate(&mut self) -> crate::Result<()> { + tracing::debug!("seinding replication request"); + self.req_id += 1; + self.next_seq = 0; + // clear buffered, uncommitted frames + self.injector.clear(); + self.dispatcher + .dispatch(Outbound { + to: self.primary_node_id, + enveloppe: Enveloppe { + database_id: Some(self.database_id), + message: Message::Replicate { + next_frame_no: self.next_frame_no, + req_no: self.req_id, + }, + }, + }) + .await?; + + Ok(()) + } +} + +pub struct ReplicaConnection { + pub conn: ProxyConnection, + pub connection_id: u32, + pub next_req_id: u32, + pub primary_node_id: NodeId, + pub database_id: DatabaseId, + pub dispatcher: Arc, +} + +impl ReplicaConnection { + fn handle_proxy_response(&mut self, resp: ProxyResponse) { + let mut lock = self.conn.writer().inner.current_req.lock(); + let finnalized = match *lock { + Some(ref mut req) if req.id == Some(resp.req_id) && resp.seq_no == req.next_seq_no => { + self.next_req_id += 1; + // TODO: pass actual config + let config = QueryBuilderConfig { max_size: None }; + let mut finnalized = false; + for step in resp.row_steps.into_iter() { + if finnalized { + break; + }; + match step { + BuilderStep::Init => req.builder.init(&config).unwrap(), + BuilderStep::BeginStep => req.builder.begin_step().unwrap(), + BuilderStep::FinishStep(affected_row_count, last_insert_rowid) => req + .builder + .finish_step(affected_row_count, last_insert_rowid) + .unwrap(), + BuilderStep::StepError(_e) => req + .builder + .step_error(todo!("handle proxy step error")) + .unwrap(), + BuilderStep::ColsDesc(cols) => req + .builder + .cols_description(&mut &mut cols.iter().map(|c| Column { + name: &c.name, + decl_ty: c.decl_ty.as_deref(), + })) + .unwrap(), + BuilderStep::BeginRows => req.builder.begin_rows().unwrap(), + BuilderStep::BeginRow => req.builder.begin_row().unwrap(), + BuilderStep::AddRowValue(v) => { + req.builder.add_row_value((&v).into()).unwrap() + } + BuilderStep::FinishRow => req.builder.finish_row().unwrap(), + BuilderStep::FinishRows => req.builder.finish_rows().unwrap(), + BuilderStep::Finnalize { is_txn, frame_no 
pub struct ReplicaConnection { + pub conn: ProxyConnection, + pub connection_id: u32, + pub next_req_id: u32, + pub primary_node_id: NodeId, + pub database_id: DatabaseId, + pub dispatcher: Arc<dyn Dispatch>, +} + +impl ReplicaConnection { + fn handle_proxy_response(&mut self, resp: ProxyResponse) { + let mut lock = self.conn.writer().inner.current_req.lock(); + let finnalized = match *lock { + Some(ref mut req) if req.id == Some(resp.req_id) && resp.seq_no == req.next_seq_no => { + // advance the expected sequence number for the next batch of this response + req.next_seq_no += 1; + // TODO: pass actual config + let config = QueryBuilderConfig { max_size: None }; + let mut finnalized = false; + for step in resp.row_steps.into_iter() { + if finnalized { + break; + }; + match step { + BuilderStep::Init => req.builder.init(&config).unwrap(), + BuilderStep::BeginStep => req.builder.begin_step().unwrap(), + BuilderStep::FinishStep(affected_row_count, last_insert_rowid) => req + .builder + .finish_step(affected_row_count, last_insert_rowid) + .unwrap(), + BuilderStep::StepError(_e) => req + .builder + .step_error(todo!("handle proxy step error")) + .unwrap(), + BuilderStep::ColsDesc(cols) => req + .builder + .cols_description(&mut &mut cols.iter().map(|c| Column { + name: &c.name, + decl_ty: c.decl_ty.as_deref(), + })) + .unwrap(), + BuilderStep::BeginRows => req.builder.begin_rows().unwrap(), + BuilderStep::BeginRow => req.builder.begin_row().unwrap(), + BuilderStep::AddRowValue(v) => { + req.builder.add_row_value((&v).into()).unwrap() + } + BuilderStep::FinishRow => req.builder.finish_row().unwrap(), + BuilderStep::FinishRows => req.builder.finish_rows().unwrap(), + BuilderStep::Finnalize { is_txn, frame_no } => { + let _ = req.builder.finnalize(is_txn, frame_no).unwrap(); + finnalized = true; + } + BuilderStep::FinnalizeError(e) => { + req.builder.finnalize_error(e); + finnalized = true; + } + } + } + finnalized + } + Some(_) => todo!("error processing response"), + None => { + tracing::error!("received builder message, but there is no pending request"); + false + } + }; + + if finnalized { + *lock = None; + } + } +} + +#[async_trait::async_trait] +impl ConnectionHandler for ReplicaConnection { + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<()> { + // we are currently handling a request on this connection + let mut req = self.conn.writer().current_req.lock(); + let should_abort_query = match &mut *req { + Some(ref mut req) => { + ready!(req.timeout.as_mut().poll(cx)); + // the request has timed out; we finalize the builder with an error and clear + // the current request. + req.builder.finnalize_error("request timed out".to_string()); + true + } + None => return Poll::Ready(()), + }; + + if should_abort_query { + *req = None + } + + Poll::Ready(()) + } + + async fn handle_conn_message(&mut self, msg: ConnectionMessage) { + match msg { + ConnectionMessage::Execute { pgm, builder } => { + self.conn.execute_program(&pgm, builder); + let msg = { + let mut lock = self.conn.writer().inner.current_req.lock(); + match *lock { + Some(ref mut req) if req.id.is_none() => { + let program = req + .pgm + .take() + .expect("unsent request should have a program"); + let req_id = self.next_req_id; + self.next_req_id += 1; + req.id = Some(req_id); + + let msg = Outbound { + to: self.primary_node_id, + enveloppe: Enveloppe { + database_id: Some(self.database_id), + message: Message::ProxyRequest { + connection_id: self.connection_id, + req_id, + program, + }, + }, + }; + + Some(msg) + } + _ => None, + } + }; + + if let Some(msg) = msg { + self.dispatcher.dispatch(msg).await; + } + } + ConnectionMessage::Describe => (), + } + } + + async fn handle_inbound(&mut self, msg: Inbound) { + match msg.enveloppe.message { + Message::ProxyResponse(resp) => { + self.handle_proxy_response(resp); + } + _ => (), // ignore anything else + } + } +} diff --git a/libsqlx-server/src/allocation/timeout_notifier.rs b/libsqlx-server/src/allocation/timeout_notifier.rs new file mode 100644 index 00000000..b64c71a8 --- /dev/null +++ b/libsqlx-server/src/allocation/timeout_notifier.rs @@ -0,0 +1,90 @@ +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll, Waker}; +use std::time::Instant; + +use futures::{Future, FutureExt}; +use parking_lot::Mutex; +use tokio::time::{sleep_until, Sleep}; + +pub fn timeout_monitor() -> (TimeoutMonitor, TimeoutNotifier) { + let inner = Arc::new(Mutex::new(TimeoutInner { + sleep: Box::pin(sleep_until(Instant::now().into())), + enabled: false, + waker: None, + })); + + ( + TimeoutMonitor { + inner: inner.clone(), + }, + TimeoutNotifier { inner }, + ) +} + +pub struct TimeoutMonitor { + inner: Arc<Mutex<TimeoutInner>>, +} + +pub struct TimeoutNotifier { + inner: Arc<Mutex<TimeoutInner>>, +} + +impl TimeoutNotifier { + pub fn disable(&self) { + self.inner.lock().enabled = false; + } + + pub fn timeout_at(&self, at: Instant) { + let mut inner = self.inner.lock(); + inner.enabled = true; + inner.sleep.as_mut().reset(at.into()); + if let Some(waker) = inner.waker.take() { + waker.wake() + } + } +} + +struct TimeoutInner { + sleep: Pin<Box<Sleep>>, + enabled: bool, + waker: Option<Waker>, +} + +impl Future for TimeoutMonitor { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { + let mut inner = 
self.inner.lock(); + if inner.enabled { + inner.sleep.poll_unpin(cx) + } else { + inner.waker.replace(cx.waker().clone()); + Poll::Pending + } + } +} + +#[cfg(test)] +mod test { + use std::time::Duration; + + use super::*; + + #[tokio::test] + async fn set_timeout() { + let (monitor, notifier) = timeout_monitor(); + notifier.timeout_at(Instant::now() + Duration::from_millis(100)); + monitor.await; + } + + #[tokio::test] + async fn disable_timeout() { + let (monitor, notifier) = timeout_monitor(); + notifier.timeout_at(Instant::now() + Duration::from_millis(1)); + notifier.disable(); + assert!(tokio::time::timeout(Duration::from_millis(10), monitor) + .await + .is_err()); + } +} diff --git a/libsqlx-server/src/compactor.rs b/libsqlx-server/src/compactor.rs new file mode 100644 index 00000000..060a1dda --- /dev/null +++ b/libsqlx-server/src/compactor.rs @@ -0,0 +1,419 @@ +use std::borrow::Cow; +use std::fs::File; +use std::io::{BufWriter, Write}; +use std::mem::size_of; +use std::os::unix::prelude::FileExt; +use std::path::{Path, PathBuf}; +use std::sync::{ + atomic::{AtomicU64, Ordering}, + Arc, +}; + +use bytemuck::{bytes_of, pod_read_unaligned, try_from_bytes, Pod, Zeroable}; +use bytes::{Bytes, BytesMut}; +use heed::byteorder::BigEndian; +use heed_types::{SerdeBincode, U64}; +use libsqlx::libsql::LogFile; +use libsqlx::{Frame, FrameNo}; +use serde::{Deserialize, Serialize}; +use tempfile::NamedTempFile; +use tokio::sync::watch; +use tokio::task::block_in_place; +use uuid::Uuid; + +use crate::meta::DatabaseId; +use crate::snapshot_store::SnapshotStore; + +#[derive(Debug, Serialize, Deserialize)] +pub struct CompactionJob { + /// Id of the database whose log needs to be compacted + pub database_id: DatabaseId, + /// path to the log to compact + pub log_id: Uuid, +} + +pub struct CompactionQueue { + env: heed::Env, + queue: heed::Database, SerdeBincode>, + next_id: AtomicU64, + notify: watch::Sender>, + db_path: PathBuf, + pub snapshot_store: Arc, +} + +impl CompactionQueue { + const COMPACTION_QUEUE_DB_NAME: &str = "compaction_queue_db"; + pub fn new( + env: heed::Env, + db_path: PathBuf, + snapshot_store: Arc, + ) -> crate::Result { + let mut txn = env.write_txn()?; + let queue = env.create_database(&mut txn, Some(Self::COMPACTION_QUEUE_DB_NAME))?; + let next_id = match queue.last(&mut txn)? { + Some((id, _)) => id + 1, + None => 0, + }; + txn.commit()?; + + let (notify, _) = watch::channel((next_id > 0).then(|| next_id - 1)); + Ok(Self { + env, + queue, + next_id: next_id.into(), + notify, + db_path, + snapshot_store, + }) + } + + pub fn push(&self, job: &CompactionJob) -> crate::Result<()> { + tracing::debug!("new compaction job available: {job:?}"); + let mut txn = self.env.write_txn()?; + let id = self.next_id.fetch_add(1, Ordering::Relaxed); + self.queue.put(&mut txn, &id, job)?; + txn.commit()?; + self.notify.send_replace(Some(id)); + + Ok(()) + } + + pub async fn peek(&self) -> crate::Result<(u64, CompactionJob)> { + let id = self.next_id.load(Ordering::Relaxed); + let peek = || { + block_in_place(|| -> crate::Result<_> { + let txn = self.env.read_txn()?; + Ok(self.queue.first(&txn)?) + }) + }; + + loop { + match peek()? 
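`CompactionQueue` wakes blocked consumers through a `tokio::sync::watch` channel carrying the id of the most recently enqueued job, and `peek` blocks via `wait_for` until a large enough id shows up. The same pattern in isolation (job ids and the flow are made up for the example):

```rust
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    // The channel holds the id of the last job pushed, if any.
    let (notify, _) = watch::channel::<Option<u64>>(None);

    let mut rx = notify.subscribe();
    let waiter = tokio::spawn(async move {
        // Blocks until some job with id >= 1 has been pushed.
        rx.wait_for(|id| id.map(|id| id >= 1).unwrap_or(false))
            .await
            .unwrap();
    });

    notify.send_replace(Some(0)); // not enough: the waiter keeps waiting
    notify.send_replace(Some(1)); // wakes the waiter
    waiter.await.unwrap();
}
```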
{ + Some(job) => return Ok(job), + None => { + self.notify + .subscribe() + .wait_for(|x| x.map(|x| x >= id).unwrap_or_default()) + .await + .expect("we're holding the other side of the channel!"); + } + } + } + } + + fn complete(&self, txn: &mut heed::RwTxn, job_id: u64) -> crate::Result<()> { + block_in_place(|| self.queue.delete(txn, &job_id))?; + Ok(()) + } + + async fn compact(&self) -> crate::Result<()> { + let (job_id, job) = self.peek().await?; + tracing::debug!("starting new compaction job: {job:?}"); + let to_compact_path = self.snapshot_queue_dir().join(job.log_id.to_string()); + let (start_fno, end_fno) = tokio::task::spawn_blocking({ + let to_compact_path = to_compact_path.clone(); + let db_path = self.db_path.clone(); + move || { + let log = LogFile::new(to_compact_path)?; + let (start_fno, end_fno, iter) = + log.rev_deduped().expect("compaction job with no frames!"); + let mut builder = SnapshotBuilder::new( + &db_path, + job.database_id, + job.log_id, + start_fno, + end_fno, + )?; + for frame in iter { + let frame = frame?; + builder.push_frame(frame)?; + } + builder.finish() + } + }) + .await + .map_err(|_| { + crate::error::Error::Internal(color_eyre::eyre::anyhow!("compaction thread panicked")) + })??; + + let mut txn = self.env.write_txn()?; + self.complete(&mut txn, job_id)?; + self.snapshot_store + .register(&mut txn, job.database_id, start_fno, end_fno, job.log_id)?; + txn.commit()?; + + std::fs::remove_file(to_compact_path)?; + + Ok(()) + } + + pub fn snapshot_queue_dir(&self) -> PathBuf { + self.db_path.join("snapshot_queue") + } +} + +pub async fn run_compactor_loop(compactor: Arc<CompactionQueue>) -> color_eyre::Result<()> { + loop { + compactor.compact().await?; + } +} + +#[derive(Debug, Copy, Clone, Zeroable, Pod, PartialEq, Eq)] +#[repr(C)] +/// header of a snapshot file +pub struct SnapshotFileHeader { + /// id of the database + pub db_id: DatabaseId, + /// first frame in the snapshot + pub start_frame_no: u64, + /// end frame in the snapshot + pub end_frame_no: u64, + /// number of frames in the snapshot + pub frame_count: u64, + /// size of the database after applying the snapshot + pub size_after: u32, + pub _pad: u32, +} + +/// A utility to build snapshots from log frames +pub struct SnapshotBuilder { + pub header: SnapshotFileHeader, + snapshot_id: Uuid, + snapshot_file: BufWriter<NamedTempFile>, + db_path: PathBuf, + last_seen_frame_no: u64, +} + +#[derive(Debug, Clone, Copy, Pod, Zeroable)] +#[repr(C)] +pub struct SnapshotFrameHeader { + pub frame_no: FrameNo, + pub page_no: u32, + _pad: u32, +} + +#[derive(Clone)] +pub struct SnapshotFrame { + data: Bytes, +} + +impl SnapshotFrame { + const SIZE: usize = size_of::<SnapshotFrameHeader>() + 4096; + + #[cfg(test)] + pub fn try_from_bytes(data: Bytes) -> crate::Result<Self> { + if data.len() != Self::SIZE { + color_eyre::eyre::bail!("invalid snapshot frame") + } + + Ok(Self { data }) + } + + pub fn header(&self) -> Cow<SnapshotFrameHeader> { + let data = &self.data[..size_of::<SnapshotFrameHeader>()]; + try_from_bytes(data) + .map(Cow::Borrowed) + .unwrap_or_else(|_| Cow::Owned(pod_read_unaligned(data))) + } + + pub(crate) fn page(&self) -> &[u8] { + &self.data[size_of::<SnapshotFrameHeader>()..] 
+ } +} + +impl SnapshotBuilder { + pub fn new( + db_path: &Path, + db_id: DatabaseId, + snapshot_id: Uuid, + start_fno: FrameNo, + end_fno: FrameNo, + ) -> crate::Result<Self> { + let temp_dir = db_path.join("tmp"); + let mut target = BufWriter::new(NamedTempFile::new_in(&temp_dir)?); + // reserve header space + target.write_all(&[0; size_of::<SnapshotFileHeader>()])?; + + Ok(Self { + header: SnapshotFileHeader { + db_id, + start_frame_no: start_fno, + end_frame_no: end_fno, + frame_count: 0, + size_after: 0, + _pad: 0, + }, + snapshot_file: target, + db_path: db_path.to_path_buf(), + last_seen_frame_no: u64::MAX, + snapshot_id, + }) + } + + pub fn push_frame(&mut self, frame: Frame) -> crate::Result<()> { + assert!(frame.header().frame_no < self.last_seen_frame_no); + self.last_seen_frame_no = frame.header().frame_no; + + if frame.header().frame_no == self.header.end_frame_no { + self.header.size_after = frame.header().size_after; + } + + let header = SnapshotFrameHeader { + frame_no: frame.header().frame_no, + page_no: frame.header().page_no, + _pad: 0, + }; + + self.snapshot_file.write_all(bytes_of(&header))?; + self.snapshot_file.write_all(frame.page())?; + + self.header.frame_count += 1; + + Ok(()) + } + + /// Persists the snapshot and returns the start and end frame numbers of the snapshot. + pub fn finish(mut self) -> crate::Result<(FrameNo, FrameNo)> { + self.snapshot_file.flush()?; + let file = self + .snapshot_file + .into_inner() + .map_err(|e| crate::error::Error::Internal(e.into()))?; + + file.as_file().write_all_at(bytes_of(&self.header), 0)?; + + let path = self + .db_path + .join("snapshots") + .join(self.snapshot_id.to_string()); + file.persist(path) + .map_err(|e| crate::error::Error::Internal(e.into()))?; + + Ok((self.header.start_frame_no, self.header.end_frame_no)) + } +} + +pub struct SnapshotFile { + pub file: File, + pub header: SnapshotFileHeader, +} + +impl SnapshotFile { + pub fn open(path: &Path) -> crate::Result<Self> { + let file = File::open(path)?; + let mut header_buf = [0; size_of::<SnapshotFileHeader>()]; + file.read_exact_at(&mut header_buf, 0)?; + let header: SnapshotFileHeader = pod_read_unaligned(&header_buf); + + Ok(Self { file, header }) + } + +
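On disk a snapshot is a fixed-size header followed by fixed-size frame records, so the position of the i-th record can be computed directly rather than scanned. A layout sketch with stand-in sizes (the real values come from `size_of` on the header types); the iterator below performs the same arithmetic:

```rust
// [SnapshotFileHeader][frame 0][frame 1]...[frame N-1]
// where each frame record is [SnapshotFrameHeader][4096-byte page]
const PAGE_SIZE: usize = 4096;
const HEADER_SIZE: usize = 48;       // stand-in for size_of::<SnapshotFileHeader>()
const FRAME_HEADER_SIZE: usize = 16; // frame_no u64 + page_no u32 + pad u32

/// Byte offset of the i-th frame record in the snapshot file.
fn frame_offset(i: u64) -> u64 {
    HEADER_SIZE as u64 + i * (FRAME_HEADER_SIZE + PAGE_SIZE) as u64
}

fn main() {
    assert_eq!(frame_offset(0), 48);
    assert_eq!(frame_offset(2), 48 + 2 * (16 + 4096));
}
```

+ /// Iterator on the frames contained in the snapshot file, in reverse frame_no order.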
+ pub fn frames_iter(&self) -> impl Iterator> + '_ { + let mut current_offset = 0; + std::iter::from_fn(move || { + if current_offset >= self.header.frame_count { + return None; + } + let read_offset = size_of::() as u64 + + current_offset * SnapshotFrame::SIZE as u64; + current_offset += 1; + let mut buf = BytesMut::zeroed(SnapshotFrame::SIZE); + match self.file.read_exact_at(&mut buf, read_offset as _) { + Ok(_) => Some(Ok(SnapshotFrame { data: buf.freeze() })), + Err(e) => Some(Err(e.into())), + } + }) + } + + /// Like `frames_iter`, but stops as soon as a frame with frame_no <= `frame_no` is reached + pub fn frames_iter_from( + &self, + frame_no: u64, + ) -> impl Iterator> + '_ { + let mut iter = self.frames_iter(); + std::iter::from_fn(move || match iter.next() { + Some(Ok(frame)) => { + if frame.header().frame_no < frame_no { + None + } else { + Some(Ok(frame)) + } + } + other => other, + }) + } +} + +#[cfg(test)] +mod test { + use std::collections::HashSet; + + use crate::init_dirs; + + use super::*; + + #[tokio::test(flavor = "multi_thread")] + async fn create_snapshot() { + let temp = tempfile::tempdir().unwrap(); + init_dirs(temp.path()).await.unwrap(); + let env = heed::EnvOpenOptions::new() + .max_dbs(100) + .map_size(1000 * 4096) + .open(temp.path().join("meta")) + .unwrap(); + let snapshot_store = SnapshotStore::new(temp.path().to_path_buf(), env.clone()).unwrap(); + let store = Arc::new(snapshot_store); + let queue = CompactionQueue::new(env, temp.path().to_path_buf(), store.clone()).unwrap(); + let log_id = Uuid::new_v4(); + let database_id = DatabaseId::random(); + + let log_path = temp.path().join("snapshot_queue").join(log_id.to_string()); + tokio::fs::copy("assets/test/simple-log", &log_path) + .await + .unwrap(); + + let log_file = LogFile::new(log_path).unwrap(); + let expected_start_frameno = log_file.header().start_frame_no; + let expected_end_frameno = + log_file.header().start_frame_no + log_file.header().frame_count - 1; + let mut expected_page_content = log_file + .frames_iter() + .unwrap() + .map(|f| f.unwrap().header().page_no) + .collect::>(); + + queue.push(&CompactionJob { + database_id, + log_id, + }); + + queue.compact().await.unwrap(); + + let snapshot_path = temp.path().join("snapshots").join(log_id.to_string()); + assert!(snapshot_path.exists()); + + let snapshot_file = SnapshotFile::open(&snapshot_path).unwrap(); + assert_eq!(snapshot_file.header.start_frame_no, expected_start_frameno); + assert_eq!(snapshot_file.header.end_frame_no, expected_end_frameno); + assert!(snapshot_file + .frames_iter() + .all(|f| expected_page_content.remove(&f.unwrap().header().page_no))); + assert!(expected_page_content.is_empty()); + + assert_eq!( + snapshot_file + .frames_iter() + .map(Result::unwrap) + .map(|f| f.header().frame_no) + .reduce(|prev, new| { + assert!(new < prev); + new + }) + .unwrap(), + 0 + ); + + assert_eq!(store.locate(database_id, 0).unwrap().snapshot_id, log_id); + } +} diff --git a/libsqlx-server/src/config.rs b/libsqlx-server/src/config.rs new file mode 100644 index 00000000..84b961eb --- /dev/null +++ b/libsqlx-server/src/config.rs @@ -0,0 +1,102 @@ +use std::net::SocketAddr; +use std::path::PathBuf; + +use serde::de::Visitor; +use serde::Deserialize; + +#[derive(Deserialize, Debug, Clone)] +pub struct Config { + /// Database path + #[serde(default = "default_db_path")] + pub db_path: PathBuf, + /// Cluster configuration + pub cluster: ClusterConfig, + /// User API configuration + pub user_api: UserApiConfig, + /// Admin API configuration + pub 
admin_api: AdminApiConfig, +} + +impl Config { + pub fn validate(&self) -> color_eyre::Result<()> { + // TODO: implement validation + Ok(()) + } +} + +#[derive(Deserialize, Debug, Clone)] +pub struct ClusterConfig { + pub id: u64, + /// Address to bind this node to + #[serde(default = "default_linc_addr")] + pub addr: SocketAddr, + /// List of peers in the format `<id>@<addr>` + pub peers: Vec<Peer>, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct UserApiConfig { + #[serde(default = "default_user_addr")] + pub addr: SocketAddr, +} + +#[derive(Deserialize, Debug, Clone)] +pub struct AdminApiConfig { + #[serde(default = "default_admin_addr")] + pub addr: SocketAddr, +} + +fn default_db_path() -> PathBuf { + PathBuf::from("data.sqld") +} + +fn default_admin_addr() -> SocketAddr { + "0.0.0.0:8081".parse().unwrap() +} + +fn default_user_addr() -> SocketAddr { + "0.0.0.0:8080".parse().unwrap() +} + +fn default_linc_addr() -> SocketAddr { + "0.0.0.0:5001".parse().unwrap() +} + +#[derive(Debug, Clone)] +pub struct Peer { + pub id: u64, + pub addr: String, +} + +impl<'de> Deserialize<'de> for Peer { + fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> + where + D: serde::Deserializer<'de>, + { + struct V; + + impl Visitor<'_> for V { + type Value = Peer; + + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("a string in the format <id>@<addr>") + } + + fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> + where + E: serde::de::Error, + { + let mut iter = v.split("@"); + let Some(id) = iter.next() else { return Err(E::custom("node id is missing")) }; + let Ok(id) = id.parse::<u64>() else { return Err(E::custom("failed to parse node id")) }; + let Some(addr) = iter.next() else { return Err(E::custom("node address is missing")) }; + Ok(Peer { + id, + addr: addr.to_string(), + }) + } + } + + deserializer.deserialize_str(V) + } +} diff --git a/libsqlx-server/src/database.rs b/libsqlx-server/src/database.rs new file mode 100644 index 00000000..2147a85d --- /dev/null +++ b/libsqlx-server/src/database.rs @@ -0,0 +1,20 @@ +use tokio::sync::{mpsc, oneshot}; + +use crate::allocation::{AllocationMessage, ConnectionHandle}; + +pub struct Database { + pub sender: mpsc::Sender<AllocationMessage>, +} + +impl Database { + pub async fn connect(&self) -> crate::Result<ConnectionHandle> { + let (ret, conn) = oneshot::channel(); + self.sender + .send(AllocationMessage::Connect { ret }) + .await + .map_err(|_| crate::error::Error::AllocationClosed)?; + + conn.await + .map_err(|_| crate::error::Error::ConnectionClosed)? 
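The custom `Deserialize` impl in config.rs above accepts peers as `<id>@<addr>` strings. The same parsing rule as a standalone helper (the function is ours, for illustration):

```rust
fn parse_peer(s: &str) -> Result<(u64, String), String> {
    let mut iter = s.split('@');
    // split always yields at least one element, so the first next() cannot fail
    let id = iter.next().ok_or("node id is missing")?;
    let id = id.parse::<u64>().map_err(|_| "failed to parse node id")?;
    let addr = iter.next().ok_or("node address is missing")?;
    Ok((id, addr.to_string()))
}

fn main() {
    assert_eq!(
        parse_peer("1@127.0.0.1:5001"),
        Ok((1, "127.0.0.1:5001".to_string()))
    );
    assert!(parse_peer("not-a-peer").is_err());
}
```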
+ } +} diff --git a/libsqlx-server/src/error.rs b/libsqlx-server/src/error.rs new file mode 100644 index 00000000..f62bf20c --- /dev/null +++ b/libsqlx-server/src/error.rs @@ -0,0 +1,21 @@ +use crate::meta::AllocationError; + +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error(transparent)] + Libsqlx(#[from] libsqlx::error::Error), + #[error("replica injector loop exited")] + InjectorExited, + #[error("connection closed")] + ConnectionClosed, + #[error(transparent)] + Io(#[from] std::io::Error), + #[error("allocation closed")] + AllocationClosed, + #[error("internal error: {0}")] + Internal(color_eyre::eyre::Error), + #[error(transparent)] + Heed(#[from] heed::Error), + #[error(transparent)] + Allocation(#[from] AllocationError), +} diff --git a/libsqlx-server/src/hrana/batch.rs b/libsqlx-server/src/hrana/batch.rs new file mode 100644 index 00000000..d01bc8ac --- /dev/null +++ b/libsqlx-server/src/hrana/batch.rs @@ -0,0 +1,142 @@ +use std::collections::HashMap; +use std::sync::Arc; + +// use crate::auth::Authenticated; + +use libsqlx::analysis::Statement; +use libsqlx::program::{Cond, Program, Step}; +use libsqlx::query::{Params, Query}; +use libsqlx::result_builder::{StepResult, StepResultsBuilder}; +use tokio::sync::oneshot; + +use crate::allocation::ConnectionHandle; + +use super::error::HranaError; +use super::result_builder::HranaBatchProtoBuilder; +use super::stmt::{proto_stmt_to_query, StmtError}; +use super::{proto, ProtocolError, Version}; + +fn proto_cond_to_cond(cond: &proto::BatchCond, max_step_i: usize) -> Result { + let try_convert_step = |step: i32| -> Result { + let step = usize::try_from(step).map_err(|_| ProtocolError::BatchCondBadStep)?; + if step >= max_step_i { + return Err(ProtocolError::BatchCondBadStep); + } + Ok(step) + }; + + let cond = match cond { + proto::BatchCond::Ok { step } => Cond::Ok { + step: try_convert_step(*step)?, + }, + proto::BatchCond::Error { step } => Cond::Err { + step: try_convert_step(*step)?, + }, + proto::BatchCond::Not { cond } => Cond::Not { + cond: proto_cond_to_cond(cond, max_step_i)?.into(), + }, + proto::BatchCond::And { conds } => Cond::And { + conds: conds + .iter() + .map(|cond| proto_cond_to_cond(cond, max_step_i)) + .collect::>()?, + }, + proto::BatchCond::Or { conds } => Cond::Or { + conds: conds + .iter() + .map(|cond| proto_cond_to_cond(cond, max_step_i)) + .collect::>()?, + }, + }; + + Ok(cond) +} + +pub fn proto_batch_to_program( + batch: &proto::Batch, + sqls: &HashMap, + version: Version, +) -> Result { + let mut steps = Vec::with_capacity(batch.steps.len()); + for (step_i, step) in batch.steps.iter().enumerate() { + let query = proto_stmt_to_query(&step.stmt, sqls, version)?; + let cond = step + .condition + .as_ref() + .map(|cond| proto_cond_to_cond(cond, step_i)) + .transpose()?; + let step = Step { query, cond }; + + steps.push(step); + } + + Ok(Program::new(steps)) +} + +pub async fn execute_batch( + conn: &ConnectionHandle, + // auth: Authenticated, + pgm: Program, +) -> Result { + let (builder, ret) = HranaBatchProtoBuilder::new(); + conn.execute( + pgm, + // auth, + Box::new(builder), + ) + .await; + + Ok(ret + .await + .map_err(|_| crate::error::Error::ConnectionClosed)?) 
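`proto_cond_to_cond` above only validates step indices and translates the wire representation; semantically, a condition is evaluated against the outcomes of earlier steps. A toy evaluator sketching those semantics (this is our model, not the engine's actual execution path):

```rust
enum Cond {
    Ok { step: usize },
    Err { step: usize },
    Not { cond: Box<Cond> },
    And { conds: Vec<Cond> },
    Or { conds: Vec<Cond> },
}

/// `results[i]` is `true` if step `i` succeeded.
fn eval(cond: &Cond, results: &[bool]) -> bool {
    match cond {
        Cond::Ok { step } => results[*step],
        Cond::Err { step } => !results[*step],
        Cond::Not { cond } => !eval(cond, results),
        Cond::And { conds } => conds.iter().all(|c| eval(c, results)),
        Cond::Or { conds } => conds.iter().any(|c| eval(c, results)),
    }
}

fn main() {
    // Step 1 runs only if step 0 succeeded:
    let cond = Cond::Ok { step: 0 };
    assert!(eval(&cond, &[true]));
    assert!(!eval(&cond, &[false]));

    // Composite: step 0 succeeded AND step 1 failed.
    let both = Cond::And {
        conds: vec![Cond::Ok { step: 0 }, Cond::Err { step: 1 }],
    };
    assert!(eval(&both, &[true, false]));
}
```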
+} + +pub fn proto_sequence_to_program(sql: &str) -> Result { + let stmts = Statement::parse(sql) + .collect::, libsqlx::error::Error>>() + .map_err(|err| StmtError::SqlParse { source: err.into() })?; + + let steps = stmts + .into_iter() + .enumerate() + .map(|(step_i, stmt)| { + let cond = match step_i { + 0 => None, + _ => Some(Cond::Ok { step: step_i - 1 }), + }; + let query = Query { + stmt, + params: Params::empty(), + want_rows: false, + }; + Step { cond, query } + }) + .collect::>(); + + Ok(Program { steps }) +} + +pub async fn execute_sequence( + conn: &ConnectionHandle, + // auth: Authenticated, + pgm: Program, +) -> Result<(), HranaError> { + let (send, ret) = oneshot::channel(); + let builder = StepResultsBuilder::new(send); + conn.execute( + pgm, + // auth, + Box::new(builder), + ) + .await; + + ret.await + .unwrap() + .unwrap() + .into_iter() + .try_for_each(|result| match result { + StepResult::Ok => Ok(()), + StepResult::Err(e) => Err(crate::error::Error::from(e))?, + StepResult::Skipped => todo!(), // Err(anyhow!("Statement in sequence was not executed")), + }) +} diff --git a/libsqlx-server/src/hrana/error.rs b/libsqlx-server/src/hrana/error.rs new file mode 100644 index 00000000..8f8711a1 --- /dev/null +++ b/libsqlx-server/src/hrana/error.rs @@ -0,0 +1,28 @@ +use super::http::request::StreamResponseError; +use super::http::StreamError; +use super::stmt::StmtError; +use super::ProtocolError; + +#[derive(Debug, thiserror::Error)] +pub enum HranaError { + #[error(transparent)] + Stmt(#[from] StmtError), + #[error(transparent)] + Proto(#[from] ProtocolError), + #[error(transparent)] + Stream(#[from] StreamError), + #[error(transparent)] + StreamResponse(#[from] StreamResponseError), + #[error(transparent)] + Libsqlx(crate::error::Error), +} + +impl HranaError { + pub fn code(&self) -> Option<&str> { + match self { + HranaError::Stmt(e) => Some(e.code()), + HranaError::StreamResponse(e) => Some(e.code()), + HranaError::Stream(_) | HranaError::Libsqlx(_) | HranaError::Proto(_) => None, + } + } +} diff --git a/libsqlx-server/src/hrana/http/mod.rs b/libsqlx-server/src/hrana/http/mod.rs new file mode 100644 index 00000000..790a3c4f --- /dev/null +++ b/libsqlx-server/src/hrana/http/mod.rs @@ -0,0 +1,51 @@ +use parking_lot::Mutex; + +use super::error::HranaError; +// use crate::auth::Authenticated; +use crate::database::Database; +pub use stream::StreamError; + +pub mod proto; +pub mod request; +mod stream; + +pub struct Server { + self_url: Option, + baton_key: [u8; 32], + stream_state: Mutex, +} + +impl Server { + pub fn new(self_url: Option) -> Self { + Self { + self_url, + baton_key: rand::random(), + stream_state: Mutex::new(stream::ServerStreamState::new()), + } + } + + pub async fn run_expire(&self) { + stream::run_expire(self).await + } +} + +pub async fn handle_pipeline( + server: &Server, + // auth: Authenticated, + req: proto::PipelineRequestBody, + db: Database, +) -> crate::Result { + let mut stream_guard = stream::acquire(server, req.baton.as_deref(), db).await?; + + let mut results = Vec::with_capacity(req.requests.len()); + for request in req.requests.into_iter() { + let result = request::handle(&mut stream_guard, /*auth,*/ request).await?; + results.push(result); + } + + Ok(proto::PipelineResponseBody { + baton: stream_guard.release(), + base_url: server.self_url.clone(), + results, + }) +} diff --git a/libsqlx-server/src/hrana/http/proto.rs b/libsqlx-server/src/hrana/http/proto.rs new file mode 100644 index 00000000..ba1285f1 --- /dev/null +++ 
b/libsqlx-server/src/hrana/http/proto.rs @@ -0,0 +1,115 @@ +//! Structures for Hrana-over-HTTP. + +pub use super::super::proto::*; +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Debug)] +pub struct PipelineRequestBody { + pub baton: Option, + pub requests: Vec, +} + +#[derive(Serialize, Debug)] +pub struct PipelineResponseBody { + pub baton: Option, + pub base_url: Option, + pub results: Vec, +} + +#[derive(Serialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum StreamResult { + Ok { response: StreamResponse }, + Error { error: Error }, +} + +#[derive(Deserialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum StreamRequest { + Close(CloseStreamReq), + Execute(ExecuteStreamReq), + Batch(BatchStreamReq), + Sequence(SequenceStreamReq), + Describe(DescribeStreamReq), + StoreSql(StoreSqlStreamReq), + CloseSql(CloseSqlStreamReq), +} + +#[derive(Serialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum StreamResponse { + Close(CloseStreamResp), + Execute(ExecuteStreamResp), + Batch(BatchStreamResp), + Sequence(SequenceStreamResp), + Describe(DescribeStreamResp), + StoreSql(StoreSqlStreamResp), + CloseSql(CloseSqlStreamResp), +} + +#[derive(Deserialize, Debug)] +pub struct CloseStreamReq {} + +#[derive(Serialize, Debug)] +pub struct CloseStreamResp {} + +#[derive(Deserialize, Debug)] +pub struct ExecuteStreamReq { + pub stmt: Stmt, +} + +#[derive(Serialize, Debug)] +pub struct ExecuteStreamResp { + pub result: StmtResult, +} + +#[derive(Deserialize, Debug)] +pub struct BatchStreamReq { + pub batch: Batch, +} + +#[derive(Serialize, Debug)] +pub struct BatchStreamResp { + pub result: BatchResult, +} + +#[derive(Deserialize, Debug)] +pub struct SequenceStreamReq { + #[serde(default)] + pub sql: Option, + #[serde(default)] + pub sql_id: Option, +} + +#[derive(Serialize, Debug)] +pub struct SequenceStreamResp {} + +#[derive(Deserialize, Debug)] +pub struct DescribeStreamReq { + #[serde(default)] + pub sql: Option, + #[serde(default)] + pub sql_id: Option, +} + +#[derive(Serialize, Debug)] +pub struct DescribeStreamResp { + pub result: DescribeResult, +} + +#[derive(Deserialize, Debug)] +pub struct StoreSqlStreamReq { + pub sql_id: i32, + pub sql: String, +} + +#[derive(Serialize, Debug)] +pub struct StoreSqlStreamResp {} + +#[derive(Deserialize, Debug)] +pub struct CloseSqlStreamReq { + pub sql_id: i32, +} + +#[derive(Serialize, Debug)] +pub struct CloseSqlStreamResp {} diff --git a/libsqlx-server/src/hrana/http/request.rs b/libsqlx-server/src/hrana/http/request.rs new file mode 100644 index 00000000..eaf84eb5 --- /dev/null +++ b/libsqlx-server/src/hrana/http/request.rs @@ -0,0 +1,108 @@ +use crate::hrana::error::HranaError; +use crate::hrana::ProtocolError; + +use super::super::{batch, stmt, Version}; +use super::{proto, stream}; +// use crate::auth::Authenticated; + +/// An error from executing a [`proto::StreamRequest`] +#[derive(thiserror::Error, Debug)] +pub enum StreamResponseError { + #[error("The server already stores {count} SQL texts, it cannot store more")] + SqlTooMany { count: usize }, + #[error(transparent)] + Stmt(stmt::StmtError), +} + +pub async fn handle( + stream_guard: &mut stream::Guard<'_>, + // auth: Authenticated, + request: proto::StreamRequest, +) -> Result { + let result = match try_handle(stream_guard /*, auth*/, request).await { + Ok(response) => proto::StreamResult::Ok { response }, + Err(err) => { + if let HranaError::StreamResponse(err) = err { + let error = proto::Error { + message: 
err.to_string(), + code: err.code().into(), + }; + proto::StreamResult::Error { error } + } else { + Err(err)? + } + } + }; + Ok(result) +} + +async fn try_handle( + stream_guard: &mut stream::Guard<'_>, + // auth: Authenticated, + request: proto::StreamRequest, +) -> crate::Result { + Ok(match request { + proto::StreamRequest::Close(_req) => { + stream_guard.close_conn(); + proto::StreamResponse::Close(proto::CloseStreamResp {}) + } + proto::StreamRequest::Execute(req) => { + let db = stream_guard.get_conn()?; + let sqls = stream_guard.sqls(); + let query = stmt::proto_stmt_to_query(&req.stmt, sqls, Version::Hrana2)?; + let result = stmt::execute_stmt(db, /*auth,*/ query).await?; + proto::StreamResponse::Execute(proto::ExecuteStreamResp { result }) + } + proto::StreamRequest::Batch(req) => { + let db = stream_guard.get_conn()?; + let sqls = stream_guard.sqls(); + let pgm = batch::proto_batch_to_program(&req.batch, sqls, Version::Hrana2)?; + let result = batch::execute_batch(db, /*auth,*/ pgm).await?; + proto::StreamResponse::Batch(proto::BatchStreamResp { result }) + } + proto::StreamRequest::Sequence(req) => { + let db = stream_guard.get_conn()?; + let sqls = stream_guard.sqls(); + let sql = + stmt::proto_sql_to_sql(req.sql.as_deref(), req.sql_id, sqls, Version::Hrana2)?; + let pgm = batch::proto_sequence_to_program(sql)?; + batch::execute_sequence(db, /*auth,*/ pgm).await?; + proto::StreamResponse::Sequence(proto::SequenceStreamResp {}) + } + proto::StreamRequest::Describe(req) => { + let db = stream_guard.get_conn()?; + let sqls = stream_guard.sqls(); + let sql = + stmt::proto_sql_to_sql(req.sql.as_deref(), req.sql_id, sqls, Version::Hrana2)?; + let result = stmt::describe_stmt(db, /* auth,*/ sql.into()).await?; + proto::StreamResponse::Describe(proto::DescribeStreamResp { result }) + } + proto::StreamRequest::StoreSql(req) => { + let sqls = stream_guard.sqls_mut(); + let sql_id = req.sql_id; + if sqls.contains_key(&sql_id) { + Err(ProtocolError::SqlExists { sql_id })? + } else if sqls.len() >= MAX_SQL_COUNT { + Err(StreamResponseError::SqlTooMany { count: sqls.len() })? + } + sqls.insert(sql_id, req.sql); + proto::StreamResponse::StoreSql(proto::StoreSqlStreamResp {}) + } + proto::StreamRequest::CloseSql(req) => { + let sqls = stream_guard.sqls_mut(); + sqls.remove(&req.sql_id); + proto::StreamResponse::CloseSql(proto::CloseSqlStreamResp {}) + } + }) +} + +const MAX_SQL_COUNT: usize = 50; + +impl StreamResponseError { + pub fn code(&self) -> &'static str { + match self { + Self::SqlTooMany { .. } => "SQL_STORE_TOO_MANY", + Self::Stmt(err) => err.code(), + } + } +} diff --git a/libsqlx-server/src/hrana/http/stream.rs b/libsqlx-server/src/hrana/http/stream.rs new file mode 100644 index 00000000..1320df90 --- /dev/null +++ b/libsqlx-server/src/hrana/http/stream.rs @@ -0,0 +1,406 @@ +use base64::prelude::{Engine as _, BASE64_STANDARD_NO_PAD}; +use futures::Future; +use hmac::Mac as _; +use priority_queue::PriorityQueue; +use std::cmp::Reverse; +use std::collections::{HashMap, VecDeque}; +use std::pin::Pin; +use std::{future, mem, task}; +use tokio::time::{Duration, Instant}; + +use super::super::ProtocolError; +use super::Server; +use crate::allocation::ConnectionHandle; +use crate::database::Database; +use crate::hrana::error::HranaError; + +/// Mutable state related to streams, owned by [`Server`] and protected with a mutex. +pub struct ServerStreamState { + /// Map from stream ids to stream handles. The stream ids are random integers. 
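The `expire_queue` declared below orders streams by expiration deadline: `PriorityQueue` is a max-heap, so wrapping the `Instant` in `Reverse` makes the earliest deadline surface first. The same idea with the standard library's `BinaryHeap` (stream ids here are made up):

```rust
use std::cmp::Reverse;
use std::collections::BinaryHeap;
use std::time::{Duration, Instant};

fn main() {
    let now = Instant::now();
    let mut queue: BinaryHeap<Reverse<(Instant, u64)>> = BinaryHeap::new();
    queue.push(Reverse((now + Duration::from_secs(30), 1))); // stream 1
    queue.push(Reverse((now + Duration::from_secs(10), 2))); // stream 2

    // Stream 2 expires first even though it was pushed last.
    let Reverse((_deadline, stream_id)) = queue.pop().unwrap();
    assert_eq!(stream_id, 2);
}
```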
+ handles: HashMap, + /// Queue of streams ordered by the instant when they should expire. All these stream ids + /// should refer to handles in the [`Handle::Available`] variant. + expire_queue: PriorityQueue>, + /// Queue of expired streams that are still stored as [`Handle::Expired`], together with the + /// instant when we should remove them completely. + cleanup_queue: VecDeque<(u64, Instant)>, + /// The timer that we use to wait for the next item in `expire_queue`. + expire_sleep: Pin>, + /// A waker to wake up the task that expires streams from the `expire_queue`. + expire_waker: Option, + /// See [`roundup_instant()`]. + expire_round_base: Instant, +} + +/// Handle to a stream, owned by the [`ServerStreamState`]. +enum Handle { + /// A stream that is open and ready to be used by requests. [`Stream::db`] should always be + /// `Some`. + Available(Box), + /// A stream that has been acquired by a request that hasn't finished processing. This will be + /// replaced with `Available` when the request completes and releases the stream. + Acquired, + /// A stream that has been expired. This stream behaves as closed, but we keep this around for + /// some time to provide a nicer error messages (i.e., if the stream is expired, we return a + /// "stream expired" error rather than "invalid baton" error). + Expired, +} + +/// State of a Hrana-over-HTTP stream. +/// +/// The stream is either owned by [`Handle::Available`] (when it's not in use) or by [`Guard`] +/// (when it's being used by a request). +struct Stream { + /// The database connection that corresponds to this stream. This is `None` after the `"close"` + /// request was executed. + conn: Option, + /// The cache of SQL texts stored on the server with `"store_sql"` requests. + sqls: HashMap, + /// Stream id of this stream. The id is generated randomly (it should be unguessable). + stream_id: u64, + /// Sequence number that is expected in the next baton. To make sure that clients issue stream + /// requests sequentially, the baton returned from each HTTP request includes this sequence + /// number, and the following HTTP request must show a baton with the same sequence number. + baton_seq: u64, +} + +/// Guard object that is used to access a stream from the outside. The guard makes sure that the +/// stream's entry in [`ServerStreamState::handles`] is either removed or replaced with +/// [`Handle::Available`] after the guard goes out of scope. +pub struct Guard<'srv> { + server: &'srv Server, + /// The guarded stream. This is only set to `None` in the destructor. + stream: Option>, + /// If set to `true`, the destructor will release the stream for further use (saving it as + /// [`Handle::Available`] in [`ServerStreamState::handles`]. If false, the stream is removed on + /// drop. + release: bool, +} + +/// An unrecoverable error that should close the stream. The difference from [`ProtocolError`] is +/// that a correct client may trigger this error, it does not mean that the protocol has been +/// violated. +#[derive(thiserror::Error, Debug)] +pub enum StreamError { + #[error("The stream has expired due to inactivity")] + StreamExpired, +} + +impl ServerStreamState { + pub fn new() -> Self { + Self { + handles: HashMap::new(), + expire_queue: PriorityQueue::new(), + cleanup_queue: VecDeque::new(), + expire_sleep: Box::pin(tokio::time::sleep(Duration::ZERO)), + expire_waker: None, + expire_round_base: Instant::now(), + } + } +} + +/// Acquire a guard to a new or existing stream. 
If baton is `Some`, we try to look up the stream, +/// otherwise we create a new stream. +pub async fn acquire<'srv>( + server: &'srv Server, + baton: Option<&str>, + db: Database, +) -> Result, HranaError> { + let stream = match baton { + Some(baton) => { + let (stream_id, baton_seq) = decode_baton(server, baton)?; + + let mut state = server.stream_state.lock(); + let handle = state.handles.get_mut(&stream_id); + match handle { + None => { + Err(ProtocolError::BatonInvalid { + reason: format!("Stream handle for {stream_id} was not found"), + })?; + } + Some(Handle::Acquired) => { + Err(ProtocolError::BatonReused { + reason: format!("Stream handle for {stream_id} is acquired"), + })?; + } + Some(Handle::Expired) => Err(StreamError::StreamExpired)?, + Some(Handle::Available(stream)) => { + if stream.baton_seq != baton_seq { + Err(ProtocolError::BatonReused { + reason: format!( + "Expected baton seq {}, received {baton_seq}", + stream.baton_seq + ), + })?; + } + } + }; + + let Handle::Available(mut stream) = mem::replace(handle.unwrap(), Handle::Acquired) else { + unreachable!() + }; + + tracing::debug!("Stream {stream_id} was acquired with baton seq {baton_seq}"); + // incrementing the sequence number forces the next HTTP request to use a different + // baton + stream.baton_seq = stream.baton_seq.wrapping_add(1); + unmark_expire(&mut state, stream.stream_id); + stream + } + None => { + let conn = db.connect().await?; + let mut state = server.stream_state.lock(); + let stream = Box::new(Stream { + conn: Some(conn), + sqls: HashMap::new(), + stream_id: gen_stream_id(&mut state), + // initializing the sequence number randomly makes it much harder to exploit + // collisions in batons + baton_seq: rand::random(), + }); + state.handles.insert(stream.stream_id, Handle::Acquired); + tracing::debug!( + "Stream {} was created with baton seq {}", + stream.stream_id, + stream.baton_seq + ); + stream + } + }; + Ok(Guard { + server, + stream: Some(stream), + release: false, + }) +} + +impl<'srv> Guard<'srv> { + pub fn get_conn(&self) -> Result<&ConnectionHandle, ProtocolError> { + let stream = self.stream.as_ref().unwrap(); + stream.conn.as_ref().ok_or(ProtocolError::BatonStreamClosed) + } + + /// Closes the database connection. The next call to [`Guard::release()`] will then remove the + /// stream. + pub fn close_conn(&mut self) { + let stream = self.stream.as_mut().unwrap(); + stream.conn = None; + } + + pub fn sqls(&self) -> &HashMap { + &self.stream.as_ref().unwrap().sqls + } + + pub fn sqls_mut(&mut self) -> &mut HashMap { + &mut self.stream.as_mut().unwrap().sqls + } + + /// Releases the guard and returns the baton that can be used to access this stream in the next + /// HTTP request. Returns `None` if the stream has been closed (and thus cannot be accessed + /// again). 
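The `baton_seq` comparison in `acquire` is what makes each baton single-use: a successful acquisition bumps the sequence, so presenting the same baton again is detected as reuse. A minimal model of that rule (ours, not the server's code):

```rust
struct StreamState {
    baton_seq: u64,
}

#[derive(Debug, PartialEq)]
enum AcquireError {
    BatonReused,
}

impl StreamState {
    fn acquire(&mut self, presented_seq: u64) -> Result<(), AcquireError> {
        if presented_seq != self.baton_seq {
            return Err(AcquireError::BatonReused);
        }
        // Bump the expected seq so this baton cannot be replayed.
        self.baton_seq = self.baton_seq.wrapping_add(1);
        Ok(())
    }
}

fn main() {
    let mut stream = StreamState { baton_seq: 7 };
    assert!(stream.acquire(7).is_ok());
    // Replaying the same baton now fails: the expected seq is 8.
    assert_eq!(stream.acquire(7), Err(AcquireError::BatonReused));
}
```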
+    pub fn release(mut self) -> Option<String> {
+        let stream = self.stream.as_ref().unwrap();
+        if stream.conn.is_some() {
+            self.release = true; // tell destructor to make the stream available again
+            Some(encode_baton(
+                self.server,
+                stream.stream_id,
+                stream.baton_seq,
+            ))
+        } else {
+            None
+        }
+    }
+}
+
+impl<'srv> Drop for Guard<'srv> {
+    fn drop(&mut self) {
+        let stream = self.stream.take().unwrap();
+        let stream_id = stream.stream_id;
+
+        let mut state = self.server.stream_state.lock();
+        let Some(handle) = state.handles.remove(&stream_id) else {
+            panic!("Dropped a Guard for stream {stream_id}, \
+                but Server does not contain a handle to it");
+        };
+        if !matches!(handle, Handle::Acquired) {
+            panic!(
+                "Dropped a Guard for stream {stream_id}, \
+                but Server contained a handle that is not acquired"
+            );
+        }
+
+        if self.release {
+            state.handles.insert(stream_id, Handle::Available(stream));
+            mark_expire(&mut state, stream_id);
+            tracing::debug!("Stream {stream_id} was released for further use");
+        } else {
+            tracing::debug!("Stream {stream_id} was closed");
+        }
+    }
+}
+
+fn gen_stream_id(state: &mut ServerStreamState) -> u64 {
+    for _ in 0..10 {
+        let stream_id = rand::random();
+        if !state.handles.contains_key(&stream_id) {
+            return stream_id;
+        }
+    }
+    panic!("Failed to generate a free stream id with rejection sampling")
+}
+
+/// Encodes the baton.
+///
+/// The baton is a base64-encoded byte string composed of:
+///
+/// - payload (16 bytes):
+///     - `stream_id` (8 bytes, big endian)
+///     - `baton_seq` (8 bytes, big endian)
+/// - MAC (32 bytes): an authentication code generated with HMAC-SHA256
+///
+/// The MAC is used to cryptographically verify that the baton was generated by this server. It is
+/// unlikely that we ever issue the same baton twice, because there are 2^128 possible combinations
+/// for the payload (note that both `stream_id` and the initial `baton_seq` are generated randomly).
+fn encode_baton(server: &Server, stream_id: u64, baton_seq: u64) -> String {
+    let mut payload = [0; 16];
+    payload[0..8].copy_from_slice(&stream_id.to_be_bytes());
+    payload[8..16].copy_from_slice(&baton_seq.to_be_bytes());
+
+    let mut hmac = hmac::Hmac::<sha2::Sha256>::new_from_slice(&server.baton_key).unwrap();
+    hmac.update(&payload);
+    let mac = hmac.finalize().into_bytes();
+
+    let mut baton_data = [0; 48];
+    baton_data[0..16].copy_from_slice(&payload);
+    baton_data[16..48].copy_from_slice(&mac);
+    BASE64_STANDARD_NO_PAD.encode(baton_data)
+}
+
+/// Decodes a baton encoded with `encode_baton()` and returns `(stream_id, baton_seq)`. Always
+/// returns a [`ProtocolError::BatonInvalid`] if the baton is invalid, attaching a `reason` that
+/// describes the precise cause.
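+///
+/// A minimal sketch of the round trip (assuming a `Server` whose `baton_key` is already set):
+///
+/// ```ignore
+/// let baton = encode_baton(server, stream_id, baton_seq);
+/// assert_eq!(baton.len(), 64); // 48 raw bytes -> 64 unpadded base64 chars
+/// assert_eq!(decode_baton(server, &baton)?, (stream_id, baton_seq));
+/// ```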
+fn decode_baton(server: &Server, baton_str: &str) -> crate::Result<(u64, u64), HranaError> {
+    let baton_data =
+        BASE64_STANDARD_NO_PAD
+            .decode(baton_str)
+            .map_err(|err| ProtocolError::BatonInvalid {
+                reason: format!("Could not base64-decode baton: {err}"),
+            })?;
+
+    if baton_data.len() != 48 {
+        Err(ProtocolError::BatonInvalid {
+            reason: format!("Baton has invalid size of {} bytes", baton_data.len()),
+        })?;
+    }
+
+    let payload = &baton_data[0..16];
+    let received_mac = &baton_data[16..48];
+
+    let mut hmac = hmac::Hmac::<sha2::Sha256>::new_from_slice(&server.baton_key).unwrap();
+    hmac.update(payload);
+    hmac.verify_slice(received_mac)
+        .map_err(|_| ProtocolError::BatonInvalid {
+            reason: "Invalid MAC on baton".into(),
+        })?;
+
+    let stream_id = u64::from_be_bytes(payload[0..8].try_into().unwrap());
+    let baton_seq = u64::from_be_bytes(payload[8..16].try_into().unwrap());
+    Ok((stream_id, baton_seq))
+}
+
+/// How long we keep a stream in the [`Handle::Available`] state before it expires. Note that every
+/// HTTP request resets the timer to the beginning, so the client can keep a stream alive for a
+/// long time, as long as it pings regularly.
+const EXPIRATION: Duration = Duration::from_secs(10);
+
+/// How long we keep an expired stream in the [`Handle::Expired`] state before removing it for good.
+const CLEANUP: Duration = Duration::from_secs(300);
+
+fn mark_expire(state: &mut ServerStreamState, stream_id: u64) {
+    let expire_at = roundup_instant(state, Instant::now() + EXPIRATION);
+    if state.expire_sleep.deadline() > expire_at {
+        if let Some(waker) = state.expire_waker.take() {
+            waker.wake();
+        }
+    }
+    state.expire_queue.push(stream_id, Reverse(expire_at));
+}
+
+fn unmark_expire(state: &mut ServerStreamState, stream_id: u64) {
+    state.expire_queue.remove(&stream_id);
+}
+
+/// Handles stream expiration (and cleanup). The returned future is never resolved.
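+///
+/// A sketch of how this is expected to be driven (the spawn site is an assumption, not part of
+/// this module; it presumes the server is shared behind an `Arc`):
+///
+/// ```ignore
+/// let server = Arc::new(server);
+/// tokio::spawn(async move { run_expire(&server).await });
+/// ```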
+pub async fn run_expire(server: &Server) { + future::poll_fn(|cx| { + let mut state = server.stream_state.lock(); + pump_expire(&mut state, cx); + task::Poll::Pending + }) + .await +} + +fn pump_expire(state: &mut ServerStreamState, cx: &mut task::Context) { + let now = Instant::now(); + + // expire all streams in the `expire_queue` that have passed their expiration time + let wakeup_at = loop { + let stream_id = match state.expire_queue.peek() { + Some((&stream_id, &Reverse(expire_at))) => { + if expire_at <= now { + stream_id + } else { + break expire_at; + } + } + None => break now + Duration::from_secs(60), + }; + state.expire_queue.pop(); + + match state.handles.get_mut(&stream_id) { + Some(handle @ Handle::Available(_)) => { + *handle = Handle::Expired; + } + _ => continue, + } + tracing::debug!("Stream {stream_id} was expired"); + + let cleanup_at = roundup_instant(state, now + CLEANUP); + state.cleanup_queue.push_back((stream_id, cleanup_at)); + }; + + // completely remove streams that are due in `cleanup_queue` + loop { + let stream_id = match state.cleanup_queue.front() { + Some(&(stream_id, cleanup_at)) if cleanup_at <= now => stream_id, + _ => break, + }; + state.cleanup_queue.pop_front(); + + let handle = state.handles.remove(&stream_id); + assert!(matches!(handle, Some(Handle::Expired))); + tracing::debug!("Stream {stream_id} was cleaned up after expiration"); + } + + // make sure that this function is called again no later than at time `wakeup_at` + state.expire_sleep.as_mut().reset(wakeup_at); + state.expire_waker = Some(cx.waker().clone()); + let _: task::Poll<()> = state.expire_sleep.as_mut().poll(cx); +} + +/// Rounds the `instant` to the next second. This is used to ensure that streams that expire close +/// together are expired at exactly the same instant, thus reducing the number of times that +/// [`pump_expire()`] is called during periods of high load. +fn roundup_instant(state: &ServerStreamState, instant: Instant) -> Instant { + let duration_s = (instant - state.expire_round_base).as_secs(); + state.expire_round_base + Duration::from_secs(duration_s + 1) +} + +impl StreamError { + pub fn code(&self) -> &'static str { + match self { + Self::StreamExpired => "STREAM_EXPIRED", + } + } +} diff --git a/libsqlx-server/src/hrana/mod.rs b/libsqlx-server/src/hrana/mod.rs new file mode 100644 index 00000000..8f4c7f68 --- /dev/null +++ b/libsqlx-server/src/hrana/mod.rs @@ -0,0 +1,69 @@ +use std::fmt; + +pub mod batch; +pub mod http; +pub mod proto; +mod result_builder; +pub mod stmt; +// pub mod ws; +pub mod error; + +#[derive(Debug, Copy, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub enum Version { + Hrana1, + Hrana2, +} + +impl fmt::Display for Version { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Version::Hrana1 => write!(f, "hrana1"), + Version::Hrana2 => write!(f, "hrana2"), + } + } +} + +/// An unrecoverable protocol error that should close the WebSocket or HTTP stream. A correct +/// client should never trigger any of these errors. 
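+///
+/// These errors close the stream they occur on; the WebSocket transport additionally maps them to
+/// close codes (see `protocol_error_to_close_code()` in `ws/conn.rs`).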
+#[derive(thiserror::Error, Debug)]
+pub enum ProtocolError {
+    #[error("Cannot deserialize client message: {source}")]
+    Deserialize { source: serde_json::Error },
+    #[error("Received a binary WebSocket message, which is not supported")]
+    BinaryWebSocketMessage,
+    #[error("Received a request before hello message")]
+    RequestBeforeHello,
+
+    #[error("Stream {stream_id} not found")]
+    StreamNotFound { stream_id: i32 },
+    #[error("Stream {stream_id} already exists")]
+    StreamExists { stream_id: i32 },
+
+    #[error("Either `sql` or `sql_id` are required, but not both")]
+    SqlIdAndSqlGiven,
+    #[error("Either `sql` or `sql_id` are required")]
+    SqlIdOrSqlNotGiven,
+    #[error("SQL text {sql_id} not found")]
+    SqlNotFound { sql_id: i32 },
+    #[error("SQL text {sql_id} already exists")]
+    SqlExists { sql_id: i32 },
+
+    #[error("Invalid reference to step in a batch condition")]
+    BatchCondBadStep,
+
+    #[error("Received an invalid baton: {reason}")]
+    BatonInvalid { reason: String },
+    #[error("Received a baton that has already been used: {reason}")]
+    BatonReused { reason: String },
+    #[error("Stream for this baton was closed")]
+    BatonStreamClosed,
+
+    #[error("{what} is only supported in protocol version {min_version} and higher")]
+    NotSupported {
+        what: &'static str,
+        min_version: Version,
+    },
+
+    #[error("{0}")]
+    ResponseTooLarge(String),
+}
diff --git a/libsqlx-server/src/hrana/proto.rs b/libsqlx-server/src/hrana/proto.rs
new file mode 100644
index 00000000..8d544a07
--- /dev/null
+++ b/libsqlx-server/src/hrana/proto.rs
@@ -0,0 +1,160 @@
+//! Structures in Hrana that are common for WebSockets and HTTP.
+
+use bytes::Bytes;
+use serde::{Deserialize, Serialize};
+use std::sync::Arc;
+
+#[derive(Serialize, Debug)]
+pub struct Error {
+    pub message: String,
+    pub code: String,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct Stmt {
+    #[serde(default)]
+    pub sql: Option<String>,
+    #[serde(default)]
+    pub sql_id: Option<i32>,
+    #[serde(default)]
+    pub args: Vec<Value>,
+    #[serde(default)]
+    pub named_args: Vec<NamedArg>,
+    #[serde(default)]
+    pub want_rows: Option<bool>,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct NamedArg {
+    pub name: String,
+    pub value: Value,
+}
+
+#[derive(Serialize, Debug)]
+pub struct StmtResult {
+    pub cols: Vec<Col>,
+    pub rows: Vec<Vec<Value>>,
+    pub affected_row_count: u64,
+    #[serde(with = "option_i64_as_str")]
+    pub last_insert_rowid: Option<i64>,
+}
+
+#[derive(Serialize, Debug)]
+pub struct Col {
+    pub name: Option<String>,
+    pub decltype: Option<String>,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct Batch {
+    pub steps: Vec<BatchStep>,
+}
+
+#[derive(Deserialize, Debug)]
+pub struct BatchStep {
+    pub stmt: Stmt,
+    #[serde(default)]
+    pub condition: Option<BatchCond>,
+}
+
+#[derive(Serialize, Debug)]
+pub struct BatchResult {
+    pub step_results: Vec<Option<StmtResult>>,
+    pub step_errors: Vec<Option<Error>>,
+}
+
+#[derive(Deserialize, Debug)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum BatchCond {
+    Ok { step: i32 },
+    Error { step: i32 },
+    Not { cond: Box<BatchCond> },
+    And { conds: Vec<BatchCond> },
+    Or { conds: Vec<BatchCond> },
+}
+
+#[derive(Serialize, Debug)]
+pub struct DescribeResult {
+    pub params: Vec<DescribeParam>,
+    pub cols: Vec<DescribeCol>,
+    pub is_explain: bool,
+    pub is_readonly: bool,
+}
+
+#[derive(Serialize, Debug)]
+pub struct DescribeParam {
+    pub name: Option<String>,
+}
+
+#[derive(Serialize, Debug)]
+pub struct DescribeCol {
+    pub name: String,
+    pub decltype: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum Value {
+    Null,
+    Integer {
+        #[serde(with = "i64_as_str")]
+        value: i64,
+    },
+    Float {
+        value: f64,
+    },
+    Text {
+        value: Arc<str>,
+    },
+    Blob {
+        #[serde(with = "bytes_as_base64", rename = "base64")]
+        value: Bytes,
+    },
+}
+
+mod i64_as_str {
+    use serde::{de, ser};
+    use serde::{de::Error as _, Serialize as _};
+
+    pub fn serialize<S: ser::Serializer>(value: &i64, ser: S) -> Result<S::Ok, S::Error> {
+        value.to_string().serialize(ser)
+    }
+
+    pub fn deserialize<'de, D: de::Deserializer<'de>>(de: D) -> Result<i64, D::Error> {
+        let str_value = <&'de str as de::Deserialize>::deserialize(de)?;
+        str_value.parse().map_err(|_| {
+            D::Error::invalid_value(
+                de::Unexpected::Str(str_value),
+                &"decimal integer as a string",
+            )
+        })
+    }
+}
+
+mod option_i64_as_str {
+    use serde::{ser, Serialize as _};
+
+    pub fn serialize<S: ser::Serializer>(value: &Option<i64>, ser: S) -> Result<S::Ok, S::Error> {
+        value.map(|v| v.to_string()).serialize(ser)
+    }
+}
+
+mod bytes_as_base64 {
+    use base64::{engine::general_purpose::STANDARD_NO_PAD, Engine as _};
+    use bytes::Bytes;
+    use serde::{de, ser};
+    use serde::{de::Error as _, Serialize as _};
+
+    pub fn serialize<S: ser::Serializer>(value: &Bytes, ser: S) -> Result<S::Ok, S::Error> {
+        STANDARD_NO_PAD.encode(value).serialize(ser)
+    }
+
+    pub fn deserialize<'de, D: de::Deserializer<'de>>(de: D) -> Result<Bytes, D::Error> {
+        let text = <&'de str as de::Deserialize>::deserialize(de)?;
+        let text = text.trim_end_matches('=');
+        let bytes = STANDARD_NO_PAD.decode(text).map_err(|_| {
+            D::Error::invalid_value(de::Unexpected::Str(text), &"binary data encoded as base64")
+        })?;
+        Ok(Bytes::from(bytes))
+    }
+}
diff --git a/libsqlx-server/src/hrana/result_builder.rs b/libsqlx-server/src/hrana/result_builder.rs
new file mode 100644
index 00000000..3985795b
--- /dev/null
+++ b/libsqlx-server/src/hrana/result_builder.rs
@@ -0,0 +1,379 @@
+use std::fmt::{self, Write as _};
+use std::io;
+
+use bytes::Bytes;
+use libsqlx::{result_builder::*, FrameNo};
+use tokio::sync::oneshot;
+
+use crate::hrana::stmt::proto_error_from_stmt_error;
+
+use super::error::HranaError;
+use super::proto;
+
+pub struct SingleStatementBuilder {
+    builder: StatementBuilder,
+    ret: Option<oneshot::Sender<crate::Result<proto::StmtResult>>>,
+}
+
+impl SingleStatementBuilder {
+    pub fn new() -> (
+        Self,
+        oneshot::Receiver<crate::Result<proto::StmtResult>>,
+    ) {
+        let (ret, rcv) = oneshot::channel();
+        (
+            Self {
+                builder: StatementBuilder::default(),
+                ret: Some(ret),
+            },
+            rcv,
+        )
+    }
+}
+
+impl ResultBuilder for SingleStatementBuilder {
+    fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> {
+        self.builder.init(config)
+    }
+
+    fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> {
+        self.builder.begin_step()
+    }
+
+    fn finish_step(
+        &mut self,
+        affected_row_count: u64,
+        last_insert_rowid: Option<i64>,
+    ) -> Result<(), QueryResultBuilderError> {
+        self.builder
+            .finish_step(affected_row_count, last_insert_rowid)
+    }
+
+    fn step_error(&mut self, error: libsqlx::error::Error) -> Result<(), QueryResultBuilderError> {
+        self.builder.step_error(error)
+    }
+
+    fn cols_description(
+        &mut self,
+        cols: &mut dyn Iterator<Item = Column>,
+    ) -> Result<(), QueryResultBuilderError> {
+        self.builder.cols_description(cols)
+    }
+
+    fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> {
+        self.builder.begin_row()
+    }
+
+    fn add_row_value(&mut self, v: ValueRef) -> Result<(), QueryResultBuilderError> {
+        self.builder.add_row_value(v)
+    }
+
+    fn finnalize(
+        &mut self,
+        _is_txn: bool,
+        _frame_no: Option<FrameNo>,
+    ) -> Result<bool, QueryResultBuilderError> {
+        let res = self.builder.take_ret();
+        let _ = self.ret.take().unwrap().send(res);
+        Ok(true)
+    }
+
+    fn finnalize_error(&mut self, _e: String) {
+        todo!()
+    }
+}
+
+#[derive(Debug, Default)]
+struct StatementBuilder {
+    has_step: bool,
+    cols: Vec<proto::Col>,
+    rows: Vec<Vec<proto::Value>>,
+    err: Option<libsqlx::error::Error>,
+    affected_row_count: u64,
+    last_insert_rowid: Option<i64>,
+    current_size: u64,
+    max_response_size: u64,
+}
+
+impl StatementBuilder {
+    fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> {
+        *self = Self {
+            max_response_size: config.max_size.unwrap_or(u64::MAX),
+            ..Default::default()
+        };
+
+        Ok(())
+    }
+
+    fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> {
+        // SingleStatementBuilder only builds a single statement
+        assert!(!self.has_step);
+        self.has_step = true;
+        Ok(())
+    }
+
+    fn finish_step(
+        &mut self,
+        affected_row_count: u64,
+        last_insert_rowid: Option<i64>,
+    ) -> Result<(), QueryResultBuilderError> {
+        self.last_insert_rowid = last_insert_rowid;
+        self.affected_row_count = affected_row_count;
+
+        Ok(())
+    }
+
+    fn step_error(&mut self, error: libsqlx::error::Error) -> Result<(), QueryResultBuilderError> {
+        assert!(self.err.is_none());
+        let mut f = SizeFormatter(0);
+        write!(&mut f, "{error}").unwrap();
+        self.current_size = f.0;
+
+        self.err = Some(error);
+
+        Ok(())
+    }
+
+    fn cols_description<'a>(
+        &mut self,
+        cols: &mut dyn Iterator<Item = Column>,
+    ) -> Result<(), QueryResultBuilderError> {
+        assert!(self.err.is_none());
+        assert!(self.cols.is_empty());
+
+        let mut cols_size = 0;
+
+        self.cols.extend(cols.into_iter().map(Into::into).map(|c| {
+            cols_size += estimate_cols_json_size(&c);
+            proto::Col {
+                name: Some(c.name.to_owned()),
+                decltype: c.decl_ty.map(ToString::to_string),
+            }
+        }));
+
+        self.current_size += cols_size;
+        if self.current_size > self.max_response_size {
+            return Err(QueryResultBuilderError::ResponseTooLarge(
+                self.max_response_size,
+            ));
+        }
+
+        Ok(())
+    }
+
+    fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> {
+        assert!(self.err.is_none());
+        self.rows.push(Vec::with_capacity(self.cols.len()));
+        Ok(())
+    }
+
+    fn add_row_value(&mut self, v: ValueRef) -> Result<(), QueryResultBuilderError> {
+        assert!(self.err.is_none());
+        let estimate_size = value_json_size(&v);
+        if self.current_size + estimate_size > self.max_response_size {
+            return Err(QueryResultBuilderError::ResponseTooLarge(
+                self.max_response_size,
+            ));
+        }
+
+        self.current_size += estimate_size;
+
+        let val = match v {
+            ValueRef::Null => proto::Value::Null,
+            ValueRef::Integer(value) => proto::Value::Integer { value },
+            ValueRef::Real(value) => proto::Value::Float { value },
+            ValueRef::Text(s) => proto::Value::Text {
+                value: String::from_utf8(s.to_vec())
+                    .map_err(QueryResultBuilderError::from_any)?
+                    .into(),
+            },
+            ValueRef::Blob(d) => proto::Value::Blob {
+                value: Bytes::copy_from_slice(d),
+            },
+        };
+
+        self.rows
+            .last_mut()
+            .expect("row must be initialized")
+            .push(val);
+
+        Ok(())
+    }
+
+    pub fn take_ret(&mut self) -> crate::Result<proto::StmtResult> {
+        match self.err.take() {
+            Some(err) => Err(crate::error::Error::from(err))?,
+            None => Ok(proto::StmtResult {
+                cols: std::mem::take(&mut self.cols),
+                rows: std::mem::take(&mut self.rows),
+                affected_row_count: self.affected_row_count,
+                last_insert_rowid: self.last_insert_rowid,
+            }),
+        }
+    }
+}
+
+struct SizeFormatter(u64);
+
+impl io::Write for SizeFormatter {
+    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+        self.0 += buf.len() as u64;
+        Ok(buf.len())
+    }
+
+    fn flush(&mut self) -> io::Result<()> {
+        Ok(())
+    }
+}
+
+impl fmt::Write for SizeFormatter {
+    fn write_str(&mut self, s: &str) -> fmt::Result {
+        self.0 += s.len() as u64;
+        Ok(())
+    }
+}
+
+fn value_json_size(v: &ValueRef) -> u64 {
+    let mut f = SizeFormatter(0);
+    match v {
+        ValueRef::Null => write!(&mut f, r#"{{"type":"null"}}"#).unwrap(),
+        ValueRef::Integer(i) => write!(&mut f, r#"{{"type":"integer", "value": "{i}"}}"#).unwrap(),
+        ValueRef::Real(x) => write!(&mut f, r#"{{"type":"float","value": {x}}}"#).unwrap(),
+        ValueRef::Text(s) => {
+            // error will be caught later.
+            if let Ok(s) = std::str::from_utf8(s) {
+                write!(&mut f, r#"{{"type":"text","value":"{s}"}}"#).unwrap()
+            }
+        }
+        ValueRef::Blob(b) => return b.len() as u64,
+    }
+
+    f.0
+}
+
+fn estimate_cols_json_size(c: &Column) -> u64 {
+    let mut f = SizeFormatter(0);
+    write!(
+        &mut f,
+        r#"{{"name":"{}","decltype":"{}"}}"#,
+        c.name,
+        c.decl_ty.unwrap_or("null")
+    )
+    .unwrap();
+    f.0
+}
+
+#[derive(Debug)]
+pub struct HranaBatchProtoBuilder {
+    step_results: Vec<Option<proto::StmtResult>>,
+    step_errors: Vec<Option<proto::Error>>,
+    stmt_builder: StatementBuilder,
+    current_size: u64,
+    max_response_size: u64,
+    step_empty: bool,
+    ret: Option<oneshot::Sender<proto::BatchResult>>,
+}
+
+impl HranaBatchProtoBuilder {
+    pub fn new() -> (Self, oneshot::Receiver<proto::BatchResult>) {
+        let (ret, rcv) = oneshot::channel();
+        (
+            Self {
+                step_results: Vec::new(),
+                step_errors: Vec::new(),
+                stmt_builder: StatementBuilder::default(),
+                current_size: 0,
+                max_response_size: u64::MAX,
+                step_empty: false,
+                ret: Some(ret),
+            },
+            rcv,
+        )
+    }
+
+    pub fn into_ret(&mut self) -> proto::BatchResult {
+        proto::BatchResult {
+            step_results: std::mem::take(&mut self.step_results),
+            step_errors: std::mem::take(&mut self.step_errors),
+        }
+    }
+}
+
+impl ResultBuilder for HranaBatchProtoBuilder {
+    fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> {
+        self.max_response_size = config.max_size.unwrap_or(u64::MAX);
+        self.stmt_builder.init(config)?;
+        Ok(())
+    }
+
+    fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> {
+        self.step_empty = true;
+        self.stmt_builder.begin_step()
+    }
+
+    fn finish_step(
+        &mut self,
+        affected_row_count: u64,
+        last_insert_rowid: Option<i64>,
+    ) -> Result<(), QueryResultBuilderError> {
+        self.stmt_builder
+            .finish_step(affected_row_count, last_insert_rowid)?;
+        self.current_size += self.stmt_builder.current_size;
+
+        let new_builder = StatementBuilder {
+            current_size: 0,
+            max_response_size: self.max_response_size - self.current_size,
+            ..Default::default()
+        };
+        match std::mem::replace(&mut self.stmt_builder, new_builder).take_ret() {
+            Ok(res) => {
+                self.step_results.push((!self.step_empty).then_some(res));
+                self.step_errors.push(None);
+            }
+            Err(e) => {
+                self.step_results.push(None);
+                self.step_errors.push(Some(proto_error_from_stmt_error(
+                    Err(HranaError::from(e)).map_err(QueryResultBuilderError::from_any)?,
+                )));
+            }
+        }
+
+        Ok(())
+    }
+
+    fn step_error(&mut self, error: libsqlx::error::Error) -> Result<(), QueryResultBuilderError> {
+        self.stmt_builder.step_error(error)
+    }
+
+    fn cols_description(
+        &mut self,
+        cols: &mut dyn Iterator<Item = Column>,
+    ) -> Result<(), QueryResultBuilderError> {
+        self.step_empty = false;
+        self.stmt_builder.cols_description(cols)
+    }
+
+    fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> {
+        self.stmt_builder.begin_row()
+    }
+
+    fn add_row_value(&mut self, v: ValueRef) -> Result<(), QueryResultBuilderError> {
+        self.stmt_builder.add_row_value(v)
+    }
+
+    fn finnalize(
+        &mut self,
+        _is_txn: bool,
+        _frame_no: Option<FrameNo>,
+    ) -> Result<bool, QueryResultBuilderError> {
+        if let Some(ret) = self.ret.take() {
+            let _ = ret.send(self.into_ret());
+        }
+
+        Ok(false)
+    }
+
+    fn finnalize_error(&mut self, _e: String) {
+        todo!()
+    }
+}
diff --git a/libsqlx-server/src/hrana/stmt.rs b/libsqlx-server/src/hrana/stmt.rs
new file mode 100644
index 00000000..d1a7799e
--- /dev/null
+++ b/libsqlx-server/src/hrana/stmt.rs
@@ -0,0 +1,308 @@
+use std::collections::HashMap;
+
+use futures::FutureExt;
+use libsqlx::analysis::Statement;
+use libsqlx::program::Program;
+use libsqlx::query::{Params, Query, Value};
+use libsqlx::DescribeResponse;
+
+use super::error::HranaError;
+use super::result_builder::SingleStatementBuilder;
+use super::{proto, ProtocolError, Version};
+use crate::allocation::ConnectionHandle;
+// use crate::auth::Authenticated;
+use crate::hrana;
+
+/// An error during execution of an SQL statement.
+#[derive(thiserror::Error, Debug)]
+pub enum StmtError {
+    #[error("SQL string could not be parsed: {source}")]
+    SqlParse { source: color_eyre::eyre::Error },
+    #[error("SQL string does not contain any statement")]
+    SqlNoStmt,
+    #[error("SQL string contains more than one statement")]
+    SqlManyStmts,
+    #[error("Arguments do not match SQL parameters: {source}")]
+    ArgsInvalid { source: color_eyre::eyre::Error },
+    #[error("Specifying both positional and named arguments is not supported")]
+    ArgsBothPositionalAndNamed,
+
+    #[error("Transaction timed out")]
+    TransactionTimeout,
+    #[error("Server cannot handle additional transactions")]
+    TransactionBusy,
+    #[error("SQLite error: {message}")]
+    SqliteError {
+        source: libsqlx::rusqlite::ffi::Error,
+        message: String,
+    },
+    #[error("SQL input error: {message} (at offset {offset})")]
+    SqlInputError {
+        source: libsqlx::rusqlite::ffi::Error,
+        message: String,
+        offset: i32,
+    },
+
+    #[error("Operation was blocked{}", .reason.as_ref().map(|msg| format!(": {}", msg)).unwrap_or_default())]
+    Blocked { reason: Option<String> },
+}
+
+pub async fn execute_stmt(
+    conn: &ConnectionHandle,
+    // auth: Authenticated,
+    query: Query,
+) -> crate::Result<proto::StmtResult> {
+    let (builder, ret) = SingleStatementBuilder::new();
+    conn.execute(
+        Program::from_queries(Some(query)), /*, auth*/
+        Box::new(builder),
+    )
+    .await;
+    ret.await
+        .map_err(|_| crate::error::Error::ConnectionClosed)?
+}
+
+pub async fn describe_stmt(
+    db: &ConnectionHandle,
+    // auth: Authenticated,
+    sql: String,
+) -> crate::Result<proto::DescribeResult> {
+    todo!()
+    // match db.describe(sql/*, auth*/).await? {
+    //     Ok(describe_response) => Ok(proto_describe_result_from_describe_response(
+    //         describe_response,
+    //     )),
+    //     Err(sqld_error) => match stmt_error_from_sqld_error(sqld_error) {
+    //         Ok(stmt_error) => Err(stmt_error)?,
+    //         Err(sqld_error) => Err(sqld_error)?,
+    //     },
+    // }
+}
+
+pub fn proto_stmt_to_query(
+    proto_stmt: &proto::Stmt,
+    sqls: &HashMap<i32, String>,
+    version: Version,
+) -> crate::Result<Query, HranaError> {
+    let sql = proto_sql_to_sql(proto_stmt.sql.as_deref(), proto_stmt.sql_id, sqls, version)?;
+
+    let mut stmt_iter = Statement::parse(sql);
+    let stmt = match stmt_iter.next() {
+        Some(Ok(stmt)) => stmt,
+        Some(Err(err)) => Err(StmtError::SqlParse { source: err.into() })?,
+        None => Err(StmtError::SqlNoStmt)?,
+    };
+
+    if stmt_iter.next().is_some() {
+        Err(StmtError::SqlManyStmts)?
+    }
+
+    let params = if proto_stmt.named_args.is_empty() {
+        let values = proto_stmt.args.iter().map(proto_value_to_value).collect();
+        Params::Positional(values)
+    } else if proto_stmt.args.is_empty() {
+        let values = proto_stmt
+            .named_args
+            .iter()
+            .map(|arg| (arg.name.clone(), proto_value_to_value(&arg.value)))
+            .collect();
+        Params::Named(values)
+    } else {
+        Err(StmtError::ArgsBothPositionalAndNamed)?
+    };
+
+    let want_rows = proto_stmt.want_rows.unwrap_or(true);
+    Ok(Query {
+        stmt,
+        params,
+        want_rows,
+    })
+}
+
+pub fn proto_sql_to_sql<'s>(
+    proto_sql: Option<&'s str>,
+    proto_sql_id: Option<i32>,
+    sqls: &'s HashMap<i32, String>,
+    version: Version,
+) -> Result<&'s str, ProtocolError> {
+    if proto_sql_id.is_some() && version < Version::Hrana2 {
+        return Err(ProtocolError::NotSupported {
+            what: "`sql_id`",
+            min_version: Version::Hrana2,
+        });
+    }
+
+    match (proto_sql, proto_sql_id) {
+        (Some(sql), None) => Ok(sql),
+        (None, Some(sql_id)) => match sqls.get(&sql_id) {
+            Some(sql) => Ok(sql),
+            None => Err(ProtocolError::SqlNotFound { sql_id }),
+        },
+        (Some(_), Some(_)) => Err(ProtocolError::SqlIdAndSqlGiven),
+        (None, None) => Err(ProtocolError::SqlIdOrSqlNotGiven),
+    }
+}
+
+fn proto_value_to_value(proto_value: &proto::Value) -> Value {
+    match proto_value {
+        proto::Value::Null => Value::Null,
+        proto::Value::Integer { value } => Value::Integer(*value),
+        proto::Value::Float { value } => Value::Real(*value),
+        proto::Value::Text { value } => Value::Text(value.as_ref().into()),
+        proto::Value::Blob { value } => Value::Blob(value.as_ref().into()),
+    }
+}
+
+fn proto_value_from_value(value: Value) -> proto::Value {
+    match value {
+        Value::Null => proto::Value::Null,
+        Value::Integer(value) => proto::Value::Integer { value },
+        Value::Real(value) => proto::Value::Float { value },
+        Value::Text(value) => proto::Value::Text {
+            value: value.into(),
+        },
+        Value::Blob(value) => proto::Value::Blob {
+            value: value.into(),
+        },
+    }
+}
+
+fn proto_describe_result_from_describe_response(
+    response: DescribeResponse,
+) -> proto::DescribeResult {
+    proto::DescribeResult {
+        params: response
+            .params
+            .into_iter()
+            .map(|p| proto::DescribeParam { name: p.name })
+            .collect(),
+        cols: response
+            .cols
+            .into_iter()
+            .map(|c| proto::DescribeCol {
+                name: c.name,
+                decltype: c.decltype,
+            })
+            .collect(),
+        is_explain: response.is_explain,
+        is_readonly: response.is_readonly,
+    }
+}
+
+impl From<crate::error::Error> for HranaError {
+    fn from(error: crate::error::Error) -> Self {
+        if let crate::error::Error::Libsqlx(e) = error {
+            match e {
+                libsqlx::error::Error::LibSqlInvalidQueryParams(source) => StmtError::ArgsInvalid {
+                    source: color_eyre::eyre::anyhow!("{source}"),
+                }
+                .into(),
+                libsqlx::error::Error::LibSqlTxTimeout =>
StmtError::TransactionTimeout.into(), + libsqlx::error::Error::LibSqlTxBusy => StmtError::TransactionBusy.into(), + libsqlx::error::Error::Blocked(reason) => StmtError::Blocked { reason }.into(), + libsqlx::error::Error::RusqliteError(rusqlite_error) => match rusqlite_error { + libsqlx::error::RusqliteError::SqliteFailure(sqlite_error, Some(message)) => { + StmtError::SqliteError { + source: sqlite_error, + message, + } + .into() + } + libsqlx::error::RusqliteError::SqliteFailure(sqlite_error, None) => { + StmtError::SqliteError { + message: sqlite_error.to_string(), + source: sqlite_error, + } + .into() + } + libsqlx::error::RusqliteError::SqlInputError { + error: sqlite_error, + msg: message, + offset, + .. + } => StmtError::SqlInputError { + source: sqlite_error, + message, + offset, + } + .into(), + rusqlite_error => { + return crate::error::Error::from(libsqlx::error::Error::RusqliteError( + rusqlite_error, + )) + .into() + } + }, + sqld_error => return crate::error::Error::from(sqld_error).into(), + } + } else { + Self::Libsqlx(error) + } + } +} + +pub fn proto_error_from_stmt_error(error: &StmtError) -> hrana::proto::Error { + hrana::proto::Error { + message: error.to_string(), + code: error.code().into(), + } +} + +impl StmtError { + pub fn code(&self) -> &'static str { + match self { + Self::SqlParse { .. } => "SQL_PARSE_ERROR", + Self::SqlNoStmt => "SQL_NO_STATEMENT", + Self::SqlManyStmts => "SQL_MANY_STATEMENTS", + Self::ArgsInvalid { .. } => "ARGS_INVALID", + Self::ArgsBothPositionalAndNamed => "ARGS_BOTH_POSITIONAL_AND_NAMED", + Self::TransactionTimeout => "TRANSACTION_TIMEOUT", + Self::TransactionBusy => "TRANSACTION_BUSY", + Self::SqliteError { source, .. } => sqlite_error_code(source.code), + Self::SqlInputError { .. } => "SQL_INPUT_ERROR", + Self::Blocked { .. 
} => "BLOCKED", + } + } +} + +fn sqlite_error_code(code: libsqlx::error::ErrorCode) -> &'static str { + match code { + libsqlx::error::ErrorCode::InternalMalfunction => "SQLITE_INTERNAL", + libsqlx::error::ErrorCode::PermissionDenied => "SQLITE_PERM", + libsqlx::error::ErrorCode::OperationAborted => "SQLITE_ABORT", + libsqlx::error::ErrorCode::DatabaseBusy => "SQLITE_BUSY", + libsqlx::error::ErrorCode::DatabaseLocked => "SQLITE_LOCKED", + libsqlx::error::ErrorCode::OutOfMemory => "SQLITE_NOMEM", + libsqlx::error::ErrorCode::ReadOnly => "SQLITE_READONLY", + libsqlx::error::ErrorCode::OperationInterrupted => "SQLITE_INTERRUPT", + libsqlx::error::ErrorCode::SystemIoFailure => "SQLITE_IOERR", + libsqlx::error::ErrorCode::DatabaseCorrupt => "SQLITE_CORRUPT", + libsqlx::error::ErrorCode::NotFound => "SQLITE_NOTFOUND", + libsqlx::error::ErrorCode::DiskFull => "SQLITE_FULL", + libsqlx::error::ErrorCode::CannotOpen => "SQLITE_CANTOPEN", + libsqlx::error::ErrorCode::FileLockingProtocolFailed => "SQLITE_PROTOCOL", + libsqlx::error::ErrorCode::SchemaChanged => "SQLITE_SCHEMA", + libsqlx::error::ErrorCode::TooBig => "SQLITE_TOOBIG", + libsqlx::error::ErrorCode::ConstraintViolation => "SQLITE_CONSTRAINT", + libsqlx::error::ErrorCode::TypeMismatch => "SQLITE_MISMATCH", + libsqlx::error::ErrorCode::ApiMisuse => "SQLITE_MISUSE", + libsqlx::error::ErrorCode::NoLargeFileSupport => "SQLITE_NOLFS", + libsqlx::error::ErrorCode::AuthorizationForStatementDenied => "SQLITE_AUTH", + libsqlx::error::ErrorCode::ParameterOutOfRange => "SQLITE_RANGE", + libsqlx::error::ErrorCode::NotADatabase => "SQLITE_NOTADB", + libsqlx::error::ErrorCode::Unknown => "SQLITE_UNKNOWN", + _ => "SQLITE_UNKNOWN", + } +} + +impl From<&proto::Value> for Value { + fn from(proto_value: &proto::Value) -> Value { + proto_value_to_value(proto_value) + } +} + +impl From for proto::Value { + fn from(value: Value) -> proto::Value { + proto_value_from_value(value) + } +} diff --git a/libsqlx-server/src/hrana/ws/conn.rs b/libsqlx-server/src/hrana/ws/conn.rs new file mode 100644 index 00000000..44daf98f --- /dev/null +++ b/libsqlx-server/src/hrana/ws/conn.rs @@ -0,0 +1,301 @@ +use std::borrow::Cow; +use std::future::Future; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use anyhow::{bail, Context as _, Result}; +use futures::stream::FuturesUnordered; +use futures::{ready, FutureExt as _, StreamExt as _}; +use tokio::sync::oneshot; +use tokio_tungstenite::tungstenite; +use tungstenite::protocol::frame::coding::CloseCode; + +use crate::database::Database; + +use super::super::{ProtocolError, Version}; +use super::handshake::WebSocket; +use super::{handshake, proto, session, Server, Upgrade}; + +/// State of a Hrana connection. +struct Conn { + conn_id: u64, + server: Arc>, + ws: WebSocket, + ws_closed: bool, + /// The version of the protocol that has been negotiated in the WebSocket handshake. + version: Version, + /// After a successful authentication, this contains the session-level state of the connection. + session: Option>, + /// Join set for all tasks that were spawned to handle the connection. + join_set: tokio::task::JoinSet<()>, + /// Future responses to requests that we have received but are evaluating asynchronously. + responses: FuturesUnordered, +} + +/// A `Future` that stores a handle to a future response to request which is being evaluated +/// asynchronously. +struct ResponseFuture { + /// The request id, which must be included in the response. 
+ request_id: i32, + /// The future that will be resolved with the response. + response_rx: futures::future::Fuse>>, +} + +pub(super) async fn handle_tcp( + server: Arc>, + socket: tokio::net::TcpStream, + conn_id: u64, +) -> Result<()> { + let (ws, version) = handshake::handshake_tcp(socket) + .await + .context("Could not perform the WebSocket handshake on TCP connection")?; + handle_ws(server, ws, version, conn_id).await +} + +pub(super) async fn handle_upgrade( + server: Arc>, + upgrade: Upgrade, + conn_id: u64, +) -> Result<()> { + let (ws, version) = handshake::handshake_upgrade(upgrade) + .await + .context("Could not perform the WebSocket handshake on HTTP connection")?; + handle_ws(server, ws, version, conn_id).await +} + +async fn handle_ws( + server: Arc>, + ws: WebSocket, + version: Version, + conn_id: u64, +) -> Result<()> { + let mut conn = Conn { + conn_id, + server, + ws, + ws_closed: false, + version, + session: None, + join_set: tokio::task::JoinSet::new(), + responses: FuturesUnordered::new(), + }; + + loop { + if let Some(kicker) = conn.server.idle_kicker.as_ref() { + kicker.kick(); + } + + tokio::select! { + Some(client_msg_res) = conn.ws.recv() => { + let client_msg = client_msg_res + .context("Could not receive a WebSocket message")?; + match handle_msg(&mut conn, client_msg).await { + Ok(true) => continue, + Ok(false) => break, + Err(err) => { + match err.downcast::() { + Ok(proto_err) => { + tracing::warn!( + "Connection #{} terminated due to protocol error: {}", + conn.conn_id, + proto_err, + ); + let close_code = protocol_error_to_close_code(&proto_err); + close(&mut conn, close_code, proto_err.to_string()).await; + return Ok(()) + } + Err(err) => { + close(&mut conn, CloseCode::Error, "Internal server error".into()).await; + return Err(err); + } + } + } + } + }, + Some(task_res) = conn.join_set.join_next() => { + task_res.expect("Connection subtask failed") + }, + Some(response_res) = conn.responses.next() => { + let response_msg = response_res?; + send_msg(&mut conn, &response_msg).await?; + }, + else => break, + } + } + + close( + &mut conn, + CloseCode::Normal, + "Thank you for using sqld".into(), + ) + .await; + Ok(()) +} + +async fn handle_msg( + conn: &mut Conn, + client_msg: tungstenite::Message, +) -> Result { + match client_msg { + tungstenite::Message::Text(client_msg) => { + // client messages are received as text WebSocket messages that encode the `ClientMsg` + // in JSON + let client_msg: proto::ClientMsg = match serde_json::from_str(&client_msg) { + Ok(client_msg) => client_msg, + Err(err) => bail!(ProtocolError::Deserialize { source: err }), + }; + + match client_msg { + proto::ClientMsg::Hello { jwt } => handle_hello_msg(conn, jwt).await, + proto::ClientMsg::Request { + request_id, + request, + } => handle_request_msg(conn, request_id, request).await, + } + } + tungstenite::Message::Binary(_) => bail!(ProtocolError::BinaryWebSocketMessage), + tungstenite::Message::Ping(ping_data) => { + let pong_msg = tungstenite::Message::Pong(ping_data); + conn.ws + .send(pong_msg) + .await + .context("Could not send pong to the WebSocket")?; + Ok(true) + } + tungstenite::Message::Pong(_) => Ok(true), + tungstenite::Message::Close(_) => Ok(false), + tungstenite::Message::Frame(_) => panic!("Received a tungstenite::Message::Frame"), + } +} + +async fn handle_hello_msg(conn: &mut Conn, jwt: Option) -> Result { + let hello_res = match conn.session.as_mut() { + None => session::handle_initial_hello(&conn.server, conn.version, jwt) + .map(|session| conn.session = 
Some(session)), + Some(session) => session::handle_repeated_hello(&conn.server, session, jwt), + }; + + match hello_res { + Ok(_) => { + send_msg(conn, &proto::ServerMsg::HelloOk {}).await?; + Ok(true) + } + Err(err) => match downcast_error(err) { + Ok(error) => { + send_msg(conn, &proto::ServerMsg::HelloError { error }).await?; + Ok(false) + } + Err(err) => Err(err), + }, + } +} + +async fn handle_request_msg( + conn: &mut Conn, + request_id: i32, + request: proto::Request, +) -> Result { + let Some(session) = conn.session.as_mut() else { + bail!(ProtocolError::RequestBeforeHello) + }; + + let response_rx = session::handle_request(&conn.server, session, &mut conn.join_set, request) + .await + .unwrap_or_else(|err| { + // we got an error immediately, but let's treat it as a special case of the general + // flow + let (tx, rx) = oneshot::channel(); + tx.send(Err(err)).unwrap(); + rx + }); + + conn.responses.push(ResponseFuture { + request_id, + response_rx: response_rx.fuse(), + }); + Ok(true) +} + +impl Future for ResponseFuture { + type Output = Result; + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match ready!(Pin::new(&mut self.response_rx).poll(cx)) { + Ok(Ok(response)) => Poll::Ready(Ok(proto::ServerMsg::ResponseOk { + request_id: self.request_id, + response, + })), + Ok(Err(err)) => match downcast_error(err) { + Ok(error) => Poll::Ready(Ok(proto::ServerMsg::ResponseError { + request_id: self.request_id, + error, + })), + Err(err) => Poll::Ready(Err(err)), + }, + Err(_recv_err) => { + // do not propagate this error, because the error that caused the receiver to drop + // is very likely propagating from another task at this moment, and we don't want + // to hide it. + // this is also the reason why we need to use `Fuse` in self.response_rx + tracing::warn!("Response sender was dropped"); + Poll::Pending + } + } + } +} + +fn downcast_error(err: anyhow::Error) -> Result { + match err.downcast_ref::() { + Some(error) => Ok(proto::Error { + message: error.to_string(), + code: error.code().into(), + }), + None => Err(err), + } +} + +async fn send_msg(conn: &mut Conn, msg: &proto::ServerMsg) -> Result<()> { + let msg = serde_json::to_string(&msg).context("Could not serialize response message")?; + let msg = tungstenite::Message::Text(msg); + conn.ws + .send(msg) + .await + .context("Could not send response to the WebSocket") +} + +async fn close(conn: &mut Conn, code: CloseCode, reason: String) { + if conn.ws_closed { + return; + } + + let close_frame = tungstenite::protocol::frame::CloseFrame { + code, + reason: Cow::Owned(reason), + }; + if let Err(err) = conn + .ws + .send(tungstenite::Message::Close(Some(close_frame))) + .await + { + if !matches!( + err, + tungstenite::Error::AlreadyClosed | tungstenite::Error::ConnectionClosed + ) { + tracing::warn!( + "Could not send close frame to WebSocket of connection #{}: {:?}", + conn.conn_id, + err + ); + } + } + + conn.ws_closed = true; +} + +fn protocol_error_to_close_code(err: &ProtocolError) -> CloseCode { + match err { + ProtocolError::Deserialize { .. 
} => CloseCode::Invalid, + ProtocolError::BinaryWebSocketMessage => CloseCode::Unsupported, + _ => CloseCode::Policy, + } +} diff --git a/libsqlx-server/src/hrana/ws/handshake.rs b/libsqlx-server/src/hrana/ws/handshake.rs new file mode 100644 index 00000000..ef187a6a --- /dev/null +++ b/libsqlx-server/src/hrana/ws/handshake.rs @@ -0,0 +1,140 @@ +use anyhow::{anyhow, bail, Context as _, Result}; +use futures::{SinkExt as _, StreamExt as _}; +use tokio_tungstenite::tungstenite; +use tungstenite::http; + +use super::super::Version; +use super::Upgrade; + +#[derive(Debug)] +pub enum WebSocket { + Tcp(tokio_tungstenite::WebSocketStream), + Upgraded(tokio_tungstenite::WebSocketStream), +} + +pub async fn handshake_tcp(socket: tokio::net::TcpStream) -> Result<(WebSocket, Version)> { + let mut version = None; + let callback = |req: &http::Request<()>, resp: http::Response<()>| { + let (mut resp_parts, _) = resp.into_parts(); + resp_parts + .headers + .insert("server", http::HeaderValue::from_static("sqld-hrana-tcp")); + + match negotiate_version(req.headers(), &mut resp_parts.headers) { + Ok(version_) => { + version = Some(version_); + Ok(http::Response::from_parts(resp_parts, ())) + } + Err(resp_body) => Err(http::Response::from_parts(resp_parts, Some(resp_body))), + } + }; + + let ws_config = Some(get_ws_config()); + let stream = + tokio_tungstenite::accept_hdr_async_with_config(socket, callback, ws_config).await?; + Ok((WebSocket::Tcp(stream), version.unwrap())) +} + +pub async fn handshake_upgrade(upgrade: Upgrade) -> Result<(WebSocket, Version)> { + let mut req = upgrade.request; + + let ws_config = Some(get_ws_config()); + let (mut resp, stream_fut_version_res) = match hyper_tungstenite::upgrade(&mut req, ws_config) { + Ok((mut resp, stream_fut)) => match negotiate_version(req.headers(), resp.headers_mut()) { + Ok(version) => (resp, Ok((stream_fut, version))), + Err(msg) => { + *resp.status_mut() = http::StatusCode::BAD_REQUEST; + *resp.body_mut() = hyper::Body::from(msg.clone()); + ( + resp, + Err(anyhow!("Could not negotiate subprotocol: {}", msg)), + ) + } + }, + Err(err) => { + let resp = http::Response::builder() + .status(http::StatusCode::BAD_REQUEST) + .body(hyper::Body::from(format!("{err}"))) + .unwrap(); + ( + resp, + Err(anyhow!(err).context("Protocol error in HTTP upgrade")), + ) + } + }; + + resp.headers_mut().insert( + "server", + http::HeaderValue::from_static("sqld-hrana-upgrade"), + ); + if upgrade.response_tx.send(resp).is_err() { + bail!("Could not send the HTTP upgrade response") + } + + let (stream_fut, version) = stream_fut_version_res?; + let stream = stream_fut + .await + .context("Could not upgrade HTTP request to a WebSocket")?; + Ok((WebSocket::Upgraded(stream), version)) +} + +fn negotiate_version( + req_headers: &http::HeaderMap, + resp_headers: &mut http::HeaderMap, +) -> Result { + if let Some(protocol_hdr) = req_headers.get("sec-websocket-protocol") { + let supported_by_client = protocol_hdr + .to_str() + .unwrap_or("") + .split(',') + .map(|p| p.trim()); + + let mut hrana1_supported = false; + let mut hrana2_supported = false; + for protocol_str in supported_by_client { + hrana1_supported |= protocol_str.eq_ignore_ascii_case("hrana1"); + hrana2_supported |= protocol_str.eq_ignore_ascii_case("hrana2"); + } + + let version = if hrana2_supported { + Version::Hrana2 + } else if hrana1_supported { + Version::Hrana1 + } else { + return Err("Only 'hrana1' and 'hrana2' subprotocols are supported".into()); + }; + + resp_headers.append( + "sec-websocket-protocol", + 
http::HeaderValue::from_str(&version.to_string()).unwrap(), + ); + Ok(version) + } else { + // Sec-WebSocket-Protocol header not present, assume that the client wants hrana1 + // According to RFC 6455, we must not set the Sec-WebSocket-Protocol response header + Ok(Version::Hrana1) + } +} + +fn get_ws_config() -> tungstenite::protocol::WebSocketConfig { + tungstenite::protocol::WebSocketConfig { + max_send_queue: Some(1 << 20), + ..Default::default() + } +} + +impl WebSocket { + pub async fn recv(&mut self) -> Option> { + match self { + Self::Tcp(stream) => stream.next().await, + Self::Upgraded(stream) => stream.next().await, + } + } + + pub async fn send(&mut self, msg: tungstenite::Message) -> tungstenite::Result<()> { + match self { + Self::Tcp(stream) => stream.send(msg).await, + Self::Upgraded(stream) => stream.send(msg).await, + } + } +} diff --git a/libsqlx-server/src/hrana/ws/mod.rs b/libsqlx-server/src/hrana/ws/mod.rs new file mode 100644 index 00000000..bcdb5209 --- /dev/null +++ b/libsqlx-server/src/hrana/ws/mod.rs @@ -0,0 +1,101 @@ +// use crate::auth::Auth; +use crate::database::Database; +use enclose::enclose; +use std::net::SocketAddr; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; +use tokio::sync::{mpsc, oneshot}; + +pub mod proto; + +mod conn; +mod handshake; +mod session; + +struct Server { + auth: Arc, + // idle_kicker: Option, + next_conn_id: AtomicU64, +} + +#[derive(Debug)] +pub struct Accept { + pub socket: tokio::net::TcpStream, + pub peer_addr: SocketAddr, +} + +#[derive(Debug)] +pub struct Upgrade { + pub request: hyper::Request, + pub response_tx: oneshot::Sender>, +} + +pub async fn serve( + db_factory: Arc>, + auth: Arc, + idle_kicker: Option, + mut accept_rx: mpsc::Receiver, + mut upgrade_rx: mpsc::Receiver, +) -> Result<()> { + let server = Arc::new(Server { + db_factory, + auth, + idle_kicker, + next_conn_id: AtomicU64::new(0), + }); + + let mut join_set = tokio::task::JoinSet::new(); + loop { + if let Some(kicker) = server.idle_kicker.as_ref() { + kicker.kick(); + } + + tokio::select! 
{ + Some(accept) = accept_rx.recv() => { + let conn_id = server.next_conn_id.fetch_add(1, Ordering::AcqRel); + tracing::info!("Received TCP connection #{} from {}", conn_id, accept.peer_addr); + + join_set.spawn(enclose!{(server, conn_id) async move { + match conn::handle_tcp(server, accept.socket, conn_id).await { + Ok(_) => tracing::info!("TCP connection #{} was terminated", conn_id), + Err(err) => tracing::error!("TCP connection #{} failed: {:?}", conn_id, err), + } + }}); + }, + Some(upgrade) = upgrade_rx.recv() => { + let conn_id = server.next_conn_id.fetch_add(1, Ordering::AcqRel); + tracing::info!("Received HTTP upgrade connection #{}", conn_id); + + join_set.spawn(enclose!{(server, conn_id) async move { + match conn::handle_upgrade(server, upgrade, conn_id).await { + Ok(_) => tracing::info!("HTTP upgrade connection #{} was terminated", conn_id), + Err(err) => tracing::error!("HTTP upgrade connection #{} failed: {:?}", conn_id, err), + } + }}); + }, + Some(task_res) = join_set.join_next() => { + task_res.expect("Hrana connection task failed") + }, + else => { + tracing::error!("hrana server loop exited"); + return Ok(()) + } + } + } +} + +pub async fn listen(bind_addr: SocketAddr, accept_tx: mpsc::Sender) -> Result<()> { + let listener = tokio::net::TcpListener::bind(bind_addr) + .await + .context("Could not bind TCP listener")?; + let local_addr = listener.local_addr()?; + tracing::info!("Listening for Hrana connections on {}", local_addr); + + loop { + let (socket, peer_addr) = listener + .accept() + .await + .context("Could not accept a TCP connection")?; + let _: Result<_, _> = accept_tx.send(Accept { socket, peer_addr }).await; + } +} diff --git a/libsqlx-server/src/hrana/ws/proto.rs b/libsqlx-server/src/hrana/ws/proto.rs new file mode 100644 index 00000000..6bb88367 --- /dev/null +++ b/libsqlx-server/src/hrana/ws/proto.rs @@ -0,0 +1,127 @@ +//! Structures for Hrana-over-WebSockets. 
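+//!
+//! Messages travel as JSON text frames. The serde attributes below yield shapes like the
+//! following (values are illustrative, not taken from a real session):
+//!
+//! ```text
+//! -> {"type":"hello","jwt":null}
+//! <- {"type":"hello_ok"}
+//! -> {"type":"request","request_id":1,"request":{"type":"execute","stream_id":1,"stmt":{"sql":"SELECT 1"}}}
+//! <- {"type":"response_ok","request_id":1,"response":{"type":"execute","result":{...}}}
+//! ```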
+ +pub use super::super::proto::*; +use serde::{Deserialize, Serialize}; + +#[derive(Deserialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ClientMsg { + Hello { jwt: Option }, + Request { request_id: i32, request: Request }, +} + +#[derive(Serialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum ServerMsg { + HelloOk {}, + HelloError { error: Error }, + ResponseOk { request_id: i32, response: Response }, + ResponseError { request_id: i32, error: Error }, +} + +#[derive(Deserialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum Request { + OpenStream(OpenStreamReq), + CloseStream(CloseStreamReq), + Execute(ExecuteReq), + Batch(BatchReq), + Sequence(SequenceReq), + Describe(DescribeReq), + StoreSql(StoreSqlReq), + CloseSql(CloseSqlReq), +} + +#[derive(Serialize, Debug)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum Response { + OpenStream(OpenStreamResp), + CloseStream(CloseStreamResp), + Execute(ExecuteResp), + Batch(BatchResp), + Sequence(SequenceResp), + Describe(DescribeResp), + StoreSql(StoreSqlResp), + CloseSql(CloseSqlResp), +} + +#[derive(Deserialize, Debug)] +pub struct OpenStreamReq { + pub stream_id: i32, +} + +#[derive(Serialize, Debug)] +pub struct OpenStreamResp {} + +#[derive(Deserialize, Debug)] +pub struct CloseStreamReq { + pub stream_id: i32, +} + +#[derive(Serialize, Debug)] +pub struct CloseStreamResp {} + +#[derive(Deserialize, Debug)] +pub struct ExecuteReq { + pub stream_id: i32, + pub stmt: Stmt, +} + +#[derive(Serialize, Debug)] +pub struct ExecuteResp { + pub result: StmtResult, +} + +#[derive(Deserialize, Debug)] +pub struct BatchReq { + pub stream_id: i32, + pub batch: Batch, +} + +#[derive(Serialize, Debug)] +pub struct BatchResp { + pub result: BatchResult, +} + +#[derive(Deserialize, Debug)] +pub struct SequenceReq { + pub stream_id: i32, + #[serde(default)] + pub sql: Option, + #[serde(default)] + pub sql_id: Option, +} + +#[derive(Serialize, Debug)] +pub struct SequenceResp {} + +#[derive(Deserialize, Debug)] +pub struct DescribeReq { + pub stream_id: i32, + #[serde(default)] + pub sql: Option, + #[serde(default)] + pub sql_id: Option, +} + +#[derive(Serialize, Debug)] +pub struct DescribeResp { + pub result: DescribeResult, +} + +#[derive(Deserialize, Debug)] +pub struct StoreSqlReq { + pub sql_id: i32, + pub sql: String, +} + +#[derive(Serialize, Debug)] +pub struct StoreSqlResp {} + +#[derive(Deserialize, Debug)] +pub struct CloseSqlReq { + pub sql_id: i32, +} + +#[derive(Serialize, Debug)] +pub struct CloseSqlResp {} diff --git a/libsqlx-server/src/hrana/ws/session.rs b/libsqlx-server/src/hrana/ws/session.rs new file mode 100644 index 00000000..f59bcecc --- /dev/null +++ b/libsqlx-server/src/hrana/ws/session.rs @@ -0,0 +1,329 @@ +use std::collections::HashMap; + +use anyhow::{anyhow, bail, Context as _, Result}; +use futures::future::BoxFuture; +use tokio::sync::{mpsc, oneshot}; + +use super::super::{batch, stmt, ProtocolError, Version}; +use super::{proto, Server}; +use crate::auth::{AuthError, Authenticated}; +use crate::database::Database; + +/// Session-level state of an authenticated Hrana connection. +pub struct Session { + authenticated: Authenticated, + version: Version, + streams: HashMap>, + sqls: HashMap, +} + +struct StreamHandle { + job_tx: mpsc::Sender>, +} + +/// An arbitrary job that is executed on a [`Stream`]. +/// +/// All jobs are executed sequentially on a single task (as evidenced by the `&mut Stream` passed +/// to `f`). 
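+///
+/// In other words, each stream behaves like a small actor: `stream_spawn()` (below) starts the
+/// task that owns the [`Stream`], `stream_respond()` enqueues jobs of this shape onto it, and
+/// `resp_tx` carries the result back to the connection task.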
+struct StreamJob { + /// The async function which performs the job. + #[allow(clippy::type_complexity)] + f: Box FnOnce(&'s mut Stream) -> BoxFuture<'s, Result> + Send>, + /// The result of `f` will be sent here. + resp_tx: oneshot::Sender>, +} + +/// State of a Hrana stream, which corresponds to a standalone database connection. +struct Stream { + /// The database handle is `None` when the stream is created, and normally set to `Some` by the + /// first job executed on the stream by the [`proto::OpenStreamReq`] request. However, if that + /// request returns an error, the following requests may encounter a `None` here. + db: Option, +} + +/// An error which can be converted to a Hrana [Error][proto::Error]. +#[derive(thiserror::Error, Debug)] +pub enum ResponseError { + #[error("Authentication failed: {source}")] + Auth { source: AuthError }, + #[error("Stream {stream_id} has failed to open")] + StreamNotOpen { stream_id: i32 }, + #[error("The server already stores {count} SQL texts, it cannot store more")] + SqlTooMany { count: usize }, + #[error(transparent)] + Stmt(stmt::StmtError), +} + +pub(super) fn handle_initial_hello( + server: &Server, + version: Version, + jwt: Option, +) -> Result> { + let authenticated = server + .auth + .authenticate_jwt(jwt.as_deref()) + .map_err(|err| anyhow!(ResponseError::Auth { source: err }))?; + + Ok(Session { + authenticated, + version, + streams: HashMap::new(), + sqls: HashMap::new(), + }) +} + +pub(super) fn handle_repeated_hello( + server: &Server, + session: &mut Session, + jwt: Option, +) -> Result<()> { + if session.version < Version::Hrana2 { + bail!(ProtocolError::NotSupported { + what: "Repeated hello message", + min_version: Version::Hrana2, + }) + } + + session.authenticated = server + .auth + .authenticate_jwt(jwt.as_deref()) + .map_err(|err| anyhow!(ResponseError::Auth { source: err }))?; + Ok(()) +} + +pub(super) async fn handle_request( + server: &Server, + session: &mut Session, + join_set: &mut tokio::task::JoinSet<()>, + req: proto::Request, +) -> Result>> { + // TODO: this function has rotten: it is too long and contains too much duplicated code. It + // should be refactored at the next opportunity, together with code in stmt.rs and batch.rs + + let (resp_tx, resp_rx) = oneshot::channel(); + + macro_rules! stream_respond { + ($stream_hnd:expr, async move |$stream:ident| { $($body:tt)* }) => { + stream_respond($stream_hnd, resp_tx, move |$stream| { + Box::pin(async move { $($body)* }) + }) + .await + }; + } + + macro_rules! respond { + ($value:expr) => { + resp_tx.send(Ok($value)).unwrap() + }; + } + + macro_rules! ensure_version { + ($min_version:expr, $what:expr) => { + if session.version < $min_version { + bail!(ProtocolError::NotSupported { + what: $what, + min_version: $min_version, + }) + } + }; + } + + macro_rules! get_stream_mut { + ($stream_id:expr) => { + match session.streams.get_mut(&$stream_id) { + Some(stream_hdn) => stream_hdn, + None => bail!(ProtocolError::StreamNotFound { + stream_id: $stream_id + }), + } + }; + } + + macro_rules! 
get_stream_db { + ($stream:expr, $stream_id:expr) => { + match $stream.db.as_ref() { + Some(db) => db, + None => bail!(ResponseError::StreamNotOpen { + stream_id: $stream_id + }), + } + }; + } + + match req { + proto::Request::OpenStream(req) => { + let stream_id = req.stream_id; + if session.streams.contains_key(&stream_id) { + bail!(ProtocolError::StreamExists { stream_id }) + } + + let mut stream_hnd = stream_spawn(join_set, Stream { db: None }); + let db_factory = server.db_factory.clone(); + + stream_respond!(&mut stream_hnd, async move |stream| { + let db = db_factory + .create() + .await + .context("Could not create a database connection")?; + stream.db = Some(db); + Ok(proto::Response::OpenStream(proto::OpenStreamResp {})) + }); + + session.streams.insert(stream_id, stream_hnd); + } + proto::Request::CloseStream(req) => { + let stream_id = req.stream_id; + let Some(mut stream_hnd) = session.streams.remove(&stream_id) else { + bail!(ProtocolError::StreamNotFound { stream_id }) + }; + + stream_respond!(&mut stream_hnd, async move |_stream| { + Ok(proto::Response::CloseStream(proto::CloseStreamResp {})) + }); + } + proto::Request::Execute(req) => { + let stream_id = req.stream_id; + let stream_hnd = get_stream_mut!(stream_id); + + let query = stmt::proto_stmt_to_query(&req.stmt, &session.sqls, session.version) + .map_err(catch_stmt_error)?; + let auth = session.authenticated; + + stream_respond!(stream_hnd, async move |stream| { + let db = get_stream_db!(stream, stream_id); + let result = stmt::execute_stmt(db, auth, query) + .await + .map_err(catch_stmt_error)?; + Ok(proto::Response::Execute(proto::ExecuteResp { result })) + }); + } + proto::Request::Batch(req) => { + let stream_id = req.stream_id; + let stream_hnd = get_stream_mut!(stream_id); + + let pgm = batch::proto_batch_to_program(&req.batch, &session.sqls, session.version) + .map_err(catch_stmt_error)?; + let auth = session.authenticated; + + stream_respond!(stream_hnd, async move |stream| { + let db = get_stream_db!(stream, stream_id); + let result = batch::execute_batch(db, auth, pgm).await?; + Ok(proto::Response::Batch(proto::BatchResp { result })) + }); + } + proto::Request::Sequence(req) => { + ensure_version!(Version::Hrana2, "The `sequence` request"); + let stream_id = req.stream_id; + let stream_hnd = get_stream_mut!(stream_id); + + let sql = stmt::proto_sql_to_sql( + req.sql.as_deref(), + req.sql_id, + &session.sqls, + session.version, + )?; + let pgm = batch::proto_sequence_to_program(sql).map_err(catch_stmt_error)?; + let auth = session.authenticated; + + stream_respond!(stream_hnd, async move |stream| { + let db = get_stream_db!(stream, stream_id); + batch::execute_sequence(db, auth, pgm) + .await + .map_err(catch_stmt_error)?; + Ok(proto::Response::Sequence(proto::SequenceResp {})) + }); + } + proto::Request::Describe(req) => { + ensure_version!(Version::Hrana2, "The `describe` request"); + let stream_id = req.stream_id; + let stream_hnd = get_stream_mut!(stream_id); + + let sql = stmt::proto_sql_to_sql( + req.sql.as_deref(), + req.sql_id, + &session.sqls, + session.version, + )? 
+ .into(); + let auth = session.authenticated; + + stream_respond!(stream_hnd, async move |stream| { + let db = get_stream_db!(stream, stream_id); + let result = stmt::describe_stmt(db, auth, sql) + .await + .map_err(catch_stmt_error)?; + Ok(proto::Response::Describe(proto::DescribeResp { result })) + }); + } + proto::Request::StoreSql(req) => { + ensure_version!(Version::Hrana2, "The `store_sql` request"); + let sql_id = req.sql_id; + if session.sqls.contains_key(&sql_id) { + bail!(ProtocolError::SqlExists { sql_id }) + } else if session.sqls.len() >= MAX_SQL_COUNT { + bail!(ResponseError::SqlTooMany { + count: session.sqls.len() + }) + } + + session.sqls.insert(sql_id, req.sql); + respond!(proto::Response::StoreSql(proto::StoreSqlResp {})); + } + proto::Request::CloseSql(req) => { + ensure_version!(Version::Hrana2, "The `close_sql` request"); + session.sqls.remove(&req.sql_id); + respond!(proto::Response::CloseSql(proto::CloseSqlResp {})); + } + } + Ok(resp_rx) +} + +const MAX_SQL_COUNT: usize = 150; + +fn stream_spawn( + join_set: &mut tokio::task::JoinSet<()>, + stream: Stream, +) -> StreamHandle { + let (job_tx, mut job_rx) = mpsc::channel::>(8); + join_set.spawn(async move { + let mut stream = stream; + while let Some(job) = job_rx.recv().await { + let res = (job.f)(&mut stream).await; + let _: Result<_, _> = job.resp_tx.send(res); + } + }); + StreamHandle { job_tx } +} + +async fn stream_respond( + stream_hnd: &mut StreamHandle, + resp_tx: oneshot::Sender>, + f: F, +) where + for<'s> F: FnOnce(&'s mut Stream) -> BoxFuture<'s, Result>, + F: Send + 'static, +{ + let job = StreamJob { + f: Box::new(f), + resp_tx, + }; + let _: Result<_, _> = stream_hnd.job_tx.send(job).await; +} + +fn catch_stmt_error(err: anyhow::Error) -> anyhow::Error { + match err.downcast::() { + Ok(stmt_err) => anyhow!(ResponseError::Stmt(stmt_err)), + Err(err) => err, + } +} + +impl ResponseError { + pub fn code(&self) -> &'static str { + match self { + Self::Auth { source } => source.code(), + Self::SqlTooMany { .. } => "SQL_STORE_TOO_MANY", + Self::StreamNotOpen { .. 
} => "STREAM_NOT_OPEN", + Self::Stmt(err) => err.code(), + } + } +} diff --git a/libsqlx-server/src/http/admin.rs b/libsqlx-server/src/http/admin.rs new file mode 100644 index 00000000..e90ecfe2 --- /dev/null +++ b/libsqlx-server/src/http/admin.rs @@ -0,0 +1,257 @@ +use std::ops::Deref; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use axum::extract::{Path, State}; +use axum::response::IntoResponse; +use axum::routing::{delete, post}; +use axum::{Json, Router}; +use chrono::{DateTime, Utc}; +use color_eyre::eyre::Result; +use hyper::server::accept::Accept; +use hyper::StatusCode; +use serde::{Deserialize, Deserializer, Serialize}; +use tokio::io::{AsyncRead, AsyncWrite}; + +use crate::allocation::config::{AllocConfig, DbConfig}; +use crate::linc::bus::Bus; +use crate::linc::NodeId; +use crate::manager::Manager; +use crate::meta::{AllocationError, DatabaseId}; + +impl IntoResponse for crate::error::Error { + fn into_response(self) -> axum::response::Response { + #[derive(Serialize)] + struct ErrorBody { + message: String, + } + + let mut resp = Json(ErrorBody { + message: self.to_string(), + }) + .into_response(); + *resp.status_mut() = match self { + crate::error::Error::Libsqlx(_) + | crate::error::Error::InjectorExited + | crate::error::Error::ConnectionClosed + | crate::error::Error::Io(_) + | crate::error::Error::AllocationClosed + | crate::error::Error::Internal(_) + | crate::error::Error::Heed(_) => StatusCode::INTERNAL_SERVER_ERROR, + crate::error::Error::Allocation(AllocationError::AlreadyExist(_)) => { + StatusCode::BAD_REQUEST + } + }; + + resp + } +} + +pub struct Config { + pub bus: Arc>>, +} + +struct AdminServerState { + bus: Arc>>, +} + +pub async fn run_admin_api(config: Config, listener: I) -> Result<()> +where + I: Accept, + I::Conn: AsyncRead + AsyncWrite + Send + Unpin + 'static, +{ + let state = AdminServerState { bus: config.bus }; + + let app = Router::new() + .route("/manage/allocation", post(allocate).get(list_allocs)) + .route("/manage/allocation/:db_name", delete(deallocate)) + .with_state(Arc::new(state)); + axum::Server::builder(listener) + .serve(app.into_make_service()) + .await?; + + Ok(()) +} + +#[derive(Serialize, Debug)] +struct ErrorResponse {} + +#[derive(Serialize, Debug)] +#[serde(rename_all = "lowercase")] +enum DbType { + Primary, + Replica, +} + +#[derive(Serialize, Debug)] +struct AllocationSummaryView { + created_at: DateTime, + database_name: String, + #[serde(rename = "type")] + ty: DbType, +} + +#[derive(Deserialize, Debug)] +struct AllocateReq { + database_name: String, + max_conccurent_connection: Option, + config: DbConfigReq, +} + +#[derive(Debug, Deserialize)] +#[serde(tag = "type", rename_all = "snake_case", deny_unknown_fields)] +pub struct Primary { + /// The maximum size the replication is allowed to grow. Expects a string like 200mb. 
+
+#[derive(Debug, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case", deny_unknown_fields)]
+pub struct Primary {
+    /// The maximum size the replication log is allowed to grow to. Expects a string like "200mb".
+    #[serde(default = "default_max_log_size")]
+    pub max_replication_log_size: bytesize::ByteSize,
+    pub replication_log_compact_interval: Option<HumanDuration>,
+    #[serde(default = "default_txn_timeout")]
+    transaction_timeout_duration: HumanDuration,
+}
+
+#[derive(Debug)]
+pub struct HumanDuration(Duration);
+
+impl Deref for HumanDuration {
+    type Target = Duration;
+
+    fn deref(&self) -> &Self::Target {
+        &self.0
+    }
+}
+
+impl<'de> Deserialize<'de> for HumanDuration {
+    fn deserialize<D>(deserializer: D) -> std::result::Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct DurationVisitor;
+        impl serde::de::Visitor<'_> for DurationVisitor {
+            type Value = HumanDuration;
+
+            fn visit_str<E>(self, v: &str) -> std::result::Result<Self::Value, E>
+            where
+                E: serde::de::Error,
+            {
+                match humantime::Duration::from_str(v) {
+                    Ok(d) => Ok(HumanDuration(*d)),
+                    Err(e) => Err(E::custom(e.to_string())),
+                }
+            }
+
+            fn expecting(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+                f.write_str("a duration, in a string format")
+            }
+        }
+
+        deserializer.deserialize_str(DurationVisitor)
+    }
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(tag = "type", rename_all = "snake_case")]
+pub enum DbConfigReq {
+    Primary(Primary),
+    Replica {
+        primary_node_id: NodeId,
+        #[serde(default = "default_proxy_timeout")]
+        proxy_request_timeout_duration: HumanDuration,
+        #[serde(default = "default_txn_timeout")]
+        transaction_timeout_duration: HumanDuration,
+    },
+}
+
+const fn default_max_log_size() -> bytesize::ByteSize {
+    bytesize::ByteSize::mb(100)
+}
+
+const fn default_proxy_timeout() -> HumanDuration {
+    HumanDuration(Duration::from_secs(5))
+}
+
+const fn default_txn_timeout() -> HumanDuration {
+    HumanDuration(Duration::from_secs(5))
+}
+
+async fn allocate(
+    State(state): State<Arc<AdminServerState>>,
+    Json(req): Json<AllocateReq>,
+) -> crate::Result<Json<AllocationSummaryView>> {
+    let config = AllocConfig {
+        max_conccurent_connection: req.max_conccurent_connection.unwrap_or(16),
+        db_name: req.database_name.clone(),
+        db_config: match req.config {
+            DbConfigReq::Primary(Primary {
+                max_replication_log_size,
+                replication_log_compact_interval,
+                transaction_timeout_duration,
+            }) => DbConfig::Primary {
+                max_log_size: max_replication_log_size.as_u64() as usize,
+                replication_log_compact_interval: replication_log_compact_interval
+                    .as_deref()
+                    .copied(),
+                transaction_timeout_duration: *transaction_timeout_duration,
+            },
+            DbConfigReq::Replica {
+                primary_node_id,
+                proxy_request_timeout_duration,
+                transaction_timeout_duration,
+            } => DbConfig::Replica {
+                primary_node_id,
+                proxy_request_timeout_duration: *proxy_request_timeout_duration,
+                transaction_timeout_duration: *transaction_timeout_duration,
+            },
+        },
+    };
+
+    let dispatcher = state.bus.clone();
+    let id = DatabaseId::from_name(&req.database_name);
+    let meta = state.bus.handler().allocate(id, config, dispatcher).await?;
+
+    Ok(Json(AllocationSummaryView {
+        created_at: meta.created_at,
+        database_name: meta.config.db_name,
+        ty: match meta.config.db_config {
+            DbConfig::Primary { .. } => DbType::Primary,
+            DbConfig::Replica { .. } => DbType::Replica,
+        },
+    }))
+}
+
+async fn deallocate(
+    State(state): State<Arc<AdminServerState>>,
+    Path(database_name): Path<String>,
+) -> crate::Result<()> {
+    let id = DatabaseId::from_name(&database_name);
+    state.bus.handler().deallocate(id).await?;
+
+    Ok(())
+}
+
+#[derive(Serialize, Debug)]
+struct ListAllocResp {
+    allocs: Vec<AllocView>,
+}
+
+#[derive(Serialize, Debug)]
+struct AllocView {
+    id: String,
+}
+
+async fn list_allocs(
+    State(state): State<Arc<AdminServerState>>,
+) -> crate::Result<Json<ListAllocResp>> {
+    let allocs = state
+        .bus
+        .handler()
+        .store()
+        .list_allocs()?
+        .into_iter()
+        .map(|meta| AllocView {
+            id: meta.config.db_name,
+        })
+        .collect();
+
+    Ok(Json(ListAllocResp { allocs }))
+}
diff --git a/libsqlx-server/src/http/mod.rs b/libsqlx-server/src/http/mod.rs
new file mode 100644
index 00000000..1e6bf65b
--- /dev/null
+++ b/libsqlx-server/src/http/mod.rs
@@ -0,0 +1,2 @@
+pub mod admin;
+pub mod user;
diff --git a/libsqlx-server/src/http/user/error.rs b/libsqlx-server/src/http/user/error.rs
new file mode 100644
index 00000000..81a9ea2b
--- /dev/null
+++ b/libsqlx-server/src/http/user/error.rs
@@ -0,0 +1,44 @@
+use axum::response::IntoResponse;
+use axum::Json;
+use hyper::StatusCode;
+use serde::Serialize;
+
+#[derive(Debug, thiserror::Error)]
+pub enum UserApiError {
+    #[error("missing host header")]
+    MissingHost,
+    #[error("invalid host header format")]
+    InvalidHost,
+    #[error("Database `{0}` doesn't exist")]
+    UnknownDatabase(String),
+    #[error(transparent)]
+    LibsqlxServer(#[from] crate::error::Error),
+}
+
+impl UserApiError {
+    fn http_status(&self) -> StatusCode {
+        match self {
+            UserApiError::MissingHost
+            | UserApiError::InvalidHost
+            | UserApiError::UnknownDatabase(_) => StatusCode::BAD_REQUEST,
+            UserApiError::LibsqlxServer(_) => StatusCode::INTERNAL_SERVER_ERROR,
+        }
+    }
+}
+
+#[derive(Debug, Serialize)]
+pub struct ApiError {
+    error: String,
+}
+
+impl IntoResponse for UserApiError {
+    fn into_response(self) -> axum::response::Response {
+        let mut resp = Json(ApiError {
+            error: self.to_string(),
+        })
+        .into_response();
+        *resp.status_mut() = self.http_status();
+
+        resp
+    }
+}
diff --git a/libsqlx-server/src/http/user/extractors.rs b/libsqlx-server/src/http/user/extractors.rs
new file mode 100644
index 00000000..fc84900e
--- /dev/null
+++ b/libsqlx-server/src/http/user/extractors.rs
@@ -0,0 +1,33 @@
+use std::sync::Arc;
+
+use axum::async_trait;
+use axum::extract::FromRequestParts;
+use hyper::http::request::Parts;
+
+use crate::{database::Database, meta::DatabaseId};
+
+use super::{error::UserApiError, UserApiState};
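+
+// Editorial note (not part of the original patch): databases are routed by the
+// HTTP Host header. The extractor below takes the first dot-separated label of
+// the host as the database name, so a request shaped like
+//
+//     POST /v2/pipeline HTTP/1.1
+//     Host: my-db.example.com
+//
+// resolves to the database named "my-db" (the hostname is illustrative).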
+
+#[async_trait]
+impl FromRequestParts<Arc<UserApiState>> for Database {
+    type Rejection = UserApiError;
+
+    async fn from_request_parts(
+        parts: &mut Parts,
+        state: &Arc<UserApiState>,
+    ) -> Result<Self, Self::Rejection> {
+        let Some(host) = parts.headers.get("host") else {
+            return Err(UserApiError::MissingHost);
+        };
+        let Ok(host_str) = std::str::from_utf8(host.as_bytes()) else {
+            return Err(UserApiError::MissingHost);
+        };
+        let db_name = parse_host(host_str)?;
+        let db_id = DatabaseId::from_name(db_name);
+        let Some(sender) = state.manager.schedule(db_id, state.bus.clone()).await? else {
+            return Err(UserApiError::UnknownDatabase(db_name.to_owned()));
+        };
+
+        Ok(Database { sender })
+    }
+}
+
+fn parse_host(host: &str) -> Result<&str, UserApiError> {
+    let mut split = host.split('.');
+    let Some(db_id) = split.next() else {
+        return Err(UserApiError::InvalidHost);
+    };
+    Ok(db_id)
+}
diff --git a/libsqlx-server/src/http/user/mod.rs b/libsqlx-server/src/http/user/mod.rs
new file mode 100644
index 00000000..7b43d36d
--- /dev/null
+++ b/libsqlx-server/src/http/user/mod.rs
@@ -0,0 +1,86 @@
+use std::sync::Arc;
+
+use axum::extract::State;
+use axum::response::IntoResponse;
+use axum::routing::post;
+use axum::{Json, Router};
+use color_eyre::Result;
+use hyper::server::accept::Accept;
+use hyper::StatusCode;
+use serde::Serialize;
+use tokio::io::{AsyncRead, AsyncWrite};
+
+use crate::database::Database;
+use crate::hrana;
+use crate::hrana::error::HranaError;
+use crate::hrana::http::proto::{PipelineRequestBody, PipelineResponseBody};
+use crate::linc::bus::Bus;
+use crate::manager::Manager;
+
+mod error;
+mod extractors;
+
+#[derive(Debug, Serialize)]
+struct ErrorResponseBody {
+    pub message: String,
+    pub code: String,
+}
+
+impl IntoResponse for HranaError {
+    fn into_response(self) -> axum::response::Response {
+        let (message, code) = match self.code() {
+            Some(code) => (self.to_string(), code.to_owned()),
+            None => (
+                "internal error, please check the logs".to_owned(),
+                "INTERNAL_ERROR".to_owned(),
+            ),
+        };
+        let resp = ErrorResponseBody { message, code };
+        let mut resp = Json(resp).into_response();
+        *resp.status_mut() = StatusCode::BAD_REQUEST;
+        resp
+    }
+}
+
+pub struct Config {
+    pub manager: Arc<Manager>,
+    pub bus: Arc<Bus<Arc<Manager>>>,
+    pub hrana_server: Arc<hrana::http::Server>,
+}
+
+struct UserApiState {
+    manager: Arc<Manager>,
+    bus: Arc<Bus<Arc<Manager>>>,
+    hrana_server: Arc<hrana::http::Server>,
+}
+
+pub async fn run_user_api<I>(config: Config, listener: I) -> Result<()>
+where
+    I: Accept<Error = std::io::Error>,
+    I::Conn: AsyncRead + AsyncWrite + Send + Unpin + 'static,
+{
+    let state = UserApiState {
+        manager: config.manager,
+        bus: config.bus,
+        hrana_server: config.hrana_server,
+    };
+
+    let app = Router::new()
+        .route("/v2/pipeline", post(handle_hrana_pipeline))
+        .with_state(Arc::new(state));
+
+    axum::Server::builder(listener)
+        .serve(app.into_make_service())
+        .await?;
+
+    Ok(())
+}
+
+async fn handle_hrana_pipeline(
+    State(state): State<Arc<UserApiState>>,
+    db: Database,
+    Json(req): Json<PipelineRequestBody>,
+) -> crate::Result<Json<PipelineResponseBody>, HranaError> {
+    let ret = hrana::http::handle_pipeline(&state.hrana_server, req, db).await?;
+    Ok(Json(ret))
+}
diff --git a/libsqlx-server/src/linc/bus.rs b/libsqlx-server/src/linc/bus.rs
new file mode 100644
index 00000000..c74ba267
--- /dev/null
+++ b/libsqlx-server/src/linc/bus.rs
@@ -0,0 +1,74 @@
+use std::collections::HashSet;
+use std::sync::Arc;
+
+use parking_lot::RwLock;
+use tokio::sync::mpsc;
+
+use super::connection::SendQueue;
+use super::handler::Handler;
+use super::proto::Enveloppe;
+use super::{Inbound, NodeId, Outbound};
+
+pub struct Bus<H> {
+    node_id: NodeId,
+    handler: H,
+    peers: RwLock<HashSet<NodeId>>,
+    send_queue: SendQueue,
+}
+
+impl<H: Handler> Bus<H> {
+    pub fn new(node_id: NodeId, handler: H) -> Self {
+        let send_queue = SendQueue::new();
+        Self {
+            node_id,
+            handler,
+            send_queue,
+            peers: Default::default(),
+        }
+    }
+
+    pub fn node_id(&self) -> NodeId {
+        self.node_id
+    }
+
+    pub fn handler(&self) -> &H {
+        &self.handler
+    }
+
+    pub async fn incomming(self: &Arc<Self>, incomming: Inbound) {
+        if let Err(e) = self.handler.handle(self.clone(), incomming).await {
+            tracing::error!("error handling message: {e}")
+        }
+    }
+
+    pub fn connect(&self, node_id: NodeId) -> mpsc::UnboundedReceiver<Enveloppe> {
+        // TODO: handle peer already exists
+        self.peers.write().insert(node_id);
+        self.send_queue.register(node_id)
+    }
+}
+
+#[async_trait::async_trait]
+pub trait Dispatch: Send + Sync + 'static {
+    async fn dispatch(&self, msg: Outbound) -> crate::Result<()>;
+    /// id of the current node
+    fn node_id(&self) -> NodeId;
+}
+
+#[async_trait::async_trait]
+impl<H: Handler> Dispatch for Bus<H> {
+    async fn dispatch(&self, msg: Outbound) -> crate::Result<()> {
+        assert!(
+            msg.to != self.node_id(),
+            "trying to send a message to ourself!"
+        );
+        // This message is outbound.
+        self.send_queue.enqueue(msg).await?;
+
+        Ok(())
+    }
+
+    fn node_id(&self) -> NodeId {
+        self.node_id
+    }
+}
diff --git a/libsqlx-server/src/linc/connection.rs b/libsqlx-server/src/linc/connection.rs
new file mode 100644
index 00000000..9fa41bb1
--- /dev/null
+++ b/libsqlx-server/src/linc/connection.rs
@@ -0,0 +1,391 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use async_bincode::tokio::AsyncBincodeStream;
+use async_bincode::AsyncDestination;
+use color_eyre::eyre::bail;
+use futures::{SinkExt, StreamExt};
+use parking_lot::RwLock;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::sync::mpsc;
+use tokio::time::{Duration, Instant};
+
+use crate::linc::proto::ProtoError;
+use crate::linc::CURRENT_PROTO_VERSION;
+
+use super::bus::Bus;
+use super::handler::Handler;
+use super::proto::{Enveloppe, Message};
+use super::{Inbound, NodeId, Outbound};
+
+/// A connection to another node. Manages the connection state, and (de)registers streams with the
+/// `Bus`
+pub struct Connection<S, H> {
+    /// Id of the peer node, once the handshake has completed
+    pub peer: Option<NodeId>,
+    /// State of the connection
+    pub state: ConnectionState,
+    /// Sink/Stream for network messages
+    conn: AsyncBincodeStream<S, Enveloppe, Enveloppe, AsyncDestination>,
+    /// Are we the initiator of this connection?
+    is_initiator: bool,
+    /// send queue for this connection
+    send_queue: Option<mpsc::UnboundedReceiver<Enveloppe>>,
+    bus: Arc<Bus<H>>,
+}
+
+#[derive(Debug)]
+pub enum ConnectionState {
+    Init,
+    Connecting,
+    Connected,
+    // Closing the connection with an error
+    CloseError(color_eyre::eyre::Error),
+    // Graceful connection shutdown
+    Close,
+}
+
+pub fn handshake_deadline() -> Instant {
+    const HANDSHAKE_TIMEOUT: Duration = Duration::from_secs(5);
+    Instant::now() + HANDSHAKE_TIMEOUT
+}
+
+// TODO: limit send queue depth
+pub struct SendQueue {
+    senders: RwLock<HashMap<NodeId, mpsc::UnboundedSender<Enveloppe>>>,
+}
+
+impl SendQueue {
+    pub fn new() -> Self {
+        Self {
+            senders: Default::default(),
+        }
+    }
+
+    pub async fn enqueue(&self, msg: Outbound) -> crate::Result<()> {
+        let sender = match self.senders.read().get(&msg.to) {
+            Some(sender) => sender.clone(),
+            None => {
+                return Err(crate::error::Error::Internal(color_eyre::eyre::anyhow!(
+                    "failed to deliver message: unknown node id `{}`",
+                    msg.to
+                )))
+            }
+        };
+
+        sender.send(msg.enveloppe).map_err(|_| {
+            crate::error::Error::Internal(color_eyre::eyre::anyhow!(
+                "failed to deliver message: connection closed"
+            ))
+        })?;
+
+        Ok(())
+    }
+
+    pub fn register(&self, node_id: NodeId) -> mpsc::UnboundedReceiver<Enveloppe> {
+        let (sender, receiver) = mpsc::unbounded_channel();
+        self.senders.write().insert(node_id, sender);
+
+        receiver
+    }
+}
+
+impl<S, H> Connection<S, H>
+where
+    S: AsyncRead + AsyncWrite + Unpin,
+    H: Handler,
+{
+    const MAX_CONNECTION_MESSAGES: usize = 128;
+
+    pub fn new_initiator(stream: S, bus: Arc<Bus<H>>) -> Self {
+        Self {
+            peer: None,
+            state: ConnectionState::Init,
+            conn: AsyncBincodeStream::from(stream).for_async(),
+            is_initiator: true,
+            send_queue: None,
+            bus,
+        }
+    }
+
+    pub fn new_acceptor(stream: S, bus: Arc<Bus<H>>) -> Self {
+        Connection {
+            peer: None,
+            state: ConnectionState::Connecting,
+            is_initiator: false,
+            bus,
+            send_queue: None,
+            conn: AsyncBincodeStream::from(stream).for_async(),
+        }
+    }
+
+    pub async fn run(mut self) {
+        while self.tick().await {}
+    }
+
+    pub async fn tick(&mut self) -> bool {
+        match self.state {
+            ConnectionState::Connected => self.tick_connected().await,
+            ConnectionState::Init => match self.initiate_connection().await {
+                Ok(_) => {
+                    self.state = ConnectionState::Connecting;
+                }
+                Err(e) => {
+                    self.state = ConnectionState::CloseError(e);
+                }
+            },
+            ConnectionState::Connecting => {
+                if let Err(e) = self
+                    .wait_handshake_response_with_deadline(handshake_deadline())
+                    .await
+                {
+                    self.state = ConnectionState::CloseError(e);
+                }
+            }
+            ConnectionState::CloseError(ref e) => {
+                tracing::error!("closing connection with {:?}: {e}", self.peer);
+                return false;
+            }
+            ConnectionState::Close => return false,
+        }
+        true
+    }
+
+    async fn tick_connected(&mut self) {
+        tokio::select! {
+            m = self.conn.next() => {
+                match m {
+                    Some(Ok(m)) => {
+                        self.handle_message(m).await;
+                    }
+                    Some(Err(e)) => {
+                        self.state = ConnectionState::CloseError(e.into());
+                    }
+                    None => {
+                        self.state = ConnectionState::Close;
+                    }
+                }
+            },
+            Some(m) = self.send_queue.as_mut().expect("no send_queue in connected state").recv() => {
+                let feed = || async {
+                    self.conn.feed(m).await?;
+                    // send as many as possible
+                    while let Ok(m) = self.send_queue.as_mut().expect("no send_queue in connected state").try_recv() {
+                        self.conn.feed(m).await?;
+                    }
+                    self.conn.flush().await?;
+
+                    Ok(())
+                };
+
+                if let Err(e) = feed().await {
+                    tracing::error!("error flushing send queue for {}; closing connection", self.peer.unwrap());
+                    self.state = ConnectionState::CloseError(e)
+                }
+            },
+            else => {
+                self.state = ConnectionState::Close;
+            }
+        }
+    }
+
+    async fn handle_message(&mut self, enveloppe: Enveloppe) {
+        let incomming = Inbound {
+            from: self.peer.expect("peer id should be known at this point"),
+            enveloppe,
+        };
+        self.bus.incomming(incomming).await;
+    }
+
+    fn close_error(&mut self, error: color_eyre::eyre::Error) {
+        self.state = ConnectionState::CloseError(error);
+    }
+
+    /// wait for a handshake response from peer
+    pub async fn wait_handshake_response_with_deadline(
+        &mut self,
+        deadline: Instant,
+    ) -> color_eyre::Result<()> {
+        assert!(matches!(self.state, ConnectionState::Connecting));
+
+        match tokio::time::timeout_at(deadline, self.conn.next()).await {
+            Ok(Some(Ok(Enveloppe {
+                message:
+                    Message::Handshake {
+                        protocol_version,
+                        node_id,
+                    },
+                ..
+            }))) => {
+                if protocol_version != CURRENT_PROTO_VERSION {
+                    let msg = Enveloppe {
+                        database_id: None,
+                        message: Message::Error(ProtoError::HandshakeVersionMismatch {
+                            expected: CURRENT_PROTO_VERSION,
+                        }),
+                    };
+
+                    let _ = self.conn.send(msg).await;
+
+                    bail!("handshake error: invalid peer protocol version");
+                } else {
+                    // when not initiating a connection, respond to handshake message with a
+                    // handshake message
+                    if !self.is_initiator {
+                        let msg = Enveloppe {
+                            database_id: None,
+                            message: Message::Handshake {
+                                protocol_version: CURRENT_PROTO_VERSION,
+                                node_id: self.bus.node_id(),
+                            },
+                        };
+                        self.conn.send(msg).await?;
+                    }
+
+                    tracing::info!("Connected to peer {node_id}");
+
+                    self.peer = Some(node_id);
+                    self.state = ConnectionState::Connected;
+                    self.send_queue = Some(self.bus.connect(node_id));
+
+                    Ok(())
+                }
+            }
+            Ok(Some(Ok(Enveloppe {
+                message: Message::Error(e),
+                ..
+            }))) => {
+                bail!("handshake error: {e}");
+            }
+            Ok(Some(Ok(_))) => {
+                bail!("unexpected message from peer during handshake.");
+            }
+            Ok(Some(Err(e))) => {
+                bail!("failed to perform handshake with peer: {e}");
+            }
+            Ok(None) => {
+                bail!("failed to perform handshake with peer: connection closed");
+            }
+            Err(_e) => {
+                bail!("failed to perform handshake with peer: timed out");
+            }
+        }
+    }
+
+    async fn initiate_connection(&mut self) -> color_eyre::Result<()> {
+        let msg = Enveloppe {
+            database_id: None,
+            message: Message::Handshake {
+                protocol_version: CURRENT_PROTO_VERSION,
+                node_id: self.bus.node_id(),
+            },
+        };
+
+        self.conn.send(msg).await?;
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::sync::Arc;
+
+    use futures::{future, pin_mut};
+    use tokio::sync::Notify;
+    use turmoil::net::{TcpListener, TcpStream};
+
+    use super::*;
+
+    #[test]
+    fn invalid_handshake() {
+        let mut sim = turmoil::Builder::new().build();
+
+        let host_node_id = 0;
+        let done = Arc::new(Notify::new());
+        let done_clone = done.clone();
+        sim.host("host", move || {
+            let done_clone = done_clone.clone();
+            async move {
+                let bus = Arc::new(Bus::new(host_node_id, |_, _| async {}));
+                let listener = turmoil::net::TcpListener::bind("0.0.0.0:1234")
+                    .await
+                    .unwrap();
+                let (s, _) = listener.accept().await.unwrap();
+                let connection = Connection::new_acceptor(s, bus);
+                let done = done_clone.notified();
+                let run = connection.run();
+                pin_mut!(done);
+                pin_mut!(run);
+                future::select(run, done).await;
+
+                Ok(())
+            }
+        });
+
+        sim.client("client", async move {
+            let s = TcpStream::connect("host:1234").await.unwrap();
+            let mut s = AsyncBincodeStream::<_, Enveloppe, Enveloppe, _>::from(s).for_async();
+
+            let msg = Enveloppe {
+                database_id: None,
+                message: Message::Handshake {
+                    protocol_version: 1234,
+                    node_id: 1,
+                },
+            };
+            s.send(msg).await.unwrap();
+            let m = s.next().await.unwrap().unwrap();
+
+            assert!(matches!(
+                m.message,
+                Message::Error(ProtoError::HandshakeVersionMismatch { .. })
+            ));
+
+            done.notify_waiters();
+
+            Ok(())
+        });
+
+        sim.run().unwrap();
+    }
+
+    #[test]
+    fn connection_closed_by_peer_close_connection() {
+        let mut sim = turmoil::Builder::new().build();
+
+        let notify = Arc::new(Notify::new());
+        sim.host("host", {
+            let notify = notify.clone();
+            move || {
+                let notify = notify.clone();
+                async move {
+                    let listener = TcpListener::bind("0.0.0.0:1234").await.unwrap();
+                    let (stream, _) = listener.accept().await.unwrap();
+                    notify.notified().await;
+
+                    // drop connection
+                    drop(stream);
+
+                    Ok(())
+                }
+            }
+        });
+
+        sim.client("client", async move {
+            let stream = TcpStream::connect("host:1234").await.unwrap();
+            let bus = Arc::new(Bus::new(1, |_, _| async {}));
+            let mut conn = Connection::new_acceptor(stream, bus);
+
+            notify.notify_waiters();
+
+            conn.tick().await;
+
+            assert!(matches!(conn.state, ConnectionState::CloseError(_)));
+
+            Ok(())
+        });
+
+        sim.run().unwrap();
+    }
+}
diff --git a/libsqlx-server/src/linc/connection_pool.rs b/libsqlx-server/src/linc/connection_pool.rs
new file mode 100644
index 00000000..3415dee4
--- /dev/null
+++ b/libsqlx-server/src/linc/connection_pool.rs
@@ -0,0 +1,83 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+
+use itertools::Itertools;
+use tokio::task::JoinSet;
+use tokio::time::Duration;
+
+use super::connection::Connection;
+use super::handler::Handler;
+use super::net::Connector;
+use super::{bus::Bus, NodeId};
+
+/// Manages a pool of connections to other peers, handling re-connection.
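+///
+/// Editorial note (not in the original patch): judging by the filter in `new`
+/// below, a node only dials peers whose id is smaller than its own, so for any
+/// pair of peers exactly one side (the higher id) initiates the connection.
+/// A minimal construction sketch, with hypothetical addresses:
+///
+/// ```ignore
+/// let bus = Arc::new(Bus::new(2, handler));
+/// let pool = ConnectionPool::new(
+///     bus,
+///     [(0, "node0:5001".to_string()), (1, "node1:5001".to_string())],
+/// );
+/// tokio::spawn(pool.run::<tokio::net::TcpStream>());
+/// ```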
+pub struct ConnectionPool<H> {
+    managed_peers: HashMap<NodeId, String>,
+    connections: JoinSet<NodeId>,
+    bus: Arc<Bus<H>>,
+}
+
+impl<H: Handler> ConnectionPool<H> {
+    pub fn new(
+        bus: Arc<Bus<H>>,
+        managed_peers: impl IntoIterator<Item = (NodeId, String)>,
+    ) -> Self {
+        Self {
+            managed_peers: managed_peers
+                .into_iter()
+                .filter(|(id, _)| *id < bus.node_id())
+                .collect(),
+            connections: JoinSet::new(),
+            bus,
+        }
+    }
+
+    pub fn managed_count(&self) -> usize {
+        self.managed_peers.len()
+    }
+
+    pub async fn run<C: Connector>(mut self) -> color_eyre::Result<()> {
+        self.init::<C>().await;
+
+        while self.tick::<C>().await {}
+
+        Ok(())
+    }
+
+    pub async fn tick<C: Connector>(&mut self) -> bool {
+        if let Some(maybe_to_restart) = self.connections.join_next().await {
+            if let Ok(to_restart) = maybe_to_restart {
+                self.connect::<C>(to_restart);
+            }
+            true
+        } else {
+            false
+        }
+    }
+
+    async fn init<C: Connector>(&mut self) {
+        let peers = self.managed_peers.keys().copied().collect_vec();
+        peers.into_iter().for_each(|p| self.connect::<C>(p));
+    }
+
+    fn connect<C: Connector>(&mut self, peer_id: NodeId) {
+        let bus = self.bus.clone();
+        let peer_addr = self.managed_peers[&peer_id].clone();
+        let fut = async move {
+            let stream = match C::connect(peer_addr.clone()).await {
+                Ok(stream) => stream,
+                Err(e) => {
+                    tracing::error!("error connecting to peer {peer_id}@{peer_addr}: {e}");
+                    tokio::time::sleep(Duration::from_secs(1)).await;
+                    return peer_id;
+                }
+            };
+            let connection = Connection::new_initiator(stream, bus.clone());
+            connection.run().await;
+
+            peer_id
+        };
+
+        self.connections.spawn(fut);
+    }
+}
diff --git a/libsqlx-server/src/linc/handler.rs b/libsqlx-server/src/linc/handler.rs
new file mode 100644
index 00000000..410e4c24
--- /dev/null
+++ b/libsqlx-server/src/linc/handler.rs
@@ -0,0 +1,23 @@
+use std::sync::Arc;
+
+use super::bus::Dispatch;
+use super::Inbound;
+
+#[async_trait::async_trait]
+pub trait Handler: Sized + Send + Sync + 'static {
+    /// Handle inbound message
+    async fn handle(&self, bus: Arc<dyn Dispatch>, msg: Inbound) -> crate::Result<()>;
+}
+
+#[cfg(test)]
+#[async_trait::async_trait]
+impl<F, Fut> Handler for F
+where
+    F: Fn(Arc<dyn Dispatch>, Inbound) -> Fut + Send + Sync + 'static,
+    Fut: std::future::Future + Send,
+{
+    async fn handle(&self, bus: Arc<dyn Dispatch>, msg: Inbound) -> crate::Result<()> {
+        (self)(bus, msg).await;
+        Ok(())
+    }
+}
diff --git a/libsqlx-server/src/linc/mod.rs b/libsqlx-server/src/linc/mod.rs
new file mode 100644
index 00000000..2ee07790
--- /dev/null
+++ b/libsqlx-server/src/linc/mod.rs
@@ -0,0 +1,27 @@
+use self::proto::Enveloppe;
+
+pub mod bus;
+pub mod connection;
+pub mod connection_pool;
+pub mod handler;
+pub mod net;
+pub mod proto;
+pub mod server;
+
+pub type NodeId = u64;
+
+const CURRENT_PROTO_VERSION: u32 = 1;
+
+#[derive(Debug)]
+pub struct Inbound {
+    /// Id of the node sending the message
+    pub from: NodeId,
+    /// payload
+    pub enveloppe: Enveloppe,
+}
+
+#[derive(Debug)]
+pub struct Outbound {
+    pub to: NodeId,
+    pub enveloppe: Enveloppe,
+}
diff --git a/libsqlx-server/src/linc/net.rs b/libsqlx-server/src/linc/net.rs
new file mode 100644
index 00000000..a7fa87af
--- /dev/null
+++ b/libsqlx-server/src/linc/net.rs
@@ -0,0 +1,90 @@
+use std::io;
+use std::net::SocketAddr;
+use std::pin::Pin;
+
+use futures::Future;
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::net::TcpListener;
+use tokio::net::TcpStream;
+
+pub trait Connector
+where
+    Self: Sized + AsyncRead + AsyncWrite + Unpin + 'static + Send,
+{
+    type Future: Future<Output = io::Result<Self>> + Send;
+
+    fn connect(addr: String) -> Self::Future;
+}
+
+impl Connector for TcpStream {
+    type Future = Pin<Box<dyn Future<Output = io::Result<Self>> + Send>>;
+
+    fn connect(addr: String) -> Self::Future {
+        Box::pin(TcpStream::connect(addr))
+    }
+}
+
+pub trait Listener {
+    type Stream: AsyncRead + AsyncWrite + Unpin + Send + 'static;
+    type Future<'a>: Future<Output = io::Result<(Self::Stream, SocketAddr)>> + 'a
+    where
+        Self: 'a;
+
+    fn accept(&self) -> Self::Future<'_>;
+    fn local_addr(&self) -> color_eyre::Result<SocketAddr>;
+}
+
+pub struct AcceptFut<'a>(&'a TcpListener);
+
+impl<'a> Future for AcceptFut<'a> {
+    type Output = io::Result<(TcpStream, SocketAddr)>;
+
+    fn poll(
+        self: Pin<&mut Self>,
+        cx: &mut std::task::Context<'_>,
+    ) -> std::task::Poll<Self::Output> {
+        self.0.poll_accept(cx)
+    }
+}
+
+impl Listener for TcpListener {
+    type Stream = TcpStream;
+    type Future<'a> = AcceptFut<'a>;
+
+    fn accept(&self) -> Self::Future<'_> {
+        AcceptFut(self)
+    }
+
+    fn local_addr(&self) -> color_eyre::Result<SocketAddr> {
+        Ok(self.local_addr()?)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    use turmoil::net::{TcpListener, TcpStream};
+
+    impl Listener for TcpListener {
+        type Stream = TcpStream;
+        type Future<'a> =
+            Pin<Box<dyn Future<Output = io::Result<(Self::Stream, SocketAddr)>> + 'a>>;
+
+        fn accept(&self) -> Self::Future<'_> {
+            Box::pin(self.accept())
+        }
+
+        fn local_addr(&self) -> color_eyre::Result<SocketAddr> {
+            Ok(self.local_addr()?)
+        }
+    }
+
+    impl Connector for TcpStream {
+        type Future = Pin<Box<dyn Future<Output = io::Result<Self>> + Send + 'static>>;
+
+        fn connect(addr: String) -> Self::Future {
+            Box::pin(Self::connect(addr))
+        }
+    }
+}
diff --git a/libsqlx-server/src/linc/proto.rs b/libsqlx-server/src/linc/proto.rs
new file mode 100644
index 00000000..7e3a583d
--- /dev/null
+++ b/libsqlx-server/src/linc/proto.rs
@@ -0,0 +1,168 @@
+use bytes::Bytes;
+use libsqlx::{program::Program, FrameNo};
+use serde::{Deserialize, Serialize};
+use uuid::Uuid;
+
+use crate::meta::DatabaseId;
+
+use super::NodeId;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct Enveloppe {
+    pub database_id: Option<DatabaseId>,
+    pub message: Message,
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+/// a batch of frames to inject
+pub struct Frames {
+    /// must match the Replicate request id
+    pub req_no: u32,
+    /// sequence id, monotonically incremented, reset when req_id changes.
+    /// Used to detect gaps in received frames.
+    pub seq_no: u32,
+    pub frames: Vec<Bytes>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+/// Response to a proxied query
+pub struct ProxyResponse {
+    pub connection_id: u32,
+    /// id of the request this message is a response to.
+    pub req_id: u32,
+    pub seq_no: u32,
+    /// Collection of steps to drive the query builder transducer.
+    pub row_steps: Vec<BuilderStep>,
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub enum Message {
+    /// Initial message exchanged between nodes when connecting
+    Handshake {
+        protocol_version: u32,
+        node_id: NodeId,
+    },
+    ReplicationHandshake {
+        database_name: String,
+    },
+    ReplicationHandshakeResponse {
+        /// id of the replication log
+        log_id: Uuid,
+        /// current frame_no of the primary
+        current_frame_no: u64,
+    },
+    Replicate {
+        /// incremental request id, used when responding with a Frames message
+        req_no: u32,
+        /// next frame no to send
+        next_frame_no: u64,
+    },
+    Frames(Frames),
+    /// Proxy a query to a primary
+    ProxyRequest {
+        /// id of the connection to perform the query against.
+        /// If the connection doesn't already exist, it is created.
+        connection_id: u32,
+        /// Id of the request.
+        /// Responses to this request must have the same id.
+        req_id: u32,
+        program: Program,
+    },
+    ProxyResponse(ProxyResponse),
+    /// Stop processing request `id`.
+    CancelRequest {
+        req_id: u32,
+    },
+    /// Close Connection with passed id.
+    CloseConnection {
+        connection_id: u32,
+    },
+    Error(ProtoError),
+}
+
+#[derive(Debug, Serialize, Deserialize, thiserror::Error, PartialEq, Eq)]
+pub enum ProtoError {
+    /// Incompatible protocol versions
+    #[error("invalid protocol version, expected: {expected}")]
+    HandshakeVersionMismatch { expected: u32 },
+    #[error("unknown database {0}")]
+    UnknownDatabase(String),
+}
+
+/// Steps applied to the query builder transducer to build a response to a proxied query.
+/// Those types closely mirror those of the `QueryBuilderTrait`.
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub enum BuilderStep {
+    Init,
+    BeginStep,
+    FinishStep(u64, Option<i64>),
+    StepError(StepError),
+    ColsDesc(Vec<Column>),
+    BeginRows,
+    BeginRow,
+    AddRowValue(Value),
+    FinishRow,
+    FinishRows,
+    Finnalize {
+        is_txn: bool,
+        frame_no: Option<FrameNo>,
+    },
+    FinnalizeError(String),
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub enum Value {
+    Null,
+    Integer(i64),
+    Real(f64),
+    // TODO: how to stream blobs/string???
+    Text(Vec<u8>),
+    Blob(Vec<u8>),
+}
+
+impl<'a> Into<libsqlx::result_builder::ValueRef<'a>> for &'a Value {
+    fn into(self) -> libsqlx::result_builder::ValueRef<'a> {
+        use libsqlx::result_builder::ValueRef;
+        match self {
+            Value::Null => ValueRef::Null,
+            Value::Integer(i) => ValueRef::Integer(*i),
+            Value::Real(x) => ValueRef::Real(*x),
+            Value::Text(ref t) => ValueRef::Text(t),
+            Value::Blob(ref b) => ValueRef::Blob(b),
+        }
+    }
+}
+
+impl From<libsqlx::result_builder::ValueRef<'_>> for Value {
+    fn from(value: libsqlx::result_builder::ValueRef) -> Self {
+        use libsqlx::result_builder::ValueRef;
+        match value {
+            ValueRef::Null => Self::Null,
+            ValueRef::Integer(i) => Self::Integer(i),
+            ValueRef::Real(x) => Self::Real(x),
+            ValueRef::Text(s) => Self::Text(s.into()),
+            ValueRef::Blob(b) => Self::Blob(b.into()),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct Column {
+    /// name of the column
+    pub name: String,
+    /// Declared type of the column, if any.
+    pub decl_ty: Option<String>,
+}
+
+impl From<libsqlx::result_builder::Column<'_>> for Column {
+    fn from(value: libsqlx::result_builder::Column) -> Self {
+        Self {
+            name: value.name.to_string(),
+            decl_ty: value.decl_ty.map(ToOwned::to_owned),
+        }
+    }
+}
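+
+// Editorial sketch (not part of the original patch): a plausible `row_steps`
+// sequence for a proxied single-statement query such as `SELECT 1` returning
+// one row with one column. The exact stream is produced by the primary's
+// result builder; this only illustrates the shape of the transducer input:
+//
+//     [
+//         BuilderStep::Init,
+//         BuilderStep::BeginStep,
+//         BuilderStep::ColsDesc(vec![Column { name: "1".into(), decl_ty: None }]),
+//         BuilderStep::BeginRows,
+//         BuilderStep::BeginRow,
+//         BuilderStep::AddRowValue(Value::Integer(1)),
+//         BuilderStep::FinishRow,
+//         BuilderStep::FinishRows,
+//         BuilderStep::FinishStep(0, None),
+//         BuilderStep::Finnalize { is_txn: false, frame_no: None },
+//     ]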
+
+/// for now, the stringified version of a sqld::error::Error.
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)]
+pub struct StepError(pub String);
diff --git a/libsqlx-server/src/linc/server.rs b/libsqlx-server/src/linc/server.rs
new file mode 100644
index 00000000..6371a059
--- /dev/null
+++ b/libsqlx-server/src/linc/server.rs
@@ -0,0 +1,120 @@
+use std::sync::Arc;
+
+use tokio::io::{AsyncRead, AsyncWrite};
+use tokio::task::JoinSet;
+
+use crate::linc::connection::Connection;
+
+use super::bus::Bus;
+use super::handler::Handler;
+
+pub struct Server<H> {
+    /// reference to the bus
+    bus: Arc<Bus<H>>,
+    /// Connection tasks owned by the server
+    connections: JoinSet<color_eyre::Result<()>>,
+}
+
+impl<H: Handler> Server<H> {
+    pub fn new(bus: Arc<Bus<H>>) -> Self {
+        Self {
+            bus,
+            connections: JoinSet::new(),
+        }
+    }
+
+    /// Close all connections
+    #[cfg(test)]
+    pub async fn close_connections(&mut self) {
+        self.connections.abort_all();
+        while self.connections.join_next().await.is_some() {}
+    }
+
+    pub async fn run<L>(mut self, mut listener: L) -> color_eyre::Result<()>
+    where
+        L: super::net::Listener,
+    {
+        tracing::info!("Cluster server listening on {}", listener.local_addr()?);
+        while self.tick(&mut listener).await {}
+
+        Ok(())
+    }
+
+    pub async fn tick<L>(&mut self, listener: &mut L) -> bool
+    where
+        L: super::net::Listener,
+    {
+        match listener.accept().await {
+            Ok((stream, _addr)) => {
+                self.make_connection(stream).await;
+                true
+            }
+            Err(e) => {
+                tracing::error!("error creating connection: {e}");
+                false
+            }
+        }
+    }
+
+    async fn make_connection<S>(&mut self, stream: S)
+    where
+        S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
+    {
+        let bus = self.bus.clone();
+        let fut = async move {
+            let connection = Connection::new_acceptor(stream, bus);
+            connection.run().await;
+            Ok(())
+        };
+
+        self.connections.spawn(fut);
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::sync::Arc;
+
+    use super::*;
+
+    use turmoil::net::TcpStream;
+
+    #[test]
+    fn server_respond_to_handshake() {
+        let mut sim = turmoil::Builder::new().build();
+
+        let host_node_id = 0;
+        let notify = Arc::new(tokio::sync::Notify::new());
+        sim.host("host", move || {
+            let notify = notify.clone();
+            async move {
+                let bus = Arc::new(Bus::new(host_node_id, |_, _| async {}));
+                let mut server = Server::new(bus);
+                let mut listener = turmoil::net::TcpListener::bind("0.0.0.0:1234")
+                    .await
+                    .unwrap();
+                server.tick(&mut listener).await;
+                notify.notified().await;
+
+                Ok(())
+            }
+        });
+
+        sim.client("client", async move {
+            let node_id = 1;
+            let mut c = Connection::new_initiator(
+                TcpStream::connect("host:1234").await.unwrap(),
+                Arc::new(Bus::new(node_id, |_, _| async {})),
+            );
+
+            c.tick().await;
+            c.tick().await;
+
+            assert_eq!(c.peer, Some(host_node_id));
+
+            Ok(())
+        });
+
+        sim.run().unwrap();
+    }
+}
diff --git a/libsqlx-server/src/main.rs b/libsqlx-server/src/main.rs
new file mode 100644
index 00000000..6162df26
--- /dev/null
+++ b/libsqlx-server/src/main.rs
@@ -0,0 +1,181 @@
+use std::fs::read_to_string;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use clap::Parser;
+use compactor::{run_compactor_loop, CompactionQueue};
+use config::{AdminApiConfig, ClusterConfig, UserApiConfig};
+use http::admin::run_admin_api;
+use http::user::run_user_api;
+use hyper::server::conn::AddrIncoming;
+use linc::bus::Bus;
+use manager::Manager;
+use meta::Store;
+use replica_commit_store::ReplicaCommitStore;
+use snapshot_store::SnapshotStore;
+use tokio::fs::create_dir_all;
+use tokio::net::{TcpListener, TcpStream};
+use tokio::task::JoinSet;
+use tracing::metadata::LevelFilter;
+use tracing_subscriber::prelude::*;
+
+mod allocation;
+mod compactor;
+mod config;
+mod database;
+mod error;
+mod hrana;
+mod http;
+mod linc;
+mod manager;
+mod meta;
+mod replica_commit_store;
+mod snapshot_store;
+
+pub type Result<T, E = error::Error> = std::result::Result<T, E>;
+
+#[derive(Debug, Parser)]
+struct Args {
+    /// Path to the node configuration file
+    #[clap(long, short)]
+    config: PathBuf,
+}
+
+async fn spawn_admin_api(
+    set: &mut JoinSet<color_eyre::Result<()>>,
+    config: &AdminApiConfig,
+    bus: Arc<Bus<Arc<Manager>>>,
+) -> color_eyre::Result<()> {
+    let admin_api_listener = TcpListener::bind(config.addr).await?;
+    let fut = run_admin_api(
+        http::admin::Config { bus },
+        AddrIncoming::from_listener(admin_api_listener)?,
+    );
+    set.spawn(fut);
+
+    Ok(())
+}
+
+async fn spawn_user_api(
+    set: &mut JoinSet<color_eyre::Result<()>>,
+    config: &UserApiConfig,
+    manager: Arc<Manager>,
+    bus: Arc<Bus<Arc<Manager>>>,
+) -> color_eyre::Result<()> {
+    let user_api_listener = TcpListener::bind(config.addr).await?;
+    let hrana_server = Arc::new(hrana::http::Server::new(None));
+    set.spawn({
+        let hrana_server = hrana_server.clone();
+        async move {
+            hrana_server.run_expire().await;
+            Ok(())
+        }
+    });
+    set.spawn(run_user_api(
+        http::user::Config {
+            manager,
+            bus,
+            hrana_server,
+        },
+        AddrIncoming::from_listener(user_api_listener)?,
+    ));
+
+    Ok(())
+}
+
+async fn spawn_cluster_networking(
+    set: &mut JoinSet<color_eyre::Result<()>>,
+    config: &ClusterConfig,
+    bus: Arc<Bus<Arc<Manager>>>,
+) -> color_eyre::Result<()> {
+    let server = linc::server::Server::new(bus.clone());
+
+    let listener = TcpListener::bind(config.addr).await?;
+    set.spawn(server.run(listener));
+
+    let pool = linc::connection_pool::ConnectionPool::new(
+        bus,
+        config.peers.iter().map(|p| (p.id, p.addr.clone())),
+    );
+    if pool.managed_count() > 0 {
+        set.spawn(pool.run::<TcpStream>());
+    }
+
+    Ok(())
+}
+
+async fn init_dirs(db_path: &Path) -> color_eyre::Result<()> {
+    create_dir_all(&db_path).await?;
+    create_dir_all(db_path.join("tmp")).await?;
+    create_dir_all(db_path.join("snapshot_queue")).await?;
+    create_dir_all(db_path.join("snapshots")).await?;
+    create_dir_all(db_path.join("dbs")).await?;
+    create_dir_all(db_path.join("meta")).await?;
+
+    Ok(())
+}
+
+#[tokio::main(flavor = "multi_thread", worker_threads = 10)]
+async fn main() -> color_eyre::Result<()> {
+    init()?;
+
+    let args = Args::parse();
+    let config_str = read_to_string(args.config)?;
+    let config: config::Config = toml::from_str(&config_str)?;
+    config.validate()?;
+
+    let mut join_set = JoinSet::new();
+
+    init_dirs(&config.db_path).await?;
+
+    let env = heed::EnvOpenOptions::new()
+        .max_dbs(1000)
+        .map_size(100 * 1024 * 1024)
+        .open(config.db_path.join("meta"))?;
+
+    let snapshot_store = Arc::new(SnapshotStore::new(config.db_path.clone(), env.clone())?);
+    let compaction_queue = Arc::new(CompactionQueue::new(
+        env.clone(),
+        config.db_path.clone(),
+        snapshot_store,
+    )?);
+    let store = Arc::new(Store::new(env.clone())?);
+    let replica_commit_store = Arc::new(ReplicaCommitStore::new(env.clone())?);
+    let manager = Arc::new(Manager::new(
+        config.db_path.clone(),
+        store.clone(),
+        100,
+        compaction_queue.clone(),
+        replica_commit_store,
+    ));
+    let bus = Arc::new(Bus::new(config.cluster.id, manager.clone()));
+
+    join_set.spawn(run_compactor_loop(compaction_queue));
+    spawn_cluster_networking(&mut join_set, &config.cluster, bus.clone()).await?;
+    spawn_admin_api(&mut join_set, &config.admin_api, bus.clone()).await?;
+    spawn_user_api(&mut join_set, &config.user_api, manager, bus).await?;
+
+    join_set.join_next().await;
+
+    Ok(())
+}
+
+fn init() -> color_eyre::Result<()> {
+    let registry = tracing_subscriber::registry();
+
+    registry
+        .with(
+            tracing_subscriber::fmt::layer()
+                .with_ansi(false)
+                .with_filter(
+                    tracing_subscriber::EnvFilter::builder()
+                        .with_default_directive(LevelFilter::INFO.into())
+                        .from_env_lossy(),
+                ),
+        )
+        .init();
+
+    color_eyre::install()?;
+
+    Ok(())
+}
diff --git a/libsqlx-server/src/manager.rs b/libsqlx-server/src/manager.rs
new file mode 100644
index 00000000..2f0cafa1
--- /dev/null
+++ b/libsqlx-server/src/manager.rs
@@ -0,0 +1,127 @@
+use std::collections::HashMap;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use moka::future::Cache;
+use tokio::sync::mpsc;
+use tokio::task::JoinSet;
+
+use crate::allocation::config::AllocConfig;
+use crate::allocation::{Allocation, AllocationMessage, Database};
+use crate::compactor::CompactionQueue;
+use crate::linc::bus::Dispatch;
+use crate::linc::handler::Handler;
+use crate::linc::Inbound;
+use crate::meta::{AllocMeta, DatabaseId, Store};
+use crate::replica_commit_store::ReplicaCommitStore;
+
+pub struct Manager {
+    cache: Cache<DatabaseId, mpsc::Sender<AllocationMessage>>,
+    meta_store: Arc<Store>,
+    db_path: PathBuf,
+    compaction_queue: Arc<CompactionQueue>,
+    replica_commit_store: Arc<ReplicaCommitStore>,
+}
+
+const MAX_ALLOC_MESSAGE_QUEUE_LEN: usize = 32;
+
+impl Manager {
+    pub fn new(
+        db_path: PathBuf,
+        meta_store: Arc<Store>,
+        max_conccurent_allocs: u64,
+        compaction_queue: Arc<CompactionQueue>,
+        replica_commit_store: Arc<ReplicaCommitStore>,
+    ) -> Self {
+        Self {
+            cache: Cache::new(max_conccurent_allocs),
+            meta_store,
+            db_path,
+            compaction_queue,
+            replica_commit_store,
+        }
+    }
+
+    /// Returns a handle to an allocation, lazily initializing it if it isn't already loaded.
+    pub async fn schedule(
+        self: &Arc<Self>,
+        database_id: DatabaseId,
+        dispatcher: Arc<dyn Dispatch>,
+    ) -> crate::Result<Option<mpsc::Sender<AllocationMessage>>> {
+        if let Some(sender) = self.cache.get(&database_id) {
+            return Ok(Some(sender.clone()));
+        }
+
+        if let Some(meta) = self.meta_store.meta(&database_id)? {
+            let path = self.db_path.join("dbs").join(database_id.to_string());
+            tokio::fs::create_dir_all(&path).await?;
+            let (alloc_sender, inbox) = mpsc::channel(MAX_ALLOC_MESSAGE_QUEUE_LEN);
+            let alloc = Allocation {
+                inbox,
+                database: Database::from_config(
+                    &meta.config,
+                    path,
+                    dispatcher.clone(),
+                    self.compaction_queue.clone(),
+                    self.replica_commit_store.clone(),
+                )?,
+                connections_futs: JoinSet::new(),
+                next_conn_id: 0,
+                max_concurrent_connections: meta.config.max_conccurent_connection,
+                dispatcher,
+                db_name: meta.config.db_name,
+                connections: HashMap::new(),
+            };
+
+            tokio::spawn(alloc.run());
+
+            self.cache.insert(database_id, alloc_sender.clone()).await;
+
+            return Ok(Some(alloc_sender));
+        }
+
+        Ok(None)
+    }
+
+    pub async fn allocate(
+        self: &Arc<Self>,
+        database_id: DatabaseId,
+        config: AllocConfig,
+        dispatcher: Arc<dyn Dispatch>,
+    ) -> crate::Result<AllocMeta> {
+        let meta = self.store().allocate(&database_id, config)?;
+        self.schedule(database_id, dispatcher).await?;
+
+        Ok(meta)
+    }
+
+    pub async fn deallocate(&self, database_id: DatabaseId) -> crate::Result<()> {
+        self.meta_store.deallocate(&database_id)?;
+        self.cache.remove(&database_id).await;
+        let db_path = self.db_path.join("dbs").join(database_id.to_string());
+        if db_path.exists() {
+            tokio::fs::remove_dir_all(db_path).await?;
+        }
+
+        Ok(())
+    }
+
+    pub fn store(&self) -> &Store {
+        &self.meta_store
+    }
+}
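+
+// Editorial note (not part of the original patch): `Manager` doubles as the
+// node's message `Handler` (below), so an `Enveloppe` tagged with a
+// `database_id` becomes an `AllocationMessage` for the matching allocation,
+// which is spun up on demand via `schedule`. A rough message flow:
+//
+//     Connection::handle_message -> Bus::incomming -> Manager::handle
+//         -> Manager::schedule(db_id) -> AllocationMessage::Inbound(msg)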
+
+#[async_trait::async_trait]
+impl Handler for Arc<Manager> {
+    async fn handle(&self, bus: Arc<dyn Dispatch>, msg: Inbound) -> crate::Result<()> {
+        if let Some(database_id) = msg.enveloppe.database_id {
+            if let Some(sender) = self.clone().schedule(database_id, bus.clone()).await? {
+                sender
+                    .send(AllocationMessage::Inbound(msg))
+                    .await
+                    .map_err(|_| crate::error::Error::AllocationClosed)?;
+            }
+        }
+        Ok(())
+    }
+}
diff --git a/libsqlx-server/src/meta.rs b/libsqlx-server/src/meta.rs
new file mode 100644
index 00000000..a2a3ac87
--- /dev/null
+++ b/libsqlx-server/src/meta.rs
@@ -0,0 +1,136 @@
+use std::fmt;
+use std::mem::size_of;
+
+use chrono::{DateTime, Utc};
+use heed::bytemuck::{Pod, Zeroable};
+use heed_types::{OwnedType, SerdeBincode};
+use itertools::Itertools;
+use serde::{Deserialize, Serialize};
+use sha3::digest::{ExtendableOutput, Update, XofReader};
+use sha3::Shake128;
+use tokio::task::block_in_place;
+
+use crate::allocation::config::AllocConfig;
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct AllocMeta {
+    pub config: AllocConfig,
+    pub created_at: DateTime<Utc>,
+}
+
+pub struct Store {
+    env: heed::Env,
+    alloc_config_db: heed::Database<OwnedType<DatabaseId>, SerdeBincode<AllocMeta>>,
+}
+
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Hash, Clone, Copy, Pod, Zeroable)]
+#[repr(transparent)]
+pub struct DatabaseId([u8; 16]);
+
+impl DatabaseId {
+    pub fn from_name(name: &str) -> Self {
+        let mut hasher = Shake128::default();
+        hasher.update(name.as_bytes());
+        let mut reader = hasher.finalize_xof();
+        let mut out = [0; 16];
+        reader.read(&mut out);
+        Self(out)
+    }
+
+    pub fn from_bytes(bytes: &[u8]) -> Self {
+        assert_eq!(bytes.len(), size_of::<Self>());
+        Self(bytes.try_into().unwrap())
+    }
+
+    #[cfg(test)]
+    pub fn random() -> Self {
+        Self(uuid::Uuid::new_v4().into_bytes())
+    }
+}
+
+impl fmt::Display for DatabaseId {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{:x}", u128::from_be_bytes(self.0))
+    }
+}
+
+impl AsRef<[u8]> for DatabaseId {
+    fn as_ref(&self) -> &[u8] {
+        &self.0
+    }
+}
+
+#[derive(Debug, thiserror::Error)]
+pub enum AllocationError {
+    #[error("an allocation already exists for {0}")]
+    AlreadyExist(String),
+}
+
+impl Store {
+    const ALLOC_CONFIG_DB_NAME: &'static str = "alloc_conf_db";
+
+    pub fn new(env: heed::Env) -> crate::Result<Self> {
+        let mut txn = env.write_txn()?;
+        let alloc_config_db = env.create_database(&mut txn, Some(Self::ALLOC_CONFIG_DB_NAME))?;
+        txn.commit()?;
+
+        Ok(Self {
+            env,
+            alloc_config_db,
+        })
+    }
+
+    pub fn allocate(&self, id: &DatabaseId, config: AllocConfig) -> crate::Result<AllocMeta> {
+        block_in_place(|| {
+            let mut txn = self.env.write_txn()?;
+            if self
+                .alloc_config_db
+                .lazily_decode_data()
+                .get(&txn, id)?
+                .is_some()
+            {
+                Err(AllocationError::AlreadyExist(config.db_name.clone()))?;
+            };
+
+            let meta = AllocMeta {
+                config,
+                created_at: Utc::now(),
+            };
+
+            self.alloc_config_db.put(&mut txn, id, &meta)?;
+
+            txn.commit()?;
+
+            Ok(meta)
+        })
+    }
+
+    pub fn deallocate(&self, id: &DatabaseId) -> crate::Result<()> {
+        block_in_place(|| {
+            let mut txn = self.env.write_txn()?;
+            self.alloc_config_db.delete(&mut txn, id)?;
+            txn.commit()?;
+
+            Ok(())
+        })
+    }
+
+    pub fn meta(&self, id: &DatabaseId) -> crate::Result<Option<AllocMeta>> {
+        block_in_place(|| {
+            let txn = self.env.read_txn()?;
+            Ok(self.alloc_config_db.get(&txn, id)?)
+        })
+    }
+
+    pub fn list_allocs(&self) -> crate::Result<Vec<AllocMeta>> {
+        block_in_place(|| {
+            let txn = self.env.read_txn()?;
+            let res = self
+                .alloc_config_db
+                .iter(&txn)?
+                .map(|x| x.map(|x| x.1))
+                .try_collect()?;
+            Ok(res)
+        })
+    }
+}
diff --git a/libsqlx-server/src/replica_commit_store.rs b/libsqlx-server/src/replica_commit_store.rs
new file mode 100644
index 00000000..2598c3b0
--- /dev/null
+++ b/libsqlx-server/src/replica_commit_store.rs
@@ -0,0 +1,34 @@
+use heed_types::OwnedType;
+use libsqlx::FrameNo;
+
+use crate::meta::DatabaseId;
+
+/// Stores the last injected commit index for each replica
+pub struct ReplicaCommitStore {
+    env: heed::Env,
+    database: heed::Database<OwnedType<DatabaseId>, OwnedType<FrameNo>>,
+}
+
+impl ReplicaCommitStore {
+    const DB_NAME: &str = "replica-commit-store";
+
+    pub fn new(env: heed::Env) -> crate::Result<Self> {
+        let mut txn = env.write_txn()?;
+        let database = env.create_database(&mut txn, Some(Self::DB_NAME))?;
+        txn.commit()?;
+
+        Ok(Self { env, database })
+    }
+
+    pub fn commit(&self, database_id: DatabaseId, frame_no: FrameNo) -> crate::Result<()> {
+        let mut txn = self.env.write_txn()?;
+        self.database.put(&mut txn, &database_id, &frame_no)?;
+        txn.commit()?;
+
+        Ok(())
+    }
+
+    pub fn get_commit_index(&self, database_id: DatabaseId) -> crate::Result<Option<FrameNo>> {
+        let txn = self.env.read_txn()?;
+        Ok(self.database.get(&txn, &database_id)?)
+    }
+}
diff --git a/libsqlx-server/src/snapshot_store.rs b/libsqlx-server/src/snapshot_store.rs
new file mode 100644
index 00000000..73cbc0cb
--- /dev/null
+++ b/libsqlx-server/src/snapshot_store.rs
@@ -0,0 +1,218 @@
+use std::mem::size_of;
+use std::path::PathBuf;
+
+use bytemuck::{Pod, Zeroable};
+use heed_types::{CowType, SerdeBincode};
+use libsqlx::FrameNo;
+use serde::{Deserialize, Serialize};
+use tokio::task::block_in_place;
+use uuid::Uuid;
+
+use crate::{compactor::SnapshotFile, meta::DatabaseId};
+
+/// Equivalent to a u64, but stored in big-endian ordering.
+/// Used for storing values whose bytes need to be lexically ordered.
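+/// (Editorial example, not in the original patch: the little-endian bytes of
+/// 256u64 are [0, 1, 0, ...], which would sort *before* 1u64's [1, 0, ...];
+/// the big-endian encodings [.., 1, 0] and [.., 0, 1] instead compare in
+/// numeric order, which the range lookup in `locate` below relies on.)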
+#[derive(Clone, Copy, Zeroable, Pod, Debug)]
+#[repr(transparent)]
+struct BEU64([u8; size_of::<u64>()]);
+
+impl From<u64> for BEU64 {
+    fn from(value: u64) -> Self {
+        Self(value.to_be_bytes())
+    }
+}
+
+impl From<BEU64> for u64 {
+    fn from(value: BEU64) -> Self {
+        u64::from_be_bytes(value.0)
+    }
+}
+
+#[derive(Clone, Copy, Zeroable, Pod, Debug)]
+#[repr(C)]
+struct SnapshotKey {
+    database_id: DatabaseId,
+    start_frame_no: BEU64,
+    end_frame_no: BEU64,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub struct SnapshotMeta {
+    pub snapshot_id: Uuid,
+}
+
+pub struct SnapshotStore {
+    env: heed::Env,
+    database: heed::Database<CowType<SnapshotKey>, SerdeBincode<SnapshotMeta>>,
+    db_path: PathBuf,
+}
+
+impl SnapshotStore {
+    const SNAPSHOT_STORE_NAME: &str = "snapshot-store-db";
+
+    pub fn new(db_path: PathBuf, env: heed::Env) -> crate::Result<Self> {
+        let mut txn = env.write_txn()?;
+        let database = env.create_database(&mut txn, Some(Self::SNAPSHOT_STORE_NAME))?;
+        txn.commit()?;
+
+        Ok(Self {
+            database,
+            db_path,
+            env,
+        })
+    }
+
+    pub fn register(
+        &self,
+        txn: &mut heed::RwTxn,
+        database_id: DatabaseId,
+        start_frame_no: FrameNo,
+        end_frame_no: FrameNo,
+        snapshot_id: Uuid,
+    ) -> crate::Result<()> {
+        let key = SnapshotKey {
+            database_id,
+            start_frame_no: start_frame_no.into(),
+            end_frame_no: end_frame_no.into(),
+        };
+
+        let data = SnapshotMeta { snapshot_id };
+
+        block_in_place(|| self.database.put(txn, &key, &data))?;
+
+        Ok(())
+    }
+
+    /// Locate a snapshot for `database_id` that contains `frame_no`
+    pub fn locate(
+        &self,
+        database_id: DatabaseId,
+        frame_no: FrameNo,
+    ) -> crate::Result<Option<SnapshotMeta>> {
+        let txn = self.env.read_txn()?;
+        // Snapshot keys are lexicographically ordered, so looking for the first key less than or
+        // equal to (db_id, frame_no, FrameNo::MAX) will always return the entry we're looking
+        // for, if it exists.
+        let key = SnapshotKey {
+            database_id,
+            start_frame_no: frame_no.into(),
+            end_frame_no: u64::MAX.into(),
+        };
+
+        match self.database.get_lower_than_or_equal_to(&txn, &key)? {
+            Some((key, v)) => {
+                if key.database_id != database_id {
+                    Ok(None)
+                } else if frame_no >= key.start_frame_no.into()
+                    && frame_no <= key.end_frame_no.into()
+                {
+                    tracing::debug!(
+                        "found snapshot for {frame_no}; {}-{}",
+                        u64::from(key.start_frame_no),
+                        u64::from(key.end_frame_no)
+                    );
+                    Ok(Some(v))
+                } else {
+                    Ok(None)
+                }
+            }
+            None => Ok(None),
+        }
+    }
+
+    pub fn locate_file(
+        &self,
+        database_id: DatabaseId,
+        frame_no: FrameNo,
+    ) -> crate::Result<Option<SnapshotFile>> {
+        let Some(meta) = self.locate(database_id, frame_no)? else {
+            return Ok(None);
+        };
+        let path = self
+            .db_path
+            .join("snapshots")
+            .join(meta.snapshot_id.to_string());
+        Ok(Some(SnapshotFile::open(&path)?))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn insert_and_locate() {
+        let temp = tempfile::tempdir().unwrap();
+        let env = heed::EnvOpenOptions::new()
+            .max_dbs(10)
+            .map_size(1000 * 4096)
+            .open(temp.path())
+            .unwrap();
+        let store = SnapshotStore::new(temp.path().to_path_buf(), env).unwrap();
+        let mut txn = store.env.write_txn().unwrap();
+        let db_id = DatabaseId::random();
+        let snapshot_id = Uuid::new_v4();
+        store.register(&mut txn, db_id, 0, 51, snapshot_id).unwrap();
+        txn.commit().unwrap();
+
+        assert!(store.locate(db_id, 0).unwrap().is_some());
+        assert!(store.locate(db_id, 17).unwrap().is_some());
+        assert!(store.locate(db_id, 51).unwrap().is_some());
+        assert!(store.locate(db_id, 52).unwrap().is_none());
+    }
+
+    #[test]
+    fn multiple_snapshots() {
+        let temp = tempfile::tempdir().unwrap();
+        let env = heed::EnvOpenOptions::new()
+            .max_dbs(10)
+            .map_size(1000 * 4096)
+            .open(temp.path())
+            .unwrap();
+        let store = SnapshotStore::new(temp.path().to_path_buf(), env).unwrap();
+        let mut txn = store.env.write_txn().unwrap();
+        let db_id = DatabaseId::random();
+        let snapshot_1_id = Uuid::new_v4();
+        store.register(&mut txn, db_id, 0, 51, snapshot_1_id).unwrap();
+        let snapshot_2_id = Uuid::new_v4();
+        store.register(&mut txn, db_id, 52, 112, snapshot_2_id).unwrap();
+        txn.commit().unwrap();
+
+        assert_eq!(store.locate(db_id, 0).unwrap().unwrap().snapshot_id, snapshot_1_id);
+        assert_eq!(store.locate(db_id, 17).unwrap().unwrap().snapshot_id, snapshot_1_id);
+        assert_eq!(store.locate(db_id, 51).unwrap().unwrap().snapshot_id, snapshot_1_id);
+        assert_eq!(store.locate(db_id, 52).unwrap().unwrap().snapshot_id, snapshot_2_id);
+        assert_eq!(store.locate(db_id, 100).unwrap().unwrap().snapshot_id, snapshot_2_id);
+        assert_eq!(store.locate(db_id, 112).unwrap().unwrap().snapshot_id, snapshot_2_id);
+        assert!(store.locate(db_id, 12345).unwrap().is_none());
+    }
+
+    #[test]
+    fn multiple_databases() {
+        let temp = tempfile::tempdir().unwrap();
+        let env = heed::EnvOpenOptions::new()
+            .max_dbs(10)
+            .map_size(1000 * 4096)
+            .open(temp.path())
+            .unwrap();
+        let store = SnapshotStore::new(temp.path().to_path_buf(), env).unwrap();
+        let mut txn = store.env.write_txn().unwrap();
+        let db_id1 = DatabaseId::random();
+        let db_id2 = DatabaseId::random();
+        let snapshot_id1 = Uuid::new_v4();
+        let snapshot_id2 = Uuid::new_v4();
+        store.register(&mut txn, db_id1, 0, 51, snapshot_id1).unwrap();
+        store.register(&mut txn, db_id2, 0, 51, snapshot_id2).unwrap();
+        txn.commit().unwrap();
+
+        assert_eq!(store.locate(db_id1, 0).unwrap().unwrap().snapshot_id, snapshot_id1);
+        assert_eq!(store.locate(db_id2, 0).unwrap().unwrap().snapshot_id, snapshot_id2);
+
+        assert_eq!(store.locate(db_id1, 12).unwrap().unwrap().snapshot_id, snapshot_id1);
+        assert_eq!(store.locate(db_id2, 18).unwrap().unwrap().snapshot_id, snapshot_id2);
+
+        assert_eq!(store.locate(db_id1, 51).unwrap().unwrap().snapshot_id, snapshot_id1);
+        assert_eq!(store.locate(db_id2, 51).unwrap().unwrap().snapshot_id, snapshot_id2);
+
+        assert!(store.locate(db_id1, 52).unwrap().is_none());
+        assert!(store.locate(db_id2, 52).unwrap().is_none());
+    }
+}
diff --git a/libsqlx/Cargo.toml b/libsqlx/Cargo.toml
new file mode 100644
index 00000000..519339ec
--- /dev/null
+++ b/libsqlx/Cargo.toml
@@ -0,0 +1,39 @@
+[package]
+name = "libsqlx"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+async-trait = "0.1.68"
+bytesize = "1.2.0"
+serde = { version = "1", features = ["rc"] }
+serde_json = "1.0.99"
"1.0.99" +rusqlite = { workspace = true } +anyhow = "1.0.71" +sqlite3-parser = "0.9.0" +fallible-iterator = "0.3.0" +bytes = "1.4.0" +tracing = "0.1.37" +bytemuck = { version = "1.13.1", features = ["derive"] } +parking_lot = "0.12.1" +uuid = { version = "1.4.0", features = ["v4"] } +sqld-libsql-bindings = { version = "0", path = "../sqld-libsql-bindings" } +crossbeam = "0.8.2" +thiserror = "1.0.40" +nix = "0.26.2" +crc = "3.0.1" +once_cell = "1.18.0" +regex = "1.8.4" +tempfile = "3.6.0" +either = "1.8.1" +tokio = { version = "1", optional = true, features = ["sync"] } + +[dev-dependencies] +arbitrary = { version = "1.3.0", features = ["derive"] } +itertools = "0.11.0" +rand = "0.8.5" + +[features] +tokio = ["dep:tokio"] diff --git a/libsqlx/assets/test/simple_wallog b/libsqlx/assets/test/simple_wallog new file mode 100644 index 00000000..42e5b3a9 Binary files /dev/null and b/libsqlx/assets/test/simple_wallog differ diff --git a/libsqlx/src/analysis.rs b/libsqlx/src/analysis.rs new file mode 100644 index 00000000..0706ebff --- /dev/null +++ b/libsqlx/src/analysis.rs @@ -0,0 +1,286 @@ +use fallible_iterator::FallibleIterator; +use serde::{Deserialize, Serialize}; +use sqlite3_parser::ast::{Cmd, PragmaBody, QualifiedName, Stmt}; +use sqlite3_parser::lexer::sql::{Parser, ParserError}; + +/// A group of statements to be executed together. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Statement { + pub stmt: String, + pub kind: StmtKind, + /// Is the statement an INSERT, UPDATE or DELETE? + pub is_iud: bool, + pub is_insert: bool, +} + +impl Default for Statement { + fn default() -> Self { + Self::empty() + } +} + +/// Classify statement in categories of interest. +#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)] +pub enum StmtKind { + /// The begining of a transaction + TxnBegin, + /// The end of a transaction + TxnEnd, + Read, + Write, + Other, +} + +fn is_temp(name: &QualifiedName) -> bool { + name.db_name.as_ref().map(|n| n.0.as_str()) == Some("TEMP") +} + +fn is_reserved_tbl(name: &QualifiedName) -> bool { + let n = name.name.0.to_lowercase(); + n == "_litestream_seq" || n == "_litestream_lock" || n == "libsql_wasm_func_table" +} + +fn write_if_not_reserved(name: &QualifiedName) -> Option { + (!is_reserved_tbl(name)).then_some(StmtKind::Write) +} + +impl StmtKind { + fn kind(cmd: &Cmd) -> Option { + match cmd { + Cmd::Explain(Stmt::Pragma(name, body)) => Self::pragma_kind(name, body.as_ref()), + Cmd::Explain(_) => Some(Self::Other), + Cmd::ExplainQueryPlan(_) => Some(Self::Other), + Cmd::Stmt(Stmt::Begin { .. }) => Some(Self::TxnBegin), + Cmd::Stmt(Stmt::Commit { .. } | Stmt::Rollback { .. }) => Some(Self::TxnEnd), + Cmd::Stmt( + Stmt::CreateVirtualTable { tbl_name, .. } + | Stmt::CreateTable { + tbl_name, + temporary: false, + .. + }, + ) if !is_temp(tbl_name) => Some(Self::Write), + Cmd::Stmt( + Stmt::Insert { + with: _, + or_conflict: _, + tbl_name, + .. + } + | Stmt::Update { + with: _, + or_conflict: _, + tbl_name, + .. + }, + ) => write_if_not_reserved(tbl_name), + + Cmd::Stmt(Stmt::Delete { + with: _, tbl_name, .. + }) => write_if_not_reserved(tbl_name), + Cmd::Stmt(Stmt::DropTable { + if_exists: _, + tbl_name, + }) => write_if_not_reserved(tbl_name), + Cmd::Stmt(Stmt::AlterTable(tbl_name, _)) => write_if_not_reserved(tbl_name), + Cmd::Stmt( + Stmt::DropIndex { .. } + | Stmt::CreateTrigger { + temporary: false, .. + } + | Stmt::CreateIndex { .. }, + ) => Some(Self::Write), + Cmd::Stmt(Stmt::Select { .. 
}) => Some(Self::Read), + Cmd::Stmt(Stmt::Pragma(name, body)) => Self::pragma_kind(name, body.as_ref()), + _ => None, + } + } + + fn pragma_kind(name: &QualifiedName, body: Option<&PragmaBody>) -> Option { + let name = name.name.0.as_str(); + match name { + // always ok to be served by primary or replicas - pure readonly pragmas + "table_list" | "index_list" | "table_info" | "table_xinfo" | "index_xinfo" + | "pragma_list" | "compile_options" | "database_list" | "function_list" + | "module_list" => Some(Self::Read), + // special case for `encoding` - it's effectively readonly for connections + // that already created a database, which is always the case for sqld + "encoding" => Some(Self::Read), + // always ok to be served by primary + "foreign_keys" | "foreign_key_list" | "foreign_key_check" | "collation_list" + | "data_version" | "freelist_count" | "integrity_check" | "legacy_file_format" + | "page_count" | "quick_check" | "stats" => Some(Self::Write), + // ok to be served by primary without args + "analysis_limit" + | "application_id" + | "auto_vacuum" + | "automatic_index" + | "busy_timeout" + | "cache_size" + | "cache_spill" + | "cell_size_check" + | "checkpoint_fullfsync" + | "defer_foreign_keys" + | "fullfsync" + | "hard_heap_limit" + | "journal_mode" + | "journal_size_limit" + | "legacy_alter_table" + | "locking_mode" + | "max_page_count" + | "mmap_size" + | "page_size" + | "query_only" + | "read_uncommitted" + | "recursive_triggers" + | "reverse_unordered_selects" + | "schema_version" + | "secure_delete" + | "soft_heap_limit" + | "synchronous" + | "temp_store" + | "threads" + | "trusted_schema" + | "user_version" + | "wal_autocheckpoint" => { + match body { + Some(_) => None, + None => Some(Self::Write), + } + } + // changes the state of the connection, and can't be allowed rn: + "case_sensitive_like" | "ignore_check_constraints" | "incremental_vacuum" + // TODO: check if optimize can be safely performed + | "optimize" + | "parser_trace" + | "shrink_memory" + | "wal_checkpoint" => None, + _ => { + tracing::debug!("Unknown pragma: {name}"); + None + }, + } + } +} + +/// The state of a transaction for a series of statement +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum State { + /// The txn in an opened state + Txn, + /// The txn in a closed state + Init, + /// This is an invalid state for the state machine + Invalid, +} + +impl State { + pub fn step(&mut self, kind: StmtKind) { + *self = match (*self, kind) { + (State::Txn, StmtKind::TxnBegin) | (State::Init, StmtKind::TxnEnd) => State::Invalid, + (State::Txn, StmtKind::TxnEnd) => State::Init, + (state, StmtKind::Other | StmtKind::Write | StmtKind::Read) => state, + (State::Invalid, _) => State::Invalid, + (State::Init, StmtKind::TxnBegin) => State::Txn, + }; + } + + pub fn reset(&mut self) { + *self = State::Init + } +} + +impl Statement { + pub fn empty() -> Self { + Self { + stmt: String::new(), + // empty statement is arbitrarely made of the read kind so it is not send to a writer + kind: StmtKind::Read, + is_iud: false, + is_insert: false, + } + } + + pub fn parse(s: &str) -> impl Iterator> + '_ { + fn parse_inner( + original: &str, + stmt_count: u64, + has_more_stmts: bool, + c: Cmd, + ) -> crate::Result { + let kind = + StmtKind::kind(&c).ok_or_else(|| crate::error::Error::UnsupportedStatement)?; + + if stmt_count == 1 && !has_more_stmts { + // XXX: Temporary workaround for integration with Atlas + if let Cmd::Stmt(Stmt::CreateTable { .. 
}) = &c { + return Ok(Statement { + stmt: original.to_string(), + kind, + is_iud: false, + is_insert: false, + }); + } + } + + let is_iud = matches!( + c, + Cmd::Stmt(Stmt::Insert { .. } | Stmt::Update { .. } | Stmt::Delete { .. }) + ); + let is_insert = matches!(c, Cmd::Stmt(Stmt::Insert { .. })); + + Ok(Statement { + stmt: c.to_string(), + kind, + is_iud, + is_insert, + }) + } + // The parser needs to be boxed because it's large, and you don't want it on the stack. + // There's upstream work to make it smaller, but in the meantime the parser should remain + // on the heap: + // - https://github.com/gwenn/lemon-rs/issues/8 + // - https://github.com/gwenn/lemon-rs/pull/19 + let mut parser = Box::new(Parser::new(s.as_bytes()).peekable()); + let mut stmt_count = 0; + std::iter::from_fn(move || { + stmt_count += 1; + match parser.next() { + Ok(Some(cmd)) => Some(parse_inner( + s, + stmt_count, + parser.peek().map_or(true, |o| o.is_some()), + cmd, + )), + Ok(None) => None, + Err(sqlite3_parser::lexer::sql::Error::ParserError( + ParserError::SyntaxError { + token_type: _, + found: Some(found), + }, + Some((line, col)), + )) => Some(Err(crate::error::Error::SyntaxError { line, col, found })), + Err(e) => Some(Err(e.into())), + } + }) + } + + pub fn is_read_only(&self) -> bool { + matches!( + self.kind, + StmtKind::Read | StmtKind::TxnEnd | StmtKind::TxnBegin + ) + } +} + +/// Given a an initial state and an array of queries, attempts to predict what the final state will +/// be +pub fn predict_final_state<'a>( + mut state: State, + stmts: impl Iterator, +) -> State { + for stmt in stmts { + state.step(stmt.kind); + } + state +} diff --git a/libsqlx/src/connection.rs b/libsqlx/src/connection.rs new file mode 100644 index 00000000..9c3fcdb0 --- /dev/null +++ b/libsqlx/src/connection.rs @@ -0,0 +1,51 @@ +use either::Either; + +use crate::program::Program; +use crate::result_builder::ResultBuilder; + +#[derive(Debug, Clone)] +pub struct DescribeResponse { + pub params: Vec, + pub cols: Vec, + pub is_explain: bool, + pub is_readonly: bool, +} + +#[derive(Debug, Clone)] +pub struct DescribeParam { + pub name: Option, +} + +#[derive(Debug, Clone)] +pub struct DescribeCol { + pub name: String, + pub decltype: Option, +} + +pub trait Connection { + /// Executes a query program + fn execute_program(&mut self, pgm: &Program, result_builder: Box); + + /// Parse the SQL statement and return information about it. + fn describe(&self, sql: String) -> crate::Result; +} + +impl Connection for Either +where + T: Connection, + X: Connection, +{ + fn execute_program(&mut self, pgm: &Program, result_builder: Box) { + match self { + Either::Left(c) => c.execute_program(pgm, result_builder), + Either::Right(c) => c.execute_program(pgm, result_builder), + } + } + + fn describe(&self, sql: String) -> crate::Result { + match self { + Either::Left(c) => c.describe(sql), + Either::Right(c) => c.describe(sql), + } + } +} diff --git a/libsqlx/src/database/frame.rs b/libsqlx/src/database/frame.rs new file mode 100644 index 00000000..d3dd4a44 --- /dev/null +++ b/libsqlx/src/database/frame.rs @@ -0,0 +1,113 @@ +use std::borrow::Cow; +use std::fmt; +use std::mem::{size_of, transmute}; +use std::ops::Deref; + +use bytemuck::{bytes_of, pod_read_unaligned, try_from_bytes, Pod, Zeroable}; +use bytes::{Bytes, BytesMut}; + +use super::libsql::replication_log::WAL_PAGE_SIZE; +use super::FrameNo; + +/// The file header for the WAL log. All fields are represented in little-endian ordering. 
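+/// Assuming `FrameNo` is a 64-bit integer, the `repr(C)` layout below places
+/// `frame_no` at bytes 0..8, `page_no` at bytes 8..12 and `size_after` at bytes 12..16.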
+/// See `encode` and `decode` for actual layout.
+// repr C for stable sizing
+#[repr(C)]
+#[derive(Debug, Clone, Copy, Zeroable, Pod)]
+pub struct FrameHeader {
+    /// Incremental frame number
+    pub frame_no: FrameNo,
+    /// page number
+    pub page_no: u32,
+    /// Size of the database (in pages) after committing the transaction. This is passed from
+    /// sqlite, and serves as the commit transaction boundary
+    pub size_after: u32,
+}
+
+#[derive(Clone)]
+/// The owned version of a replication frame.
+/// Cloning this is cheap.
+pub struct Frame {
+    data: Bytes,
+}
+
+impl fmt::Debug for Frame {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Frame")
+            .field("header", &self.header())
+            .field("data", &"[..]")
+            .finish()
+    }
+}
+
+impl Frame {
+    /// size of a single frame
+    pub const SIZE: usize = size_of::<FrameHeader>() + WAL_PAGE_SIZE as usize;
+
+    pub fn from_parts(header: &FrameHeader, data: &[u8]) -> Self {
+        assert_eq!(data.len(), WAL_PAGE_SIZE as usize);
+        let mut buf = BytesMut::with_capacity(Self::SIZE);
+        buf.extend_from_slice(bytes_of(header));
+        buf.extend_from_slice(data);
+
+        Self { data: buf.freeze() }
+    }
+
+    pub fn try_from_bytes(data: Bytes) -> crate::Result<Self> {
+        if data.len() != Self::SIZE {
+            return Err(crate::error::Error::InvalidFrame);
+        }
+
+        Ok(Self { data })
+    }
+
+    pub fn bytes(&self) -> Bytes {
+        self.data.clone()
+    }
+
+    pub fn page_bytes(&self) -> Bytes {
+        let mut data = self.data.clone();
+        let _ = data.split_to(size_of::<FrameHeader>());
+        debug_assert_eq!(data.len(), WAL_PAGE_SIZE as usize);
+        data
+    }
+}
+
+/// The borrowed version of Frame
+#[repr(transparent)]
+pub struct FrameBorrowed {
+    data: [u8],
+}
+
+impl FrameBorrowed {
+    pub fn header(&self) -> Cow<FrameHeader> {
+        let data = &self.data[..size_of::<FrameHeader>()];
+        try_from_bytes(data)
+            .map(Cow::Borrowed)
+            .unwrap_or_else(|_| Cow::Owned(pod_read_unaligned(data)))
+    }
+
+    /// Returns the bytes for this frame. Includes the header bytes.
+    pub fn as_slice(&self) -> &[u8] {
+        &self.data
+    }
+
+    pub fn from_bytes(data: &[u8]) -> &Self {
+        assert_eq!(data.len(), Frame::SIZE);
+        // SAFETY: &FrameBorrowed is equivalent to &[u8]
+        unsafe { transmute(data) }
+    }
+
+    /// returns this frame's page data.
+    pub fn page(&self) -> &[u8] {
+        &self.data[size_of::<FrameHeader>()..]
+    }
+}
+
+impl Deref for Frame {
+    type Target = FrameBorrowed;
+
+    fn deref(&self) -> &Self::Target {
+        FrameBorrowed::from_bytes(&self.data)
+    }
+}
diff --git a/libsqlx/src/database/libsql/connection.rs b/libsqlx/src/database/libsql/connection.rs
new file mode 100644
index 00000000..9579dc94
--- /dev/null
+++ b/libsqlx/src/database/libsql/connection.rs
@@ -0,0 +1,323 @@
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use rusqlite::{OpenFlags, Statement, StatementStatus};
+use sqld_libsql_bindings::wal_hook::{WalHook, WalMethodsHook};
+
+use crate::connection::{Connection, DescribeCol, DescribeParam, DescribeResponse};
+use crate::error::Error;
+use crate::program::{Cond, Program, Step};
+use crate::query::Query;
+use crate::result_builder::{QueryBuilderConfig, ResultBuilder};
+use crate::seal::Seal;
+use crate::Result;
+
+use super::{LibsqlDbType, RowStatsHandler};
+
+pub struct RowStats {
+    pub rows_read: u64,
+    pub rows_written: u64,
+}
+
+impl From<&Statement<'_>> for RowStats {
+    fn from(stmt: &Statement) -> Self {
+        Self {
+            rows_read: stmt.get_status(StatementStatus::RowsRead) as u64,
+            rows_written: stmt.get_status(StatementStatus::RowsWritten) as u64,
+        }
+    }
+}
+
+pub fn open_db<'a, W>(
+    path: &Path,
+    wal_methods: &'static WalMethodsHook<W>,
+    hook_ctx: &'a mut W::Context,
+    flags: Option<OpenFlags>,
+) -> std::result::Result<sqld_libsql_bindings::Connection<'a>, rusqlite::Error>
+where
+    W: WalHook,
+{
+    let flags = flags.unwrap_or(
+        OpenFlags::SQLITE_OPEN_READ_WRITE
+            | OpenFlags::SQLITE_OPEN_CREATE
+            | OpenFlags::SQLITE_OPEN_URI
+            | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+    );
+
+    sqld_libsql_bindings::Connection::open(path, flags, wal_methods, hook_ctx)
+}
+
+pub struct LibsqlConnection<T: LibsqlDbType> {
+    conn: sqld_libsql_bindings::Connection<'static>, // holds a ref to _context, must be dropped first.
+    row_stats_handler: Option<Arc<dyn RowStatsHandler>>,
+    builder_config: QueryBuilderConfig,
+    /// `true` if the connection is in an open transaction state
+    is_txn: bool,
+    on_txn_status_change_cb: Option<Box<dyn Fn(bool) + Send + Sync>>,
+    _context: Seal<Box<<T::ConnectionHook as WalHook>::Context>>,
+}
+
+impl<T: LibsqlDbType> LibsqlConnection<T> {
+    pub(crate) fn new(
+        path: &Path,
+        extensions: Option<Arc<[PathBuf]>>,
+        wal_methods: &'static WalMethodsHook<T::ConnectionHook>,
+        hook_ctx: <T::ConnectionHook as WalHook>::Context,
+        row_stats_callback: Option<Arc<dyn RowStatsHandler>>,
+        builder_config: QueryBuilderConfig,
+        on_txn_status_change_cb: Option<Box<dyn Fn(bool) + Send + Sync>>,
+    ) -> Result<LibsqlConnection<T>> {
+        let mut ctx = Box::new(hook_ctx);
+        let this = LibsqlConnection {
+            conn: open_db(
+                path,
+                wal_methods,
+                unsafe { &mut *(ctx.as_mut() as *mut _) },
+                None,
+            )?,
+            on_txn_status_change_cb,
+            builder_config,
+            row_stats_handler: row_stats_callback,
+            is_txn: false,
+            _context: Seal::new(ctx),
+        };
+
+        if let Some(extensions) = extensions {
+            for ext in extensions.iter() {
+                unsafe {
+                    let _guard = rusqlite::LoadExtensionGuard::new(&this.conn).unwrap();
+                    if let Err(e) = this.conn.load_extension(ext, None) {
+                        tracing::error!("failed to load extension: {}", ext.display());
+                        Err(e)?;
+                    }
+                    tracing::debug!("Loaded extension {}", ext.display());
+                }
+            }
+        }
+
+        Ok(this)
+    }
+
+    #[cfg(test)]
+    pub fn inner_connection(&self) -> &sqld_libsql_bindings::Connection<'static> {
+        &self.conn
+    }
+
+    fn run(&mut self, pgm: &Program, builder: &mut dyn ResultBuilder) -> Result<()> {
+        let mut results = Vec::with_capacity(pgm.steps.len());
+
+        builder.init(&self.builder_config)?;
+
+        for step in pgm.steps() {
+            let res = self.execute_step(step, &results, builder)?;
+            results.push(res);
+        }
+
+        let is_txn = !self.conn.is_autocommit();
+        if !builder.finnalize(is_txn, None)? && is_txn {
+            let _ = self.conn.execute("ROLLBACK", ());
+        }
+
+        Ok(())
+    }
+
+    fn execute_step(
+        &mut self,
+        step: &Step,
+        results: &[bool],
+        builder: &mut dyn ResultBuilder,
+    ) -> Result<bool> {
+        builder.begin_step()?;
+        let mut enabled = match step.cond.as_ref() {
+            Some(cond) => match eval_cond(cond, results) {
+                Ok(enabled) => enabled,
+                Err(e) => {
+                    builder.step_error(e).unwrap();
+                    false
+                }
+            },
+            None => true,
+        };
+
+        let (affected_row_count, last_insert_rowid) = if enabled {
+            match self.execute_query(&step.query, builder) {
+                // a builder error interrupts the execution of the query; we should exit immediately.
+                Err(e @ Error::BuilderError(_)) => return Err(e),
+                Err(e) => {
+                    builder.step_error(e)?;
+                    enabled = false;
+                    (0, None)
+                }
+                Ok(x) => x,
+            }
+        } else {
+            (0, None)
+        };
+
+        builder.finish_step(affected_row_count, last_insert_rowid)?;
+
+        let is_txn = !self.conn.is_autocommit();
+        if self.is_txn != is_txn {
+            // txn status changed
+            if let Some(ref cb) = self.on_txn_status_change_cb {
+                cb(is_txn)
+            }
+        }
+        self.is_txn = is_txn;
+
+        Ok(enabled)
+    }
+
+    fn execute_query(
+        &self,
+        query: &Query,
+        builder: &mut dyn ResultBuilder,
+    ) -> Result<(u64, Option<i64>)> {
+        tracing::trace!("executing query: {}", query.stmt.stmt);
+
+        let mut stmt = self.conn.prepare(&query.stmt.stmt)?;
+
+        let cols = stmt.columns();
+        let cols_count = cols.len();
+        builder.cols_description(&mut cols.iter().map(Into::into))?;
+        drop(cols);
+
+        query
+            .params
+            .bind(&mut stmt)
+            .map_err(|e| Error::LibSqlInvalidQueryParams(e.to_string()))?;
+
+        let mut qresult = stmt.raw_query();
+        builder.begin_rows()?;
+        while let Some(row) = qresult.next()? {
+            builder.begin_row()?;
+            for i in 0..cols_count {
+                let val = row.get_ref(i)?;
+                builder.add_row_value(val)?;
+            }
+            builder.finish_row()?;
+        }
+
+        builder.finish_rows()?;
+
+        // sqlite3_changes() is only modified for INSERT, UPDATE or DELETE; it is not reset for SELECT,
+        // but we want to return 0 in that case.
+        let affected_row_count = match query.stmt.is_iud {
+            true => self.conn.changes(),
+            false => 0,
+        };
+
+        // sqlite3_last_insert_rowid() only makes sense for INSERTs into a rowid table. we can't detect
+        // a rowid table, but at least we can detect an INSERT
+        let last_insert_rowid = match query.stmt.is_insert {
+            true => Some(self.conn.last_insert_rowid()),
+            false => None,
+        };
+
+        drop(qresult);
+
+        if let Some(ref handler) = self.row_stats_handler {
+            handler.handle_row_stats(RowStats::from(&stmt))
+        }
+
+        Ok((affected_row_count, last_insert_rowid))
+    }
+
+    pub fn set_on_txn_status_change_cb(&mut self, cb: impl Fn(bool) + Send + Sync + 'static) {
+        self.on_txn_status_change_cb = Some(Box::new(cb));
+    }
+}
+
+fn eval_cond(cond: &Cond, results: &[bool]) -> Result<bool> {
+    let get_step_res = |step: usize| -> Result<bool> {
+        let res = results.get(step).ok_or(Error::InvalidBatchStep(step))?;
+
+        Ok(*res)
+    };
+
+    Ok(match cond {
+        Cond::Ok { step } => get_step_res(*step)?,
+        Cond::Err { step } => !get_step_res(*step)?,
+        Cond::Not { cond } => !eval_cond(cond, results)?,
+        Cond::And { conds } => conds
+            .iter()
+            .try_fold(true, |x, cond| eval_cond(cond, results).map(|y| x & y))?,
+        Cond::Or { conds } => conds
+            .iter()
+            .try_fold(false, |x, cond| eval_cond(cond, results).map(|y| x | y))?,
+    })
+}
+
+impl<T: LibsqlDbType> Connection for LibsqlConnection<T> {
+    fn execute_program(&mut self, pgm: &Program, mut builder: Box<dyn ResultBuilder>) {
+        if let Err(e) = self.run(pgm, &mut *builder) {
+            builder.finnalize_error(e.to_string());
+        }
+    }
+
+    fn describe(&self, sql: String) -> crate::Result<DescribeResponse> {
+        let stmt = self.conn.prepare(&sql)?;
+
+        let params = (1..=stmt.parameter_count())
+            .map(|param_i| {
+                let name = stmt.parameter_name(param_i).map(|n| n.into());
+                DescribeParam { name }
+            })
+            .collect();
+
+        let cols = stmt
+            .columns()
+            .into_iter()
+            .map(|col| {
+                let name = col.name().into();
+                let decltype = col.decl_type().map(|t| t.into());
+                DescribeCol { name, decltype }
+            })
+            .collect();
+
+        let is_explain = stmt.is_explain() != 0;
+        let is_readonly = stmt.readonly();
+
+        Ok(DescribeResponse {
+            params,
+            cols,
+            is_explain,
+            is_readonly,
+        })
+    }
+}
+
+#[cfg(test)]
+mod test {
+    // use itertools::Itertools;
+    //
+    // use crate::result_builder::{test::test_driver, IgnoreResult};
+    //
+    // use super::*;
+
+    // fn setup_test_conn(ctx: &mut ()) -> Conn {
+    //     let mut conn = Conn {
+    //         timeout_deadline: None,
+    //         conn: sqld_libsql_bindings::Connection::test(ctx),
+    //         timed_out: false,
+    //         builder_config: QueryBuilderConfig::default(),
+    //         row_stats_callback: None,
+    //     };
+    //
+    //     let stmts = std::iter::once("create table test (x)")
+    //         .chain(std::iter::repeat("insert into test values ('hello world')").take(100))
+    //         .collect_vec();
+    //     conn.run(Program::seq(&stmts), IgnoreResult).unwrap();
+    //
+    //     conn
+    // }
+    //
+    // #[test]
+    // fn test_libsql_conn_builder_driver() {
+    //     test_driver(1000, |b| {
+    //         let ctx = &mut ();
+    //         let mut conn = setup_test_conn(ctx);
+    //         conn.run(Program::seq(&["select * from test"]), b)
+    //     })
+    // }
}
diff --git a/libsqlx/src/database/libsql/injector/headers.rs b/libsqlx/src/database/libsql/injector/headers.rs
new file mode 100644
index 00000000..0973d65b
--- /dev/null
+++ b/libsqlx/src/database/libsql/injector/headers.rs
@@ -0,0 +1,47 @@
+use std::marker::PhantomData;
+
+use rusqlite::ffi::PgHdr;
+
+pub struct Headers<'a> {
+    ptr: *mut PgHdr,
+    _pth: PhantomData<&'a ()>,
+}
+
+impl<'a> Headers<'a> {
+    // safety: ptr is guaranteed to be valid for 'a
+    pub(crate) unsafe fn new(ptr: *mut PgHdr) -> Self {
+        Self {
+            ptr,
+            _pth: PhantomData,
+        }
+    }
+
+    pub(crate) fn as_ptr(&mut self) -> *mut PgHdr {
+        self.ptr
+    }
+
+    pub(crate) fn all_applied(&self) -> bool {
+        let mut current = self.ptr;
+        while !current.is_null() {
+            unsafe {
+                // WAL appended
+                if (*current).flags & 0x040 == 0 {
+                    return false;
+                }
+                current = (*current).pDirty;
+            }
+        }
+
+        true
+    }
+}
+
+impl Drop for Headers<'_> {
+    fn drop(&mut self) {
+        let mut current = self.ptr;
+        while !current.is_null() {
+            let h: Box<PgHdr> = unsafe { Box::from_raw(current as _) };
+            current = h.pDirty;
+        }
+    }
+}
diff --git a/libsqlx/src/database/libsql/injector/hook.rs b/libsqlx/src/database/libsql/injector/hook.rs
new file mode 100644
index 00000000..57c34fbc
--- /dev/null
+++ b/libsqlx/src/database/libsql/injector/hook.rs
@@ -0,0 +1,169 @@
+use std::ffi::{c_int, CStr};
+
+use rusqlite::ffi::{libsql_wal as Wal, PgHdr};
+use sqld_libsql_bindings::ffi::types::XWalFrameFn;
+use sqld_libsql_bindings::init_static_wal_method;
+use sqld_libsql_bindings::wal_hook::WalHook;
+
+use crate::database::frame::FrameBorrowed;
+use crate::database::libsql::replication_log::WAL_PAGE_SIZE;
+
+use super::headers::Headers;
+use super::{FrameBuffer, OnCommitCb};
+
+// Those are custom error codes returned by the replicator hook.
+pub const LIBSQL_INJECT_FATAL: c_int = 200;
+/// Injection succeeded, left in an open txn state
+pub const LIBSQL_INJECT_OK_TXN: c_int = 201;
+/// Injection succeeded
+pub const LIBSQL_INJECT_OK: c_int = 202;
+
+pub struct InjectorHookCtx {
+    /// shared frame buffer
+    buffer: FrameBuffer,
+    /// currently in a txn
+    is_txn: bool,
+    on_commit_cb: OnCommitCb,
+}
+
+impl InjectorHookCtx {
+    pub fn new(buffer: FrameBuffer, commit_handler: OnCommitCb) -> Self {
+        Self {
+            buffer,
+            is_txn: false,
+            on_commit_cb: commit_handler,
+        }
+    }
+
+    fn inject_pages(
+        &mut self,
+        sync_flags: i32,
+        orig: XWalFrameFn,
+        wal: *mut Wal,
+    ) -> anyhow::Result<()> {
+        self.is_txn = true;
+        let buffer = self.buffer.lock();
+        let (mut headers, last_frame_no, size_after) =
+            make_page_header(buffer.iter().map(|f| &**f));
+
+        let ret = unsafe {
+            orig(
+                wal,
+                WAL_PAGE_SIZE,
+                headers.as_ptr(),
+                size_after,
+                (size_after != 0) as _,
+                sync_flags,
+            )
+        };
+
+        if ret == 0 {
+            debug_assert!(headers.all_applied());
+            drop(headers);
+            if size_after != 0 {
+                (self.on_commit_cb)(last_frame_no);
+                self.is_txn = false;
+            }
+            tracing::trace!("applied frame batch");
+
+            Ok(())
+        } else {
+            anyhow::bail!("failed to apply pages");
+        }
+    }
+}
+
+/// Turn a list of `WalFrame` into a list of PgHdr.
+/// The caller has the responsibility to free the returned headers.
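+/// Dropping the returned `Headers` frees the whole `PgHdr` list (see the `Drop`
+/// impl in `headers.rs`), so it must be kept alive until the call to xFrames returns.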
+/// return (headers, last_frame_no, size_after)
+fn make_page_header<'a>(
+    frames: impl Iterator<Item = &'a FrameBorrowed>,
+) -> (Headers<'a>, u64, u32) {
+    let mut first_pg: *mut PgHdr = std::ptr::null_mut();
+    let mut current_pg;
+    let mut last_frame_no = 0;
+    let mut size_after = 0;
+
+    let mut headers_count = 0;
+    let mut prev_pg: *mut PgHdr = std::ptr::null_mut();
+    for frame in frames {
+        if frame.header().frame_no > last_frame_no {
+            last_frame_no = frame.header().frame_no;
+            size_after = frame.header().size_after;
+        }
+
+        let page = PgHdr {
+            pPage: std::ptr::null_mut(),
+            pData: frame.page().as_ptr() as _,
+            pExtra: std::ptr::null_mut(),
+            pCache: std::ptr::null_mut(),
+            pDirty: std::ptr::null_mut(),
+            pPager: std::ptr::null_mut(),
+            pgno: frame.header().page_no,
+            pageHash: 0,
+            flags: 0x02, // PGHDR_DIRTY - it works without the flag, but why risk it
+            nRef: 0,
+            pDirtyNext: std::ptr::null_mut(),
+            pDirtyPrev: std::ptr::null_mut(),
+        };
+        headers_count += 1;
+        current_pg = Box::into_raw(Box::new(page));
+        if first_pg.is_null() {
+            first_pg = current_pg;
+        }
+        if !prev_pg.is_null() {
+            unsafe {
+                (*prev_pg).pDirty = current_pg;
+            }
+        }
+        prev_pg = current_pg;
+    }
+
+    tracing::trace!("built {headers_count} page headers");
+
+    let headers = unsafe { Headers::new(first_pg) };
+    (headers, last_frame_no, size_after)
+}
+
+init_static_wal_method!(INJECTOR_METHODS, InjectorHook);
+
+/// The injector hook hijacks a call to xframes, and replaces the content of the call with its own
+/// frames.
+/// The caller must first call `set_frames`, passing the frames to be injected, then trigger a call
+/// to xFrames from the libsql connection (see dummy write in `injector`), and can then collect the
+/// result on the injection with `take_result`
+pub enum InjectorHook {}
+
+unsafe impl WalHook for InjectorHook {
+    type Context = InjectorHookCtx;
+
+    fn on_frames(
+        wal: &mut Wal,
+        _page_size: c_int,
+        _page_headers: *mut PgHdr,
+        _size_after: u32,
+        _is_commit: c_int,
+        sync_flags: c_int,
+        orig: XWalFrameFn,
+    ) -> c_int {
+        let wal_ptr = wal as *mut _;
+        let ctx = Self::wal_extract_ctx(wal);
+        let ret = ctx.inject_pages(sync_flags, orig, wal_ptr);
+        if let Err(e) = ret {
+            tracing::error!("fatal replication error: {e}");
+            return LIBSQL_INJECT_FATAL;
+        }
+
+        ctx.buffer.lock().clear();
+
+        if !ctx.is_txn {
+            LIBSQL_INJECT_OK
+        } else {
+            LIBSQL_INJECT_OK_TXN
+        }
+    }
+
+    fn name() -> &'static CStr {
+        CStr::from_bytes_with_nul(b"frame_injector_hook\0").unwrap()
+    }
+}
diff --git a/libsqlx/src/database/libsql/injector/mod.rs b/libsqlx/src/database/libsql/injector/mod.rs
new file mode 100644
index 00000000..5318b997
--- /dev/null
+++ b/libsqlx/src/database/libsql/injector/mod.rs
@@ -0,0 +1,228 @@
+use std::collections::VecDeque;
+use std::path::Path;
+use std::sync::Arc;
+
+use parking_lot::Mutex;
+use rusqlite::OpenFlags;
+
+use crate::database::frame::Frame;
+use crate::database::libsql::injector::hook::{
+    INJECTOR_METHODS, LIBSQL_INJECT_FATAL, LIBSQL_INJECT_OK, LIBSQL_INJECT_OK_TXN,
+};
+use crate::database::{FrameNo, InjectError};
+use crate::seal::Seal;
+
+use hook::InjectorHookCtx;
+
+mod headers;
+mod hook;
+
+pub type FrameBuffer = Arc<Mutex<VecDeque<Frame>>>;
+pub type OnCommitCb = Arc<dyn Fn(FrameNo) -> bool + Send + Sync + 'static>;
+
+pub struct Injector {
+    /// The injector is in a transaction state
+    is_txn: bool,
+    /// Buffer for holding current transaction frames
+    buffer: FrameBuffer,
+    /// Maximum capacity of the frame buffer
+    capacity: usize,
+    /// Injector connection
+    // connection must be dropped before the hook context
+    connection: sqld_libsql_bindings::Connection<'static>,
+    /// Pointer to the hook
+    _hook_ctx: Seal<Box<InjectorHookCtx>>,
+}
+
+impl crate::database::Injector for Injector {
+    fn inject(&mut self, frame: Frame) -> Result<Option<FrameNo>, InjectError> {
+        let res = self.inject_frame(frame).unwrap();
+        Ok(res)
+    }
+
+    fn clear(&mut self) {
+        self.buffer.lock().clear();
+    }
+}
+
+/// Methods from this trait are called before and after performing a frame injection.
+/// This trait is used to record the last committed frame_no to the log.
+/// The implementer can persist the pre and post commit frame no, and compare them in the event of
+/// a crash; if the pre and post commit frame_no don't match, then the log may be corrupted.
+
+impl Injector {
+    pub fn new(
+        path: &Path,
+        on_commit_cb: OnCommitCb,
+        buffer_capacity: usize,
+    ) -> crate::Result<Self> {
+        let buffer = FrameBuffer::default();
+        let ctx = InjectorHookCtx::new(buffer.clone(), on_commit_cb);
+        let mut ctx = Box::new(ctx);
+        let connection = sqld_libsql_bindings::Connection::open(
+            path,
+            OpenFlags::SQLITE_OPEN_READ_WRITE
+                | OpenFlags::SQLITE_OPEN_CREATE
+                | OpenFlags::SQLITE_OPEN_URI
+                | OpenFlags::SQLITE_OPEN_NO_MUTEX,
+            &INJECTOR_METHODS,
+            // safety: hook is dropped after connection
+            unsafe { &mut *(ctx.as_mut() as *mut _) },
+        )?;
+
+        Ok(Self {
+            is_txn: false,
+            buffer,
+            capacity: buffer_capacity,
+            connection,
+            _hook_ctx: Seal::new(ctx),
+        })
+    }
+
+    /// Inject one frame into the log. If this was a commit frame, returns Ok(Some(FrameNo)).
+    pub(crate) fn inject_frame(&mut self, frame: Frame) -> crate::Result<Option<FrameNo>> {
+        let frame_close_txn = frame.header().size_after != 0;
+        self.buffer.lock().push_back(frame);
+        if frame_close_txn || self.buffer.lock().len() >= self.capacity {
+            if !self.is_txn {
+                self.begin_txn()?;
+            }
+            return self.flush();
+        }
+
+        Ok(None)
+    }
+
+    /// Flush the buffer to the libsql WAL.
+    /// Trigger a dummy write, and flush the cache to trigger a call to xFrame. The buffer's frames
+    /// are then injected into the wal.
+    fn flush(&mut self) -> crate::Result<Option<FrameNo>> {
+        let last_frame_no = match self.buffer.lock().back() {
+            Some(f) => f.header().frame_no,
+            None => {
+                tracing::trace!("nothing to inject");
+                return Ok(None);
+            }
+        };
+        self.connection
+            .execute("INSERT INTO __DUMMY__ VALUES (42)", ())?;
+        // force call to xframe
+        match self.connection.cache_flush() {
+            Ok(_) => panic!("replication hook was not called"),
+            Err(e) => {
+                if let Some(e) = e.sqlite_error() {
+                    if e.extended_code == LIBSQL_INJECT_OK {
+                        // refresh schema
+                        self.connection
+                            .pragma_update(None, "writable_schema", "reset")?;
+                        self.commit();
+                        self.is_txn = false;
+                        assert!(self.buffer.lock().is_empty());
+                        return Ok(Some(last_frame_no));
+                    } else if e.extended_code == LIBSQL_INJECT_OK_TXN {
+                        self.is_txn = true;
+                        assert!(self.buffer.lock().is_empty());
+                        return Ok(None);
+                    } else if e.extended_code == LIBSQL_INJECT_FATAL {
+                        todo!("handle fatal error");
+                    }
+                }
+
+                todo!("handle fatal error");
+            }
+        }
+    }
+
+    fn commit(&mut self) {
+        // TODO: error?
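+        // Assumption: the COMMIT below ends the implicit transaction opened in
+        // `begin_txn` once a batch of frames has been applied; its result is
+        // currently discarded, so a failed COMMIT only surfaces on the next
+        // injection.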
+        let _ = self.connection.execute("COMMIT", ());
+    }
+
+    fn begin_txn(&mut self) -> crate::Result<()> {
+        self.connection.execute("BEGIN IMMEDIATE", ())?;
+        self.connection
+            .execute("CREATE TABLE __DUMMY__ (__dummy__)", ())?;
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::path::PathBuf;
+    use std::sync::Arc;
+
+    use crate::database::libsql::injector::Injector;
+    use crate::database::libsql::replication_log::logger::LogFile;
+
+    #[test]
+    fn test_simple_inject_frames() {
+        let log = LogFile::new(PathBuf::from("assets/test/simple_wallog")).unwrap();
+        let temp = tempfile::tempdir().unwrap();
+
+        let mut injector = Injector::new(temp.path(), Arc::new(|_| true), 10).unwrap();
+        for frame in log.frames_iter().unwrap() {
+            let frame = frame.unwrap();
+            injector.inject_frame(frame).unwrap();
+        }
+
+        let conn = rusqlite::Connection::open(temp.path().join("data")).unwrap();
+
+        conn.query_row("SELECT COUNT(*) FROM test", (), |row| {
+            assert_eq!(row.get::<_, usize>(0).unwrap(), 5);
+            Ok(())
+        })
+        .unwrap();
+    }
+
+    #[test]
+    fn test_inject_frames_split_txn() {
+        let log = LogFile::new(PathBuf::from("assets/test/simple_wallog")).unwrap();
+        let temp = tempfile::tempdir().unwrap();
+
+        // inject one frame at a time
+        let mut injector = Injector::new(temp.path(), Arc::new(|_| true), 1).unwrap();
+        for frame in log.frames_iter().unwrap() {
+            let frame = frame.unwrap();
+            injector.inject_frame(frame).unwrap();
+        }
+
+        let conn = rusqlite::Connection::open(temp.path().join("data")).unwrap();
+
+        conn.query_row("SELECT COUNT(*) FROM test", (), |row| {
+            assert_eq!(row.get::<_, usize>(0).unwrap(), 5);
+            Ok(())
+        })
+        .unwrap();
+    }
+
+    #[test]
+    fn test_inject_partial_txn_isolated() {
+        let log = LogFile::new(PathBuf::from("assets/test/simple_wallog")).unwrap();
+        let temp = tempfile::tempdir().unwrap();
+
+        // inject one frame at a time
+        let mut injector = Injector::new(temp.path(), Arc::new(|_| true), 10).unwrap();
+        let mut iter = log.frames_iter().unwrap();
+
+        assert!(injector
+            .inject_frame(iter.next().unwrap().unwrap())
+            .unwrap()
+            .is_none());
+        let conn = rusqlite::Connection::open(temp.path().join("data")).unwrap();
+        assert!(conn
+            .query_row("SELECT COUNT(*) FROM test", (), |_| Ok(()))
+            .is_err());
+
+        while injector
+            .inject_frame(iter.next().unwrap().unwrap())
+            .unwrap()
+            .is_none()
+        {}
+
+        // reset schema
+        conn.pragma_update(None, "writable_schema", "reset")
+            .unwrap();
+        conn.query_row("SELECT COUNT(*) FROM test", (), |_| Ok(()))
+            .unwrap();
+    }
+}
diff --git a/libsqlx/src/database/libsql/mod.rs b/libsqlx/src/database/libsql/mod.rs
new file mode 100644
index 00000000..3a1191f1
--- /dev/null
+++ b/libsqlx/src/database/libsql/mod.rs
@@ -0,0 +1,393 @@
+use std::path::PathBuf;
+use std::sync::Arc;
+
+use sqld_libsql_bindings::wal_hook::{TransparentMethods, WalHook, TRANSPARENT_METHODS};
+use sqld_libsql_bindings::WalMethodsHook;
+
+use crate::database::{Database, InjectableDatabase};
+use crate::error::Error;
+use crate::result_builder::QueryBuilderConfig;
+
+use connection::RowStats;
+use injector::Injector;
+use replication_log::logger::{
+    ReplicationLogger, ReplicationLoggerHook, ReplicationLoggerHookCtx, REPLICATION_METHODS,
+};
+
+use self::injector::OnCommitCb;
+use self::replication_log::logger::FrameNotifierCb;
+
+pub use connection::LibsqlConnection;
+pub use replication_log::logger::{LogCompactor, LogFile};
+
+mod connection;
+mod injector;
+pub(crate) mod replication_log;
+
+pub struct PrimaryType {
+    logger: Arc<ReplicationLogger>,
+}
+
+impl LibsqlDbType for PrimaryType {
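+    // A primary installs the replication-logger WAL hook below, so every
+    // committed write transaction is also appended to the replication log.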
+    type ConnectionHook = ReplicationLoggerHook;
+
+    fn hook() -> &'static WalMethodsHook<Self::ConnectionHook> {
+        &REPLICATION_METHODS
+    }
+
+    fn hook_context(&self) -> <Self::ConnectionHook as WalHook>::Context {
+        ReplicationLoggerHookCtx {
+            buffer: Vec::new(),
+            logger: self.logger.clone(),
+        }
+    }
+}
+
+pub struct ReplicaType {
+    on_commit_cb: OnCommitCb,
+    injector_buffer_capacity: usize,
+}
+
+impl LibsqlDbType for ReplicaType {
+    type ConnectionHook = TransparentMethods;
+
+    fn hook() -> &'static WalMethodsHook<Self::ConnectionHook> {
+        &TRANSPARENT_METHODS
+    }
+
+    fn hook_context(&self) -> <Self::ConnectionHook as WalHook>::Context {}
+}
+
+pub trait LibsqlDbType {
+    type ConnectionHook: WalHook;
+
+    /// Return a static reference to the instantiated WAL hook
+    fn hook() -> &'static WalMethodsHook<Self::ConnectionHook>;
+    /// returns a new context for the wal hook
+    fn hook_context(&self) -> <Self::ConnectionHook as WalHook>::Context;
+}
+
+/// A generic wrapper around a libsql database.
+/// `LibsqlDatabase` can be specialized into either a `ReplicaType` or a `PrimaryType`.
+/// In `PrimaryType` mode, the LibsqlDatabase maintains a replication log that can be replicated to
+/// a `LibsqlDatabase` in replica mode, thanks to the methods provided by `InjectableDatabase`
+/// implemented for `LibsqlDatabase`.
+pub struct LibsqlDatabase<T> {
+    /// The connection factory for this database
+    db_path: PathBuf,
+    extensions: Option<Arc<[PathBuf]>>,
+    response_size_limit: u64,
+    row_stats_callback: Option<Arc<dyn RowStatsHandler>>,
+    /// type-specific data for the database
+    ty: T,
+}
+
+/// Handler trait for gathering row stats when executing queries.
+pub trait RowStatsHandler: Send + Sync {
+    fn handle_row_stats(&self, stats: RowStats);
+}
+
+impl<F> RowStatsHandler for F
+where
+    F: Fn(RowStats) + Send + Sync,
+{
+    fn handle_row_stats(&self, stats: RowStats) {
+        (self)(stats)
+    }
+}
+
+impl LibsqlDatabase<ReplicaType> {
+    /// Creates a new replica type database
+    pub fn new_replica(
+        db_path: PathBuf,
+        injector_buffer_capacity: usize,
+        on_commit_cb: OnCommitCb,
+    ) -> crate::Result<Self> {
+        let ty = ReplicaType {
+            on_commit_cb,
+            injector_buffer_capacity,
+        };
+
+        Ok(Self::new(db_path, ty))
+    }
+}
+
+impl LibsqlDatabase<PrimaryType> {
+    pub fn new_primary(
+        db_path: PathBuf,
+        compactor: impl LogCompactor,
+        // whether the log is dirty and might need repair
+        dirty: bool,
+        new_frame_notifier: FrameNotifierCb,
+    ) -> crate::Result<Self> {
+        let ty = PrimaryType {
+            logger: Arc::new(ReplicationLogger::open(
+                &db_path,
+                dirty,
+                compactor,
+                new_frame_notifier,
+            )?),
+        };
+        Ok(Self::new(db_path, ty))
+    }
+
+    pub fn compact_log(&self) {
+        self.ty.logger.compact();
+    }
+
+    pub fn logger(&self) -> Arc<ReplicationLogger> {
+        self.ty.logger.clone()
+    }
+}
+
+impl<T> LibsqlDatabase<T> {
+    /// Create a new instance with the passed `LibsqlDbType`.
+    fn new(db_path: PathBuf, ty: T) -> Self {
+        Self {
+            db_path,
+            extensions: None,
+            response_size_limit: u64::MAX,
+            row_stats_callback: None,
+            ty,
+        }
+    }
+
+    /// Load extensions for connection to this database.
+    pub fn with_extensions(mut self, ext: impl IntoIterator<Item = PathBuf>) -> Self {
+        self.extensions = Some(ext.into_iter().collect());
+        self
+    }
+
+    /// Register a callback
+    pub fn with_row_stats_handler(mut self, handler: Arc<dyn RowStatsHandler>) -> Self {
+        self.row_stats_callback = Some(handler);
+        self
+    }
+}
+
+impl<T: LibsqlDbType> Database for LibsqlDatabase<T> {
+    type Connection = LibsqlConnection<T>;
+
+    fn connect(&self) -> Result<Self::Connection, Error> {
+        Ok(LibsqlConnection::<T>::new(
+            &self.db_path,
+            self.extensions.clone(),
+            T::hook(),
+            self.ty.hook_context(),
+            self.row_stats_callback.clone(),
+            QueryBuilderConfig {
+                max_size: Some(self.response_size_limit),
+            },
+            None,
+        )?)
+    }
+}
+
+impl InjectableDatabase for LibsqlDatabase<ReplicaType> {
+    fn injector(&self) -> crate::Result<Box<dyn crate::database::Injector>> {
+        Ok(Box::new(Injector::new(
+            &self.db_path,
+            self.ty.on_commit_cb.clone(),
+            self.ty.injector_buffer_capacity,
+        )?))
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use std::sync::atomic::AtomicBool;
+    use std::sync::atomic::Ordering::Relaxed;
+
+    use parking_lot::Mutex;
+    use rusqlite::types::Value;
+    use uuid::Uuid;
+
+    use crate::connection::Connection;
+    use crate::database::libsql::replication_log::logger::LogFile;
+    use crate::program::Program;
+    use crate::result_builder::{QueryResultBuilderError, ResultBuilder};
+
+    use super::*;
+
+    struct ReadRowBuilder(Arc<Mutex<Vec<Value>>>);
+
+    impl ResultBuilder for ReadRowBuilder {
+        fn add_row_value(
+            &mut self,
+            v: rusqlite::types::ValueRef,
+        ) -> Result<(), QueryResultBuilderError> {
+            self.0.lock().push(v.into());
+            Ok(())
+        }
+    }
+
+    #[test]
+    fn inject_libsql_db() {
+        let temp = tempfile::tempdir().unwrap();
+        let replica = ReplicaType {
+            on_commit_cb: Arc::new(|_| true),
+            injector_buffer_capacity: 10,
+        };
+        let db = LibsqlDatabase::new(temp.path().to_path_buf(), replica);
+
+        let mut conn = db.connect().unwrap();
+        let row: Arc<Mutex<Vec<Value>>> = Default::default();
+        let builder = Box::new(ReadRowBuilder(row.clone()));
+        conn.execute_program(&Program::seq(&["select count(*) from test"]), builder);
+        assert!(row.lock().is_empty());
+
+        let log = LogFile::new(PathBuf::from("assets/test/simple_wallog")).unwrap();
+        let mut injector = db.injector().unwrap();
+        log.frames_iter().unwrap().for_each(|f| {
+            injector.inject(f.unwrap()).unwrap();
+        });
+
+        let row: Arc<Mutex<Vec<Value>>> = Default::default();
+        let builder = Box::new(ReadRowBuilder(row.clone()));
+        conn.execute_program(&Program::seq(&["select count(*) from test"]), builder);
+        assert_eq!(row.lock()[0], Value::Integer(5));
+    }
+
+    #[test]
+    fn roundtrip_primary_replica() {
+        let temp_primary = tempfile::tempdir().unwrap();
+        let temp_replica = tempfile::tempdir().unwrap();
+
+        let primary = LibsqlDatabase::new(
+            temp_primary.path().to_path_buf(),
+            PrimaryType {
+                logger: Arc::new(
+                    ReplicationLogger::open(temp_primary.path(), false, (), Box::new(|_| ()))
+                        .unwrap(),
+                ),
+            },
+        );
+
+        let replica = LibsqlDatabase::new(
+            temp_replica.path().to_path_buf(),
+            ReplicaType {
+                on_commit_cb: Arc::new(|_| true),
+                injector_buffer_capacity: 10,
+            },
+        );
+
+        let mut primary_conn = primary.connect().unwrap();
+        primary_conn.execute_program(
+            &Program::seq(&["create table test (x)", "insert into test values (42)"]),
+            Box::new(()),
+        );
+
+        let logfile = primary.ty.logger.log_file.read();
+
+        let mut injector = replica.injector().unwrap();
+        for frame in logfile.frames_iter().unwrap() {
+            let frame = frame.unwrap();
+            injector.inject(frame).unwrap();
+        }
+
+        let mut replica_conn = replica.connect().unwrap();
+        let row: Arc<Mutex<Vec<Value>>> = Default::default();
+        let builder = Box::new(ReadRowBuilder(row.clone()));
+        replica_conn.execute_program(&Program::seq(&["select * from test limit 1"]), builder);
+
+        assert_eq!(row.lock().len(), 1);
+        assert_eq!(row.lock()[0], Value::Integer(42));
+    }
+
+    #[test]
+    fn primary_compact_log() {
+        struct Compactor(Arc<AtomicBool>);
+
+        impl LogCompactor for Compactor {
+            fn should_compact(&self, log: &LogFile) -> bool {
+                log.header().frame_count > 2
+            }
+
+            fn compact(
+                &mut self,
+                _id: Uuid,
+            ) -> Result<(), Box<dyn std::error::Error + Sync + Send + 'static>> {
+                self.0.store(true, Relaxed);
+                Ok(())
+            }
+
+            fn snapshot_dir(&self) -> PathBuf {
+                todo!();
+            }
+        }
+
+        let temp = tempfile::tempdir().unwrap();
+        let compactor_called = Arc::new(AtomicBool::new(false));
+        let db = LibsqlDatabase::new_primary(
+            temp.path().to_path_buf(),
+            Compactor(compactor_called.clone()),
+            false,
+            Box::new(|_| ()),
+        )
+        .unwrap();
+
+        let mut conn = db.connect().unwrap();
+        conn.execute_program(
+            &Program::seq(&["create table test (x)", "insert into test values (12)"]),
+            Box::new(()),
+        );
+        assert!(compactor_called.load(Relaxed));
+    }
+
+    #[test]
+    fn no_compaction_uncommited_frames() {
+        struct Compactor(Arc<AtomicBool>);
+
+        impl LogCompactor for Compactor {
+            fn should_compact(&self, log: &LogFile) -> bool {
+                assert_eq!(log.uncommitted_frame_count, 0);
+                self.0.store(true, Relaxed);
+                false
+            }
+
+            fn compact(
+                &mut self,
+                _id: Uuid,
+            ) -> Result<(), Box<dyn std::error::Error + Sync + Send + 'static>> {
+                unreachable!()
+            }
+
+            fn snapshot_dir(&self) -> PathBuf {
+                todo!()
+            }
+        }
+
+        let temp = tempfile::tempdir().unwrap();
+        let compactor_called = Arc::new(AtomicBool::new(false));
+        let db = LibsqlDatabase::new_primary(
+            temp.path().to_path_buf(),
+            Compactor(compactor_called.clone()),
+            false,
+            Box::new(|_| ()),
+        )
+        .unwrap();
+
+        let mut conn = db.connect().unwrap();
+        conn.execute_program(
+            &Program::seq(&[
+                "begin",
+                "create table test (x)",
+                "insert into test values (12)",
+            ]),
+            Box::new(()),
+        );
+        conn.inner_connection().cache_flush().unwrap();
+        assert!(!compactor_called.load(Relaxed));
+        conn.execute_program(&Program::seq(&["commit"]), Box::new(()));
+        assert!(compactor_called.load(Relaxed));
+    }
+}
diff --git a/libsqlx/src/database/libsql/replication_log/frame_stream.rs b/libsqlx/src/database/libsql/replication_log/frame_stream.rs
new file mode 100644
index 00000000..79436f99
--- /dev/null
+++ b/libsqlx/src/database/libsql/replication_log/frame_stream.rs
@@ -0,0 +1,114 @@
+use std::sync::Arc;
+use std::task::{ready, Poll};
+use std::{pin::Pin, task::Context};
+
+use futures::future::BoxFuture;
+use futures::Stream;
+
+use crate::database::frame::Frame;
+
+use super::FrameNo;
+use super::logger::{ReplicationLogger, LogReadError};
+
+/// Streams frames from the replication log, starting at `next_frame_no`.
+/// Only stops if the current frame is not in the log anymore.
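+///
+/// A rough usage sketch (assuming a tokio context and an `Arc<ReplicationLogger>`
+/// handle; `apply_frame` is a hypothetical consumer, and this is not compiled as
+/// a doctest):
+///
+/// ```ignore
+/// use futures::StreamExt;
+///
+/// let mut frames = FrameStream::new(logger.clone(), 0);
+/// while let Some(res) = frames.next().await {
+///     match res {
+///         Ok(frame) => apply_frame(frame),
+///         // the requested frame was compacted away; restore from a snapshot instead
+///         Err(LogReadError::SnapshotRequired) => break,
+///         Err(e) => return Err(e.into()),
+///     }
+/// }
+/// ```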
+pub struct FrameStream {
+    next_frame_no: FrameNo,
+    max_available_frame_no: FrameNo,
+    logger: Arc<ReplicationLogger>,
+    state: FrameStreamState,
+}
+
+impl FrameStream {
+    pub fn new(logger: Arc<ReplicationLogger>, next_frame_no: FrameNo) -> Self {
+        let max_available_frame_no = *logger.new_frame_notifier.subscribe().borrow();
+        Self {
+            next_frame_no,
+            max_available_frame_no,
+            logger,
+            state: FrameStreamState::Init,
+        }
+    }
+
+    fn transition_state_next_frame(&mut self) {
+        if matches!(self.state, FrameStreamState::Closed) {
+            return;
+        }
+
+        let next_frameno = self.next_frame_no;
+        let logger = self.logger.clone();
+        let fut = async move {
+            let res = tokio::task::spawn_blocking(move || logger.get_frame(next_frameno)).await;
+            match res {
+                Ok(Ok(frame)) => Ok(frame),
+                Ok(Err(e)) => Err(e),
+                Err(e) => Err(LogReadError::Error(e.into())),
+            }
+        };
+
+        self.state = FrameStreamState::WaitingFrame(Box::pin(fut));
+    }
+}
+
+enum FrameStreamState {
+    Init,
+    /// waiting for new frames to replicate
+    WaitingFrameNo(BoxFuture<'static, anyhow::Result<FrameNo>>),
+    WaitingFrame(BoxFuture<'static, Result<Frame, LogReadError>>),
+    Closed,
+}
+
+impl Stream for FrameStream {
+    type Item = Result<Frame, LogReadError>;
+
+    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+        match self.state {
+            FrameStreamState::Init => {
+                self.transition_state_next_frame();
+                self.poll_next(cx)
+            }
+            FrameStreamState::WaitingFrameNo(ref mut fut) => {
+                self.max_available_frame_no = match ready!(fut.as_mut().poll(cx)) {
+                    Ok(frame_no) => frame_no,
+                    Err(e) => {
+                        self.state = FrameStreamState::Closed;
+                        return Poll::Ready(Some(Err(LogReadError::Error(e))));
+                    }
+                };
+                self.transition_state_next_frame();
+                self.poll_next(cx)
+            }
+            FrameStreamState::WaitingFrame(ref mut fut) => match ready!(fut.as_mut().poll(cx)) {
+                Ok(frame) => {
+                    self.next_frame_no += 1;
+                    self.transition_state_next_frame();
+                    Poll::Ready(Some(Ok(frame)))
+                }
+
+                Err(LogReadError::Ahead) => {
+                    let mut notifier = self.logger.new_frame_notifier.subscribe();
+                    let max_available_frame_no = *notifier.borrow();
+                    // check in case the value has already changed, otherwise we'll be notified later
+                    if max_available_frame_no > self.max_available_frame_no {
+                        self.max_available_frame_no = max_available_frame_no;
+                        self.transition_state_next_frame();
+                        self.poll_next(cx)
+                    } else {
+                        let fut = async move {
+                            notifier.changed().await?;
+                            Ok(*notifier.borrow())
+                        };
+                        self.state = FrameStreamState::WaitingFrameNo(Box::pin(fut));
+                        self.poll_next(cx)
+                    }
+                }
+                Err(e) => {
+                    self.state = FrameStreamState::Closed;
+                    Poll::Ready(Some(Err(e)))
+                }
+            },
+            FrameStreamState::Closed => Poll::Ready(None),
+        }
+    }
+}
diff --git a/libsqlx/src/database/libsql/replication_log/logger.rs b/libsqlx/src/database/libsql/replication_log/logger.rs
new file mode 100644
index 00000000..998fada7
--- /dev/null
+++ b/libsqlx/src/database/libsql/replication_log/logger.rs
@@ -0,0 +1,1016 @@
+use std::collections::HashSet;
+use std::ffi::{c_int, c_void, CStr};
+use std::fs::{remove_dir_all, File, OpenOptions};
+use std::io::Write;
+use std::mem::size_of;
+use std::os::unix::prelude::FileExt;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+
+use anyhow::bail;
+use bytemuck::{bytes_of, pod_read_unaligned, Pod, Zeroable};
+use bytes::{Bytes, BytesMut};
+use parking_lot::{Mutex, RwLock};
+use rusqlite::ffi::{
+    libsql_wal as Wal, sqlite3, PgHdr, SQLITE_CHECKPOINT_TRUNCATE, SQLITE_IOERR, SQLITE_OK,
+};
+use sqld_libsql_bindings::ffi::types::{
+    XWalCheckpointFn, XWalFrameFn, XWalSavePointUndoFn, XWalUndoFn,
+};
+use sqld_libsql_bindings::ffi::PageHdrIter;
+use sqld_libsql_bindings::init_static_wal_method;
+use sqld_libsql_bindings::wal_hook::WalHook;
+use uuid::Uuid;
+
+use crate::database::frame::{Frame, FrameHeader};
+#[cfg(feature = "bottomless")]
+use crate::libsql::ffi::SQLITE_IOERR_WRITE;
+
+use super::{FrameNo, WAL_MAGIC, WAL_PAGE_SIZE};
+
+init_static_wal_method!(REPLICATION_METHODS, ReplicationLoggerHook);
+
+#[derive(PartialEq, Eq)]
+struct Version([u16; 4]);
+
+impl Version {
+    fn current() -> Self {
+        let major = env!("CARGO_PKG_VERSION_MAJOR").parse().unwrap();
+        let minor = env!("CARGO_PKG_VERSION_MINOR").parse().unwrap();
+        let patch = env!("CARGO_PKG_VERSION_PATCH").parse().unwrap();
+        Self([0, major, minor, patch])
+    }
+}
+
+pub enum ReplicationLoggerHook {}
+
+#[derive(Clone)]
+pub struct ReplicationLoggerHookCtx {
+    pub(crate) buffer: Vec<WalPage>,
+    pub(crate) logger: Arc<ReplicationLogger>,
+    #[cfg(feature = "bottomless")]
+    bottomless_replicator: Option<Arc<std::sync::Mutex<bottomless::replicator::Replicator>>>,
+}
+
+/// This implementation of WalHook intercepts calls to `on_frame`, and writes them to a
+/// shadow wal. Writing to the shadow wal is done in three steps:
+/// i. append the new pages at the offset pointed by header.start_frame_no + header.frame_count
+/// ii. call the underlying implementation of on_frames
+/// iii. if the call to the underlying method was successful, update the log header to the new
+/// frame count.
+///
+/// If either writing to the database or to the shadow wal fails, it must be a noop.
+unsafe impl WalHook for ReplicationLoggerHook {
+    type Context = ReplicationLoggerHookCtx;
+
+    fn name() -> &'static CStr {
+        CStr::from_bytes_with_nul(b"replication_logger_hook\0").unwrap()
+    }
+
+    fn on_frames(
+        wal: &mut Wal,
+        page_size: c_int,
+        page_headers: *mut PgHdr,
+        ntruncate: u32,
+        is_commit: c_int,
+        sync_flags: c_int,
+        orig: XWalFrameFn,
+    ) -> c_int {
+        assert_eq!(page_size, 4096);
+        let wal_ptr = wal as *mut _;
+        #[cfg(feature = "bottomless")]
+        let last_valid_frame = wal.hdr.mxFrame;
+        #[cfg(feature = "bottomless")]
+        let _frame_checksum = wal.hdr.aFrameCksum;
+        let ctx = Self::wal_extract_ctx(wal);
+
+        for (page_no, data) in PageHdrIter::new(page_headers, page_size as _) {
+            ctx.write_frame(page_no, data)
+        }
+        if let Err(e) = ctx.flush(ntruncate) {
+            tracing::error!("error writing to replication log: {e}");
+            // returning IO_ERR ensures that xUndo will be called by sqlite.
+            return SQLITE_IOERR;
+        }
+
+        let rc = unsafe {
+            orig(
+                wal_ptr,
+                page_size,
+                page_headers,
+                ntruncate,
+                is_commit,
+                sync_flags,
+            )
+        };
+
+        // FIXME: instead of block_on, we should consider replicating asynchronously in the background,
+        // e.g. by sending the data to another fiber by an unbounded channel (which allows sync insertions).
+        #[allow(clippy::await_holding_lock)] // uncontended -> only gets called under a libSQL write lock
+        #[cfg(feature = "bottomless")]
+        if rc == 0 {
+            let runtime = tokio::runtime::Handle::current();
+            if let Some(replicator) = ctx.bottomless_replicator.as_mut() {
+                match runtime.block_on(async move {
+                    let mut replicator = replicator.lock().unwrap();
+                    replicator.register_last_valid_frame(last_valid_frame);
+                    // In theory it's enough to set the page size only once, but in practice
+                    // it's a very cheap operation anyway, and the page is not always known
+                    // upfront and can change dynamically.
+                    // FIXME: changing the page size in the middle of operation is *not*
+                    // supported by bottomless storage.
+                    replicator.set_page_size(page_size as usize)?;
+                    let frame_count = PageHdrIter::new(page_headers, page_size as usize).count();
+                    replicator.submit_frames(frame_count as u32);
+                    Ok::<(), anyhow::Error>(())
+                }) {
+                    Ok(()) => {}
+                    Err(e) => {
+                        tracing::error!("error writing to bottomless: {e}");
+                        return SQLITE_IOERR_WRITE;
+                    }
+                }
+            }
+        }
+
+        if is_commit != 0 && rc == 0 {
+            if let Err(e) = ctx.commit() {
+                // If we reach this point, it means that we have committed a transaction to the sqlite
+                // wal, but failed to commit it to the shadow WAL, which leaves us in an inconsistent
+                // state.
+                tracing::error!(
+                    "fatal error: log failed to commit: inconsistent replication log: {e}"
+                );
+                std::process::abort();
+            }
+
+            if let Err(e) = ctx
+                .logger
+                .log_file
+                .write()
+                .maybe_compact(&mut *ctx.logger.compactor.lock())
+            {
+                tracing::error!("fatal error: {e}, exiting");
+                std::process::abort()
+            }
+        }
+
+        rc
+    }
+
+    fn on_undo(
+        wal: &mut Wal,
+        func: Option<unsafe extern "C" fn(*mut c_void, u32) -> i32>,
+        undo_ctx: *mut c_void,
+        orig: XWalUndoFn,
+    ) -> i32 {
+        let ctx = Self::wal_extract_ctx(wal);
+        ctx.rollback();
+
+        #[cfg(feature = "bottomless")]
+        tracing::error!(
+            "fixme: implement bottomless undo for {:?}",
+            ctx.bottomless_replicator
+        );
+
+        unsafe { orig(wal, func, undo_ctx) }
+    }
+
+    fn on_savepoint_undo(wal: &mut Wal, wal_data: *mut u32, orig: XWalSavePointUndoFn) -> i32 {
+        let rc = unsafe { orig(wal, wal_data) };
+        if rc != SQLITE_OK {
+            return rc;
+        };
+
+        #[cfg(feature = "bottomless")]
+        {
+            let ctx = Self::wal_extract_ctx(wal);
+            if let Some(replicator) = ctx.bottomless_replicator.as_mut() {
+                let last_valid_frame = unsafe { *wal_data };
+                let mut replicator = replicator.lock().unwrap();
+                let prev_valid_frame = replicator.peek_last_valid_frame();
+                tracing::trace!(
+                    "Savepoint: rolling back from frame {prev_valid_frame} to {last_valid_frame}",
+                );
+                replicator.rollback_to_frame(last_valid_frame);
+            }
+        }
+
+        rc
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn on_checkpoint(
+        wal: &mut Wal,
+        db: *mut sqlite3,
+        emode: i32,
+        busy_handler: Option<unsafe extern "C" fn(*mut c_void) -> i32>,
+        busy_arg: *mut c_void,
+        sync_flags: i32,
+        n_buf: i32,
+        z_buf: *mut u8,
+        frames_in_wal: *mut i32,
+        backfilled_frames: *mut i32,
+        orig: XWalCheckpointFn,
+    ) -> i32 {
+        #[cfg(feature = "bottomless")]
+        {
+            tracing::trace!("bottomless checkpoint");
+
+            /* In order to avoid partial checkpoints, passive checkpoint
+             ** mode is not allowed. Only TRUNCATE checkpoints are accepted,
+             ** because these are guaranteed to block writes, copy all WAL pages
+             ** back into the main database file and reset the frame number.
+             ** In order to avoid autocheckpoint on close (that's too often),
+             ** checkpoint attempts weaker than TRUNCATE are ignored.
+             */
+            if emode < SQLITE_CHECKPOINT_TRUNCATE {
+                tracing::trace!("Ignoring a checkpoint request weaker than TRUNCATE");
+                return SQLITE_OK;
+            }
+        }
+        let rc = unsafe {
+            orig(
+                wal,
+                db,
+                emode,
+                busy_handler,
+                busy_arg,
+                sync_flags,
+                n_buf,
+                z_buf,
+                frames_in_wal,
+                backfilled_frames,
+            )
+        };
+
+        if rc != SQLITE_OK {
+            return rc;
+        }
+
+        #[allow(clippy::await_holding_lock)] // uncontended -> only gets called under a libSQL write lock
+        #[cfg(feature = "bottomless")]
+        {
+            let ctx = Self::wal_extract_ctx(wal);
+            let runtime = tokio::runtime::Handle::current();
+            if let Some(replicator) = ctx.bottomless_replicator.as_mut() {
+                let mut replicator = replicator.lock().unwrap();
+                if replicator.commits_in_current_generation() == 0 {
+                    tracing::debug!("No commits happened in this generation, not snapshotting");
+                    return SQLITE_OK;
+                }
+                let last_known_frame = replicator.last_known_frame();
+                replicator.request_flush();
+                if let Err(e) = runtime.block_on(replicator.wait_until_committed(last_known_frame))
+                {
+                    tracing::error!(
+                        "Failed to wait for S3 replicator to confirm {} frames backup: {}",
+                        last_known_frame,
+                        e
+                    );
+                    return SQLITE_IOERR_WRITE;
+                }
+                replicator.new_generation();
+                if let Err(e) =
+                    runtime.block_on(async move { replicator.snapshot_main_db_file().await })
+                {
+                    tracing::error!("Failed to snapshot the main db file during checkpoint: {e}");
+                    return SQLITE_IOERR_WRITE;
+                }
+            }
+        }
+        SQLITE_OK
+    }
+}
+
+#[derive(Clone)]
+pub struct WalPage {
+    pub page_no: u32,
+    /// 0 for non-commit frames
+    pub size_after: u32,
+    pub data: Bytes,
+}
+
+impl ReplicationLoggerHookCtx {
+    pub fn new(
+        logger: Arc<ReplicationLogger>,
+        #[cfg(feature = "bottomless")] bottomless_replicator: Option<
+            Arc<std::sync::Mutex<bottomless::replicator::Replicator>>,
+        >,
+    ) -> Self {
+        #[cfg(feature = "bottomless")]
+        tracing::trace!("bottomless replication enabled: {bottomless_replicator:?}");
+        Self {
+            buffer: Default::default(),
+            logger,
+            #[cfg(feature = "bottomless")]
+            bottomless_replicator,
+        }
+    }
+
+    fn write_frame(&mut self, page_no: u32, data: &[u8]) {
+        let entry = WalPage {
+            page_no,
+            size_after: 0,
+            data: Bytes::copy_from_slice(data),
+        };
+        self.buffer.push(entry);
+    }
+
+    /// write buffered pages to the logger, without committing.
+    fn flush(&mut self, size_after: u32) -> anyhow::Result<()> {
+        if !self.buffer.is_empty() {
+            self.buffer.last_mut().unwrap().size_after = size_after;
+            self.logger.write_pages(&self.buffer)?;
+            self.buffer.clear();
+        }
+
+        Ok(())
+    }
+
+    fn commit(&self) -> anyhow::Result<()> {
+        let new_frame_no = self.logger.commit()?;
+        let _ = (self.logger.new_frame_notifier)(new_frame_no);
+        Ok(())
+    }
+
+    fn rollback(&mut self) {
+        self.logger.log_file.write().rollback();
+        self.buffer.clear();
+    }
+}
+
+/// Represents a LogFile, and operations that can be performed on it.
+/// A log file must only ever be opened by a single instance of LogFile, since it caches the file
+/// header.
+#[derive(Debug)]
+pub struct LogFile {
+    file: File,
+    /// Path of the LogFile
+    path: PathBuf,
+    pub header: LogFileHeader,
+    /// number of frames in the log that have not been committed yet. On commit, the header's
+    /// frame count is incremented by that amount. New pages are written after the last
+    /// header.frame_count + uncommitted_frame_count.
+    /// On rollback, this is reset to 0, so that everything that was written after the previous
+    /// header.frame_count is ignored and can be overwritten
+    pub(crate) uncommitted_frame_count: u64,
+}
+
+#[derive(thiserror::Error, Debug)]
+pub enum LogReadError {
+    #[error("could not fetch log entry, snapshot required")]
+    SnapshotRequired,
+    #[error("requested entry is ahead of log")]
+    Ahead,
+    #[error(transparent)]
+    Error(#[from] crate::error::Error),
+}
+
+impl LogFile {
+    /// size of a single frame
+    pub const FRAME_SIZE: usize = size_of::<FrameHeader>() + WAL_PAGE_SIZE as usize;
+
+    pub fn new(path: PathBuf) -> crate::Result<Self> {
+        // FIXME: we should probably take a lock on this file, to prevent anybody else from
+        // writing to it.
+        let file = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .open(&path)?;
+
+        let file_end = file.metadata()?.len();
+
+        if file_end == 0 {
+            let db_id = Uuid::new_v4();
+            let header = LogFileHeader {
+                version: 2,
+                start_frame_no: 0,
+                magic: WAL_MAGIC,
+                page_size: WAL_PAGE_SIZE,
+                db_id: db_id.as_u128(),
+                frame_count: 0,
+                sqld_version: Version::current().0,
+                _pad: 0,
+            };
+
+            let mut this = Self {
+                path,
+                file,
+                header,
+                uncommitted_frame_count: 0,
+            };
+
+            this.write_header()?;
+
+            Ok(this)
+        } else {
+            let header = Self::read_header(&file)?;
+            Ok(Self {
+                file,
+                header,
+                uncommitted_frame_count: 0,
+                path,
+            })
+        }
+    }
+
+    pub fn can_compact(&mut self) -> bool {
+        self.header.frame_count > 0 && self.uncommitted_frame_count == 0
+    }
+
+    pub fn read_header(file: &File) -> crate::Result<LogFileHeader> {
+        let mut buf = [0; size_of::<LogFileHeader>()];
+        file.read_exact_at(&mut buf, 0)?;
+        let header: LogFileHeader = pod_read_unaligned(&buf);
+        if header.magic != WAL_MAGIC {
+            return Err(crate::error::Error::InvalidLogHeader);
+        }
+
+        Ok(header)
+    }
+
+    pub fn header(&self) -> &LogFileHeader {
+        &self.header
+    }
+
+    pub fn commit(&mut self) -> crate::Result<()> {
+        self.header.frame_count += self.uncommitted_frame_count;
+        self.uncommitted_frame_count = 0;
+        self.write_header()?;
+
+        Ok(())
+    }
+
+    fn rollback(&mut self) {
+        self.uncommitted_frame_count = 0;
+    }
+
+    pub fn write_header(&mut self) -> crate::Result<()> {
+        self.file.write_all_at(bytes_of(&self.header), 0)?;
+        self.file.flush()?;
+
+        Ok(())
+    }
+
+    /// Returns an iterator over the WAL frame headers
+    pub fn frames_iter(&self) -> anyhow::Result<impl Iterator<Item = crate::Result<Frame>> + '_> {
+        let mut current_frame_offset = 0;
+        Ok(std::iter::from_fn(move || {
+            if current_frame_offset >= self.header.frame_count {
+                return None;
+            }
+            let read_byte_offset = Self::absolute_byte_offset(current_frame_offset);
+            current_frame_offset += 1;
+            Some(self.read_frame_byte_offset(read_byte_offset))
+        }))
+    }
+
+    /// Returns a reversed iterator over the WAL frame headers
+    pub fn rev_frames_iter(&self) -> impl Iterator<Item = crate::Result<Frame>> + '_ {
+        let mut current_frame_offset = self.header.frame_count;
+
+        std::iter::from_fn(move || {
+            if current_frame_offset == 0 {
+                return None;
+            }
+            current_frame_offset -= 1;
+            let read_byte_offset = Self::absolute_byte_offset(current_frame_offset);
+            let frame = self.read_frame_byte_offset(read_byte_offset);
+            Some(frame)
+        })
+    }
+
+    /// If the log contains any frames, returns (start_frameno, end_frameno, iter), where iter is
+    /// a deduplicated reversed iterator over the frames in the log
+    pub fn rev_deduped(
+        &self,
+    ) -> Option<(
+        FrameNo,
+        FrameNo,
+        impl Iterator<Item = crate::Result<Frame>> + '_,
+    )> {
+        let mut iter = self.rev_frames_iter();
+        let mut seen = HashSet::new();
+        let start_fno = self.header().start_frame_no;
+        let end_fno = self.header().last_frame_no()?;
+        let iter = std::iter::from_fn(move || loop {
+            match iter.next()? {
+                Ok(frame) => {
+                    if !seen.contains(&frame.header().page_no) {
+                        seen.insert(frame.header().page_no);
+                        return Some(Ok(frame));
+                    }
+                }
+                Err(e) => return Some(Err(e)),
+            }
+        });
+
+        Some((start_fno, end_fno, iter))
+    }
+
+    pub fn push_page(&mut self, page: &WalPage) -> crate::Result<()> {
+        let frame = Frame::from_parts(
+            &FrameHeader {
+                frame_no: self.next_frame_no(),
+                page_no: page.page_no,
+                size_after: page.size_after,
+            },
+            &page.data,
+        );
+
+        let byte_offset = self.next_byte_offset();
+        tracing::trace!(
+            "writing frame {} at offset {byte_offset}",
+            frame.header().frame_no
+        );
+        self.file.write_all_at(frame.as_slice(), byte_offset)?;
+
+        self.uncommitted_frame_count += 1;
+
+        Ok(())
+    }
+
+    /// offset in bytes at which to write the next frame
+    fn next_byte_offset(&self) -> u64 {
+        Self::absolute_byte_offset(self.header().frame_count + self.uncommitted_frame_count)
+    }
+
+    fn next_frame_no(&self) -> FrameNo {
+        self.header().start_frame_no + self.header().frame_count + self.uncommitted_frame_count
+    }
+
+    /// Returns the byte position of the `nth` entry in the log
+    fn absolute_byte_offset(nth: u64) -> u64 {
+        std::mem::size_of::<LogFileHeader>() as u64 + nth * Self::FRAME_SIZE as u64
+    }
+
+    fn byte_offset(&self, id: FrameNo) -> crate::Result<Option<u64>> {
+        if id < self.header.start_frame_no
+            || id > self.header.start_frame_no + self.header.frame_count
+        {
+            return Ok(None);
+        }
+        Ok(Self::absolute_byte_offset(id - self.header.start_frame_no).into())
+    }
+
+    /// Returns bytes representing a WalFrame for frame `frame_no`
+    ///
+    /// If the requested frame is before the first frame in the log, `Err(SnapshotRequired)` is
+    /// returned; if it is past the last committed frame, `Err(Ahead)` is returned.
+    pub fn frame(&self, frame_no: FrameNo) -> std::result::Result<Frame, LogReadError> {
+        if frame_no < self.header.start_frame_no {
+            return Err(LogReadError::SnapshotRequired);
+        }
+
+        if frame_no >= self.header.start_frame_no + self.header.frame_count {
+            return Err(LogReadError::Ahead);
+        }
+
+        let frame = self.read_frame_byte_offset(self.byte_offset(frame_no)?.unwrap())?;
+
+        Ok(frame)
+    }
+
+    fn maybe_compact(&mut self, compactor: &mut dyn LogCompactor) -> anyhow::Result<()> {
+        if self.can_compact() && compactor.should_compact(self) {
+            return self.do_compaction(compactor);
+        }
+
+        Ok(())
+    }
+
+    fn do_compaction(&mut self, compactor: &mut dyn LogCompactor) -> anyhow::Result<()> {
+        tracing::info!("performing log compaction");
+        let log_id = Uuid::new_v4();
+        let temp_log_path = compactor.snapshot_dir().join(log_id.to_string());
+        let last_frame = self
+            .rev_frames_iter()
+            .next()
+            .expect("there should be at least one frame to perform compaction")?;
+        let size_after = last_frame.header().size_after;
+        assert!(size_after != 0);
+
+        let mut new_log_file = LogFile::new(temp_log_path.clone())?;
+        let new_header = LogFileHeader {
+            start_frame_no: self.header.start_frame_no + self.header.frame_count,
+            frame_count: 0,
+            ..self.header
+        };
+        new_log_file.header = new_header;
+        new_log_file.write_header().unwrap();
+        // swap the old and new logs
+        atomic_rename(&temp_log_path, &self.path).unwrap();
+        std::mem::swap(&mut new_log_file.path, &mut self.path);
+        let _ = std::mem::replace(self, new_log_file);
+        compactor.compact(log_id).unwrap();
+
+        Ok(())
+    }
+
+    fn read_frame_byte_offset(&self, offset: u64) -> crate::Result<Frame> {
+        let mut buffer = BytesMut::zeroed(LogFile::FRAME_SIZE);
+        self.file.read_exact_at(&mut buffer, offset)?;
+        let buffer = buffer.freeze();
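+        // `try_from_bytes` re-checks that the buffer is exactly `Frame::SIZE`
+        // bytes before taking ownership of it.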
Frame::try_from_bytes(buffer) + } + + fn last_commited_frame_no(&self) -> Option { + if self.header.frame_count == 0 { + None + } else { + Some(self.header.start_frame_no + self.header.frame_count - 1) + } + } + + fn reset(self) -> crate::Result { + // truncate file + self.file.set_len(0)?; + Self::new(self.path) + } + + /// return the size in bytes of the log + pub fn size(&self) -> usize { + size_of::() + Frame::SIZE * self.header().frame_count as usize + } +} + +#[cfg(target_os = "macos")] +fn atomic_rename(p1: impl AsRef, p2: impl AsRef) -> anyhow::Result<()> { + use std::ffi::CString; + use std::os::unix::prelude::OsStrExt; + + use nix::libc::renamex_np; + use nix::libc::RENAME_SWAP; + + let p1 = CString::new(p1.as_ref().as_os_str().as_bytes())?; + let p2 = CString::new(p2.as_ref().as_os_str().as_bytes())?; + unsafe { + let ret = renamex_np(p1.as_ptr(), p2.as_ptr(), RENAME_SWAP); + + if ret != 0 { + bail!( + "failed to perform snapshot file swap: {ret}, errno: {}", + std::io::Error::last_os_error() + ); + } + } + + Ok(()) +} + +#[cfg(target_os = "linux")] +fn atomic_rename(p1: impl AsRef, p2: impl AsRef) -> anyhow::Result<()> { + use anyhow::Context; + use nix::fcntl::{renameat2, RenameFlags}; + + renameat2( + None, + p1.as_ref(), + None, + p2.as_ref(), + RenameFlags::RENAME_EXCHANGE, + ) + .context("failed to perform snapshot file swap")?; + + Ok(()) +} + +#[derive(Debug, Clone, Copy, Zeroable, Pod)] +#[repr(C)] +pub struct LogFileHeader { + /// magic number: b"SQLDWAL\0" as u64 + pub magic: u64, + _pad: u64, + /// Uuid of the database associated with this log. + pub db_id: u128, + /// Frame_no of the first frame in the log + pub start_frame_no: FrameNo, + /// entry count in file + pub frame_count: u64, + /// Wal file version number, currently: 2 + pub version: u32, + /// page size: 4096 + pub page_size: i32, + /// sqld version when creating this log + pub sqld_version: [u16; 4], +} + +impl LogFileHeader { + pub fn last_frame_no(&self) -> Option { + if self.start_frame_no == 0 && self.frame_count == 0 { + None + } else { + Some(self.start_frame_no + self.frame_count - 1) + } + } + + fn sqld_version(&self) -> Version { + Version(self.sqld_version) + } +} + +pub trait LogCompactor: Sync + Send + 'static { + /// returns whether the passed log file should be compacted. If this method returns true, + /// compact should be called next. + fn should_compact(&self, log: &LogFile) -> bool; + /// Compact the given snapshot + fn compact( + &mut self, + log_id: Uuid, + ) -> Result<(), Box>; + + fn snapshot_dir(&self) -> PathBuf; +} + +#[cfg(test)] +impl LogCompactor for () { + fn compact( + &mut self, + _log_id: Uuid, + ) -> Result<(), Box> { + Ok(()) + } + + fn should_compact(&self, _file: &LogFile) -> bool { + false + } + + fn snapshot_dir(&self) -> PathBuf { + todo!() + } +} + +pub type FrameNotifierCb = Box; + +pub struct ReplicationLogger { + pub log_file: RwLock, + compactor: Box>, + /// a notifier channel other tasks can subscribe to, and get notified when new frames become + /// available. 
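// a sketch of what a non-test LogCompactor could look like; the frame-count
// policy and the exact boxed error bound (elided by the trait definition
// above) are assumptions, not part of this change:
//
//     struct ThresholdCompactor { snapshot_dir: PathBuf, max_frames: u64 }
//
//     impl LogCompactor for ThresholdCompactor {
//         fn should_compact(&self, log: &LogFile) -> bool {
//             log.header().frame_count >= self.max_frames
//         }
//         fn compact(&mut self, log_id: Uuid)
//             -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
//             // consume the swapped-out log file named after `log_id` into a snapshot
//             Ok(())
//         }
//         fn snapshot_dir(&self) -> PathBuf { self.snapshot_dir.clone() }
//     }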
+ pub new_frame_notifier: FrameNotifierCb, +} + +impl ReplicationLogger { + pub fn open( + db_path: &Path, + dirty: bool, + compactor: impl LogCompactor, + new_frame_notifier: FrameNotifierCb, + ) -> crate::Result { + let log_path = db_path.join("wallog"); + let data_path = db_path.join("data"); + + let fresh = !log_path.exists(); + + let log_file = LogFile::new(log_path)?; + let header = log_file.header(); + + let should_recover = if dirty { + tracing::info!("Replication log is dirty, recovering from database file."); + true + } else if header.version < 2 || header.sqld_version() != Version::current() { + tracing::info!("replication log version not compatible with current sqld version, recovering from database file."); + true + } else if fresh && data_path.exists() { + tracing::info!("replication log not found, recovering from database file."); + true + } else { + false + }; + + if should_recover { + Self::recover(log_file, data_path, compactor, new_frame_notifier) + } else { + Self::from_log_file(log_file, compactor, new_frame_notifier) + } + } + + fn from_log_file( + log_file: LogFile, + compactor: impl LogCompactor, + new_frame_notifier: FrameNotifierCb, + ) -> crate::Result { + let this = Self { + compactor: Box::new(Mutex::new(compactor)), + log_file: RwLock::new(log_file), + new_frame_notifier, + }; + + if let Some(last_frame) = this.log_file.read().last_commited_frame_no() { + (this.new_frame_notifier)(last_frame); + } + + Ok(this) + } + + fn recover( + log_file: LogFile, + mut data_path: PathBuf, + compactor: impl LogCompactor, + new_frame_notifier: FrameNotifierCb, + ) -> crate::Result { + // It is necessary to checkpoint before we restore the replication log, since the WAL may + // contain pages that are not in the database file. + checkpoint_db(&data_path)?; + let mut log_file = log_file.reset()?; + let snapshot_path = data_path.parent().unwrap().join("snapshots"); + // best effort, there may be no snapshots + let _ = remove_dir_all(snapshot_path); + + let data_file = File::open(&data_path)?; + let size = data_path.metadata()?.len(); + assert!( + size % WAL_PAGE_SIZE as u64 == 0, + "database file size is not a multiple of page size" + ); + let num_page = size / WAL_PAGE_SIZE as u64; + let mut buf = [0; WAL_PAGE_SIZE as usize]; + let mut page_no = 1; // page numbering starts at 1 + for i in 0..num_page { + data_file.read_exact_at(&mut buf, i * WAL_PAGE_SIZE as u64)?; + log_file.push_page(&WalPage { + page_no, + size_after: if i == num_page - 1 { num_page as _ } else { 0 }, + data: Bytes::copy_from_slice(&buf), + })?; + log_file.commit()?; + + page_no += 1; + } + + assert!(data_path.pop()); + + Self::from_log_file(log_file, compactor, new_frame_notifier) + } + + pub fn database_id(&self) -> anyhow::Result { + Ok(Uuid::from_u128((self.log_file.read()).header().db_id)) + } + + /// Write pages to the log, without updating the file header. 
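// typical construction, as a sketch: `db_dir_path`, `NoopCompactor` and `tx`
// are hypothetical, and the closure only needs to satisfy FrameNotifierCb:
//
//     let logger = ReplicationLogger::open(
//         db_dir_path,                      // dir holding `data` and `wallog`
//         false,                            // not flagged dirty
//         NoopCompactor::default(),         // some impl LogCompactor
//         Box::new(move |frame_no| { let _ = tx.send(frame_no); }),
//     )?;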
+ /// Returns the new frame count and checksum to commit + fn write_pages(&self, pages: &[WalPage]) -> anyhow::Result<()> { + let mut log_file = self.log_file.write(); + for page in pages.iter() { + log_file.push_page(page)?; + } + + Ok(()) + } + + /// commit the current transaction and returns the new top frame number + fn commit(&self) -> anyhow::Result { + let mut log_file = self.log_file.write(); + log_file.commit()?; + Ok(log_file + .header() + .last_frame_no() + .expect("there should be at least one frame after commit")) + } + + pub fn get_frame(&self, frame_no: FrameNo) -> Result { + self.log_file.read().frame(frame_no) + } + + pub fn compact(&self) { + let mut log_file = self.log_file.write(); + if log_file.can_compact() { + log_file.do_compaction(&mut *self.compactor.lock()).unwrap(); + } + } +} + +fn checkpoint_db(data_path: &Path) -> crate::Result<()> { + unsafe { + let conn = rusqlite::Connection::open(data_path)?; + conn.pragma_query(None, "page_size", |row| { + let page_size = row.get::<_, i32>(0).unwrap(); + assert_eq!( + page_size, WAL_PAGE_SIZE, + "invalid database file, expected page size to be {}, but found {} instead", + WAL_PAGE_SIZE, page_size + ); + Ok(()) + })?; + let mut num_checkpointed: c_int = 0; + let rc = rusqlite::ffi::sqlite3_wal_checkpoint_v2( + conn.handle(), + std::ptr::null(), + SQLITE_CHECKPOINT_TRUNCATE, + &mut num_checkpointed as *mut _, + std::ptr::null_mut(), + ); + + // TODO: ensure correct page size + assert!( + rc == 0 && num_checkpointed >= 0, + "failed to checkpoint database while recovering replication log" + ); + + conn.execute("VACUUM", ())?; + } + + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn write_and_read_from_frame_log() { + let dir = tempfile::tempdir().unwrap(); + let logger = ReplicationLogger::open(dir.path(), false, (), Box::new(|_| ())).unwrap(); + + let frames = (0..10) + .map(|i| WalPage { + page_no: i, + size_after: 0, + data: Bytes::from(vec![i as _; 4096]), + }) + .collect::>(); + logger.write_pages(&frames).unwrap(); + logger.commit().unwrap(); + + let log_file = logger.log_file.write(); + for i in 0..10 { + let frame = log_file.frame(i).unwrap(); + assert_eq!(frame.header().page_no, i as u32); + assert!(frame.page().iter().all(|x| i as u8 == *x)); + } + + assert_eq!( + log_file.header.start_frame_no + log_file.header.frame_count, + 10 + ); + } + + #[test] + fn index_out_of_bounds() { + let dir = tempfile::tempdir().unwrap(); + let logger = ReplicationLogger::open(dir.path(), false, (), Box::new(|_| ())).unwrap(); + let log_file = logger.log_file.write(); + assert!(matches!(log_file.frame(1), Err(LogReadError::Ahead))); + } + + #[test] + #[should_panic] + fn incorrect_frame_size() { + let dir = tempfile::tempdir().unwrap(); + let logger = ReplicationLogger::open(dir.path(), false, (), Box::new(|_| ())).unwrap(); + let entry = WalPage { + page_no: 0, + size_after: 0, + data: vec![0; 3].into(), + }; + + logger.write_pages(&[entry]).unwrap(); + logger.commit().unwrap(); + } + + #[test] + fn log_file_test_rollback() { + let f = tempfile::NamedTempFile::new().unwrap(); + let mut log_file = LogFile::new(f.path().to_path_buf()).unwrap(); + (0..5) + .map(|i| WalPage { + page_no: i, + size_after: 5, + data: Bytes::from_static(&[1; 4096]), + }) + .for_each(|p| { + log_file.push_page(&p).unwrap(); + }); + + assert_eq!(log_file.frames_iter().unwrap().count(), 0); + + log_file.commit().unwrap(); + + (0..5) + .map(|i| WalPage { + page_no: i, + size_after: 5, + data: Bytes::from_static(&[1; 4096]), + }) + 
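// these five frames are pushed but never committed: `rollback()` below only
// resets `uncommitted_frame_count`, so the next `push_page` reuses the same
// byte range (slot 5 onwards) while the committed prefix stays untouched.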
.for_each(|p| { + log_file.push_page(&p).unwrap(); + }); + + log_file.rollback(); + assert_eq!(log_file.frames_iter().unwrap().count(), 5); + + log_file + .push_page(&WalPage { + page_no: 42, + size_after: 5, + data: Bytes::from_static(&[1; 4096]), + }) + .unwrap(); + + assert_eq!(log_file.frames_iter().unwrap().count(), 5); + log_file.commit().unwrap(); + assert_eq!(log_file.frames_iter().unwrap().count(), 6); + } +} diff --git a/libsqlx/src/database/libsql/replication_log/merger.rs b/libsqlx/src/database/libsql/replication_log/merger.rs new file mode 100644 index 00000000..d098a9e5 --- /dev/null +++ b/libsqlx/src/database/libsql/replication_log/merger.rs @@ -0,0 +1,137 @@ +use std::path::Path; +use std::sync::mpsc; +use std::thread::JoinHandle; + +use crate::database::frame::Frame; + +use super::snapshot::{ + parse_snapshot_name, snapshot_dir_path, snapshot_list, SnapshotBuilder, SnapshotFile, + MAX_SNAPSHOT_NUMBER, SNAPHOT_SPACE_AMPLIFICATION_FACTOR, +}; + +pub struct SnapshotMerger { + /// Sending part of a channel of (snapshot_name, snapshot_frame_count, db_page_count) to the merger thread + sender: mpsc::Sender<(String, u64, u32)>, + handle: Option>>, +} + +impl SnapshotMerger { + pub fn new(db_path: &Path, db_id: u128) -> anyhow::Result { + let (sender, receiver) = mpsc::channel(); + + let db_path = db_path.to_path_buf(); + let handle = + std::thread::spawn(move || Self::run_snapshot_merger_loop(receiver, &db_path, db_id)); + + Ok(Self { + sender, + handle: Some(handle), + }) + } + + fn should_compact(snapshots: &[(String, u64)], db_page_count: u32) -> bool { + let snapshots_size: u64 = snapshots.iter().map(|(_, s)| *s).sum(); + snapshots_size >= SNAPHOT_SPACE_AMPLIFICATION_FACTOR * db_page_count as u64 + || snapshots.len() > MAX_SNAPSHOT_NUMBER + } + + fn run_snapshot_merger_loop( + receiver: mpsc::Receiver<(String, u64, u32)>, + db_path: &Path, + db_id: u128, + ) -> anyhow::Result<()> { + let mut snapshots = Self::init_snapshot_info_list(db_path)?; + while let Ok((name, size, db_page_count)) = receiver.recv() { + snapshots.push((name, size)); + if Self::should_compact(&snapshots, db_page_count) { + let compacted_snapshot_info = Self::merge_snapshots(&snapshots, db_path, db_id)?; + snapshots.clear(); + snapshots.push(compacted_snapshot_info); + } + } + + Ok(()) + } + + /// Reads the snapshot dir and returns the list of snapshots along with their size, sorted in + /// chronological order. + /// + /// TODO: if the process was killed in the midst of merging snapshot, then the compacted snapshot + /// can exist alongside the snapshots it's supposed to have compacted. This is the place to + /// perform the cleanup. + fn init_snapshot_info_list(db_path: &Path) -> anyhow::Result> { + let snapshot_dir_path = snapshot_dir_path(db_path); + if !snapshot_dir_path.exists() { + return Ok(Vec::new()); + } + + let mut temp = Vec::new(); + for snapshot_name in snapshot_list(db_path)? 
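// worked example for `should_compact` above (values illustrative; the real
// constants live in snapshot.rs and are not shown in this patch): assuming
// SNAPHOT_SPACE_AMPLIFICATION_FACTOR = 2 and a 1_000-page database, two
// snapshots of 1_500 and 600 frames sum to 2_100 >= 2 * 1_000, so a merge
// triggers even though MAX_SNAPSHOT_NUMBER may not be reached.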
{ + let snapshot_path = snapshot_dir_path.join(&snapshot_name); + let snapshot = SnapshotFile::open(&snapshot_path)?; + temp.push(( + snapshot_name, + snapshot.header.frame_count, + snapshot.header.start_frame_no, + )) + } + + temp.sort_by_key(|(_, _, id)| *id); + + Ok(temp + .into_iter() + .map(|(name, count, _)| (name, count)) + .collect()) + } + + fn merge_snapshots( + snapshots: &[(String, u64)], + db_path: &Path, + db_id: u128, + ) -> anyhow::Result<(String, u64)> { + let mut builder = SnapshotBuilder::new(db_path, db_id)?; + let snapshot_dir_path = snapshot_dir_path(db_path); + for (name, _) in snapshots.iter().rev() { + let snapshot = SnapshotFile::open(&snapshot_dir_path.join(name))?; + let iter = snapshot.frames_iter().map(|b| Frame::try_from_bytes(b?)); + builder.append_frames(iter)?; + } + + let (_, start_frame_no, _) = parse_snapshot_name(&snapshots[0].0).unwrap(); + let (_, _, end_frame_no) = parse_snapshot_name(&snapshots.last().unwrap().0).unwrap(); + + builder.header.start_frame_no = start_frame_no; + builder.header.end_frame_no = end_frame_no; + + let compacted_snapshot_infos = builder.finish()?; + + for (name, _) in snapshots.iter() { + std::fs::remove_file(&snapshot_dir_path.join(name))?; + } + + Ok(compacted_snapshot_infos) + } + + pub fn register_snapshot( + &mut self, + snapshot_name: String, + snapshot_frame_count: u64, + db_page_count: u32, + ) -> anyhow::Result<()> { + if self + .sender + .send((snapshot_name, snapshot_frame_count, db_page_count)) + .is_err() + { + if let Some(handle) = self.handle.take() { + handle + .join() + .map_err(|_| anyhow::anyhow!("snapshot merger thread panicked"))??; + } + + anyhow::bail!("failed to register snapshot with log merger: thread exited"); + } + + Ok(()) + } +} diff --git a/libsqlx/src/database/libsql/replication_log/mod.rs b/libsqlx/src/database/libsql/replication_log/mod.rs new file mode 100644 index 00000000..a7f006ae --- /dev/null +++ b/libsqlx/src/database/libsql/replication_log/mod.rs @@ -0,0 +1,8 @@ +pub mod logger; +// pub mod merger; + +pub const WAL_PAGE_SIZE: i32 = 4096; +pub const WAL_MAGIC: u64 = u64::from_le_bytes(*b"SQLDWAL\0"); + +/// The frame uniquely identifying, monotonically increasing number +pub type FrameNo = u64; diff --git a/libsqlx/src/database/mod.rs b/libsqlx/src/database/mod.rs new file mode 100644 index 00000000..868eaf7f --- /dev/null +++ b/libsqlx/src/database/mod.rs @@ -0,0 +1,32 @@ +use crate::connection::Connection; +use crate::error::Error; + +mod frame; +pub mod libsql; +pub mod proxy; +#[cfg(test)] +mod test_utils; + +pub use frame::{Frame, FrameHeader}; + +pub type FrameNo = u64; + +#[derive(Debug)] +pub enum InjectError {} + +pub trait Database { + type Connection: Connection; + /// Create a new connection to the database + fn connect(&self) -> Result; +} + +pub trait InjectableDatabase { + fn injector(&self) -> crate::Result>; +} + +// Trait implemented by databases that support frame injection +pub trait Injector { + fn inject(&mut self, frame: Frame) -> Result, InjectError>; + /// clear internal buffer + fn clear(&mut self); +} diff --git a/libsqlx/src/database/proxy/connection.rs b/libsqlx/src/database/proxy/connection.rs new file mode 100644 index 00000000..4e433d7b --- /dev/null +++ b/libsqlx/src/database/proxy/connection.rs @@ -0,0 +1,341 @@ +use std::sync::Arc; + +use parking_lot::Mutex; + +use crate::connection::{Connection, DescribeResponse}; +use crate::database::FrameNo; +use crate::program::Program; +use crate::result_builder::{Column, QueryBuilderConfig, 
QueryResultBuilderError, ResultBuilder}; +use crate::Result; + +use super::WaitFrameNoCb; + +#[derive(Debug, Default)] +enum State { + Txn, + #[default] + Idle, + Unknown, +} + +impl State { + /// Returns `true` if the state is [`Idle`]. + /// + /// [`Idle`]: State::Idle + #[must_use] + fn is_idle(&self) -> bool { + matches!(self, Self::Idle) + } +} + +#[derive(Debug, Default)] +pub(crate) struct ConnState { + state: State, + last_frame_no: Option, +} + +/// A connection that proxies write operations to the `WriteDb` and the read operations to the +/// `ReadDb` +pub struct WriteProxyConnection { + pub(crate) read_conn: R, + pub(crate) write_conn: W, + pub(crate) wait_frame_no_cb: WaitFrameNoCb, + pub(crate) state: Arc>, +} + +impl WriteProxyConnection { + pub fn writer_mut(&mut self) -> &mut W { + &mut self.write_conn + } + + pub fn writer(&self) -> &W { + &self.write_conn + } + + pub fn reader_mut(&mut self) -> &mut R { + &mut self.read_conn + } + + pub fn reader(&self) -> &R { + &self.read_conn + } +} + +struct MaybeRemoteExecBuilder { + builder: Option>, + conn: W, + pgm: Program, + state: Arc>, +} + +impl ResultBuilder for MaybeRemoteExecBuilder +where + W: Connection + Send + 'static, +{ + fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().init(config) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().begin_step() + } + + fn finish_step( + &mut self, + affected_row_count: u64, + last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + self.builder + .as_mut() + .unwrap() + .finish_step(affected_row_count, last_insert_rowid) + } + + fn step_error(&mut self, error: crate::error::Error) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().step_error(error) + } + + fn cols_description( + &mut self, + cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().cols_description(cols) + } + + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().begin_rows() + } + + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().begin_row() + } + + fn add_row_value( + &mut self, + v: rusqlite::types::ValueRef, + ) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().add_row_value(v) + } + + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().finish_row() + } + + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.as_mut().unwrap().finish_rows() + } + + fn finnalize( + &mut self, + is_txn: bool, + frame_no: Option, + ) -> Result { + if is_txn { + // a read only connection is not allowed to leave an open transaction. We mispredicted the + // final state of the connection, so we rollback, and execute again on the write proxy. 
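// in other words: (1) the read-only prediction failed and the replica
// connection is sitting in an open transaction; (2) the caller's builder is
// re-wrapped below so that the primary's responses stream back to the
// original caller and the final frame_no is captured; (3) the same program
// is replayed on the write connection. Note that nothing here rolls the
// replica transaction back; see the commented-out `rollback` call in
// `execute_program` further down.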
+ let builder = ExtractFrameNoBuilder { + builder: self + .builder + .take() + .expect("finnalize called more than once"), + state: self.state.clone(), + }; + + // set the connection state to unknown before executing on the remote + self.state.lock().state = State::Unknown; + + self.conn.execute_program(&self.pgm, Box::new(builder)); + + Ok(false) + } else { + self.builder.as_mut().unwrap().finnalize(is_txn, frame_no) + } + } + + fn finnalize_error(&mut self, e: String) { + self.builder.take().unwrap().finnalize_error(e) + } +} + +impl Connection for WriteProxyConnection +where + R: Connection, + W: Connection + Clone + Send + 'static, +{ + fn execute_program(&mut self, pgm: &Program, builder: Box) { + if self.state.lock().state.is_idle() && pgm.is_read_only() { + if let Some(frame_no) = self.state.lock().last_frame_no { + (self.wait_frame_no_cb)(frame_no); + } + + let builder = MaybeRemoteExecBuilder { + builder: Some(builder), + conn: self.write_conn.clone(), + state: self.state.clone(), + pgm: pgm.clone(), + }; + // We know that this program won't perform any writes. We attempt to run it on the + // replica. If it leaves an open transaction, then this program is an interactive + // transaction, so we rollback the replica, and execute again on the primary. + self.read_conn.execute_program(pgm, Box::new(builder)); + // rollback(&mut self.conn.read_db); + } else { + // we set the state to unknown because until we have received from the actual + // connection state from the primary. + self.state.lock().state = State::Unknown; + let builder = ExtractFrameNoBuilder { + builder, + state: self.state.clone(), + }; + self.write_conn.execute_program(pgm, Box::new(builder)); + } + } + + fn describe(&self, sql: String) -> crate::Result { + if let Some(frame_no) = self.state.lock().last_frame_no { + (self.wait_frame_no_cb)(frame_no); + } + self.read_conn.describe(sql) + } +} + +struct ExtractFrameNoBuilder { + builder: Box, + state: Arc>, +} + +impl ResultBuilder for ExtractFrameNoBuilder { + fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + self.builder.init(config) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.begin_step() + } + + fn finish_step( + &mut self, + affected_row_count: u64, + last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + self.builder + .finish_step(affected_row_count, last_insert_rowid) + } + + fn step_error(&mut self, error: crate::error::Error) -> Result<(), QueryResultBuilderError> { + self.builder.step_error(error) + } + + fn cols_description( + &mut self, + cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + self.builder.cols_description(cols) + } + + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.begin_rows() + } + + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.begin_row() + } + + fn add_row_value( + &mut self, + v: rusqlite::types::ValueRef, + ) -> Result<(), QueryResultBuilderError> { + self.builder.add_row_value(v) + } + + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.finish_row() + } + + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.builder.finish_rows() + } + + fn finnalize( + &mut self, + is_txn: bool, + frame_no: Option, + ) -> Result { + let mut state = self.state.lock(); + state.last_frame_no = frame_no; + if is_txn { + state.state = State::Txn; + } else { + state.state = State::Idle; + } + 
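// recording `last_frame_no` here is what backs the read-your-writes
// guarantee: the next read-only program on this connection hands it to
// `wait_frame_no_cb` (see `execute_program` above) and blocks until the
// replica has replicated at least up to that frame before reading locally.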
self.builder.finnalize(is_txn, frame_no) + } + + fn finnalize_error(&mut self, e: String) { + self.builder.finnalize_error(e) + } +} + +#[cfg(test)] +mod test { + use std::sync::Arc; + + use parking_lot::Mutex; + + use crate::database::test_utils::MockDatabase; + use crate::database::{proxy::database::WriteProxyDatabase, Database}; + use crate::program::Program; + use crate::Connection; + + #[test] + fn simple_write_proxied() { + let write_called = Arc::new(Mutex::new(false)); + let write_db = MockDatabase::new().with_execute({ + let write_called = write_called.clone(); + move |_, mut b| { + b.finnalize(false, Some(42)).unwrap(); + *write_called.lock() = true; + Ok(()) + } + }); + + let read_called = Arc::new(Mutex::new(false)); + let read_db = MockDatabase::new().with_execute({ + let read_called = read_called.clone(); + move |_, _| { + *read_called.lock() = true; + Ok(()) + } + }); + + let wait_called = Arc::new(Mutex::new(false)); + let db = WriteProxyDatabase::new( + read_db, + write_db, + Arc::new({ + let wait_called = wait_called.clone(); + move |fno| { + assert_eq!(fno, 42); + *wait_called.lock() = true; + } + }), + ); + + let mut conn = db.connect().unwrap(); + conn.execute_program( + &Program::seq(&["insert into test values (12)"]), + Box::new(()), + ) + .unwrap(); + + assert!(!*wait_called.lock()); + assert!(!*read_called.lock()); + assert!(*write_called.lock()); + + conn.execute_program(&Program::seq(&["select * from test"]), Box::new(())) + .unwrap(); + + assert!(*read_called.lock()); + assert!(*wait_called.lock()); + } +} diff --git a/libsqlx/src/database/proxy/database.rs b/libsqlx/src/database/proxy/database.rs new file mode 100644 index 00000000..d0e7c5a0 --- /dev/null +++ b/libsqlx/src/database/proxy/database.rs @@ -0,0 +1,48 @@ +use crate::database::{Database, InjectableDatabase}; +use crate::error::Error; + +use super::connection::WriteProxyConnection; +use super::WaitFrameNoCb; + +pub struct WriteProxyDatabase { + read_db: RDB, + write_db: WDB, + wait_frame_no_cb: WaitFrameNoCb, +} + +impl WriteProxyDatabase { + pub fn new(read_db: RDB, write_db: WDB, wait_frame_no_cb: WaitFrameNoCb) -> Self { + Self { + read_db, + write_db, + wait_frame_no_cb, + } + } +} + +impl Database for WriteProxyDatabase +where + RDB: Database, + WDB: Database, + WDB::Connection: Clone + Send + 'static, +{ + type Connection = WriteProxyConnection; + /// Create a new connection to the database + fn connect(&self) -> Result { + Ok(WriteProxyConnection { + read_conn: self.read_db.connect()?, + write_conn: self.write_db.connect()?, + wait_frame_no_cb: self.wait_frame_no_cb.clone(), + state: Default::default(), + }) + } +} + +impl InjectableDatabase for WriteProxyDatabase +where + RDB: InjectableDatabase, +{ + fn injector(&self) -> crate::Result> { + self.read_db.injector() + } +} diff --git a/libsqlx/src/database/proxy/mod.rs b/libsqlx/src/database/proxy/mod.rs new file mode 100644 index 00000000..0fdf7ceb --- /dev/null +++ b/libsqlx/src/database/proxy/mod.rs @@ -0,0 +1,12 @@ +use std::sync::Arc; + +use super::FrameNo; + +mod connection; +mod database; + +pub use connection::WriteProxyConnection; +pub use database::WriteProxyDatabase; + +// Waits until passed frameno has been replicated back to the database +type WaitFrameNoCb = Arc; diff --git a/libsqlx/src/database/test_utils.rs b/libsqlx/src/database/test_utils.rs new file mode 100644 index 00000000..3034ca93 --- /dev/null +++ b/libsqlx/src/database/test_utils.rs @@ -0,0 +1,67 @@ +use std::sync::Arc; + +use crate::{ + connection::{Connection, 
DescribeResponse}, + program::Program, + result_builder::ResultBuilder, +}; + +use super::Database; + +pub struct MockDatabase { + #[allow(clippy::type_complexity)] + describe_fn: Arc crate::Result + Send + Sync>, + #[allow(clippy::type_complexity)] + execute_fn: Arc) -> crate::Result<()> + Send + Sync>, +} + +#[derive(Clone)] +pub struct MockConnection { + #[allow(clippy::type_complexity)] + describe_fn: Arc crate::Result + Send + Sync>, + #[allow(clippy::type_complexity)] + execute_fn: Arc) -> crate::Result<()> + Send + Sync>, +} + +impl MockDatabase { + pub fn new() -> Self { + MockDatabase { + describe_fn: Arc::new(|_| panic!("describe fn not set")), + execute_fn: Arc::new(|_, _| panic!("execute fn not set")), + } + } + + pub fn with_execute( + mut self, + f: impl Fn(&Program, Box) -> crate::Result<()> + Send + Sync + 'static, + ) -> Self { + self.execute_fn = Arc::new(f); + self + } +} + +impl Database for MockDatabase { + type Connection = MockConnection; + + fn connect(&self) -> Result { + Ok(MockConnection { + describe_fn: self.describe_fn.clone(), + execute_fn: self.execute_fn.clone(), + }) + } +} + +impl Connection for MockConnection { + fn execute_program( + &mut self, + pgm: &crate::program::Program, + reponse_builder: Box, + ) -> crate::Result<()> { + (self.execute_fn)(pgm, reponse_builder)?; + Ok(()) + } + + fn describe(&self, sql: String) -> crate::Result { + (self.describe_fn)(sql) + } +} diff --git a/libsqlx/src/error.rs b/libsqlx/src/error.rs new file mode 100644 index 00000000..091152b9 --- /dev/null +++ b/libsqlx/src/error.rs @@ -0,0 +1,50 @@ +use crate::result_builder::QueryResultBuilderError; +pub use rusqlite::ffi::ErrorCode; +pub use rusqlite::Error as RusqliteError; + +#[allow(clippy::enum_variant_names)] +#[derive(Debug, thiserror::Error)] +pub enum Error { + #[error("LibSQL failed to bind provided query parameters: `{0}`")] + LibSqlInvalidQueryParams(String), + #[error("Transaction timed-out")] + LibSqlTxTimeout, + #[error("Server can't handle additional transactions")] + LibSqlTxBusy, + #[error(transparent)] + IOError(#[from] std::io::Error), + #[error(transparent)] + RusqliteError(#[from] rusqlite::Error), + #[error("Database value error: `{0}`")] + DbValueError(String), + // Dedicated for most generic internal errors. Please use it sparingly. + // Consider creating a dedicate enum value for your error. 
+    #[error("Internal Error: `{0}`")]
+    Internal(String),
+    #[error("Invalid batch step: {0}")]
+    InvalidBatchStep(usize),
+    #[error("Not authorized to execute query: {0}")]
+    NotAuthorized(String),
+    #[error("The replicator exited, instance cannot make any progress.")]
+    ReplicatorExited,
+    #[error("Timed out while opening database connection")]
+    DbCreateTimeout,
+    #[error(transparent)]
+    BuilderError(#[from] QueryResultBuilderError),
+    #[error("Operation was blocked{}", .0.as_ref().map(|msg| format!(": {}", msg)).unwrap_or_default())]
+    Blocked(Option<String>),
+    #[error("invalid replication log header")]
+    InvalidLogHeader,
+    #[error("unsupported statement")]
+    UnsupportedStatement,
+    #[error("Syntax error at {line}:{col}: {found}")]
+    SyntaxError {
+        line: u64,
+        col: usize,
+        found: String,
+    },
+    #[error(transparent)]
+    LexerError(#[from] sqlite3_parser::lexer::sql::Error),
+    #[error("invalid frame")]
+    InvalidFrame,
+}
diff --git a/libsqlx/src/lib.rs b/libsqlx/src/lib.rs
new file mode 100644
index 00000000..24441571
--- /dev/null
+++ b/libsqlx/src/lib.rs
@@ -0,0 +1,23 @@
+pub mod analysis;
+pub mod error;
+pub mod query;
+
+mod connection;
+mod database;
+pub mod program;
+pub mod result_builder;
+mod seal;
+
+pub type Result<T, E = error::Error> = std::result::Result<T, E>;
+
+pub use connection::{Connection, DescribeResponse};
+pub use database::libsql;
+pub use database::libsql::replication_log::logger::{LogReadError, ReplicationLogger};
+pub use database::libsql::replication_log::FrameNo;
+pub use database::proxy;
+pub use database::{Database, InjectableDatabase, Injector};
+pub use database::{Frame, FrameHeader};
+
+pub use sqld_libsql_bindings::wal_hook::WalHook;
+
+pub use rusqlite;
diff --git a/libsqlx/src/program.rs b/libsqlx/src/program.rs
new file mode 100644
index 00000000..fc30a4bf
--- /dev/null
+++ b/libsqlx/src/program.rs
@@ -0,0 +1,76 @@
+use std::sync::Arc;
+
+use serde::{Deserialize, Serialize};
+
+use crate::query::Query;
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct Program {
+    pub steps: Arc<[Step]>,
+}
+
+impl Program {
+    pub fn new(steps: Vec<Step>) -> Self {
+        Self {
+            steps: steps.into(),
+        }
+    }
+
+    pub fn is_read_only(&self) -> bool {
+        self.steps.iter().all(|s| s.query.stmt.is_read_only())
+    }
+
+    pub fn steps(&self) -> &[Step] {
+        &self.steps
+    }
+
+    /// Transforms a collection of queries into a batch program. The execution of each query
+    /// depends on the success of the previous one.
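// for example, `from_queries([q0, q1, q2])` produces (sketch):
//
//     steps[0].cond == None
//     steps[1].cond == Some(Cond::Ok { step: 0 })
//     steps[2].cond == Some(Cond::Ok { step: 1 })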
+ pub fn from_queries(qs: impl IntoIterator) -> Self { + let steps = qs + .into_iter() + .enumerate() + .map(|(idx, query)| Step { + cond: (idx > 0).then(|| Cond::Ok { step: idx - 1 }), + query, + }) + .collect(); + + Self { steps } + } + + pub fn seq(stmts: &[&str]) -> Self { + use crate::{analysis::Statement, query::Params}; + + let mut steps = Vec::with_capacity(stmts.len()); + for stmt in stmts { + let step = Step { + cond: None, + query: Query { + stmt: Statement::parse(stmt).next().unwrap().unwrap(), + params: Params::empty(), + want_rows: true, + }, + }; + + steps.push(step); + } + + Self::new(steps) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Step { + pub cond: Option, + pub query: Query, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Cond { + Ok { step: usize }, + Err { step: usize }, + Not { cond: Box }, + Or { conds: Vec }, + And { conds: Vec }, +} diff --git a/libsqlx/src/query.rs b/libsqlx/src/query.rs new file mode 100644 index 00000000..d3b1e5eb --- /dev/null +++ b/libsqlx/src/query.rs @@ -0,0 +1,267 @@ +use std::collections::HashMap; + +use anyhow::{anyhow, ensure, Context}; +use rusqlite::types::{ToSqlOutput, ValueRef}; +use rusqlite::ToSql; +use serde::{Deserialize, Serialize}; + +use crate::analysis::Statement; + +/// Mirrors rusqlite::Value, but implement extra traits +#[derive(Debug, Clone, Serialize, Deserialize)] +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +pub enum Value { + Null, + Integer(i64), + Real(f64), + Text(String), + Blob(Vec), +} + +impl<'a> From<&'a Value> for ValueRef<'a> { + fn from(value: &'a Value) -> Self { + match value { + Value::Null => ValueRef::Null, + Value::Integer(i) => ValueRef::Integer(*i), + Value::Real(x) => ValueRef::Real(*x), + Value::Text(s) => ValueRef::Text(s.as_bytes()), + Value::Blob(b) => ValueRef::Blob(b.as_slice()), + } + } +} + +impl TryFrom> for Value { + type Error = anyhow::Error; + + fn try_from(value: rusqlite::types::ValueRef<'_>) -> anyhow::Result { + let val = match value { + rusqlite::types::ValueRef::Null => Value::Null, + rusqlite::types::ValueRef::Integer(i) => Value::Integer(i), + rusqlite::types::ValueRef::Real(x) => Value::Real(x), + rusqlite::types::ValueRef::Text(s) => Value::Text(String::from_utf8(Vec::from(s))?), + rusqlite::types::ValueRef::Blob(b) => Value::Blob(Vec::from(b)), + }; + + Ok(val) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Query { + pub stmt: Statement, + pub params: Params, + pub want_rows: bool, +} + +impl ToSql for Value { + fn to_sql(&self) -> rusqlite::Result> { + let val = match self { + Value::Null => ToSqlOutput::Owned(rusqlite::types::Value::Null), + Value::Integer(i) => ToSqlOutput::Owned(rusqlite::types::Value::Integer(*i)), + Value::Real(x) => ToSqlOutput::Owned(rusqlite::types::Value::Real(*x)), + Value::Text(s) => ToSqlOutput::Borrowed(rusqlite::types::ValueRef::Text(s.as_bytes())), + Value::Blob(b) => ToSqlOutput::Borrowed(rusqlite::types::ValueRef::Blob(b)), + }; + + Ok(val) + } +} + +#[derive(Debug, Serialize, Clone, Deserialize)] +pub enum Params { + Named(HashMap), + Positional(Vec), +} + +impl Params { + pub fn empty() -> Self { + Self::Positional(Vec::new()) + } + + pub fn new_named(values: HashMap) -> Self { + Self::Named(values) + } + + pub fn new_positional(values: Vec) -> Self { + Self::Positional(values) + } + + pub fn get_pos(&self, pos: usize) -> Option<&Value> { + assert!(pos > 0); + match self { + Params::Named(_) => None, + Params::Positional(params) => params.get(pos - 1), + } + } + + pub 
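// positional parameters are 1-indexed to match SQLite's `?NNN` syntax, so
// `?1` resolves through `get_pos(1)` to `params[0]`; a quick sketch:
//
//     let p = Params::new_positional(vec![Value::Integer(7)]);
//     assert!(matches!(p.get_pos(1), Some(Value::Integer(7))));
//     assert!(p.get_pos(2).is_none());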
fn get_named(&self, name: &str) -> Option<&Value> { + match self { + Params::Named(params) => params.get(name), + Params::Positional(_) => None, + } + } + + pub fn len(&self) -> usize { + match self { + Params::Named(params) => params.len(), + Params::Positional(params) => params.len(), + } + } + + pub fn is_empty(&self) -> bool { + self.len() == 0 + } + + pub fn bind(&self, stmt: &mut rusqlite::Statement) -> anyhow::Result<()> { + let param_count = stmt.parameter_count(); + ensure!( + param_count >= self.len(), + "too many parameters, expected {param_count} found {}", + self.len() + ); + + if param_count > 0 { + for index in 1..=param_count { + let mut param_name = None; + // get by name + let maybe_value = match stmt.parameter_name(index) { + Some(name) => { + param_name = Some(name); + let mut chars = name.chars(); + match chars.next() { + Some('?') => { + let pos = chars.as_str().parse::().context( + "invalid parameter {name}: expected a numerical position after `?`", + )?; + self.get_pos(pos) + } + _ => self + .get_named(name) + .or_else(|| self.get_named(chars.as_str())), + } + } + None => self.get_pos(index), + }; + + if let Some(value) = maybe_value { + stmt.raw_bind_parameter(index, value)?; + } else if let Some(name) = param_name { + return Err(anyhow!("value for parameter {} not found", name)); + } else { + return Err(anyhow!("value for parameter {} not found", index)); + } + } + } + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn test_bind_params_positional_simple() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT ?").unwrap(); + let params = Params::new_positional(vec![Value::Integer(10)]); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10"); + } + + #[test] + fn test_bind_params_positional_numbered() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT ? 
|| ?2 || ?1").unwrap(); + let params = Params::new_positional(vec![Value::Integer(10), Value::Integer(20)]); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10 || 20 || 10"); + } + + #[test] + fn test_bind_params_positional_named() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT :first || $second").unwrap(); + let mut params = HashMap::new(); + params.insert(":first".to_owned(), Value::Integer(10)); + params.insert("$second".to_owned(), Value::Integer(20)); + let params = Params::new_named(params); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10 || 20"); + } + + #[test] + fn test_bind_params_positional_named_no_prefix() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT :first || $second").unwrap(); + let mut params = HashMap::new(); + params.insert("first".to_owned(), Value::Integer(10)); + params.insert("second".to_owned(), Value::Integer(20)); + let params = Params::new_named(params); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10 || 20"); + } + + #[test] + fn test_bind_params_positional_named_conflict() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT :first || $first").unwrap(); + let mut params = HashMap::new(); + params.insert("first".to_owned(), Value::Integer(10)); + params.insert("$first".to_owned(), Value::Integer(20)); + let params = Params::new_named(params); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10 || 20"); + } + + #[test] + fn test_bind_params_positional_named_repeated() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con + .prepare("SELECT :first || $second || $first || $second") + .unwrap(); + let mut params = HashMap::new(); + params.insert("first".to_owned(), Value::Integer(10)); + params.insert("$second".to_owned(), Value::Integer(20)); + let params = Params::new_named(params); + params.bind(&mut stmt).unwrap(); + + assert_eq!(stmt.expanded_sql().unwrap(), "SELECT 10 || 20 || 10 || 20"); + } + + #[test] + fn test_bind_params_too_many_params() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT :first || $second").unwrap(); + let mut params = HashMap::new(); + params.insert(":first".to_owned(), Value::Integer(10)); + params.insert("$second".to_owned(), Value::Integer(20)); + params.insert("$oops".to_owned(), Value::Integer(20)); + let params = Params::new_named(params); + assert!(params.bind(&mut stmt).is_err()); + } + + #[test] + fn test_bind_params_too_few_params() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT :first || $second").unwrap(); + let mut params = HashMap::new(); + params.insert(":first".to_owned(), Value::Integer(10)); + let params = Params::new_named(params); + assert!(params.bind(&mut stmt).is_err()); + } + + #[test] + fn test_bind_params_invalid_positional() { + let con = rusqlite::Connection::open_in_memory().unwrap(); + let mut stmt = con.prepare("SELECT ?invalid").unwrap(); + let params = Params::empty(); + assert!(params.bind(&mut stmt).is_err()); + } +} diff --git a/libsqlx/src/result_builder.rs b/libsqlx/src/result_builder.rs new file mode 100644 index 00000000..c5f159e7 --- /dev/null +++ b/libsqlx/src/result_builder.rs @@ -0,0 +1,735 @@ +use std::fmt; +use std::io::{self, ErrorKind}; + +use 
bytesize::ByteSize; +pub use rusqlite::types::ValueRef; + +use crate::database::FrameNo; + +#[derive(Debug)] +pub enum QueryResultBuilderError { + ResponseTooLarge(u64), + Internal(anyhow::Error), +} + +impl fmt::Display for QueryResultBuilderError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + QueryResultBuilderError::ResponseTooLarge(s) => { + write!(f, "query response exceeds the maximum size of {}. Try reducing the number of queried rows.", ByteSize(*s)) + } + QueryResultBuilderError::Internal(e) => e.fmt(f), + } + } +} + +impl std::error::Error for QueryResultBuilderError {} + +impl From for QueryResultBuilderError { + fn from(value: anyhow::Error) -> Self { + Self::Internal(value) + } +} + +impl QueryResultBuilderError { + pub fn from_any>(e: E) -> Self { + Self::Internal(e.into()) + } +} + +impl From for QueryResultBuilderError { + fn from(value: io::Error) -> Self { + if value.kind() == ErrorKind::OutOfMemory + && value.get_ref().is_some() + && value.get_ref().unwrap().is::() + { + return *value + .into_inner() + .unwrap() + .downcast::() + .unwrap(); + } + Self::Internal(value.into()) + } +} + +/// Identical to rusqlite::Column, with visible fields. +#[cfg_attr(test, derive(arbitrary::Arbitrary))] +pub struct Column<'a> { + pub name: &'a str, + pub decl_ty: Option<&'a str>, +} + +impl<'a> From<(&'a str, Option<&'a str>)> for Column<'a> { + fn from((name, decl_ty): (&'a str, Option<&'a str>)) -> Self { + Self { name, decl_ty } + } +} + +impl<'a> From<&'a rusqlite::Column<'a>> for Column<'a> { + fn from(value: &'a rusqlite::Column<'a>) -> Self { + Self { + name: value.name(), + decl_ty: value.decl_type(), + } + } +} + +#[derive(Debug, Clone, Copy, Default)] +pub struct QueryBuilderConfig { + pub max_size: Option, +} + +pub trait ResultBuilder: Send + 'static { + /// (Re)initialize the builder. This method can be called multiple times. + fn init(&mut self, _config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// start serializing new step + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// finish serializing current step + fn finish_step( + &mut self, + _affected_row_count: u64, + _last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// emit an error to serialize. + fn step_error(&mut self, _error: crate::error::Error) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// add cols description for current step. + /// This is called called at most once per step, and is always the first method being called + fn cols_description( + &mut self, + _cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// start adding rows + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// begin a new row for the current step + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// add value to current row + fn add_row_value(&mut self, _v: ValueRef) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// finish current row + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// end adding rows + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + Ok(()) + } + /// finish the builder, and pass the transaction state. + /// If false is returned, and is_txn is true, then the transaction is rolledback. 
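// the expected calling convention over a program's lifetime, as also encoded
// by the FSM in the test module at the bottom of this file (roughly;
// step_error may interrupt row production at any point within a step):
//
//     init
//       ( begin_step
//           [ cols_description [ begin_rows ( begin_row add_row_value* finish_row )* finish_rows ] ]
//           [ step_error ]
//         finish_step )*
//     finnalize | finnalize_error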
+ fn finnalize( + &mut self, + _is_txn: bool, + _frame_no: Option, + ) -> Result { + Ok(true) + } + + /// There was a fatal error and the request was aborted + fn finnalize_error(&mut self, _e: String) {} +} + +pub trait ResultBuilderExt: ResultBuilder { + /// Returns a `QueryResultBuilder` that wraps Self and takes at most `n` steps + fn take(self, limit: usize) -> Take + where + Self: Sized, + { + Take { + limit, + count: 0, + inner: self, + } + } +} + +impl ResultBuilderExt for T {} + +#[derive(Debug)] +pub enum StepResult { + Ok, + Err(crate::error::Error), + Skipped, +} + +/// A `QueryResultBuilder` that ignores rows, but records the outcome of each step in a `StepResult` +pub struct StepResultsBuilder { + current: Option, + step_results: Vec, + is_skipped: bool, + ret: Option, +} + +pub trait RetChannel: Send + 'static { + fn send(self, t: T); +} + +#[cfg(feature = "tokio")] +impl RetChannel for tokio::sync::oneshot::Sender { + fn send(self, t: T) { + let _ = self.send(t); + } +} + +impl StepResultsBuilder { + pub fn new(ret: R) -> Self { + Self { + current: None, + step_results: Vec::new(), + is_skipped: false, + ret: Some(ret), + } + } +} + +impl, String>>> ResultBuilder for StepResultsBuilder { + fn init(&mut self, _config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + self.current = None; + self.step_results.clear(); + self.is_skipped = false; + Ok(()) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + self.is_skipped = true; + Ok(()) + } + + fn finish_step( + &mut self, + _affected_row_count: u64, + _last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + let res = match self.current.take() { + Some(e) => StepResult::Err(e), + None if self.is_skipped => StepResult::Skipped, + None => StepResult::Ok, + }; + + self.step_results.push(res); + + Ok(()) + } + + fn step_error(&mut self, error: crate::error::Error) -> Result<(), QueryResultBuilderError> { + assert!(self.current.is_none()); + self.current = Some(error); + + Ok(()) + } + + fn cols_description( + &mut self, + _cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + self.is_skipped = false; + Ok(()) + } + + fn finnalize( + &mut self, + _is_txn: bool, + _frame_no: Option, + ) -> Result { + self.ret + .take() + .expect("finnalize called more than once") + .send(Ok(std::mem::take(&mut self.step_results))); + Ok(true) + } + + fn finnalize_error(&mut self, e: String) { + self.ret + .take() + .expect("finnalize called more than once") + .send(Err(e)); + } +} + +impl ResultBuilder for () {} + +// A builder that wraps another builder, but takes at most `n` steps +pub struct Take { + limit: usize, + count: usize, + inner: B, +} + +impl Take { + pub fn into_inner(self) -> B { + self.inner + } +} + +impl ResultBuilder for Take { + fn init(&mut self, config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + self.count = 0; + self.inner.init(config) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.begin_step() + } else { + Ok(()) + } + } + + fn finish_step( + &mut self, + affected_row_count: u64, + last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner + .finish_step(affected_row_count, last_insert_rowid)?; + self.count += 1; + } + + Ok(()) + } + + fn step_error(&mut self, error: crate::error::Error) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.step_error(error) + } else { + 
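// `Take` composes with any builder through `ResultBuilderExt::take`; a
// sketch, assuming `ret` is e.g. a tokio oneshot sender (see RetChannel
// above), that forwards only the first step and silently drives the rest:
//
//     let builder = StepResultsBuilder::new(ret).take(1);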
Ok(()) + } + } + + fn cols_description( + &mut self, + cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.cols_description(cols) + } else { + Ok(()) + } + } + + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.begin_rows() + } else { + Ok(()) + } + } + + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.begin_row() + } else { + Ok(()) + } + } + + fn add_row_value(&mut self, v: ValueRef) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.add_row_value(v) + } else { + Ok(()) + } + } + + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.finish_row() + } else { + Ok(()) + } + } + + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + if self.count < self.limit { + self.inner.finish_rows() + } else { + Ok(()) + } + } + + fn finnalize( + &mut self, + is_txn: bool, + frame_no: Option, + ) -> Result { + self.inner.finnalize(is_txn, frame_no) + } + + fn finnalize_error(&mut self, e: String) { + self.inner.finnalize_error(e) + } +} + +#[cfg(test)] +pub mod test { + #![allow(dead_code)] + use std::fmt; + + use arbitrary::{Arbitrary, Unstructured}; + use itertools::Itertools; + use rand::{ + distributions::{Standard, WeightedIndex}, + prelude::Distribution, + thread_rng, Fill, Rng, + }; + use FsmState::*; + + use super::*; + + /// a dummy QueryResultBuilder that encodes the QueryResultBuilder FSM. It can be passed to a + /// driver to ensure that it is not mis-used + + #[derive(Debug, PartialEq, Eq, Clone, Copy)] + #[repr(usize)] + // do not reorder! + enum FsmState { + Init = 0, + Finish, + BeginStep, + FinishStep, + StepError, + ColsDescription, + FinishRows, + BeginRows, + FinishRow, + BeginRow, + AddRowValue, + BuilderError, + } + + #[rustfmt::skip] + static TRANSITION_TABLE: [[bool; 12]; 12] = [ + //FROM: + //Init Finish BeginStep FinishStep StepError ColsDes FinishRows BegRows FinishRow BegRow AddRowVal BuidlerErr TO: + [true , true , true , true , true , true , true , true , true , true , true , false], // Init, + [true , false, false, true , false, false, false, false, false, false, false, false], // Finish, + [true , false, false, true , false, false, false, false, false, false, false, false], // BeginStep + [false, false, true , false, true , false, true , false, false, false, false, false], // FinishStep + [false, false, true , false, false, true , true , true , true , true , true , false], // StepError + [false, false, true , false, false, false, false, false, false, false, false, false], // ColsDescr + [false, false, false, false, false, false, false, true , true , false, false, false], // FinishRows + [false, false, false, false, false, true , false, false, false, false, false, false], // BeginRows + [false, false, false, false, false, false, false, false, false, true , true , false], // FinishRow + [false, false, false, false, false, false, false, true , true , false, false, false], // BeginRow, + [false, false, false, false, false, false, false, false, false, true , true , false], // AddRowValue + [true , true , true , true , true , true , true , true , true , true , true , false], // BuilderError + ]; + + impl FsmState { + /// returns a random valid transition from the current state + fn rand_transition(self, allow_init: bool) -> Self { + let valid_next_states = 
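// note the orientation: rows of TRANSITION_TABLE are the *target* state and
// columns the *source*, so "states i reachable from self" reads
// TRANSITION_TABLE[i][self as usize], exactly as filtered below.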
TRANSITION_TABLE[..TRANSITION_TABLE.len() - 1] // ignore + // builder error + .iter() + .enumerate() + .skip(if allow_init { 0 } else { 1 }) + .filter_map(|(i, ss)| ss[self as usize].then_some(i)) + .collect_vec(); + // distribution is somewhat tweaked to be biased towards more real-world test cases + let weigths = valid_next_states + .iter() + .enumerate() + .map(|(p, i)| i.pow(p as _)) + .collect_vec(); + let dist = WeightedIndex::new(weigths).unwrap(); + unsafe { std::mem::transmute(valid_next_states[dist.sample(&mut thread_rng())]) } + } + + /// moves towards the finish step as fast as possible + fn toward_finish(self) -> Self { + match self { + Init => Finish, + BeginStep => FinishStep, + FinishStep => Finish, + StepError => FinishStep, + BeginRows | BeginRow | AddRowValue | FinishRow | FinishRows | ColsDescription => { + StepError + } + Finish => Finish, + BuilderError => Finish, + } + } + } + + pub fn random_builder_driver(mut max_steps: usize, mut b: B) -> B { + let mut rand_data = [0; 10_000]; + rand_data.try_fill(&mut rand::thread_rng()).unwrap(); + let mut u = Unstructured::new(&rand_data); + let mut trace = Vec::new(); + + #[derive(Arbitrary)] + pub enum ValueRef<'a> { + Null, + Integer(i64), + Real(f64), + Text(&'a str), + Blob(&'a [u8]), + } + + impl<'a> From> for rusqlite::types::ValueRef<'a> { + fn from(value: ValueRef<'a>) -> Self { + match value { + ValueRef::Null => rusqlite::types::ValueRef::Null, + ValueRef::Integer(i) => rusqlite::types::ValueRef::Integer(i), + ValueRef::Real(x) => rusqlite::types::ValueRef::Real(x), + ValueRef::Text(s) => rusqlite::types::ValueRef::Text(s.as_bytes()), + ValueRef::Blob(b) => rusqlite::types::ValueRef::Blob(b), + } + } + } + + let mut state = Init; + trace.push(state); + loop { + match state { + Init => b.init(&QueryBuilderConfig::default()).unwrap(), + BeginStep => b.begin_step().unwrap(), + FinishStep => b + .finish_step( + Arbitrary::arbitrary(&mut u).unwrap(), + Arbitrary::arbitrary(&mut u).unwrap(), + ) + .unwrap(), + StepError => b.step_error(crate::error::Error::LibSqlTxBusy).unwrap(), + ColsDescription => b + .cols_description(&mut >::arbitrary(&mut u).unwrap().into_iter()) + .unwrap(), + BeginRows => b.begin_rows().unwrap(), + BeginRow => b.begin_row().unwrap(), + AddRowValue => b + .add_row_value(ValueRef::arbitrary(&mut u).unwrap().into()) + .unwrap(), + FinishRow => b.finish_row().unwrap(), + FinishRows => b.finish_rows().unwrap(), + Finish => { + b.finnalize(false, None).unwrap(); + break; + } + BuilderError => return b, + } + + if max_steps > 0 { + state = state.rand_transition(false); + } else { + state = state.toward_finish() + } + + trace.push(state); + + max_steps = max_steps.saturating_sub(1); + } + + // this can be usefull to help debug the generated test case + dbg!(trace); + + b + } + + pub struct FsmQueryBuilder { + state: FsmState, + inject_errors: bool, + } + + impl fmt::Display for FsmState { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let s = match self { + Init => "init", + BeginStep => "begin_step", + FinishStep => "finish_step", + StepError => "step_error", + ColsDescription => "cols_description", + BeginRows => "begin_rows", + BeginRow => "begin_row", + AddRowValue => "add_row_value", + FinishRow => "finish_row", + FinishRows => "finish_rows", + Finish => "finish", + BuilderError => "a builder error", + }; + + f.write_str(s) + } + } + + impl FsmQueryBuilder { + fn new(inject_errors: bool) -> Self { + Self { + state: Init, + inject_errors, + } + } + + fn transition(&mut self, to: 
FsmState) -> Result<(), QueryResultBuilderError> { + let from = self.state as usize; + if TRANSITION_TABLE[to as usize][from] { + self.state = to; + } else { + panic!("{} can't be called after {}", to, self.state); + } + + Ok(()) + } + + fn maybe_inject_error(&mut self) -> Result<(), QueryResultBuilderError> { + if self.inject_errors { + let val: f32 = thread_rng().sample(Standard); + // < 0.1% change to generate error + if val < 0.001 { + self.state = BuilderError; + Err(anyhow::anyhow!("dummy"))?; + } + } + + Ok(()) + } + } + + impl ResultBuilder for FsmQueryBuilder { + fn init(&mut self, _config: &QueryBuilderConfig) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(Init) + } + + fn begin_step(&mut self) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(BeginStep) + } + + fn finish_step( + &mut self, + _affected_row_count: u64, + _last_insert_rowid: Option, + ) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(FinishStep) + } + + fn step_error( + &mut self, + _error: crate::error::Error, + ) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(StepError) + } + + fn cols_description( + &mut self, + _cols: &mut dyn Iterator, + ) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(ColsDescription) + } + + fn begin_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(BeginRows) + } + + fn begin_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(BeginRow) + } + + fn add_row_value(&mut self, _v: ValueRef) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(AddRowValue) + } + + fn finish_row(&mut self) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(FinishRow) + } + + fn finish_rows(&mut self) -> Result<(), QueryResultBuilderError> { + self.maybe_inject_error()?; + self.transition(FinishRows) + } + + fn finnalize( + &mut self, + _is_txn: bool, + _frame_no: Option, + ) -> Result { + self.maybe_inject_error()?; + self.transition(Finish)?; + Ok(true) + } + } + + pub fn test_driver( + iter: usize, + f: impl Fn(FsmQueryBuilder) -> Result, + ) { + for _ in 0..iter { + // inject random errors + let builder = FsmQueryBuilder::new(true); + match f(builder) { + Ok(b) => { + assert_eq!(b.state, Finish); + } + Err(e) => { + assert!(matches!(e, crate::error::Error::BuilderError(_))); + } + } + } + } + + #[test] + fn test_fsm_ok() { + let mut builder = FsmQueryBuilder::new(false); + builder.init(&QueryBuilderConfig::default()).unwrap(); + + builder.begin_step().unwrap(); + builder + .cols_description(&mut [("hello", None).into()].into_iter()) + .unwrap(); + builder.begin_rows().unwrap(); + builder.begin_row().unwrap(); + builder.add_row_value(ValueRef::Null).unwrap(); + builder.finish_row().unwrap(); + builder + .step_error(crate::error::Error::LibSqlTxBusy) + .unwrap(); + builder.finish_step(0, None).unwrap(); + + builder.begin_step().unwrap(); + builder + .cols_description(&mut [("hello", None).into()].into_iter()) + .unwrap(); + builder.begin_rows().unwrap(); + builder.begin_row().unwrap(); + builder.add_row_value(ValueRef::Null).unwrap(); + builder.finish_row().unwrap(); + builder.finish_rows().unwrap(); + builder.finish_step(0, None).unwrap(); + + builder.finnalize(false, None).unwrap(); + } + + #[test] + #[should_panic] + fn 
test_fsm_invalid() {
+        let mut builder = FsmQueryBuilder::new(false);
+        builder.init(&QueryBuilderConfig::default()).unwrap();
+        builder.begin_step().unwrap();
+        builder.begin_rows().unwrap();
+    }
+
+    #[allow(dead_code)]
+    fn is_trait_objectifiable(_: Box<dyn ResultBuilder>) {}
+}
diff --git a/libsqlx/src/seal.rs b/libsqlx/src/seal.rs
new file mode 100644
index 00000000..393accc4
--- /dev/null
+++ b/libsqlx/src/seal.rs
@@ -0,0 +1,8 @@
+/// Hold some type, but prevent any access to it
+pub struct Seal<T>(T);
+
+impl<T> Seal<T> {
+    pub fn new(t: T) -> Self {
+        Seal(t)
+    }
+}
diff --git a/sqld-libsql-bindings/src/wal_hook.rs b/sqld-libsql-bindings/src/wal_hook.rs
index 7f09ad31..30b21995 100644
--- a/sqld-libsql-bindings/src/wal_hook.rs
+++ b/sqld-libsql-bindings/src/wal_hook.rs
@@ -15,7 +15,7 @@ use crate::get_orig_wal_methods;
 macro_rules! init_static_wal_method {
     ($name:ident, $ty:path) => {
         pub static $name: $crate::Lazy<&'static $crate::WalMethodsHook<$ty>> =
-            once_cell::sync::Lazy::new(|| {
+            $crate::Lazy::new(|| {
                 // we need a 'static address before we can register the methods.
                 static METHODS: $crate::Lazy<$crate::WalMethodsHook<$ty>> =
                     $crate::Lazy::new(|| $crate::WalMethodsHook::<$ty>::new());
@@ -45,7 +45,7 @@ macro_rules! init_static_wal_method {
 ///
 /// # Safety
 /// The implementer is responsible for calling the orig method with valid arguments.
-pub unsafe trait WalHook {
+pub unsafe trait WalHook: Send + Sync + 'static {
     type Context;
 
     fn name() -> &'static CStr;
diff --git a/sqld/src/query_result_builder.rs b/sqld/src/query_result_builder.rs
index a9aeadd7..29a2dc91 100644
--- a/sqld/src/query_result_builder.rs
+++ b/sqld/src/query_result_builder.rs
@@ -642,7 +642,6 @@ pub mod test {
         }
 
         // this can be usefull to help debug the generated test case
-        dbg!(trace);
 
         b
     }
diff --git a/sqld/src/replication/replica/hook.rs b/sqld/src/replication/replica/hook.rs
index 0cad303c..57241896 100644
--- a/sqld/src/replication/replica/hook.rs
+++ b/sqld/src/replication/replica/hook.rs
@@ -78,9 +78,9 @@ pub struct InjectorHookCtx {
     /// currently in a txn
     pub is_txn: bool,
     /// invoked before injecting frames
-    pre_commit: Box<dyn Fn(FrameNo) -> anyhow::Result<()>>,
+    pre_commit: Box<dyn Fn(FrameNo) -> anyhow::Result<()> + Send + 'static>,
     /// invoked after injecting frames
-    post_commit: Box<dyn Fn(FrameNo) -> anyhow::Result<()>>,
+    post_commit: Box<dyn Fn(FrameNo) -> anyhow::Result<()> + Send + 'static>,
 }
 
 impl InjectorHookCtx {