diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst new file mode 100644 index 00000000000..1812db58cce --- /dev/null +++ b/CONTRIBUTING.rst @@ -0,0 +1,189 @@ +******************************************** +Contributing to the ``cardano-node`` project +******************************************** + +The ``cardano-node`` development is primarily based on the Nix infrastructure (https://nixos.org/ ), which enables packaging, CI, development environments and deployments. + +On how to set up Nix for ``cardano-node`` development, please see `Building Cardano Node with nix `_. + +Supplementary tooling +==== + +GHCID +---- + +run *ghcid* with: ``ghcid -c "cabal repl exe:cardano-node --reorder-goals"`` + +Haskell Language Server +---- + +When using Haskell Language Server with Visual Studio Code, you may find that +`HLINT annotations are ignored `_. + +To work around this, you may run the script ``./scripts/reconfigure-hlint.sh`` to generate a ``.hlint.yaml`` +file with HLINT ignore rules derived from the source code. + +Testing +==== + +``cardano-node`` is essentially a container which implements several components such as networking, consensus, and storage. These components have individual test coverage. The node goes through integration and release testing by Devops/QA while automated CLI tests are ongoing alongside development. + +Developers on ``cardano-node`` can `launch their own testnets `_ or `run the chairman tests `_ locally. + +Debugging +==== + +Pretty printing CBOR encoded files +---- + +It may be useful to print the on chain representations of blocks, delegation certificates, txs and update proposals. There are two commands that do this (for any cbor encoded file): + +To pretty print as CBOR: +``cabal exec cardano-cli -- pretty-print-cbor --filepath CBOREncodedFile`` + +Validate CBOR files +---- + +You can validate Byron era blocks, delegation certificates, txs and update proposals with the ``validate-cbor`` command. 
+ +``cabal exec cardano-cli -- validate-cbor --byron-block 21600 --filepath CBOREncodedByronBlockFile`` + +Updating dependencies +==== + +... from Hackage +---- + +Updating package dependencies from Hackage should work like normal in a Haskell project. +The most important thing to note is that we pin the ``index-state`` of the Hackage package index in ``cabal.project``. +This means that cabal will always see Hackage “as if” it was that time, ensuring reproducibility. +But it also means that if you need a package version that was released *after* that time, you need to bump the ``index-state`` (and to run ``cabal update`` locally). + +Because of how we use Nix to manage our Haskell build, whenever you do this you will also need to pull in the Nix equivalent of the newer ``index-state``. +You can do this by running ``nix flake lock --update-input hackageNix``. + +... from the Cardano package repository +---- + +Many Cardano packages are not on Hackage and are instead in the `Cardano package repository `__, see the README for (lots) more information. +Getting new packages from there works much like getting them from Hackage. +The differences are that it has an independent ``index-state``, and that there is a different Nix command you need to run afterwards: ``nix flake lock --update-input CHaP``. + +Using unreleased versions of dependencies +~~~~ + +Sometimes we need to use an unreleased version of one of our dependencies, either to fix an issue in a package that is not under our control, or to experiment with a pre-release version of one of our own packages. +You can use a ``source-repository-package`` stanza to pull in the unreleased version. +Try only to do this for a short time, as it does not play very well with tooling, and will interfere with the ability to release the node itself. + +For packages that we do not control, we can end up in a situation where we have a fork that looks like it will be long-lived or permanent (e.g. 
the maintainer is unresponsive, or the change has been merged but not released). +In that case, release a patched version to the `Cardano package repository `__, which allows us to remove the ``source-repository-package`` stanza. +See the README for instructions. + +Releasing a version of the node +==== + +(There is much more to say here, this is just a small fragment) + +... to the Cardano package repository +---- + +When releasing a new version of the node, it and the other packages in this repository should be released to the `Cardano package repository `__. +See the README for instructions, including a script to automate most of the process. +Please note that libraries need bounds on the version of their dependencies to avoid bitrot and be effectively reusable. + +Workbench: a local cluster playground +==== + +You can quickly spin up a local cluster (on Linux and Darwin), based on any of a wide variety of configurations, and put it under a transaction generation workload -- using the ``workbench`` environment: + +1. Optional: choose a workbench profile: + - ``default`` stands for a light-state, 6-node cluster, under saturation workload, indefinite runtime + - ``ci-test`` is the profile run in the node CI -- very light, just two nodes and short runtime + - ``devops`` is an unloaded profile (no transaction generation) with short slots -- ``0.2`` sec. + - ..and many more -- which can be either: + - listed, by ``make ps`` + - observed at their point of definition: `nix/workbench/profiles/prof1-variants.jq `_ +2. Optional: select mode of operation, by optionally providing a suffix: + - default -- no suffix -- just enter the workbench shell, allowing you to run ``start-cluster`` at any time. Binaries will be built locally, by ``cabal``. + - ``autostay`` suffix -- enter the workbench shell, start the cluster, and stay in the shell afterwards. Binaries will be built locally, by ``cabal``. + - ``autonix`` suffix -- enter the workbench shell, start the cluster. 
All binaries will be provided by the Nix CI. + - ..there are other modes, as per `lib.mk `_ +3. Enter the workbench shell for the chosen profile & mode: + ``make `` or ``make -`` (when there is a suffix). +4. Optional: start cluster: + Depending on the chosen mode, your cluster might already start, or you are expected to start it yourself, using ``start-cluster``. + +The workbench services are available only inside the workbench shell. + +Using Cabal +---- + +By default, all binaries originating in the ``cardano-node`` repository are available to ``cabal build`` and ``cabal run``, unless the workbench was entered using one of the pure ``*nix`` modes. Note that in all cases, the dependencies for the workbench are supplied through Nix and have been built/tested on CI. + +**Dependency localisation** -or- *Cabal&Nix for painless cross-repository work* +---- + +The Cabal workflow described above only extends to the repository-local packages. Therefore, ordinarily, to work on ``cardano-node`` dependencies in the context of the node itself, one needs to go through an expensive multi-step process -- with committing, pushing and re-pinning of the dependency changes. + +The **dependency localisation** workflow allows us to pick a subset of leaf dependencies of the ``cardano-node`` repository, and declare them *local* -- so they can be iterated upon using the ``cabal build`` / ``cabal run`` of ``cardano-node`` itself. This cuts development iteration time dramatically and enables effective cross-repo development of the full stack of Cardano packages. + +Without further ado (**NOTE**: *the order of steps is important!*): + +1. Ensure that your ``cardano-node`` checkout is clean, with no local modifications. Also, ensure that you start outside the node's Nix shell. +2. Check out the repository with the dependencies, *beside* the ``cardano-node`` checkout. 
You have to check out the git revision of the dependency used by your ``cardano-node`` checkout -- as listed in ``cardano-node/cabal.project``. + - we'll assume the ``ouroboros-network`` repository + - ..so a certain parent directory will include checkouts of both ``ouroboros-network`` and ``cardano-node``, at the same level + - ..and the git revision checked out in ``ouroboros-network`` will match the version of the ``ouroboros-network`` packages used currently + - Extra point #1: you can localise/check out several dependency repositories + - Extra point #2: for the dependencies that are not listed in ``cabal.project`` of the node -- how do you determine the version to check out? You can ask the workbench shell: + 1. Temporarily enter the workbench shell + 2. Look for the package version in ``ghc-pkg list`` + 3. Use that version to determine the git revision of the dependency's repository (using a tag or some special knowledge about the version management of said dependency). +3. Enter the workbench shell, as per instructions in previous sections -- or just a plain Nix shell. +4. Ensure you can build ``cardano-node`` with Cabal: ``cabal build exe:cardano-node``. If you can't, something else is wrong. +5. Determine the *leaf dependency set* you will have to work on. The *leaf dependency set* is defined to include the target package you want to modify, and its reverse dependencies -- that is, packages that depend on it (inside the dependency repository). + - let's assume, for example, that you want to modify ``ouroboros-consensus-shelley`` + - ``ouroboros-consensus-shelley`` is not a leaf dependency in itself, since ``ouroboros-consensus-cardano`` (of the same ``ouroboros-network`` repository) depends on it -- so the *leaf dependency set* will include both of them. + - you might find out that you have to include a significant fraction of packages in ``ouroboros-network`` into this *leaf dependency set* -- do not despair. 
+ - if the *leaf dependency set* is hard to determine, you can use ``cabal-plan`` -- which is included in the workbench shell (which you, therefore, have to enter temporarily): + .. code-block:: console + + [nix-shell:~/cardano-node]$ cabal-plan dot-png --revdep ouroboros-consensus-shelley + + This command will produce a HUGE ``deps.png`` file, which will contain the entire chart of the project dependencies. The important part to look for will be the subset of packages highlighted in red -- those which belong to the ``ouroboros-network`` repository. This will be the full *leaf dependency set*. +6. Edit the ``cardano-node/cabal.project`` as follows: + - for the *leaf dependency set* in the very beginning of the ``cabal.project``, add their relative paths to the ``packages:`` section, e.g.: + .. code-block:: console + + packages: + cardano-api + cardano-cli + ... + trace-resources + trace-forward + ../ouroboros-network/ouroboros-consensus-shelley + ../ouroboros-network/ouroboros-consensus-cardano + +7. The two packages have now become **local** -- when you try ``cabal build exe:cardano-node`` now, you'll see that Cabal starts to build these dependencies you just localised. Hacking time! + +Hoogle +---- + +The workbench shell provides ``hoogle``, with a local database for the full set of dependencies: + +.. 
code-block:: console + + [nix-shell:~/cardano-node]$ hoogle search TxId + Byron.Spec.Ledger.UTxO newtype TxId + Byron.Spec.Ledger.UTxO TxId :: Hash -> TxId + Cardano.Chain.UTxO type TxId = Hash Tx + Cardano.Ledger.TxIn newtype TxId crypto + Cardano.Ledger.TxIn TxId :: SafeHash crypto EraIndependentTxBody -> TxId crypto + Cardano.Ledger.Shelley.API.Types newtype TxId crypto + Cardano.Ledger.Shelley.API.Types TxId :: SafeHash crypto EraIndependentTxBody -> TxId crypto + Cardano.Ledger.Shelley.Tx newtype TxId crypto + Cardano.Ledger.Shelley.Tx TxId :: SafeHash crypto EraIndependentTxBody -> TxId crypto + Ouroboros.Consensus.HardFork.Combinator data family TxId tx :: Type + -- plus more results not shown, pass --count=20 to see more + diff --git a/README.rst b/README.rst index dbd38eea56a..77698fa7c28 100644 --- a/README.rst +++ b/README.rst @@ -15,8 +15,10 @@ +.. contents:: Contents + ************************* -``cardano-node`` Overview +Overview of the ``cardano-node`` repository ************************* Integration of the `ledger `_, `consensus `_, @@ -37,32 +39,25 @@ Integration of the `ledger `_ +The latest supported networks can be found at ``_ -How to build -============ +**** +Obtaining ``cardano-node`` +**** + +Building from source +==== Documentation for building the node can be found `here `_. Executables =========== -You can download the latest version of ``cardano-node`` and ``cardano-cli``: - -* `linux `_ -* `win64 `_ -* `macos `_ - -Windows Executable -================== - -Download --------- +You can download the hydra binaries of ``cardano-node`` and ``cardano-cli`` from the [release notes](https://github.com/input-output-hk/cardano-node/releases) -You can download `here `_. -Instructions ------------- +Running the node on Windows +---- The download includes cardano-node.exe and a .dll. To run the node with cardano-node run you need to reference a few files and directories as arguments. 
These can be copied from the cardano-node repo into the executables directory. The command to run the node on mainnet looks like this: @@ -79,8 +74,12 @@ You can pull the docker image with the latest version of cardano-node from `here docker pull inputoutput/cardano-node -``cardano-node`` -================ +**** +Using ``cardano-node`` +**** + +Command line summary: ``cardano-node`` +==== This refers to the client that is used for running a node. The general synopsis is as follows: @@ -128,37 +127,26 @@ The general synopsis is as follows: * ``--validate-db`` - Flag to revalidate all on-disk database files -Configuration ``.yaml`` files -============================= +Configuration +==== -The ``--config`` flag points to a ``.yaml`` file that is responsible to configuring the logging & other important settings for the node. E.g. see the Byron mainnet configuration in this +The ``--config`` flag points to a ``.yaml`` (or a structurally equivalent ``.json``) file that is responsible to configuring the logging & other important settings for the node. E.g. see the Byron mainnet configuration in this `configuration.yaml `_. + Some of the more important settings are as follows: * ``Protocol: RealPBFT`` -- Protocol the node will execute * ``RequiresNetworkMagic``: RequiresNoMagic -- Used to distinguish between mainnet (``RequiresNoMagic``) and testnets (``RequiresMagic``) - -Logging -======== - -Logs are output to the ``logs/`` dir. - -Profiling & statistics -====================== - -Profiling data and RTS run stats are stored in the ``profile/`` dir. - -Please see ``scripts/README.md`` for how to obtain profiling information using the scripts. - Scripts ======= Please see ``scripts/README.md`` for information on the various scripts. -``cardano-cli`` -=============== +**** +Using ``cardano-cli`` +**** A CLI utility to support a variety of key material operations (genesis, migration, pretty-printing..) for different system generations. 
Usage documentation can be found at ``cardano-cli/README.md``. @@ -172,6 +160,9 @@ The general synopsis is as follows: > NOTE: the exact invocation command depends on the environment. If you have only built ``cardano-cli``, without installing it, then you have to prepend ``cabal run -- `` before ``cardano-cli``. We henceforth assume that the necessary environment-specific adjustment has been made, so we only mention ``cardano-cli``. +Command line options: ``cardano-cli`` +==== + The subcommands are subdivided in groups, and their full list can be seen in the output of ``cardano-cli --help``. All subcommands have help available. For example: @@ -194,11 +185,8 @@ All subcommands have help available. For example: -h,--help Show this help text -Genesis operations -================== - -Generation ----------- +Genesis generation +==== The Byron genesis generation operations will create a directory that contains: @@ -322,7 +310,7 @@ Local node queries You can query the tip of your local node via the ``get-tip`` command as follows -1. Open `tmux` +1. Open ``tmux`` 2. Run ``cabal build cardano-node`` 3. Run ``./scripts/lite/shelley-testnet.sh example`` 4. Run ``export CARDANO_NODE_SOCKET_PATH=/cardano-node/example/socket/node-1-socket @@ -365,7 +353,7 @@ The mandatory arguments are ``--mainnet | --testnet-magic``, ``signing-key``, `` The remaining arguments are optional parameters you want to update in your update proposal. -You can also check your proposal's validity using the `validate-cbor` command. See: `Validate CBOR files`_. +You can also check your proposal's validity using the ``validate-cbor`` command. See: `Validate CBOR files`_. See the `Byron specification `_ for more details on update proposals. 
@@ -413,54 +401,9 @@ Byron vote submission: (--mainnet | --testnet-magic NATURAL) --filepath UpdateProposalVoteFile -Development -=========== - -GHCID ------ - -run *ghcid* with: ``ghcid -c "cabal repl exe:cardano-node --reorder-goals"`` - -Haskell Language Server ------------------------ - -When using Haskell Language Server with Visual Studio Code, you may find that -`HLINT annotations are ignored`. - -To work around this, you may run the script `./scripts/reconfigure-hlint.sh` to generate a `.hlint.yaml` -file with HLINT ignore rules derived from the source code. - -Testing -======== - -``cardano-node`` is essentially a container which implements several components such networking, consensus, and storage. These components have individual test coverage. The node goes through integration and release testing by Devops/QA while automated CLI tests are ongoing alongside development. - -Developers on ``cardano-node`` can `launch their own testnets `_ or `run the chairman tests `_ locally. - -Chairman tests --------------- - -Debugging -========= - -Pretty printing CBOR encoded files ----------------------------------- - -It may be useful to print the on chain representations of blocks, delegation certificates, txs and update proposals. There are two commands that do this (for any cbor encoded file): - -To pretty print as CBOR: -``cabal exec cardano-cli -- pretty-print-cbor --filepath CBOREncodedFile`` - -Validate CBOR files -------------------- - -You can validate Byron era blocks, delegation certificates, txs and update proposals with the ``validate-cbor`` command. - -``cabal exec cardano-cli -- validate-cbor --byron-block 21600 --filepath CBOREncodedByronBlockFile`` - - +**** Native Tokens -======================================= +**** Native tokens is a new feature that enables the transacting of multi-assets on Cardano. Native tokens are now supported on mainnet and users can transact with ada, and an unlimited number of user-defined (custom) tokens natively. 
@@ -472,10 +415,19 @@ To help you get started we have compiled a handy list of resources: You can also read more about `native tokens and how they compare to ada and ERC20 `_. Browse native tokens created on the Cardano blockchain and see their transactions in an interactive dashboard that allows filtering and searching: nativetokens.da.iogservices.io. +**** API Documentation -================= +**** + The API documentation is published `here `_. -The documentation is built with each push, but is only published from `master` branch. In order to -test if the documentation is working, build the documentation locally with `./scripts/haddocs.sh` and -open `haddocks/index.html` in the browser. +The documentation is built with each push, but is only published from ``master`` branch. In order to +test if the documentation is working, build the documentation locally with ``./scripts/haddocs.sh`` and +open ``haddocks/index.html`` in the browser. + +**** +Using the ``cardano-node`` Haskell packages +**** + +If you want to use the ``cardano-node`` Haskell packages from another project, you should use `CHaP `_ to get the packages defined in this repository. +Please note that you may need to use any ``source-repository-package`` stanzas defined in ``cabal.project``, although we will endeavour to keep these to an absolute minimum. diff --git a/cabal.project b/cabal.project index fd101d4ba3c..06de23bceed 100644 --- a/cabal.project +++ b/cabal.project @@ -1,5 +1,19 @@ --- run `nix flake lock --update-input hackageNix` after updating index-state. 
-index-state: 2022-02-18T00:00:00Z +-- Custom repository for cardano haskell packages, see CONTRIBUTING for more +repository cardano-haskell-packages + url: https://input-output-hk.github.io/cardano-haskell-packages + secure: True + root-keys: + 3e0cce471cf09815f930210f7827266fd09045445d65923e6d0238a6cd15126f + 443abb7fb497a134c343faf52f0b659bd7999bc06b7f63fa76dc99d631f9bea1 + a86a1f6ce86c449c46666bda44268677abf29b5b2d2eb5ec7af903ec2f117a82 + bcec67e8e99cabfa7764d75ad9b158d72bfacf70ca1d0ec8bc6b4406d1bf8413 + c00aae8461a256275598500ea0e187588c35a5d5d7454fb57eac18d9edb86a56 + d4a35cd3121aa00d18544bb0ac01c3e1691d618f462c46129271bccf39f7e8ee + +-- See CONTRIBUTING for information about these, including some Nix commands +-- you need to run if you change them +index-state: 2022-09-27T00:00:00Z +index-state: cardano-haskell-packages 2022-10-17T00:00:00Z packages: cardano-api @@ -54,305 +68,76 @@ package cryptonite -- generation is dubious. Set the flag so we use /dev/urandom by default. flags: -support_rdrand --- --------------------------------------------------------- --- Disable all tests by default - -tests: False +tests: True test-show-details: direct --- Then enable specific tests in this repo - -package cardano-api - tests: True - -package cardano-cli - tests: True - -package cardano-node - tests: True - -package cardano-node-chairman - tests: True - -package cardano-submit-api - tests: True - -package cardano-testnet - tests: True - -package trace-resources - tests: True - -package trace-dispatcher - tests: True - -package trace-forward - tests: True - -package cardano-tracer - tests: True - -package trace-resources - tests: True - -package locli - tests: True - --- The following is needed because Nix is doing something crazy. 
-package byron-spec-ledger - tests: False - -package vector-map - ghc-options: -Werror - -package iohk-monitoring - tests: False - -package ouroboros-consensus-test - tests: False - -package ouroboros-consensus-cardano-test - tests: False - -package ouroboros-network - tests: False - -package ouroboros-network-framework - tests: False - -package plutus-tx - tests: False - -package prettyprinter-configurable - tests: False - -package small-steps - tests: False - -package small-steps-test - tests: False - -package tx-generator - tests: True - -package goblins - tests: False +constraints: + hedgehog >= 1.0 + , bimap >= 0.4.0 + , libsystemd-journal >= 1.4.4 + , systemd >= 2.3.0 + -- systemd-2.3.0 requires at least network 3.1.1.0 but it doesn't declare + -- that dependency + , network >= 3.1.1.0 + , HSOpenSSL >= 0.11.7.2 + , algebraic-graphs < 0.7 + , protolude < 0.3.1 + -- TODO: these should be set in cabal files, but avoiding setting them in lower dependencies for initial CHaP release + , cardano-prelude == 0.1.0.0 + , base-deriving-via == 0.1.0.0 + , cardano-binary == 1.5.0 + , cardano-binary-test == 1.3.0 + , cardano-crypto-class == 2.0.0 + , cardano-crypto-praos == 2.0.0 + , cardano-crypto-tests == 2.0.0 + , cardano-slotting == 0.1.0.0 + , measures == 0.1.0.0 + , orphans-deriving-via == 0.1.0.0 + , strict-containers == 0.1.0.0 + , plutus-core == 1.0.0.0 + , plutus-ledger-api == 1.0.0.0 + , plutus-tx == 1.0.0.0 + , plutus-tx-plugin == 1.0.0.0 + , prettyprinter-configurable == 0.1.0.0 + , plutus-ghc-stub == 8.6.5 + , word-array == 0.1.0.0 -package io-classes - tests: False +package snap-server + flags: +openssl -package cardano-ledger-alonzo-test - tests: False +package comonad + flags: -test-doctests --- --------------------------------------------------------- +allow-newer: + ekg:aeson, + ekg-json:aeson, + threepenny-gui:aeson, + monoidal-containers:aeson, + size-based:template-haskell, + snap-server:attoparsec, --- The two following one-liners will cut off / 
restore the remainder of this file (for nix-shell users): --- when using the "cabal" wrapper script provided by nix-shell. --- --------------------------- 8< -------------------------- --- Please do not put any `source-repository-package` clause above this line. +-- IMPORTANT +-- Do NOT add more source-repository-package stanzas here unless they are strictly +-- temporary! Please read the section in CONTRIBUTING about updating dependencies. --- Using a fork until our patches can be merged upstream source-repository-package type: git - location: https://github.com/input-output-hk/optparse-applicative - tag: 7497a29cb998721a9068d5725d49461f2bba0e7a - --sha256: 1gvsrg925vynwgqwplgjmp53vj953qyh3wbdf34pw21c8r47w35r + location: https://github.com/input-output-hk/moo + tag: 8c487714fbfdea66188fcb85053e7e292e0cc348 + --sha256: 1mdj218hgh7s5a6b9k14vg9i06zxah0wa42ycdgh245izs8nfv0x +-- Open PR upstream, maintainer unresponsive, hopefully short-lived fork. +-- TODO (mpj): release into CHaP as a patched version and delete source-repository-package type: git location: https://github.com/vshabanov/ekg-json tag: 00ebe7211c981686e65730b7144fbf5350462608 --sha256: 1zvjm3pb38w0ijig5wk5mdkzcszpmlp5d4zxvks2jk1rkypi8gsm -source-repository-package - type: git - location: https://github.com/input-output-hk/hedgehog-extras - tag: 714ee03a5a786a05fc57ac5d2f1c2edce4660d85 - --sha256: 1qa4mm36xynaf17990ijmzww0ij8hjrc0vw5nas6d0zx6q9hb978 - -source-repository-package - type: git - location: https://github.com/haskell-works/hw-aeson - tag: 6dc309ff4260c71d9a18c220cbae8aa1dfe2a02e - --sha256: 08zxzkk1fy8xrvl46lhzmpyisizl0nzl1n00g417vc0l170wsr9j - -source-repository-package - type: git - location: https://github.com/input-output-hk/cardano-base - tag: 0f3a867493059e650cda69e20a5cbf1ace289a57 - --sha256: 0p0az3sbkhb7njji8xxdrfb0yx2gc8fmrh872ffm8sfip1w29gg1 - subdir: - base-deriving-via - binary - binary/test - cardano-crypto-class - cardano-crypto-praos - cardano-crypto-tests - measures - 
orphans-deriving-via - slotting - strict-containers - -source-repository-package - type: git - location: https://github.com/input-output-hk/cardano-crypto - tag: f73079303f663e028288f9f4a9e08bcca39a923e - --sha256: 1n87i15x54s0cjkh3nsxs4r1x016cdw1fypwmr68936n3xxsjn6q - -source-repository-package - type: git - location: https://github.com/input-output-hk/cardano-ledger - tag: c7c63dabdb215ebdaed8b63274965966f2bf408f - --sha256: 1cn1z3dh5dy5yy42bwfd8rg25mg8qp3m55gyfsl563wgw4q1nd6d - subdir: - eras/alonzo/impl - eras/alonzo/test-suite - eras/babbage/impl - eras/babbage/test-suite - eras/byron/chain/executable-spec - eras/byron/crypto - eras/byron/crypto/test - eras/byron/ledger/executable-spec - eras/byron/ledger/impl - eras/byron/ledger/impl/test - eras/shelley/impl - eras/shelley/test-suite - eras/shelley-ma/impl - eras/shelley-ma/test-suite - libs/cardano-ledger-core - libs/cardano-ledger-pretty - libs/cardano-protocol-tpraos - libs/cardano-data - libs/vector-map - libs/set-algebra - libs/small-steps - libs/small-steps-test - libs/non-integral - -source-repository-package - type: git - location: https://github.com/input-output-hk/cardano-prelude - tag: bb4ed71ba8e587f672d06edf9d2e376f4b055555 - --sha256: 00h10l5mmiza9819p9v5q5749nb9pzgi20vpzpy1d34zmh6gf1cj - subdir: - cardano-prelude - cardano-prelude-test - -source-repository-package - type: git - location: https://github.com/input-output-hk/goblins - tag: cde90a2b27f79187ca8310b6549331e59595e7ba - --sha256: 17c88rbva3iw82yg9srlxjv2ia5wjb9cyqw44hik565f5v9svnyg - -source-repository-package - type: git - location: https://github.com/input-output-hk/iohk-monitoring-framework - tag: 066f7002aac5a0efc20e49643fea45454f226caa - --sha256: 0s6x4in11k5ba7nl7la896g28sznf9185xlqg9c604jqz58vj9nj - subdir: - contra-tracer - iohk-monitoring - plugins/backend-aggregation - plugins/backend-ekg - plugins/backend-monitoring - plugins/backend-trace-forwarder - plugins/scribe-systemd - tracer-transformers - -source-repository-package 
- type: git - location: https://github.com/input-output-hk/Win32-network - tag: 3825d3abf75f83f406c1f7161883c438dac7277d - --sha256: 19wahfv726fa3mqajpqdqhnl9ica3xmf68i254q45iyjcpj1psqx - -source-repository-package - type: git - location: https://github.com/input-output-hk/ouroboros-network - tag: cb9eba406ceb2df338d8384b35c8addfe2067201 - --sha256: 066llskxzjgcs13lwlvklb28azb9kd9b77j61x8fvrj1rlf5njfw - subdir: - monoidal-synchronisation - network-mux - ouroboros-consensus - ouroboros-consensus-byron - ouroboros-consensus-cardano - ouroboros-consensus-protocol - ouroboros-consensus-shelley - ouroboros-network - ouroboros-network-framework - ouroboros-network-testing - -source-repository-package - type: git - location: https://github.com/input-output-hk/io-sim - tag: f4183f274d88d0ad15817c7052df3a6a8b40e6dc - --sha256: 0vb2pd9hl89v2y5hrhrsm69yx0jf98vppjmfncj2fraxr3p3lldw - subdir: - io-classes - io-sim - strict-stm - -source-repository-package - type: git - location: https://github.com/input-output-hk/typed-protocols - tag: 181601bc3d9e9d21a671ce01e0b481348b3ca104 - --sha256: 1lr97b2z7l0rpsmmz92rsv27qzd5vavz10cf7n25svya4kkiysp5 - subdir: - typed-protocols - typed-protocols-cborg - typed-protocols-examples - -source-repository-package - type: git - location: https://github.com/input-output-hk/plutus - tag: a56c96598b4b25c9e28215214d25189331087244 - --sha256: 12d6bndmj0dxl6xlaqmf78326yp5hw093bmybmqfpdkvk4mgz03j - subdir: - plutus-core - plutus-ledger-api - plutus-tx - plutus-tx-plugin - prettyprinter-configurable - stubs/plutus-ghc-stub - word-array - -source-repository-package - type: git - location: https://github.com/input-output-hk/ekg-forward - tag: 297cd9db5074339a2fb2e5ae7d0780debb670c63 - --sha256: 1zcwry3y5rmd9lgxy89wsb3k4kpffqji35dc7ghzbz603y1gy24g - source-repository-package type: git location: https://github.com/denisshevchenko/threepenny-gui tag: 4ec92ded05ccf59ba4a874be4b404ac1b6d666b6 --sha256: 00fvvaf4ir4hskq4a6gggbh2wmdvy8j8kn6s4m1p1vlh8m8mq514 --- 
Drops an instance breaking our code. Should be released to Hackage eventually. -source-repository-package - type: git - location: https://github.com/input-output-hk/flat - tag: ee59880f47ab835dbd73bea0847dab7869fc20d8 - --sha256: 1lrzknw765pz2j97nvv9ip3l1mcpf2zr4n56hwlz0rk7wq7ls4cm - -constraints: - hedgehog >= 1.0 - , bimap >= 0.4.0 - , libsystemd-journal >= 1.4.4 - , systemd >= 2.3.0 - -- systemd-2.3.0 requires at least network 3.1.1.0 but it doesn't declare - -- that dependency - , network >= 3.1.1.0 - , HSOpenSSL >= 0.11.7.2 - -package snap-server - flags: +openssl - -package comonad - flags: -test-doctests - -allow-newer: - *:aeson, - monoidal-containers:aeson, - size-based:template-haskell diff --git a/flake.lock b/flake.lock index 751268fccd0..bc8967f7601 100644 --- a/flake.lock +++ b/flake.lock @@ -1,5 +1,22 @@ { "nodes": { + "CHaP": { + "flake": false, + "locked": { + "lastModified": 1666067814, + "narHash": "sha256-2TbQs7HSZRY5G3/jDggaY8ahxY8V5DhQ/+Xg1B9csTY=", + "owner": "input-output-hk", + "repo": "cardano-haskell-packages", + "rev": "e79a09bf6018fa0e5c3196f0ac01e58224caf73d", + "type": "github" + }, + "original": { + "owner": "input-output-hk", + "ref": "repo", + "repo": "cardano-haskell-packages", + "type": "github" + } + }, "HTTP": { "flake": false, "locked": { @@ -1613,7 +1630,7 @@ "inputs": { "cardano-node-workbench": "cardano-node-workbench_4", "customConfig": "customConfig_3", - "flake-compat": "flake-compat_3", + "flake-compat": "flake-compat_4", "haskellNix": "haskellNix_3", "hostNixpkgs": [ "node-measured", @@ -2290,6 +2307,22 @@ } }, "flake-compat_3": { + "flake": false, + "locked": { + "lastModified": 1635892615, + "narHash": "sha256-harGbMZr4hzat2BWBU+Y5OYXlu+fVz7E4WeQzHi5o8A=", + "owner": "input-output-hk", + "repo": "flake-compat", + "rev": "eca47d3377946315596da653862d341ee5341318", + "type": "github" + }, + "original": { + "owner": "input-output-hk", + "repo": "flake-compat", + "type": "github" + } + }, + "flake-compat_4": { "flake": 
false, "locked": { "lastModified": 1647532380, @@ -2306,7 +2339,7 @@ "type": "github" } }, - "flake-compat_4": { + "flake-compat_5": { "flake": false, "locked": { "lastModified": 1638445031, @@ -2323,7 +2356,7 @@ "type": "github" } }, - "flake-compat_5": { + "flake-compat_6": { "flake": false, "locked": { "lastModified": 1638445031, @@ -2340,7 +2373,7 @@ "type": "github" } }, - "flake-compat_6": { + "flake-compat_7": { "flake": false, "locked": { "lastModified": 1638445031, @@ -2952,11 +2985,11 @@ "hackageNix": { "flake": false, "locked": { - "lastModified": 1646961339, - "narHash": "sha256-hsXNxSugSyOALfOt0I+mXrKioJ/nWX49/RhF/88N6D0=", + "lastModified": 1665882657, + "narHash": "sha256-3eiHY9Lt2vTeMsrT6yssbd+nfx/i5avfxosigx7bCxU=", "owner": "input-output-hk", "repo": "hackage.nix", - "rev": "5dea95d408c29b56a14faae378ae4e39d63126f4", + "rev": "8e5b6856f99ed790c387fa76bdad9dcc94b3a54c", "type": "github" }, "original": { @@ -3270,7 +3303,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_10", "hackage": "hackage_9", "hpc-coveralls": "hpc-coveralls_10", - "nix-tools": "nix-tools_10", + "nix-tools": "nix-tools_9", "nixpkgs": [ "node-measured", "node-process", @@ -3310,7 +3343,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_11", "hackage": "hackage_10", "hpc-coveralls": "hpc-coveralls_11", - "nix-tools": "nix-tools_11", + "nix-tools": "nix-tools_10", "nixpkgs": [ "node-measured", "node-process", @@ -3351,7 +3384,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_12", "hackage": "hackage_11", "hpc-coveralls": "hpc-coveralls_12", - "nix-tools": "nix-tools_12", + "nix-tools": "nix-tools_11", "nixpkgs": [ "node-measured", "node-process", @@ -3392,7 +3425,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_13", "hackage": "hackage_12", "hpc-coveralls": "hpc-coveralls_13", - "nix-tools": "nix-tools_13", + "nix-tools": "nix-tools_12", "nixpkgs": [ "node-measured", "node-snapshot", @@ -3430,7 +3463,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_14", "hackage": "hackage_13", "hpc-coveralls": "hpc-coveralls_14", - "nix-tools": 
"nix-tools_14", + "nix-tools": "nix-tools_13", "nixpkgs": [ "node-measured", "node-snapshot", @@ -3469,7 +3502,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_15", "hackage": "hackage_14", "hpc-coveralls": "hpc-coveralls_15", - "nix-tools": "nix-tools_15", + "nix-tools": "nix-tools_14", "nixpkgs": [ "node-measured", "node-snapshot", @@ -3508,7 +3541,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_16", "hackage": "hackage_15", "hpc-coveralls": "hpc-coveralls_16", - "nix-tools": "nix-tools_16", + "nix-tools": "nix-tools_15", "nixpkgs": [ "node-snapshot", "nixpkgs" @@ -3545,7 +3578,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_17", "hackage": "hackage_16", "hpc-coveralls": "hpc-coveralls_17", - "nix-tools": "nix-tools_17", + "nix-tools": "nix-tools_16", "nixpkgs": [ "node-snapshot", "membench", @@ -3583,7 +3616,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_18", "hackage": "hackage_17", "hpc-coveralls": "hpc-coveralls_18", - "nix-tools": "nix-tools_18", + "nix-tools": "nix-tools_17", "nixpkgs": [ "node-snapshot", "plutus-example", @@ -3617,6 +3650,7 @@ "cabal-34": "cabal-34_2", "cabal-36": "cabal-36_2", "cardano-shell": "cardano-shell_2", + "flake-compat": "flake-compat_3", "flake-utils": "flake-utils_2", "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_2", "hackage": [ @@ -3624,23 +3658,23 @@ ], "hpc-coveralls": "hpc-coveralls_2", "hydra": "hydra_2", - "nix-tools": "nix-tools_2", "nixpkgs": [ "nixpkgs" ], "nixpkgs-2003": "nixpkgs-2003_2", "nixpkgs-2105": "nixpkgs-2105_2", "nixpkgs-2111": "nixpkgs-2111_2", + "nixpkgs-2205": "nixpkgs-2205", "nixpkgs-unstable": "nixpkgs-unstable_2", "old-ghc-nix": "old-ghc-nix_2", "stackage": "stackage_2" }, "locked": { - "lastModified": 1649639788, - "narHash": "sha256-nBzRclDcVCEwrIMOYTNOZltd0bUhSyTk0c3UIrjqFhI=", + "lastModified": 1665882789, + "narHash": "sha256-vD9voCqq4F100RDO3KlfdKZE81NyD++NJjvf3KNNbHA=", "owner": "input-output-hk", "repo": "haskell.nix", - "rev": "fd74389bcf72b419f25cb6fe81c951b02ede4985", + "rev": "9af167fb4343539ca99465057262f289b44f55da", "type": "github" 
}, "original": { @@ -3661,7 +3695,7 @@ "hackage": "hackage_2", "hpc-coveralls": "hpc-coveralls_3", "hydra": "hydra_3", - "nix-tools": "nix-tools_3", + "nix-tools": "nix-tools_2", "nixpkgs": [ "node-measured", "cardano-node-workbench", @@ -3699,7 +3733,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_4", "hackage": "hackage_3", "hpc-coveralls": "hpc-coveralls_4", - "nix-tools": "nix-tools_4", + "nix-tools": "nix-tools_3", "nixpkgs": [ "node-measured", "nixpkgs" @@ -3736,7 +3770,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_5", "hackage": "hackage_4", "hpc-coveralls": "hpc-coveralls_5", - "nix-tools": "nix-tools_5", + "nix-tools": "nix-tools_4", "nixpkgs": [ "node-measured", "node-measured", @@ -3774,7 +3808,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_6", "hackage": "hackage_5", "hpc-coveralls": "hpc-coveralls_6", - "nix-tools": "nix-tools_6", + "nix-tools": "nix-tools_5", "nixpkgs": [ "node-measured", "node-measured", @@ -3814,7 +3848,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_7", "hackage": "hackage_6", "hpc-coveralls": "hpc-coveralls_7", - "nix-tools": "nix-tools_7", + "nix-tools": "nix-tools_6", "nixpkgs": [ "node-measured", "node-measured", @@ -3855,7 +3889,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_8", "hackage": "hackage_7", "hpc-coveralls": "hpc-coveralls_8", - "nix-tools": "nix-tools_8", + "nix-tools": "nix-tools_7", "nixpkgs": [ "node-measured", "node-measured", @@ -3896,7 +3930,7 @@ "ghc-8.6.5-iohk": "ghc-8.6.5-iohk_9", "hackage": "hackage_8", "hpc-coveralls": "hpc-coveralls_9", - "nix-tools": "nix-tools_9", + "nix-tools": "nix-tools_8", "nixpkgs": [ "node-measured", "node-process", @@ -5334,30 +5368,14 @@ "type": "github" } }, - "nix-tools_18": { - "flake": false, - "locked": { - "lastModified": 1636018067, - "narHash": "sha256-ng306fkuwr6V/malWtt3979iAC4yMVDDH2ViwYB6sQE=", - "owner": "input-output-hk", - "repo": "nix-tools", - "rev": "ed5bd7215292deba55d6ab7a4e8c21f8b1564dda", - "type": "github" - }, - "original": { - "owner": "input-output-hk", - "repo": "nix-tools", - "type": 
"github" - } - }, "nix-tools_2": { "flake": false, "locked": { - "lastModified": 1649424170, - "narHash": "sha256-XgKXWispvv5RCvZzPb+p7e6Hy3LMuRjafKMl7kXzxGw=", + "lastModified": 1644395812, + "narHash": "sha256-BVFk/BEsTLq5MMZvdy3ZYHKfaS3dHrsKh4+tb5t5b58=", "owner": "input-output-hk", "repo": "nix-tools", - "rev": "e109c94016e3b6e0db7ed413c793e2d4bdb24aa7", + "rev": "d847c63b99bbec78bf83be2a61dc9f09b8a9ccc1", "type": "github" }, "original": { @@ -5369,11 +5387,11 @@ "nix-tools_3": { "flake": false, "locked": { - "lastModified": 1644395812, - "narHash": "sha256-BVFk/BEsTLq5MMZvdy3ZYHKfaS3dHrsKh4+tb5t5b58=", + "lastModified": 1636018067, + "narHash": "sha256-ng306fkuwr6V/malWtt3979iAC4yMVDDH2ViwYB6sQE=", "owner": "input-output-hk", "repo": "nix-tools", - "rev": "d847c63b99bbec78bf83be2a61dc9f09b8a9ccc1", + "rev": "ed5bd7215292deba55d6ab7a4e8c21f8b1564dda", "type": "github" }, "original": { @@ -6000,11 +6018,11 @@ }, "nixpkgs-2105_2": { "locked": { - "lastModified": 1645296114, - "narHash": "sha256-y53N7TyIkXsjMpOG7RhvqJFGDacLs9HlyHeSTBioqYU=", + "lastModified": 1659914493, + "narHash": "sha256-lkA5X3VNMKirvA+SUzvEhfA7XquWLci+CGi505YFAIs=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "530a53dcbc9437363471167a5e4762c5fcfa34a1", + "rev": "022caabb5f2265ad4006c1fa5b1ebe69fb0c3faf", "type": "github" }, "original": { @@ -6288,11 +6306,11 @@ }, "nixpkgs-2111_2": { "locked": { - "lastModified": 1648744337, - "narHash": "sha256-bYe1dFJAXovjqiaPKrmAbSBEK5KUkgwVaZcTbSoJ7hg=", + "lastModified": 1659446231, + "narHash": "sha256-hekabNdTdgR/iLsgce5TGWmfIDZ86qjPhxDg/8TlzhE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "0a58eebd8ec65ffdef2ce9562784123a73922052", + "rev": "eabc38219184cc3e04a974fe31857d8e0eac098d", "type": "github" }, "original": { @@ -6414,6 +6432,22 @@ "type": "github" } }, + "nixpkgs-2205": { + "locked": { + "lastModified": 1663981975, + "narHash": "sha256-TKaxWAVJR+a5JJauKZqibmaM5e/Pi5tBDx9s8fl/kSE=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": 
"309faedb8338d3ae8ad8f1043b3ccf48c9cc2970", + "type": "github" + }, + "original": { + "owner": "NixOS", + "ref": "nixpkgs-22.05-darwin", + "repo": "nixpkgs", + "type": "github" + } + }, "nixpkgs-regression": { "locked": { "lastModified": 1643052045, @@ -6621,11 +6655,11 @@ }, "nixpkgs-unstable_2": { "locked": { - "lastModified": 1648219316, - "narHash": "sha256-Ctij+dOi0ZZIfX5eMhgwugfvB+WZSrvVNAyAuANOsnQ=", + "lastModified": 1663905476, + "narHash": "sha256-0CSwRKaYravh9v6qSlBpM0gNg0UhKT2lL7Yn6Zbx7UM=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "30d3d79b7d3607d56546dd2a6b49e156ba0ec634", + "rev": "e14f9fb57315f0d4abde222364f19f88c77d2b79", "type": "github" }, "original": { @@ -6951,7 +6985,7 @@ "cardano-mainnet-mirror": "cardano-mainnet-mirror_2", "cardano-node-workbench": "cardano-node-workbench_3", "customConfig": "customConfig_4", - "flake-compat": "flake-compat_4", + "flake-compat": "flake-compat_5", "haskellNix": "haskellNix_4", "hostNixpkgs": [ "node-measured", @@ -6987,7 +7021,7 @@ "inputs": { "cardano-node-workbench": "cardano-node-workbench_5", "customConfig": "customConfig_5", - "flake-compat": "flake-compat_5", + "flake-compat": "flake-compat_6", "haskellNix": "haskellNix_5", "hostNixpkgs": [ "node-measured", @@ -7023,7 +7057,7 @@ "inputs": { "cardano-node-workbench": "cardano-node-workbench_6", "customConfig": "customConfig_9", - "flake-compat": "flake-compat_6", + "flake-compat": "flake-compat_7", "haskellNix": "haskellNix_9", "hostNixpkgs": [ "node-measured", @@ -7815,6 +7849,7 @@ }, "root": { "inputs": { + "CHaP": "CHaP", "cardano-mainnet-mirror": "cardano-mainnet-mirror", "cardano-node-workbench": "cardano-node-workbench", "customConfig": "customConfig_2", @@ -8000,11 +8035,11 @@ "stackage_2": { "flake": false, "locked": { - "lastModified": 1649639721, - "narHash": "sha256-i/nyHyfpvw6en4phdjLS96DhJI95MVX3KubfUJwDtuU=", + "lastModified": 1665537461, + "narHash": "sha256-60tLFJ0poKp3IIPMvIDx3yzmjwrX7CngypfCQqV+oXE=", "owner": "input-output-hk", 
"repo": "stackage.nix", - "rev": "9d1954e8bf7ce40ce21d59794d19a8d1ddf06cd6", + "rev": "fbf47f75f32aedcdd97143ec59c578f403fae35f", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 9da676a5c9a..54cff8bec41 100644 --- a/flake.nix +++ b/flake.nix @@ -18,6 +18,10 @@ inputs.nixpkgs.follows = "nixpkgs"; inputs.hackage.follows = "hackageNix"; }; + CHaP = { + url = "github:input-output-hk/cardano-haskell-packages?ref=repo"; + flake = false; + }; utils.url = "github:numtide/flake-utils"; iohkNix = { url = "github:input-output-hk/iohk-nix"; @@ -67,6 +71,7 @@ , hostNixpkgs , utils , haskellNix + , CHaP , iohkNix , plutus-apps , cardano-mainnet-mirror @@ -202,6 +207,9 @@ project = (import ./nix/haskell.nix { inherit (pkgs) haskell-nix gitrev; inherit projectPackagesExes; + inputMap = { + "https://input-output-hk.github.io/cardano-haskell-packages" = CHaP; + }; }).appendModule customConfig.haskellNix // { profiled = profiledProject; asserted = assertedProject; diff --git a/nix/haskell.nix b/nix/haskell.nix index 4a937c575e4..70f2156ee04 100644 --- a/nix/haskell.nix +++ b/nix/haskell.nix @@ -6,6 +6,7 @@ gitrev , # Pre-computed package list (generated by nix/regenerate.sh) to avoid double evaluation: projectPackagesExes +, inputMap }: let @@ -23,6 +24,7 @@ haskell-nix.cabalProject' ({ pkgs , buildProject , ... }: { + inherit inputMap; name = "cardano-node"; src = haskellLib.cleanSourceWith { src = ../.; @@ -43,8 +45,6 @@ haskell-nix.cabalProject' ({ pkgs shell = { name = "cabal-dev-shell"; - packages = lib.attrVals projectPackageNames; - # These programs will be available inside the nix-shell. 
nativeBuildInputs = with pkgs.buildPackages.buildPackages; [ nix-prefetch-git @@ -52,15 +52,9 @@ haskell-nix.cabalProject' ({ pkgs hlint ghcid haskell-language-server - cabalWrapped - # we also add cabal (even if cabalWrapped will be used by default) for shell completion: cabal ]; - # Prevents cabal from choosing alternate plans, so that - # *all* dependencies are provided by Nix. - exactDeps = true; - withHoogle = true; }; modules = @@ -72,46 +66,6 @@ haskell-nix.cabalProject' ({ pkgs setGitRev = ''${pkgs.buildPackages.haskellBuildUtils}/bin/set-git-rev "${gitrev}" $out/bin/*''; in [ - # Allow reinstallation of Win32 - ({ pkgs, ... }: lib.mkIf pkgs.stdenv.hostPlatform.isWindows { - nonReinstallablePkgs = - [ - "rts" - "ghc-heap" - "ghc-prim" - "integer-gmp" - "integer-simple" - "base" - "deepseq" - "array" - "ghc-boot-th" - "pretty" - "template-haskell" - # ghcjs custom packages - "ghcjs-prim" - "ghcjs-th" - "ghc-boot" - "ghc" - "array" - "binary" - "bytestring" - "containers" - "filepath" - "ghc-boot" - "ghc-compact" - "ghc-prim" - # "ghci" "haskeline" - "hpc" - "mtl" - "parsec" - "text" - "transformers" - "xhtml" - # "stm" "terminfo" - ]; - # When cross compfixesiling we don't have a `ghc` package - packages.plutus-tx-plugin.flags.use-ghc-stub = true; - }) ({ pkgs, ... }: { packages.tx-generator.package.buildable = with pkgs.stdenv.hostPlatform; isUnix && !isMusl; packages.cardano-tracer.package.buildable = with pkgs.stdenv.hostPlatform; isUnix && !isMusl; @@ -130,9 +84,8 @@ haskell-nix.cabalProject' ({ pkgs }) ({ pkgs, ... }: { # Use the VRF fork of libsodium - packages = lib.genAttrs [ "cardano-crypto-praos" "cardano-crypto-class" ] (_: { - components.library.pkgconfig = lib.mkForce [ [ pkgs.libsodium-vrf ] ]; - }); + packages.cardano-crypto-praos.components.library.pkgconfig = lib.mkForce [ [ pkgs.libsodium-vrf ] ]; + packages.cardano-crypto-class.components.library.pkgconfig = lib.mkForce [ [ pkgs.libsodium-vrf pkgs.secp256k1 ] ]; }) ({ pkgs, options, ... 
}: { # make sure that libsodium DLLs are available for windows binaries, diff --git a/nix/nixos/tx-generator-service.nix b/nix/nixos/tx-generator-service.nix index 0642af47ebc..1d4d697e472 100644 --- a/nix/nixos/tx-generator-service.nix +++ b/nix/nixos/tx-generator-service.nix @@ -96,6 +96,8 @@ in pkgs.commonLib.defServiceModule sigKey = mayOpt str "Key with funds"; + tracerSocketPath = + mayOpt str "Socket path of cardano-tracer"; localNodeSocketPath = mayOpt str "Local node socket path"; localNodeConf = mayOpt attrs "Config of the local node"; @@ -119,7 +121,9 @@ in pkgs.commonLib.defServiceModule configExeArgsFn = cfg: [ "json_highlevel" "${pkgs.writeText "tx-gen-config.json" (cfg.decideRunScript cfg)}" - ]; + ] ++ optionals (cfg.tracerSocketPath != null) [ + "--cardano-tracer" cfg.tracerSocketPath + ]; configSystemdExtraConfig = _: {}; @@ -129,7 +133,7 @@ in pkgs.commonLib.defServiceModule User = "cardano-node"; Group = "cardano-node"; Restart = "no"; - RuntimeDirectory = localNodeConf.runtimeDir; - WorkingDirectory = localNodeConf.stateDir; + RuntimeDirectory = localNodeConf.runtimeDir 0; + WorkingDirectory = localNodeConf.stateDir 0; }; }) diff --git a/nix/workbench/backend/services-config.nix b/nix/workbench/backend/services-config.nix new file mode 100644 index 00000000000..4957d5949ca --- /dev/null +++ b/nix/workbench/backend/services-config.nix @@ -0,0 +1,119 @@ +{ lib +, workbench +## +, basePort ? 30000 +, stateDir ? "run/current" +, useCabalRun ? false +, enableEKG ? true +}: +with lib; +{ + ## Generic Nix bits: + topologyForNodeSpec = + { profile, nodeSpec }: + let inherit (nodeSpec) name i; in + workbench.runWorkbench + "topology-${name}.json" + "topology projection-for local-${nodeSpec.kind} ${toString i} ${profile.name} ${profile.topology.files} ${toString basePort}"; + + nodePublicIP = + { i, name, ... 
}@nodeSpec: + "127.0.0.1"; + + finaliseNodeService = + let time_fmtstr = + "{ " + escape [''"''] (concatStringsSep ''\n, '' time_entries) + " }"; + time_entries = [ + ''"wall_clock_s": %e'' + ''"user_cpu_s": %U'' + ''"sys_cpu_s": %S'' + ''"avg_cpu_pct": "%P"'' + ''"rss_peak_kb": %M'' + ''"signals_received": %k'' + ''"ctxsw_involuntary": %c'' + ''"ctxsw_volunt_waits": %w'' + ''"pageflt_major": %F'' + ''"pageflt_minor": %R'' + ''"swaps": %W'' + ''"io_fs_reads": %I'' + ''"io_fs_writes": %O'' + ''"cmdline": "%C"'' + ''"exit_code": %x'' + ]; + in + profile: { name, i, isProducer, ... }: svc: recursiveUpdate svc + ({ + stateDir = stateDir + "/${name}"; + ## Everything is local in the supervisord setup: + socketPath = "node.socket"; + topology = "topology.json"; + nodeConfigFile = "config.json"; + } // optionalAttrs useCabalRun { + # Make the shell function take over. + executable = "cardano-node"; + # executable = ''time -f "${time_fmtstr}" -o kernel-resource-summary.json cabal run exe:cardano-node ''${WB_FLAGS_RTS} -- +RTS -sghc-rts-report.txt -RTS''; + } // optionalAttrs isProducer { + operationalCertificate = "./genesis/node-keys/node${toString i}.opcert"; + kesKey = "./genesis/node-keys/node-kes${toString i}.skey"; + vrfKey = "./genesis/node-keys/node-vrf${toString i}.skey"; + } // optionalAttrs profile.node.tracer { + tracerSocketPathConnect = "../tracer/tracer.socket"; + }); + + finaliseNodeConfig = + { port, ... 
}: cfg: recursiveUpdate cfg + ( + { + AlonzoGenesisFile = "./genesis/genesis.alonzo.json"; + ShelleyGenesisFile = "./genesis/genesis-shelley.json"; + ByronGenesisFile = "./genesis/byron/genesis.json"; + } + // optionalAttrs enableEKG + (let portShiftEkg = 100; + portShiftPrometheus = 200; + in { + hasEKG = port + portShiftEkg; + hasPrometheus = ["127.0.0.1" (port + portShiftPrometheus)]; + setupBackends = [ + "EKGViewBK" + ]; + }) + ); + + finaliseNodeArgs = + profile: nodeSpec: args: args; + + finaliseGeneratorService = + profile: svc: recursiveUpdate svc + ({ + sigKey = "./genesis/utxo-keys/utxo1.skey"; + runScriptFile = "run-script.json"; + ## path to the config and socket of the locally running node. + nodeConfigFile = "./node-0/config.json"; + localNodeSocketPath = "./node-0/node.socket"; + } // optionalAttrs profile.node.tracer { + tracerSocketPath = "../tracer/tracer.socket"; + } // optionalAttrs useCabalRun { + executable = "cabal run exe:tx-generator --"; + }); + + finaliseGeneratorConfig = + cfg: recursiveUpdate cfg + ({ + AlonzoGenesisFile = "./genesis/genesis.alonzo.json"; + ShelleyGenesisFile = "./genesis/genesis-shelley.json"; + ByronGenesisFile = "./genesis/byron/genesis.json"; + } // optionalAttrs useCabalRun { + executable = "tx-generator"; + }); + + finaliseTracerService = + svc: recursiveUpdate svc + ({ + configFile = "config.json"; + logRoot = "."; + } // optionalAttrs useCabalRun { + executable = "cardano-tracer"; + }); + +} diff --git a/nix/workbench/backend/supervisor-conf.nix b/nix/workbench/backend/supervisor-conf.nix new file mode 100644 index 00000000000..7ad0b53dec4 --- /dev/null +++ b/nix/workbench/backend/supervisor-conf.nix @@ -0,0 +1,84 @@ +{ pkgs +, lib +, stateDir +, basePort +, node-services + ## Last-moment overrides: +, extraBackendConfig +}: + +with lib; + +let + ## + ## supervisorConf :: SupervisorConf + ## + ## Refer to: http://supervisord.org/configuration.html + ## + supervisorConf = + { + supervisord = { + logfile = 
"${stateDir}/supervisor/supervisord.log"; + pidfile = "${stateDir}/supervisor/supervisord.pid"; + strip_ansi = true; + }; + supervisorctl = {}; + inet_http_server = { + port = "127.0.0.1:9001"; + }; + "rpcinterface:supervisor" = { + "supervisor.rpcinterface_factory" = "supervisor.rpcinterface:make_main_rpcinterface"; + }; + } + // + listToAttrs + (mapAttrsToList (_: nodeSvcSupervisorProgram) node-services) + // + { + "program:generator" = { + directory = "${stateDir}/generator"; + command = "sh start.sh"; + stdout_logfile = "${stateDir}/generator/stdout"; + stderr_logfile = "${stateDir}/generator/stderr"; + autostart = false; + autorestart = false; + startretries = 1; + startsecs = 5; + }; + } + // + { + "program:tracer" = { + directory = "${stateDir}/tracer"; + command = "sh start.sh"; + stdout_logfile = "${stateDir}/tracer/stdout"; + stderr_logfile = "${stateDir}/tracer/stderr"; + autostart = false; + autorestart = false; + startretries = 1; + stopasgroup = true; + killasgroup = true; + }; + } + // + extraBackendConfig; + + ## + ## nodeSvcSupervisorProgram :: NodeService -> SupervisorConfSection + ## + ## Refer to: http://supervisord.org/configuration.html#program-x-section-settings + ## + nodeSvcSupervisorProgram = { nodeSpec, service, ... 
}: + nameValuePair "program:${nodeSpec.value.name}" { + directory = "${service.value.stateDir 0}"; + command = "sh start.sh"; + stdout_logfile = "${service.value.stateDir 0}/stdout"; + stderr_logfile = "${service.value.stateDir 0}/stderr"; + autostart = false; + autorestart = false; + startretries = 1; + }; + +in + pkgs.writeText "supervisor.conf" + (generators.toINI {} supervisorConf) diff --git a/nix/workbench/backend/supervisor-run.nix b/nix/workbench/backend/supervisor-run.nix new file mode 100644 index 00000000000..f1b4f890e2f --- /dev/null +++ b/nix/workbench/backend/supervisor-run.nix @@ -0,0 +1,150 @@ +let + batchNameDefault = "plain"; + profileNameDefault = "default-bage"; +in +{ pkgs +, cardanoNodePackages +, supervisord-workbench +## +, profileName ? profileNameDefault +, batchName ? batchNameDefault +## +, workbenchDevMode ? false +, cardano-node-rev ? "0000000000000000000000000000000000000000" +}: +let + inherit (supervisord-workbench) workbench backend cacheDir stateDir basePort; + + with-supervisord-profile = + { envArgsOverride ? {} }: ## TODO: envArgsOverride is not used! 
+ workbench.with-profile + { inherit backend profileName; }; + + inherit (with-supervisord-profile {}) profileNix profile topology genesis; +in + let + + inherit (profile.value) era composition monetary; + + path = pkgs.lib.makeBinPath path'; + path' = + [ cardanoNodePackages.bech32 pkgs.jq pkgs.gnused pkgs.coreutils pkgs.bash pkgs.moreutils + ] + ## In dev mode, call the script directly: + ++ pkgs.lib.optionals (!workbenchDevMode) + [ workbench.workbench ]; + + interactive-start = pkgs.writeScriptBin "start-cluster" '' + set -euo pipefail + + export PATH=$PATH:${path} + + wb start \ + --batch-name ${batchName} \ + --profile-name ${profileName} \ + --profile ${profile} \ + --cache-dir ${cacheDir} \ + --base-port ${toString basePort} \ + ''${WB_MODE_CABAL:+--cabal} \ + "$@" + ''; + + interactive-stop = pkgs.writeScriptBin "stop-cluster" '' + set -euo pipefail + + wb finish "$@" + ''; + + interactive-restart = pkgs.writeScriptBin "restart-cluster" '' + set -euo pipefail + + wb run restart "$@" && \ + echo "workbench: alternate command for this action: wb run restart" >&2 + ''; + + nodeBuildProduct = + name: + "report ${name}-log $out ${name}/stdout"; + + profile-run = + { trace ? 
false }: + let + inherit + (with-supervisord-profile + { envArgsOverride = { cacheDir = "./cache"; stateDir = "./"; }; }) + profileNix profile topology genesis; + + run = pkgs.runCommand "workbench-run-supervisord-${profileName}" + { requiredSystemFeatures = [ "benchmark" ]; + nativeBuildInputs = with cardanoNodePackages; with pkgs; [ + bash + bech32 + coreutils + gnused + jq + moreutils + nixWrapped + pstree + python3Packages.supervisor + workbench.workbench + zstd + ]; + } + '' + mkdir -p $out/{cache,nix-support} + cd $out + export HOME=$out + + export WB_BACKEND=supervisor + export CARDANO_NODE_SOCKET_PATH=$(wb backend get-node-socket-path ${stateDir} node-0) + + cmd=( + wb + ${pkgs.lib.optionalString trace "--trace"} + start + --profile-name ${profileName} + --profile ${profile} + --topology ${topology} + --genesis-cache-entry ${genesis} + --batch-name smoke-test + --base-port ${toString basePort} + --node-source ${cardanoNodePackages.cardano-node.src.origSrc} + --node-rev ${cardano-node-rev} + --cache-dir ./cache + ) + echo "''${cmd[*]}" > $out/wb-start.sh + + time "''${cmd[@]}" 2>&1 | + tee $out/wb-start.log + + ## Convert structure from $out/run/RUN-ID/* to $out/*: + rm -rf cache + rm -f run/{current,-current} + find $out -type s | xargs rm -f + run=$(cd run; ls) + (cd run; tar c $run --zstd) > archive.tar.zst + mv run/$run/* . 
+ rmdir run/$run run + + cat > $out/nix-support/hydra-build-products < SupervisorConf/DockerConf + supervisorConfPath = + import ./supervisor-conf.nix + { inherit (profileNix) node-services; + inherit + pkgs lib stateDir + basePort + extraBackendConfig; + }; + } + '' + mkdir $out + cp $supervisorConfPath $out/supervisor.conf + ''; + }; +in +{ + inherit cacheDir stateDir basePort; + inherit workbench; + inherit backend; +} diff --git a/nix/workbench/backend/supervisor.sh b/nix/workbench/backend/supervisor.sh new file mode 100755 index 00000000000..8d5fd07638a --- /dev/null +++ b/nix/workbench/backend/supervisor.sh @@ -0,0 +1,300 @@ +usage_supervisor() { + usage "supervisor" "Backend: manages a local cluster using 'supervisord'" </dev/null | grep ':9001 ' | wc -l)" != "0";; + + setenv-defaults ) + local usage="USAGE: wb supervisor $op PROFILE-DIR" + local profile_dir=${1:?$usage} + + setenvjq 'port_shift_ekg' 100 + setenvjq 'port_shift_prometheus' 200 + setenvjq 'port_shift_rtview' 300 + setenvjqstr 'supervisor_conf' "$profile_dir"/supervisor.conf + ;; + + allocate-run ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + while test $# -gt 0 + do case "$1" in + --* ) msg "FATAL: unknown flag '$1'"; usage_supervisor;; + * ) break;; esac; shift; done + + local supervisor_conf=$(envjqr 'supervisor_conf') + + mkdir -p "$dir"/supervisor + cp -f $supervisor_conf "$dir"/supervisor/supervisord.conf + + # Node's config.json expects a genesis folder inside its same directory. + # Same for generator with genesis and also with the node.socket paths. 
+ for node in $(jq_tolist 'keys' "$dir"/node-specs.json) + do local node_dir="$dir"/$node + ln -s "$dir"/genesis "$node_dir"/genesis + ln -s "$dir"/$node "$dir"/generator/$node + done + ln -s "$dir"/genesis "$dir"/generator/genesis + ;; + + describe-run ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage} + + local basePort=$( envjq 'basePort') + local port_ekg=$(( basePort+$(envjq 'port_shift_ekg'))) + local port_prometheus=$((basePort+$(envjq 'port_shift_prometheus'))) + local port_rtview=$(( basePort+$(envjq 'port_shift_rtview'))) + + cat <&2 + while test ! -S $socket + do printf "%3d" $i; sleep 1 + i=$((i+1)) + if test $i -ge $patience + then echo + progress "supervisor" "$(red FATAL): workbench: supervisor: patience ran out for $(white $node) after ${patience}s, socket $socket" + backend_supervisor stop-cluster "$dir" + fatal "$node startup did not succeed: check logs in $(dirname $socket)/stdout & stderr" + fi + echo -ne "\b\b\b" + done >&2 + echo " $node up (${i}s)" >&2 + ;; + + start-nodes ) + local usage="USAGE: wb supervisor $op RUN-DIR [HONOR_AUTOSTART=]" + local dir=${1:?$usage}; shift + local honor_autostart=${1:-} + + local nodes=($(jq_tolist keys "$dir"/node-specs.json)) + + if test -n "$honor_autostart" + then for node in ${nodes[*]} + do jqtest ".\"$node\".autostart" "$dir"/node-specs.json && + supervisorctl start $node; done; + else supervisorctl start ${nodes[*]}; fi + + for node in ${nodes[*]} + do jqtest ".\"$node\".autostart" "$dir"/node-specs.json && + backend_supervisor wait-node "$dir" $node; done + + if test ! -v CARDANO_NODE_SOCKET_PATH + then export CARDANO_NODE_SOCKET_PATH=$(backend_supervisor get-node-socket-path "$dir" 'node-0') + fi + + backend_supervisor save-child-pids "$dir" + backend_supervisor save-pid-maps "$dir" + ;; + + start ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + if ! 
supervisord --config "$dir"/supervisor/supervisord.conf $@ + then progress "supervisor" "$(red fatal: failed to start) $(white supervisord)" + echo "$(red supervisord.conf) --------------------------------" >&2 + cat "$dir"/supervisor/supervisord.conf + echo "$(red supervisord.log) ---------------------------------" >&2 + cat "$dir"/supervisor/supervisord.log + echo "$(white -------------------------------------------------)" >&2 + fatal "could not start $(white supervisord)" + fi + + if jqtest ".node.tracer" "$dir"/profile.json + then if ! supervisorctl start tracer + then progress "supervisor" "$(red fatal: failed to start) $(white cardano-tracer)" + echo "$(red tracer-config.json) ------------------------------" >&2 + cat "$dir"/tracer/tracer-config.json + echo "$(red tracer stdout) -----------------------------------" >&2 + cat "$dir"/tracer/stdout + echo "$(red tracer stderr) -----------------------------------" >&2 + cat "$dir"/tracer/stderr + echo "$(white -------------------------------------------------)" >&2 + fatal "could not start $(white cardano-tracer)" + fi + + progress_ne "supervisor" "waiting for $(yellow cardano-tracer) to create socket: " + while test ! -e "$dir"/tracer/tracer.socket; do sleep 1; done + echo $(green ' OK') >&2 + backend_supervisor save-child-pids "$dir" + fi;; + + get-node-socket-path ) + local usage="USAGE: wb supervisor $op STATE-DIR NODE-NAME" + local state_dir=${1:?$usage} + local node_name=${2:?$usage} + + echo -n $state_dir/$node_name/node.socket + ;; + + start-generator ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + while test $# -gt 0 + do case "$1" in + --* ) msg "FATAL: unknown flag '$1'"; usage_supervisor;; + * ) break;; esac; shift; done + + if ! 
supervisorctl start generator + then progress "supervisor" "$(red fatal: failed to start) $(white generator)" + echo "$(red generator.json) ------------------------------" >&2 + cat "$dir"/tracer/tracer-config.json + echo "$(red tracer stdout) -----------------------------------" >&2 + cat "$dir"/tracer/stdout + echo "$(red tracer stderr) -----------------------------------" >&2 + cat "$dir"/tracer/stderr + echo "$(white -------------------------------------------------)" >&2 + fatal "could not start $(white supervisord)" + fi + backend_supervisor save-child-pids "$dir";; + + wait-node-stopped ) + local usage="USAGE: wb supervisor $op RUN-DIR NODE" + local dir=${1:?$usage}; shift + local node=${1:?$usage}; shift + + progress_ne "supervisor" "waiting until $node stops: ....." + local i=0 + while supervisorctl status $node > /dev/null + do echo -ne "\b\b\b\b\b"; printf "%5d" $i >&2; i=$((i+1)); sleep 1 + done >&2 + echo -e "\b\b\b\b\bdone, after $(with_color white $i) seconds" >&2 + ;; + + wait-pools-stopped ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + local i=0 pools=$(jq .composition.n_pool_hosts $dir/profile.json) start_time=$(date +%s) + msg_ne "supervisor: waiting until all pool nodes are stopped: 000000" + touch $dir/flag/cluster-termination + + for ((pool_ix=0; pool_ix < $pools; pool_ix++)) + do while supervisorctl status node-${pool_ix} > /dev/null && + test -f $dir/flag/cluster-termination + do echo -ne "\b\b\b\b\b\b"; printf "%6d" $((i + 1)); i=$((i+1)); sleep 1; done + echo -ne "\b\b\b\b\b\b"; echo -n "node-${pool_ix} 000000" + done >&2 + echo -ne "\b\b\b\b\b\b" + local elapsed=$(($(date +%s) - start_time)) + if test -f $dir/flag/cluster-termination + then echo " All nodes exited -- after $(yellow $elapsed)s" >&2 + else echo " Termination requested -- after $(yellow $elapsed)s" >&2; fi + ;; + + stop-cluster ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + supervisorctl stop all 
|| true + + if test -f "${dir}/supervisor/supervisord.pid" + then kill $(<${dir}/supervisor/supervisord.pid) $(<${dir}/supervisor/child.pids) 2>/dev/null + else pkill supervisord + fi + ;; + + cleanup-cluster ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + msg "supervisor: resetting cluster state in: $dir" + rm -f $dir/*/std{out,err} $dir/node-*/*.socket $dir/*/logs/* 2>/dev/null || true + rm -fr $dir/node-*/state-cluster/;; + + save-child-pids ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + local svpid=$dir/supervisor/supervisord.pid + local pstree=$dir/supervisor/ps.tree + pstree -p "$(cat "$svpid")" > "$pstree" + + local pidsfile="$dir"/supervisor/child.pids + { grep -e '---\|--=' "$pstree" || true; } | + sed 's/^.*--[=-] \([0-9]*\) .*/\1/; s/^[ ]*[^ ]* \([0-9]+\) .*/\1/ + ' > "$pidsfile" + ;; + + save-pid-maps ) + local usage="USAGE: wb supervisor $op RUN-DIR" + local dir=${1:?$usage}; shift + + local mapn2p=$dir/supervisor/node2pid.map; echo '{}' > "$mapn2p" + local mapp2n=$dir/supervisor/pid2node.map; echo '{}' > "$mapp2n" + local pstree=$dir/supervisor/ps.tree + + for node in $(jq_tolist keys "$dir"/node-specs.json) + do ## supervisord's service PID is the immediately invoked binary, + ## ..which isn't necessarily 'cardano-node', but could be 'time' or 'cabal' or.. + local service_pid=$(supervisorctl pid $node) + if test $service_pid = '0' + then continue + elif test -z "$(ps h --ppid $service_pid)" ## Any children? + then local pid=$service_pid ## <-=^^^ none, in case we're running executables directly. 
+ ## ..otherwise, it's a chain of children, e.g.: time -> cabal -> cardano-node + else local pid=$(grep -e "[=-] $(printf %05d $service_pid) " -A5 "$pstree" | + grep -e '---\|--=' | + head -n1 | + sed 's/^.*--[=-] \([0-9]*\) .*/\1/; + s/^[ ]*[^ ]* \([0-9]*\) .*/\1/') + fi + if test -z "$pid" + then warn "supervisor" "failed to detect PID of $(white $node)"; fi + jq_fmutate "$mapn2p" '. * { "'$node'": '$pid' }' + jq_fmutate "$mapp2n" '. * { "'$pid'": "'$node'" }' + done + ;; + + * ) usage_supervisor;; esac +} diff --git a/nix/workbench/chain-filters/epoch3+.json b/nix/workbench/chain-filters/epoch3+.json new file mode 100644 index 00000000000..1e5722d3f1a --- /dev/null +++ b/nix/workbench/chain-filters/epoch3+.json @@ -0,0 +1,7 @@ +[ { "tag": "CSlot" + , "contents": + { "tag": "EpochGEq" + , "contents": 2 + } + } +] diff --git a/nix/workbench/chain-filters/model.json b/nix/workbench/chain-filters/model.json new file mode 100644 index 00000000000..c26fcf3f8f3 --- /dev/null +++ b/nix/workbench/chain-filters/model.json @@ -0,0 +1,13 @@ +[ { "tag": "CSlot" + , "contents": + { "tag": "SlotLEq" + , "contents": 56000 + } + } +, { "tag":"CBlock" + , "contents": + { "tag": "BMinimumAdoptions" + , "contents": 50 + } + } +] diff --git a/nix/workbench/chain-filters/unitary.json b/nix/workbench/chain-filters/unitary.json new file mode 100644 index 00000000000..b1f1f3ee3cb --- /dev/null +++ b/nix/workbench/chain-filters/unitary.json @@ -0,0 +1,7 @@ +[ { "tag": "CBlock" + , "contents": + { "tag": "BUnitaryChainDelta" + , "contents": true + } + } +] diff --git a/nix/workbench/ede/chart.ede b/nix/workbench/ede/chart.ede new file mode 100644 index 00000000000..68e981c35fc --- /dev/null +++ b/nix/workbench/ede/chart.ede @@ -0,0 +1,25 @@ +#+begin_src gnuplot :file {{ args.metric }}.png +load "../../bench/workbench.gnuplot" +{% if args.logscale %} +set logscale y +{% else %} +unset logscale y +{% endif %} +{% if args.yrange %} +set yrange [{{ args.yrange }}] +{% else %} +set yrange 
[*:*] +{% endif %} +{% if args.ylabel %} +set ylabel "{{ args.ylabel }}" +{% endif %} +eval cdfI_{{ (runs | length) + 1 }}("{{ args.metric }}", "{{ args.title }}", \ + "{{ base.ver }}/{{ base.meta.era | toTitle }}", "{{ base.meta.tag }}", \ +{% for run in runs %} + "{{ run.value.ver }}/{{ run.value.meta.era | toTitle }}", "{{ run.value.meta.tag }}"{% if !run.last %},{% endif %} \ +{% endfor %} + ) +#+end_src + +#+RESULTS: + diff --git a/nix/workbench/ede/manifest.ede b/nix/workbench/ede/manifest.ede new file mode 100644 index 00000000000..2b3c52b7864 --- /dev/null +++ b/nix/workbench/ede/manifest.ede @@ -0,0 +1,9 @@ +#+LATEX: \scriptsize +| load | era | approx ver | node | plutu | ledge | conse | | +|-------+---------+------------+-------+-------+-------+-------+---------------------------------------------------------------------| +| {{ base.workload }} | {{ base.meta.era | toTitle }} | {{ base.ver}} | {{ base.rev.node }} | {{ base.rev.plutus }} | {{ base.rev.ledger }} | {{ base.rev.network }} | {{ base.meta.tag }} | +{% for run in runs %} +| {{ run.value.workload }} | {{ run.value.meta.era | toTitle }} | {{ run.value.ver}} | {{ run.value.rev.node }} | {{ run.value.rev.plutus }} | {{ run.value.rev.ledger }} | {{ run.value.rev.network }} | {{ run.value.meta.tag }} | +{% endfor %} +|-------+---------+------------+-------+-------+-------+-------+---------------------------------------------------------------------| +#+LATEX: \normalsize diff --git a/nix/workbench/ede/report.ede b/nix/workbench/ede/report.ede new file mode 100644 index 00000000000..3b55bf0c43a --- /dev/null +++ b/nix/workbench/ede/report.ede @@ -0,0 +1,75 @@ +#+CONSTANTS: perf=clusterperf.report.org forge=blockprop.forger.org peers=blockprop.peers.org end2end=blockprop.endtoend.org +#+CONSTANTS: base=../{{ base.meta.tag }}/analysis +{% for run in runs %} +#+CONSTANTS: run{{ run.index }}=../{{ run.value.meta.tag }}/analysis +{% endfor %} +#+LATEX_CLASS: report +#+LATEX_CLASS_OPTIONS: [a4paper,7pt] 
+#+LATEX_HEADER: \usepackage[margin=0.5in]{geometry} +#+latex_header: \usepackage{setspace} +#+latex_header: \onehalfspacing +#+LATEX_COMPILER: xelatex +#+EXPORT_FILE_NAME: {{ report.target }}.{{ base.workload }}.pdf +#+TITLE: {{ report.target }} against {{ base.ver }} +#+SUBTITLE: {{ base.workload }} workload +#+SUBTITLE: \break\small{revision} ={{ report.revision }}= +#+AUTHOR: {{ report.author }}, Cardano Performance team +#+DATE: {{ report.date }} + +*** Manifest + +We compare ... relative to ={{ base.ver }}= /{{ base.meta.era | toTitle }}, under {{ base.workload }} workload. + +{% include "manifest.ede" %} + +***** Revision history + - rev 1, {{ report.date }}: initial release + +*** Analysis +***** Resource Usage + +{% include "table.ede" with table = { "rows":"1234567", "src":"perf" } %} + +***** Forging + +{% include "table.ede" with table = { "rows":"1234", "src":"forge" } %} + +***** Individual peer propagation + +{% include "table.ede" with table = { "rows":"1234", "src":"peers" } %} + +***** End-to-end propagation + +{% include "table.ede" with table = { "rows":"12345678", "src":"end2end" } %} + +*** Observations +***** Resources + +1. ... + +***** Forging + +1. ... + +***** Peer propagation + +1. ... + +***** End-to-end propagation + +... 
+ +@duncan, @Kevin Hammond, @Nigel Hemsley, @neil, @jared.corduan, @Damian, @nfrisby, @amesgen, @marcin, @Vitor Silva, @Javier Franco, @carlos.lopezdelara, @disasm, @michael.karg + +* Appendix: charts + +{% include "chart.ede" with args = { "title": "Kernel-reported CPU usage", "ylabel": "CPU usage, %", "metric": "cpuProcess", "yrange": "1:200", "logscale": false } %} +{% include "chart.ede" with args = { "title": "RTS memory allocation rate", "ylabel": "Memory allocation rate, MB/s", "metric": "rtsAllocation", "yrange": "0:5000", "logscale": false } %} +{% include "chart.ede" with args = { "title": "Kernel reported memory usage", "ylabel": "RSS, MB", "metric": "memRSS", "logscale": false } %} +{% include "chart.ede" with args = { "title": "RTS GC live bytes", "ylabel": "GC live bytes, MB", "metric": "rtsLiveBytes", "logscale": false } %} +{% include "chart.ede" with args = { "title": "Single peer fetched-to-adopted time", "metric": "pAdopted" } %} +{% include "chart.ede" with args = { "title": "First peer notice time", "metric": "pNoticed" } %} +{% include "chart.ede" with args = { "title": "Block adoption, 50% of cluster", "metric": "prop0.50" } %} +{% include "chart.ede" with args = { "title": "Block adoption, 80% of cluster", "metric": "prop0.80" } %} +{% include "chart.ede" with args = { "title": "Block adoption, 90% of cluster", "metric": "prop0.90" } %} +{% include "chart.ede" with args = { "title": "Block adoption, 96% of cluster", "metric": "prop0.96" } %} diff --git a/nix/workbench/ede/table.ede b/nix/workbench/ede/table.ede new file mode 100644 index 00000000000..c98082ab147 --- /dev/null +++ b/nix/workbench/ede/table.ede @@ -0,0 +1,10 @@ +| | {{ base.ver }}{% for run in runs %} | {{ run.value.ver }} | Δ | Δ%{% endfor %} | +|---------------------------+-----{% for run in runs %}-+------+------+-----{% endfor %}-| +{% for i in table.rows %} +| | {% for run in runs %} | | | {% endfor %} | +{% endfor %} +|---------------------------+-----{% for run in runs 
%}-+------+------+-----{% endfor %}-| +#+TBLFM: $2=remote(file:$base/${{ table.src }},@@#$average);p3::$1=remote(file:$base/${{ table.src }},@@#$metric) +{% for run in runs %} +#+TBLFM: ${{ run.index * 3 }}=remote(file:$run{{ run.index }}/${{ table.src }},@@#$average);p3::${{ run.index * 3 + 1 }}=${{ run.index * 3 }}-$2;p3::${{ run.index * 3 + 2 }}=round(100*${{ run.index * 3 + 1 }}/$2) +{% endfor %} diff --git a/nix/workbench/profiles/prof0-defaults.jq b/nix/workbench/profiles/prof0-defaults.jq new file mode 100644 index 00000000000..00f66f774fe --- /dev/null +++ b/nix/workbench/profiles/prof0-defaults.jq @@ -0,0 +1,123 @@ +## Testable with: +## +## jq -n 'include "prof0-defaults" { search: "nix/workbench/profiles" }; era_defaults("babbage")' +## +def era_defaults($era): +{ common: + { era: $era + + ## Choice of a cluster run scenario (wb scenario --help): + , scenario: "fixed-loaded" + + ## Cluster topology and composition: + , composition: + { locations: ["LO"] + , n_bft_hosts: 0 + , n_singular_hosts: 5 + , n_dense_hosts: 1 + , dense_pool_density: 1 + , with_proxy: false + , with_observer: false + , topology: "uni-circle" + } + + , genesis: + ## Trivia + { network_magic: 42 + + ## Incrementality + , single_shot: true + + ## UTxO & delegation + , per_pool_balance: 1000000000000000 + , funds_balance: 10000000000000 + , utxo: 0 + , decentralisation_param: 0 + + ## Blockchain time & block density + , active_slots_coeff: 0.05 + , epoch_length: 600 # Ought to be at least (10 * k / f). 
+ , parameter_k: 3 + , slot_duration: 1 + + ## Block size & contents + , max_block_size: 80000 + , max_tx_size: 16384 + + ## Verbatim overlay, for all era-specific genesis slices: + , shelley: + { protocolParams: + { poolDeposit: 500000000 + , keyDeposit: 400000 + , rho: 0.0022 + , tau: 0.05 + , a0: 0.3 + , minFeeA: 0 + , minFeeB: 0 + , decentralisationParam: 0 + , nOpt: 50 + } + } + } + + , generator: + { add_tx_size: 100 + , init_cooldown: 5 + , inputs_per_tx: 2 + , outputs_per_tx: 2 + , tx_fee: 1000000 + , epochs: 3 + , tps: 12 + } + + , node: + { rts_flags_override: [] + , shutdown_on_slot_synced: null + , shutdown_on_block_synced: null + , tracing_backend: "trace-dispatcher" ## or "iohk-monitoring" + , tracer: true + , verbatim: + { + } + } + + , analysis: + { type: "standard" + , cluster_startup_overhead_s: 10 + , start_log_spread_s: 120 + , last_log_spread_s: 120 + , silence_since_last_block_s: 120 + , tx_loss_ratio: 0.02 + , finish_patience: 21 + , filters: ["unitary"] + } + } + +, shelley: + { analysis: + { maximum_missed_slots: 0 + } + } + +, allegra: + { + } + +, mary: + { + } + +, alonzo: + ({} | + .genesis.shelley.protocolParams.protocolVersion = + { major: 5 + , minor: 0 + }) + +, babbage: + ({} | + .genesis.shelley.protocolParams.protocolVersion = + { major: 5 + , minor: 0 + }) +} | (.common * .[$era]); diff --git a/nix/workbench/profiles/prof1-variants.jq b/nix/workbench/profiles/prof1-variants.jq new file mode 100644 index 00000000000..8372e3c34f3 --- /dev/null +++ b/nix/workbench/profiles/prof1-variants.jq @@ -0,0 +1,526 @@ +## Profile variants are defined as a cartesian product of +## variations of genesis/generator/node axes. 
+ +def all_profile_variants: + 1024 as $Ki + | 1000000 as $M + #################################################################################################### + ## + ### Historic record + ## + | { genesis: + { utxo: (4 * $M) + , delegators: (1 * $M) + } + } as $dataset_oct2021 + | + ({} | + .genesis.max_block_size = (72 * $Ki) + ) as $blocksize_dec2021 + | + { genesis: + ({} | + .max_block_size = (72 * $Ki) | ## ??? + .alonzo.maxTxExUnits.exUnitsMem = (12.5 * $M) ) ## RMT-54 CR.059 + } as $params_jan2022 + | + { genesis: + ({}| + .max_block_size = (80 * $Ki) | ## RMT-56 CAD-3891 CR.061 + .alonzo.maxTxExUnits.exUnitsMem = (14 * $M) | ## RMT-56 + .alonzo.maxBlockExUnits.exUnitsMem = (56 * $M)) ## CAD-3945 + } as $params_feb2022 + | + { genesis: + { utxo: (6 * $M) + , delegators: (1.3 * $M) + } + } as $dataset_mar2022 + | + { genesis: + { max_block_size: (88 * $Ki) } ## CAD-4153 CR.068 + } as $blocksize_may2022 + | + { genesis: + { utxo: (8 * $M) + , delegators: (1.3 * $M) + } + } as $dataset_jun2022 + | + { genesis: + ({}| + .alonzo.maxBlockExUnits.exUnitsMem = (62 * $M)) ## CAD-3991 CR.064 + } as $plutus_next + | + #################################################################################################### + ## + ### Definition vocabulary: dataset size + ## + $dataset_jun2022 + as $current_dataset + | ({}| + .generator.tps = 15 + ) as $current_tps_saturation_value + | + ({}| + .generator.tps = 0.2 + ) as $current_tps_saturation_plutus + | + ({}| + .genesis.max_block_size = $params_feb2022.genesis.max_block_size + ) as $current_block_size + | + ({}| + .genesis.alonzo = $params_feb2022.genesis.alonzo + ) as $current_plutus + | + ($current_block_size * + $current_plutus * + { genesis: + { utxo: 0 + , delegators: 0 + } + }) as $dataset_empty + | + ($current_block_size * + $current_plutus * + { genesis: + { utxo: (0.5 * $M) + , delegators: (0.1 * $M) + } + }) as $dataset_miniature + | + ($current_block_size * + $current_plutus * + $current_dataset + ) as 
$dataset_status_quo + | + { genesis: + { utxo: (30 * $M) + , delegators: 0 + , max_block_size: (1 * $M) + } + , generator: + { tps: (1 * $M / (360 * 20)) + } + } as $dataset_dish + | + ## + ### Definition vocabulary: chain + ## + { chaindb: + { mainnet_chunks: + { chaindb_server: 10 + , observer: 0 + } + , ledger_snapshot: + { chaindb_server: 237599 + , observer: 0 + } + } + , node: + { shutdown_on_slot_synced: + { observer: 237599 + } + } + } as $chaindb_early_byron + | + ($dataset_oct2021 * + { chaindb: + { mainnet_chunks: + { chaindb_server: 1800 + , observer: 1799 + } + , ledger_snapshot: + { chaindb_server: 38901589 + , observer: 37173650 + } + } + , node: + { shutdown_on_slot_synced: + { observer: 38901589 + } + } + }) as $chaindb_early_alonzo + | + ## + ### Definition vocabulary: cluster size + ## + { composition: + { n_singular_hosts: 1 + , n_dense_hosts: 0 + } + } as $singleton + | + { composition: + { n_singular_hosts: 0 + , n_dense_hosts: 1 + , dense_pool_density: 10 + } + } as $singleton_dense10 + | + { composition: + { n_singular_hosts: 2 + , n_dense_hosts: 0 + } + } as $doublet + | + { composition: + { n_singular_hosts: 3 + , n_dense_hosts: 0 + } + } as $triplet + | + { composition: + { n_singular_hosts: 6 + , n_dense_hosts: 0 + } + } as $hexagon + | + { composition: + { n_singular_hosts: 10 + , n_dense_hosts: 0 + } + } as $tenner + | + { composition: + { n_singular_hosts: 0 + , n_dense_hosts: 0 + , with_chaindb_server: true + , with_observer: true + } + } as $chainsync_cluster + | + ## + ### Definition vocabulary: timescale + ## + { genesis: + { epoch_length: 600 + , parameter_k: 3 + } + } as $compressed_timescale + | + { genesis: + { epoch_length: (3600 * 24 * 5) + , parameter_k: (18 * 24 * 5) + } + } as $mainnet_timescale + | + ## + ### Definition vocabulary: duration + ## + ({} | + .generator.epochs = 3 + ) as $for_3ep + | + ({} | + .generator.epochs = 4 + ) as $for_4ep + | + ({} + | .node.shutdown_on_block_synced = 1 + | .analysis.filters = [] + 
) as $for_1blk + | + ({} + | .node.shutdown_on_block_synced = 3 + | .analysis.filters = [] + ) as $for_3blk + | + ({} + | .node.shutdown_on_block_synced = 15 + | .analysis.filters = [] + ) as $for_15blk + | + ({} + | .node.shutdown_on_block_synced = 30 + | .analysis.filters = [] + ) as $for_30blk + | + ## + ### Definition vocabulary: workload + ## + ($current_tps_saturation_plutus * + { extra_desc: "with Plutus workload" + , generator: + { inputs_per_tx: 1 + , outputs_per_tx: 1 + , plutusMode: true + , plutusAutoMode: true + } + , analysis: + { filters: ["size-small"] + } + }) as $plutus + | + ## + ### Definition vocabulary: node config variants + ## + ({ extra_desc: "without cardano-tracer" + , suffix: "notracer" + }| + .node.tracer = false + ) as $without_tracer + | + ({ extra_desc: "with legacy iohk-monitoring" + , suffix: "oldtracing" + }| + .node.tracing_backend = "iohk-monitoring" + ) as $old_tracing + | + ({ extra_desc: "with P2P networking" + , suffix: "p2p" + }| + .node.verbatim.EnableP2P = true + ) as $p2p + | + ## + ### Definition vocabulary: scenario + ## + ($mainnet_timescale * $chainsync_cluster * + { desc: "Mainnet chain syncing benchmark" + , scenario: "chainsync" + , preset: "mainnet" + , analysis: + { type: "performance" + , filters: [] + } + }) as $scenario_chainsync + | + ($compressed_timescale * $current_tps_saturation_value * + { scenario: "fixed-loaded" + }) as $scenario_fixed_loaded + | + ## + ### Definition vocabulary: base variant + ## + ($scenario_fixed_loaded * $doublet * $dataset_empty * $for_1blk * + { desc: "Stop as soon as we've seen a single block" + }) as $startstop_base + | + ($scenario_fixed_loaded * $doublet * $dataset_empty * $for_3blk * + { desc: "Miniature dataset, CI-friendly duration, test scale" + }) as $citest_base + | + ($scenario_fixed_loaded * $doublet * $dataset_miniature * $for_15blk * + { desc: "Miniature dataset, CI-friendly duration, bench scale" + }) as $cibench_base + | + ($scenario_fixed_loaded * $doublet * 
$dataset_oct2021 * + { node: + { shutdown_on_slot_synced: 2400 + } + , desc: "Oct 2021 dataset size, honest four epochs." + }) as $forge_stress_pre_base + | + ($scenario_fixed_loaded * $doublet * $dataset_status_quo * + { node: + { shutdown_on_slot_synced: 2400 + } + , desc: "Status-quo dataset size, honest four epochs." + }) as $forge_stress_base + | + ($scenario_fixed_loaded * $triplet * $dataset_dish * + { node: + { shutdown_on_slot_synced: 2400 + } + , desc: "Dish dataset & setup" + }) as $dish_base + | + #################################################################################################### + ## + ### Actual profiles + ## + + ### First, auto-named profiles: + ### + ## Short slots: + [ $dataset_status_quo * + ({}| + .genesis.slot_duration = 0.2 ) + + ## Dense pool: + , $dataset_status_quo * + ({}| + .genesis.dense_pool_density = 10 ) + + ## Sub-saturation TPS: + , ($dataset_status_quo | .generator.tps = 5 ) + , ($dataset_status_quo | .generator.tps = 10 ) + + ## Block size: + , ($dataset_status_quo | .genesis.max_block_size = 128000 | .generator.tps = 16 ) + , ($dataset_status_quo | .genesis.max_block_size = 256000 | .generator.tps = 32 ) + , ($dataset_status_quo | .genesis.max_block_size = 512000 | .generator.tps = 64 ) + , ($dataset_status_quo | .genesis.max_block_size = 1024000 | .generator.tps = 128 ) + , ($dataset_status_quo | .genesis.max_block_size = 2048000 | .generator.tps = 256 ) + + ### Next, semantically-named profiles: + ### + ## Base variants: + , { name: "default" + , desc: "Default, as per nix/workbench/profiles/defaults.jq" + } + , $plutus * + { name: "plutus" + , desc: "Default with Plutus workload" + } + , $old_tracing * + { name: "oldtracing" + , desc: "Default in legacy tracing mode" + } + + ## Fastest -- start-stop + , $startstop_base * + { name: "startstop" + } + , $startstop_base * $p2p * + { name: "startstop-p2p" + } + , $startstop_base * $plutus * + { name: "startstop-plutus" + } + , $startstop_base * $without_tracer * + { 
name: "startstop-notracer" + } + , $startstop_base * $old_tracing * + { name: "startstop-oldtracing" + } + + ## CI variants: test duration, 3 blocks + , $citest_base * + { name: "ci-test" + } + , $citest_base * $p2p * + { name: "ci-test-p2p" + } + , $citest_base * $plutus * + { name: "ci-test-plutus" + } + , $citest_base * $without_tracer * + { name: "ci-test-notracer" + } + + ## CI variants: bench duration, 15 blocks + , $cibench_base * + { name: "ci-bench" + } + , $cibench_base * $p2p * + { name: "ci-bench-p2p" + } + , $cibench_base * $plutus * + { name: "ci-bench-plutus" + } + , $cibench_base * $without_tracer * + { name: "ci-bench-notracer" + } + + ## CI variants: test duration, 3 blocks, dense10 + , $citest_base * $singleton_dense10 * + { name: "ci-test-dense10" + } + +## Dish variants + , $dish_base * + { name: "dish" + } + , $dish_base * + { name: "dish-10M" + , genesis: + { utxo: (10 * $M) + } + } + , $dish_base * $plutus * + { name: "dish-plutus" + } + , $dish_base * $plutus * + { name: "dish-10M-plutus" + , genesis: + { utxo: (10 * $M) + } + } + + ## Large local cluster -- 10 nodes + , $cibench_base * $tenner * + { name: "10" + } + , $cibench_base * $tenner * $p2p * + { name: "10-p2p" + } + , $cibench_base * $tenner * $plutus * + { name: "10-plutus" + } + , $cibench_base * $tenner * $without_tracer * + { name: "10-notracer" + } + + ## Status-quo (huge) dataset, small cluster (2 nodes) + , $forge_stress_base * + { name: "forge-stress" + } + , $forge_stress_base * $plutus * + { name: "forge-stress-p2p" + } + , $forge_stress_base * $plutus * + { name: "forge-stress-plutus" + } + , $forge_stress_base * $plutus * $singleton * + { name: "forge-stress-plutus-singleton" + } + , $forge_stress_base * $without_tracer * + { name: "forge-stress-notracer" + } + + , $forge_stress_pre_base * + { name: "forge-stress-pre" + } + , $forge_stress_pre_base * $plutus * + { name: "forge-stress-pre-plutus" + } + , $forge_stress_pre_base * $without_tracer * + { name: 
"forge-stress-pre-notracer" + } + + , $scenario_chainsync * $chaindb_early_byron * + { name: "chainsync-early-byron" + } + , $scenario_chainsync * $chaindb_early_byron * $without_tracer * + { name: "chainsync-early-byron-notracer" + } + , $scenario_chainsync * $chaindb_early_byron * $old_tracing * + { name: "chainsync-early-byron-oldtracing" + } + + , $scenario_chainsync * $chaindb_early_alonzo * + { name: "chainsync-early-alonzo" + } + , $scenario_chainsync * $chaindb_early_alonzo * $without_tracer * + { name: "chainsync-early-alonzo-notracer" + } + , $scenario_chainsync * $chaindb_early_alonzo * $old_tracing * + { name: "chainsync-early-alonzo-oldtracing" + } + , $scenario_chainsync * $chaindb_early_alonzo * $p2p * + { name: "chainsync-early-alonzo-p2p" + } + + ## Last, but not least, the profile used by "nix-shell -A devops": + , { name: "devops" + , scenario: "idle" + , genesis: + { slot_duration: 0.2 + , parameter_k: 10 + , epoch_length: 1000 + , active_slots_coeff: 0.1 + , genesis_future_offset: "10 seconds" + , utxo: 0 + + , shelley: + { updateQuorum: 1 + } + } + , analysis: + { type: null + } + } + ]; diff --git a/nix/workbench/profiles/prof2-derived.jq b/nix/workbench/profiles/prof2-derived.jq new file mode 100644 index 00000000000..39f1422005a --- /dev/null +++ b/nix/workbench/profiles/prof2-derived.jq @@ -0,0 +1,260 @@ +include "prof0-defaults"; +include "genesis"; +include "lib"; + +def default_value_tx_size_estimate: + 381; + +def may_mult($x): + if type != "number" then null else . * $x end; + +def may_attr($attr; $dict; $defdict; $scale; $suf): + if ($dict[$attr] // + error("undefined attr: \($attr)")) + != $defdict[$attr] + then [($dict[$attr] | . 
/ $scale | tostring) + $suf] else [] end; + +def profile_name($p): + era_defaults($p.era).genesis as $genesis_defaults +| era_defaults($p.era).generator as $generator_defaults +| era_defaults($p.era).composition as $composition_defaults +| era_defaults($p.era).node as $node_defaults +| $p.node.shutdown_on_block_synced as $shutdown_block +| $p.node.shutdown_on_slot_synced as $shutdown_slots + ## Genesis +| [ "k\($p.composition.n_pools)" ] + + if $p.composition.n_dense_hosts > 0 + then may_attr("dense_pool_density"; + $p.composition; $composition_defaults; 1; "ppn") + else [] end + + if $shutdown_slots | type == "number" + then [($shutdown_slots | tostring) + "slots"] + else [ ($p.generator.epochs | tostring) + "ep" + , ($p.generator.tx_count | . / 1000 | ceil | tostring) + "kTx" ] + end + + [ ($p.genesis.utxo | . / 1000 | tostring) + "kU" + , ($p.genesis.delegators | . / 1000 | tostring) + "kD" + , ($p.genesis.max_block_size | . / 1000 | tostring) + "kbs" + ] + + may_attr("tps"; + $p.generator; $generator_defaults; 1; "tps") + + may_attr("add_tx_size"; + $p.generator; $generator_defaults; 1; "b") + + may_attr("inputs_per_tx"; + $p.generator; $generator_defaults; 1; "i") + + may_attr("outputs_per_tx"; + $p.generator; $generator_defaults; 1; "o") + + if $p.generator.plutusMode | not then [] + else [ ($p.generator.plutusScript | rtrimstr(".plutus")) + , ($p.generator.plutusData | tostring) + ] end + + if $p.node.rts_flags_override == [] then [] + else ["RTS", ($p.node.rts_flags_override | join(""))] end + + if $p.composition.with_proxy + then ["prox"] + else [] end + + if $p.composition.with_chaindb_server + then ["chdb"] + else [] end + + if $p.composition.with_observer + then ["obsrv"] + else [] end + + if $p.scenario == "default" then [] else [$p.scenario] end + | join("-"); + +def profile_name_era_suffix($era): + "-\($era | (.[0:2] + .[-2:]))"; + +def add_derived_params: + (.genesis.utxo + .genesis.delegators) as $dataset_measure +| (if $dataset_measure < 10000 
then 10 + else $dataset_measure / 50000 + end | ceil) as $dataset_induced_startup_delay_optimistic +| (if $dataset_measure < 10000 then 10 + else $dataset_measure / 10000 + end | ceil) as $dataset_induced_startup_delay_conservative +| (.derived.genesis_future_offset // + "\($dataset_induced_startup_delay_optimistic) seconds") + as $genesis_future_offset +| .composition as $compo +| .genesis as $gsis +| .generator as $gtor +| .analysis as $ana +| .era as $era +| .node as $node + +## Absolute durations: +| ($gsis.epoch_length * $gsis.slot_duration) as $epoch_duration +| ($gsis.slot_duration / $gsis.active_slots_coeff) as $block_duration +| $node.shutdown_on_block_synced as $shutdown_blocks +| $node.shutdown_on_slot_synced as $shutdown_slots +| (if $shutdown_slots | type == "number" + then $shutdown_slots / $gsis.epoch_length | ceil else + if $shutdown_blocks | type == "number" + then $shutdown_blocks / $gsis.epoch_length | ceil else + $gtor.epochs + end end) as $effective_epochs +| ($epoch_duration * $effective_epochs) as $generator_requested_duration +| ($shutdown_slots | may_mult($gsis.slot_duration)) as $shutdown_time +| ([ $generator_requested_duration + , $shutdown_time + ] | drop_nulls | min) as $generator_duration + +## Tx count for inferred absolute duration. +## Note that this the workload would take longer, if we saturate the cluster. 
+| ($gtor.tx_count // ($generator_duration * $gtor.tps) | ceil) + as $generator_tx_count +## Effective cluster composition: +| (if $compo.dense_pool_density > 1 + then { singular: $compo.n_singular_hosts + , dense: $compo.n_dense_hosts } + else { singular: ($compo.n_singular_hosts + $compo.n_dense_hosts) + , dense: 0 } + end) as $hosts +| ($hosts.dense * $compo.dense_pool_density) as $n_dense_pools +| ($hosts.singular + $n_dense_pools) as $n_pools + +| ($gsis.delegators // $n_pools) as $effective_delegators + +| ($generator_tx_count * $gtor.inputs_per_tx) + as $utxo_generated +| (($gsis.max_block_size / default_value_tx_size_estimate) | floor) + as $default_value_tx_per_block_estimate +## Note how derivations come in phases, too: +## +| (## First derivation: + { common: + (($gsis.per_pool_balance * $n_pools) as $supply_delegated + | + { derived: + { supply_delegated: $supply_delegated + , supply_total: ($supply_delegated + $gsis.funds_balance) + + , utxo_delegated: $effective_delegators + , utxo_generated: $utxo_generated + ## Stuffed UTxO is what we need over requested-UTxO + delegators' UTxO: + , utxo_stuffed: ([ $gsis.utxo + , 0 + ] | max) + + , delegators_effective: ([ $n_pools + , $gsis.delegators + ] | max) + + , dataset_measure: $dataset_measure + , dataset_induced_startup_delay_optimistic: $dataset_induced_startup_delay_optimistic + , dataset_induced_startup_delay_conservative: $dataset_induced_startup_delay_conservative + + , genesis_future_offset: $genesis_future_offset + , epoch_duration: $epoch_duration + , generator_duration: $generator_duration + , shutdown_time: $shutdown_time + + , effective_epochs: $effective_epochs + + , generator_tx_count: $generator_tx_count + + , default_value_tx_size_estimate: default_value_tx_size_estimate + , default_value_tx_per_block_estimate: $default_value_tx_per_block_estimate + , generator_blocks_lower_bound: ($generator_tx_count / $default_value_tx_per_block_estimate + | . 
* 1.15 | ceil) + } + , composition: + { n_hosts: ($compo.n_bft_hosts + $hosts.singular + $hosts.dense) + , n_pools: $n_pools + , n_singular_hosts: $hosts.singular + , n_singular_pools: $hosts.singular + , n_dense_hosts: $hosts.dense + , n_dense_pools: $n_dense_pools + , n_pool_hosts: ($hosts.singular + $hosts.dense) + } + , genesis: + { delegators: $effective_delegators + , pool_coin: (if $n_pools == 0 then 0 + else $gsis.per_pool_balance end) + , shelley: + ({ + activeSlotsCoeff: $gsis.active_slots_coeff + , epochLength: $gsis.epoch_length + , securityParam: $gsis.parameter_k + , slotLength: $gsis.slot_duration + , protocolParams: + { maxTxSize: $gsis.max_tx_size + , decentralisationParam: $gsis.decentralisation_param + , maxBlockBodySize: $gsis.max_block_size + , nOpt: $compo.n_pools + } + } * ($gsis.shelley // {})) + # , alonzo: supposed to already be filled + } + , generator: + { tx_count: $generator_tx_count + } + , node: + { + } + , analysis: + { minimum_chain_density: ($gsis.active_slots_coeff * 0.5) + , cluster_startup_overhead_s: $dataset_induced_startup_delay_conservative + , filter_exprs: + ($ana.filter_exprs + + [ { tag: "CBlock", contents: { tag: "BMinimumAdoptions" + , contents: ($n_pools - 1) } } + ]) + } + }) + } + | . * + ## Second derivation: + { common: + { genesis: + ## Depends on computed delegators: + { delegator_coin: (if .common.genesis.delegators == 0 then 0 + else $gsis.per_pool_balance + end) + } + } + }) as $derived +| . * $derived.common + * ($derived[.era] // {}) +| . * + { name: ( .era as $era + | (.name // profile_name(.)) + | . + profile_name_era_suffix($era) + ) + , cli_args: profile_cli_args(.) 
+ } +; + +def profile_pretty_describe($p): + [ " - era: \($p.era)" + , " - epoch slots: \($p.genesis.epoch_length)" + , " - slot duration: \($p.genesis.slot_duration)" + , " - k: \($p.genesis.parameter_k)" + , " - active slots coeff: \($p.genesis.active_slots_coeff)" + , " - hosts: \($p.composition.n_hosts)" + , " - pools: \($p.composition.n_pools)" + , " - normal: \($p.composition.n_singular_pools)" + , " - dense: \($p.composition.n_dense_pools)" + , " - UTxO: \($p.genesis.utxo), of which:" + , " - delegated: \($p.derived.utxo_delegated)" + , " - generated: \($p.derived.utxo_generated)" + , " - stuffed: \($p.derived.utxo_stuffed)" + , " - delegators: \($p.genesis.delegators)" + , " - generator duration: \($p.derived.generator_duration | tostring)s" + , " - requested epochs: \($p.generator.epochs | tostring)ep" + , " - effective epochs: \($p.derived.effective_epochs | tostring)ep" + , " - transaction count: \($p.derived.generator_tx_count | . / 1000 | ceil | tostring)kTx" + , " - full blocks: \($p.derived.generator_blocks_lower_bound | tostring)" + , "" + ] + | . + if $p.node.shutdown_on_slot_synced == null then [] + else [ + " - terminate at slot: \($p.node.shutdown_on_slot_synced)" + ] end + | . + if $p.node.shutdown_on_block_synced == null then [] + else [ + " - terminate at block: \($p.node.shutdown_on_block_synced)" + ] end + | . + [""] + | join("\n"); diff --git a/shell.nix b/shell.nix index a2fb93bfaed..9b579fde46e 100644 --- a/shell.nix +++ b/shell.nix @@ -149,10 +149,6 @@ let pkgs.time ]; - # Prevents cabal from choosing alternate plans, so that - # *all* dependencies are provided by Nix. 
- exactDeps = true; - shellHook = '' echo "DevOps Tools" \ | ${figlet}/bin/figlet -f banner -c \ diff --git a/trace-dispatcher/trace-dispatcher.cabal b/trace-dispatcher/trace-dispatcher.cabal index dd8078c967b..587563dc227 100644 --- a/trace-dispatcher/trace-dispatcher.cabal +++ b/trace-dispatcher/trace-dispatcher.cabal @@ -17,7 +17,7 @@ library Cardano.Logging.Types Cardano.Logging.Trace Cardano.Logging.Configuration - Cardano.Logging.ConfigurationParser + Cardano.Logging.ConfigurationParser Cardano.Logging.DocuGenerator Cardano.Logging.Formatter Cardano.Logging.Forwarding @@ -43,7 +43,7 @@ library , contra-tracer , ekg , ekg-core - , ekg-forward + , ekg-forward == 0.1.0 , hostname , network , optparse-applicative-fork