diff --git a/nix/cardano-services/deployments/blockfrost-backend-ingress.nix b/nix/cardano-services/deployments/blockfrost-backend-ingress.nix new file mode 100644 index 00000000000..5c12735528c --- /dev/null +++ b/nix/cardano-services/deployments/blockfrost-backend-ingress.nix @@ -0,0 +1,83 @@ +{ + lib, + values, + config, + utils, + ... +}: { + templates.accelerator = lib.mkIf (values.blockfrost-backend.useAccelerator && values.ingress.enabled) { + apiVersion = "operator.h3poteto.dev/v1alpha1"; + kind = "EndpointGroupBinding"; + metadata.name = "${config.name}-blockfrost"; + spec = { + endpointGroupArn = values.acceleratorArn; + ingressRef.name = "${config.name}-blockfrost"; + }; + }; + + templates.blockfrost-backend-ingress = lib.mkIf values.ingress.enabled { + apiVersion = "networking.k8s.io/v1"; + kind = "Ingress"; + metadata = { + name = "${config.name}-blockfrost"; + labels = utils.appLabels "blockfrost"; + annotations = + if values.blockfrost-backend.useAccelerator + then { + "service.beta.kubernetes.io/aws-load-balancer-backend-protocol" = "tcp"; + "service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled" = "true"; + "service.beta.kubernetes.io/aws-load-balancer-type" = "external"; + "alb.ingress.kubernetes.io/scheme" = "internet-facing"; + "service.beta.kubernetes.io/aws-load-balancer-scheme" = "internet-facing"; + "alb.ingress.kubernetes.io/target-type" = "ip"; + "service.beta.kubernetes.io/aws-load-balancer-nlb-target-type" = "ip"; + "service.beta.kubernetes.io/aws-load-balancer-proxy-protocol" = "*"; + "service.beta.kubernetes.io/aws-load-balancer-target-group-attributes" = "proxy_protocol_v2.enabled=true,preserve_client_ip.enabled=true"; + + "alb.ingress.kubernetes.io/listen-ports" = builtins.toJSON [{HTTP = 80;} {HTTPS = 443;}]; + "alb.ingress.kubernetes.io/healthcheck-path" = "${values.cardano-services.httpPrefix}/health"; + "alb.ingress.kubernetes.io/healthcheck-interval-seconds" = toString 
values.blockfrost-backend.albHealthcheck.interval; + "alb.ingress.kubernetes.io/healthcheck-timeout-seconds" = toString values.blockfrost-backend.albHealthcheck.timeout; + "alb.ingress.kubernetes.io/group.order" = toString values.cardano-services.ingresOrder; + "external-dns.alpha.kubernetes.io/disabled" = "true"; + } + else { + "alb.ingress.kubernetes.io/actions.ssl-redirect" = builtins.toJSON { + Type = "redirect"; + RedirectConfig = { + Protocol = "HTTPS"; + Port = "443"; + StatusCode = "HTTP_301"; + }; + }; + "alb.ingress.kubernetes.io/listen-ports" = builtins.toJSON [{HTTP = 80;} {HTTPS = 443;}]; + "alb.ingress.kubernetes.io/target-type" = "ip"; + "alb.ingress.kubernetes.io/scheme" = "internet-facing"; + "alb.ingress.kubernetes.io/wafv2-acl-arn" = values.blockfrost-backend.wafARN; + # Use latency routing policy + "external-dns.alpha.kubernetes.io/aws-region" = config.region; + "external-dns.alpha.kubernetes.io/set-identifier" = values.blockfrost-backend.dnsId; + "alb.ingress.kubernetes.io/group.name" = config.namespace; + "alb.ingress.kubernetes.io/group.order" = toString values.cardano-services.ingresOrder; + }; + }; + spec = { + ingressClassName = "alb"; + rules = + map (hostname: { + host = hostname; + http.paths = [ + { + pathType = "Prefix"; + path = "/"; + backend.service = { + name = "${config.name}-blockfrost-backend"; + port.name = "http"; + }; + } + ]; + }) + values.blockfrost-backend.hostnames; + }; + }; +} diff --git a/nix/cardano-services/deployments/blockfrost-backend.provider.nix b/nix/cardano-services/deployments/blockfrost-backend.provider.nix new file mode 100644 index 00000000000..4e79f21219e --- /dev/null +++ b/nix/cardano-services/deployments/blockfrost-backend.provider.nix @@ -0,0 +1,97 @@ +{ + config, + values, + lib, + ... 
+}: { + providers.blockfrost-backend = { + inherit (values.cardano-services) image; + args = ["start-provider-server"]; + port = 3000; + metricsPath = "${values.cardano-services.httpPrefix}/metrics"; + + livenessProbe = { + timeoutSeconds = 30; + periodSeconds = 60; + httpGet = { + path = "${values.cardano-services.httpPrefix}/health"; + port = 3000; + }; + }; + + env = + { + NETWORK = config.network; + ENABLE_METRICS = "true"; + SERVICE_NAMES = "asset,chain-history,network-info,rewards,utxo"; + OGMIOS_SRV_SERVICE_NAME = values.blockfrost-backend.ogmiosSrvServiceName; + LOGGER_MIN_SEVERITY = values.cardano-services.loggingLevel; + TOKEN_METADATA_SERVER_URL = values.cardano-services.tokenMetadataServerUrl; + HANDLE_POLICY_IDS = "f0ff48bbb7bbe9d59a40f1ce90e9e9d0ff5002ec48f232b49ca0fb9a"; + USE_BLOCKFROST = builtins.toJSON values.blockfrost-worker.enabled; + USE_KORA_LABS = "true"; + DISABLE_STAKE_POOL_METRIC_APY = "true"; + PAGINATION_PAGE_SIZE_LIMIT = "5500"; + NODE_ENV = values.cardano-services.nodeEnv; + ASSET_PROVIDER = "blockfrost"; + CHAIN_HISTORY_PROVIDER = "blockfrost"; + NETWORK_INFO_PROVIDER = "blockfrost"; + REWARDS_PROVIDER = "blockfrost"; + UTXO_PROVIDER = "blockfrost"; + BLOCKFROST_API_KEY = { + valueFrom.secretKeyRef = { + name = "blockfrost"; + key = "api-key"; + }; + }; + + + HANDLE_PROVIDER_SERVER_URL = + if config.network == "mainnet" + then "https://api.handle.me" + else "https://${config.network}.api.handle.me"; + + BUILD_INFO = values.cardano-services.buildInfo; + ALLOWED_ORIGINS = values.blockfrost-backend.allowedOrigins; + + POSTGRES_POOL_MAX_DB_SYNC = "50"; + POSTGRES_HOST_DB_SYNC = values.postgresName; + POSTGRES_PORT_DB_SYNC = "5432"; + POSTGRES_DB_DB_SYNC = "cardano"; + POSTGRES_PASSWORD_DB_SYNC = { + valueFrom.secretKeyRef = { + name = "cardano-owner-user.${values.postgresName}.credentials.postgresql.acid.zalan.do"; + key = "password"; + }; + }; + POSTGRES_USER_DB_SYNC = { + valueFrom.secretKeyRef = { + name = 
"cardano-owner-user.${values.postgresName}.credentials.postgresql.acid.zalan.do"; + key = "username"; + }; + }; + POSTGRES_SSL_DB_SYNC = "true"; + POSTGRES_SSL_CA_FILE_DB_SYNC = "/tls/ca.crt"; + } + // lib.optionalAttrs values.blockfrost-backend.passHandleDBArgs { + POSTGRES_POOL_MAX_HANDLE = "10"; + POSTGRES_HOST_HANDLE = values.postgresName; + POSTGRES_PORT_HANDLE = "5432"; + POSTGRES_DB_HANDLE = "handle"; + POSTGRES_PASSWORD_HANDLE = { + valueFrom.secretKeyRef = { + name = "handle-owner-user.${values.postgresName}.credentials.postgresql.acid.zalan.do"; + key = "password"; + }; + }; + POSTGRES_USER_HANDLE = { + valueFrom.secretKeyRef = { + name = "handle-owner-user.${values.postgresName}.credentials.postgresql.acid.zalan.do"; + key = "username"; + }; + }; + POSTGRES_SSL_HANDLE = "true"; + POSTGRES_SSL_CA_FILE_HANDLE = "/tls/ca.crt"; + }; + }; +} diff --git a/nix/cardano-services/deployments/default.nix b/nix/cardano-services/deployments/default.nix index 7a9825aaec8..c5b02f30149 100644 --- a/nix/cardano-services/deployments/default.nix +++ b/nix/cardano-services/deployments/default.nix @@ -66,6 +66,10 @@ in resources.requests = mkPodResources "350Mi" "1000m"; }; + blockfrost-backend = { + resources.requests = mkPodResources "350Mi" "1000m"; + }; + stake-pool-provider = { resources.requests = mkPodResources "150Mi" "700m"; env.OVERRIDE_FUZZY_OPTIONS = builtins.toJSON (!(lib.hasPrefix "live" final.namespace)); @@ -184,6 +188,25 @@ in (map (v: "/v${v}/utxo") versions.utxo) ]; }; + + blockfrost-backend = { + allowedOrigins = lib.concatStringsSep "," allowedOrigins; + useAccelerator = false; + passHandleDBArgs = true; + hostnames = ["blockfrost-${final.namespace}.${baseUrl}" "blockfrost-${final.namespace}.${final.region}.${baseUrl}"]; + dnsId = lib.toLower "${final.region}-${final.namespace}-blockfrost"; + ogmiosSrvServiceName = "${final.namespace}-cardano-core.${final.namespace}.svc.cluster.local"; + + wafARN = tf-outputs.${final.region}.waf_arn; + # Healthcheck parameters for 
ALB + # For mainnet, default value of timeout of 5 is too short, so have to increase it significantly + # Interval cannot be less than timeout + # Note that Kubernetes healthchecks are picked up by balancer controller and reflected in the target group anyway + albHealthcheck = { + interval = 60; + timeout = 30; + }; + }; }; imports = [ ./ci.nix @@ -198,6 +221,8 @@ in ./handle.nix ./asset.nix ./backend-ingress.nix + ./blockfrost-backend-ingress.nix + ./blockfrost-backend.provider.nix ./pg-boss-worker-deployment.nix ./blockfrost-worker-deployment.nix ]; @@ -697,6 +722,7 @@ in providers = { backend.enabled = true; + blockfrost-backend.enabled = true; handle-provider.enabled = true; chain-history-provider.enabled = true; stake-pool-provider.enabled = true;