

Worker Service

This example shows a basic worker service configuration.

Worker service resource

  • Provides a continuously running container that is not accessible from the outside.
  • Ideal for background jobs and processing.
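
To make this concrete, here is a minimal sketch of a worker entry point (TypeScript, matching the `entryfilePath` used in the example below). The `processNextJob` function is a hypothetical placeholder for your own logic; handling `SIGTERM` pairs with the `stopTimeout` property described further down.

// src/index.ts - minimal long-running worker sketch
let shuttingDown = false;

// ECS sends SIGTERM before stopping the container; finishing the current
// job here works together with the `stopTimeout` property shown below.
process.on("SIGTERM", () => {
  shuttingDown = true;
});

async function processNextJob(): Promise<void> {
  // hypothetical: poll a queue or database and process one item
}

async function main(): Promise<void> {
  while (!shuttingDown) {
    await processNextJob();
  }
}

main().catch((error) => {
  console.error(error);
  process.exit(1);
});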

Basic example

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: union (anyOf)
# - Required: true
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: stacktape-image-buildpack
# Configures properties for the image automatically built by Stacktape from the source code.
#
# - Type: object
# - Required: true
properties:
# Path to the entry point of your compute resource (relative to the stacktape config file)
#
# - Stacktape tries to bundle all your source code with its dependencies into a single file.
# - If a certain dependency doesn't support static bundling (because it depends on a binary executable, uses dynamic require() calls, etc.),
# Stacktape will install it and copy it to the bundle
#
# - Type: string
# - Required: true
entryfilePath: ./src/index.ts
# Configuration of packaging properties specific to the given language
#
# - Type: union (anyOf)
# - Required: false
# Builds image with support for glibc-based binaries
#
# - You can use this option to add support for glibc-based native dependencies.
# - This means that Stacktape will use different (and significantly larger) base-image for your container.
# - Stacktape uses alpine Docker images by default. These images use musl instead of glibc.
# - Packages with C-based binaries compiled against glibc don't work with musl.
#
# - Type: boolean
# - Required: false
requiresGlibcBinaries: true
# List of commands to be executed during docker image building.
#
# - This property enables you to execute custom commands in your container during image building.
# - Commands are executed using docker `RUN` directive.
# - Commands can be used to install required additional dependencies into your container.
#
# - Type: array<string>
# - Required: false
customDockerBuildCommands:
- apt-get update && apt-get install -y curl
- npm install -g pm2
# Files that should be explicitly included in the deployment package (glob pattern)
#
# - Example glob pattern: `images/*.jpg`
# - The path is relative to the stacktape configuration file location or to `cwd` if configured using `--currentWorkingDirectory` command line option.
#
# - Type: array<string>
# - Required: false
includeFiles:
- public/**/*
- assets/*.png
# Files that should be explicitly excluded from deployment package (glob pattern)
#
# Example glob pattern: `images/*.jpg`
#
# - Type: array<string>
# - Required: false
excludeFiles:
- "*.test.ts"
- node_modules/**
# Dependencies to ignore.
#
# - These dependencies won't be a part of your deployment package.
#
# - Type: array<string>
# - Required: false
excludeDependencies:
- example-value
# Environment variables injected to the runtime environment
#
# - Environment variables are often used to inject information about other parts of the infrastructure (such as database URLs, secrets, etc.).
#
# - Type: array<object (reference)>
# - Required: false
environment:
- name: NODE_ENV
value: production
- name: DATABASE_URL
value: $ResourceParam(myDatabase, connectionString)
# Configures logging behavior for the service container
#
# - Container logs (stdout and stderr) are automatically sent to a pre-created CloudWatch log group.
# - By default, logs are retained for 180 days.
# - You can browse your logs in 2 ways:
# - go to the log group page in the AWS CloudWatch console. You can use `stacktape stack-info` command to get a
# direct link.
# - use [stacktape logs command](https://docs.stacktape.com/cli/commands/logs/) to print logs to the console
#
# - Type: object
# - Required: false
logging:
# Disables the collection of the container's application logs (stdout and stderr) to CloudWatch
#
# - Type: boolean
# - Required: false
# - Default: false
disabled: false
# Amount of days the logs will be retained in the log group
#
# - Type: enum: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
# - Required: false
# - Default: 90
# - Allowed values: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
retentionDays: 90
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: union (anyOf)
# - Required: false
#
# - Type: object
# - Required: false
logForwarding:
#
# - Type: string
# - Required: true
type: http-endpoint
#
# - Type: object
# - Required: true
properties:
# HTTPS endpoint where logs will be forwarded
#
# - Type: string
# - Required: true
endpointUrl: https://example.com
# Specifies whether to use GZIP compression for the request
#
# - When enabled, Firehose uses GZIP content encoding to compress the request body before sending it to the destination
#
# - Type: boolean
# - Required: false
gzipEncodingEnabled: true
# Parameters included in each call to HTTP endpoint
#
# - Key/Value pairs containing additional metadata you wish to send to the HTTP endpoint.
# - Parameters are delivered within the **X-Amz-Firehose-Common-Attributes** header as a JSON object with the following format: `{"commonAttributes":{"param1":"val1", "param2":"val2"}}`
#
# - Type: object
# - Required: false
# Amount of time spent on retries.
#
# - The total amount of time that Kinesis Data Firehose spends on retries.
# - This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails.
# - Logs that fail to be delivered to the HTTP endpoint even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: number
# - Required: false
retryDuration: 100
# Access key (credentials), needed for authenticating with endpoint
#
# - The access key is carried within the **X-Amz-Firehose-Access-Key** header
# - The configured key is copied verbatim into the value of this header. The contents can be arbitrary and can, for example, represent a JWT token or an ACCESS_KEY.
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your access key.
#
# - Type: string
# - Required: false
accessKey: example-value
# Configures computing resources (CPU/memory and EC2 instance types) for the service container
#
# - When specifying resources there are two underlying compute engines to use:
# - **Fargate** - abstracts the server and cluster management away from the user, allowing them to run containers without
# managing the underlying servers, simplifying deployment and management of applications but offering less control over the computing environment.
# - **EC2 (Elastic Compute Cloud)** - provides granular control over the underlying servers (instances).
# By choosing `instanceTypes` you get complete control over the computing environment and the ability to optimize for specific workloads.
#
# - **To use Fargate**: Do NOT specify `instanceTypes` and specify `cpu` and `memory` properties.
# - **To use EC2 instances**: specify `instanceTypes`.
#
# - Type: object
# - Required: true
resources:
# Number of virtual CPUs available to containers
#
# - If you specify the `instanceTypes` property and do not set `cpu`, the CPUs of an EC2 instance are shared between the **instances of your compute resource** running on that instance.
#
# - Type: enum: [0.25, 0.5, 1, 2, 4, 8, 16]
# - Required: false
# - Allowed values: [0.25, 0.5, 1, 2, 4, 8, 16]
cpu: 0.5
# Amount of memory in MB available to containers
#
# - If you do not specify the `instanceTypes` property, you are using Fargate, and only the following memory and vCPU configurations are allowed:
# - 0.25 vCPU: `512`, `1024`, `2048`
# - 0.5 vCPU: `1024`, `2048`, `3072`, `4096`
# - 1 vCPU: `2048`, `3072`, `4096`, `5120`, `6144`, `7168`, `8192`
# - 2 vCPU: Between `4096` and `16384` in `1024-MB` increments
# - 4 vCPU: Between `8192` and `30720` in `1024-MB` increments
# - 8 vCPU: Between `16384` and `61440` in `4096-MB` increments
# - 16 vCPU: Between `32768` and `122880` in `8192-MB` increments
# - If you specify the `instanceTypes` property, this property is optional. If you do not set the `memory` property, Stacktape sets the memory
# to the maximum value that every EC2 instance type listed in `instanceTypes` is able to provide.
#
# In other words: Stacktape sets the memory so that the smallest instance type in `instanceTypes` (in terms of memory) is able to provide that amount of memory.
#
# - Type: number
# - Required: false
memory: 2048
# Types of EC2 instances (VMs) that can be used
#
# - EC2 instances are automatically added or removed to meet the scaling needs of your compute resource (see also the `scaling` property).
# - When using `instanceTypes`, **we recommend specifying only one instance type and NOT setting the `cpu` or `memory` properties**.
# By doing so, Stacktape will set the cpu and memory to fit the instance precisely, resulting in optimal resource utilization.
# - Stacktape leverages [ECS Managed Scaling](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-auto-scaling.html) with a target utilization of 100%.
# This means that there are no unused EC2 instances (unused = not running your workload/service). Unused EC2 instances are terminated.
# - Ordering of the `instanceTypes` list matters. Instance types higher on the list are preferred over instance types lower on the list.
# An instance type lower on the list is used only when the types above it are not available.
# - For an exhaustive list of available EC2 instance types, refer to [AWS docs](https://aws.amazon.com/ec2/instance-types/).
# - For exhaustive list of available EC2 instance types refer to [AWS docs](https://aws.amazon.com/ec2/instance-types/).
#
# > To ensure that your containers are running on patched and up-to-date EC2 instances, your instances are automatically
# > refreshed (replaced) once a week (Sunday 00:00 UTC). Your compute resource stays available throughout this process.
#
# - Type: array<string>
# - Required: false
instanceTypes:
- t3.medium
- t3.large
# Enable EC2 Auto Scaling warm pool
#
# - **Only works when you specify exactly one instance type in `instanceTypes`**. Warm pools are not supported with mixed instance types.
# - Creates a warm pool of pre-initialized EC2 instances that are kept in a `Stopped` state, ready to be quickly launched when scaling up.
# - Warm pool instances are maintained between the desired capacity count and the maximum capacity count of your Auto Scaling group.
# - When scaling up is needed, instances from the warm pool are started much faster than launching new instances from scratch.
# - **Cost optimization**: Instances in the warm pool are in `Stopped` state, so you only pay for EBS storage, not for compute time.
# - Improves scaling performance by reducing the time needed to launch new instances during traffic spikes.
# - The warm pool size is automatically managed based on your workload's scaling configuration.
# - For more details, see [AWS Auto Scaling warm pools documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/ec2-auto-scaling-warm-pools.html).
#
# - Type: boolean
# - Required: false
enableWarmPool: true
# Configures service scaling
#
# - Scaling is done horizontally (adding more parallel containers)
# - Incoming requests to your service/workload are split between all available containers
#
# - Type: object
# - Required: false
scaling:
# Minimum number of workload/service instances running in parallel
#
# - Type: number
# - Required: false
# - Default: 1
minInstances: 1
# Maximum number of workload/service instances running in parallel
#
# - Type: number
# - Required: false
# - Default: 1
maxInstances: 3
# Configures when the scaling is triggered
#
# - Type: object
# - Required: false
scalingPolicy:
# CPU utilization threshold after which `scale out` (adding a new workload/service instance) is triggered
#
# - Utilization is calculated as the average utilization of all running workload/service instances.
# - Metrics are collected in 1 minute intervals.
# - If the average CPU utilization metric is below this value, `scale in` (removing an instance) is triggered.
#
# - Type: number
# - Required: false
# - Default: 80
keepAvgCpuUtilizationUnder: 80
# Memory utilization threshold after which `scale out` (adding a new workload/service instance) is triggered
#
# - Utilization is calculated as the average utilization of all running workload/service instances.
# - Metrics are collected in 1 minute intervals.
# - If the average memory utilization metric is below this value, `scale in` (removing an instance) is triggered.
#
# - Type: number
# - Required: false
# - Default: 80
keepAvgMemoryUtilizationUnder: 80
# Configuration of the internal healthcheck (used to determine if the service container is healthy)
#
# - If the container is considered unhealthy, it is killed and replaced with a new one.
#
# - Type: object
# - Required: false
internalHealthCheck:
# Command to run that determines if the container is healthy
#
# - Must start with either:
# - `CMD` to execute the command arguments directly
# - `CMD-SHELL` to run the command with the container's default shell.
# - An exit code of `0` indicates success; a non-zero exit code indicates failure.
# - Example: `[ "CMD-SHELL", "curl -f http://localhost/ || exit 1" ]`
#
# - Type: array<string>
# - Required: true
healthCheckCommand:
- CMD-SHELL
- curl -f http://localhost/ || exit 1
# Time period in seconds between health check executions
#
# - Must be between 5 and 300 seconds.
#
# - Type: number
# - Required: false
# - Default: 30
intervalSeconds: 30
# The time period in seconds to wait for a health check to succeed before it is considered failed
#
# - Must be between 2 and 60 seconds.
#
# - Type: number
# - Required: false
# - Default: 5
timeoutSeconds: 5
# The number of times to retry a failed health check before the container is considered unhealthy.
#
# - Must be between 1 and 10 retries.
#
# - Type: number
# - Required: false
# - Default: 3
retries: 3
# "Grace" period to give the container to bootstrap before failed health checks count towards the maximum number of retries.
#
# - Must be between 0 and 300 seconds.
# - Disabled by default.
#
# - Type: number
# - Required: false
startPeriodSeconds: 100
# Amount of time in seconds to wait before the container is forcefully killed if it doesn't exit normally on its own.
#
# - When a container is stopped, a `SIGTERM` signal is sent to container's entry process, usually PID 1.
# After a timeout has lapsed, the process will be sent a `SIGKILL` signal.
# By default, there is a 2 second delay between the delivery of `SIGTERM` and `SIGKILL` signals.
# - Setting a stop timeout gives the container time to "finish the job" or clean up when deploying a new version of the container or when deleting the service.
# - Minimum stop timeout is 2 seconds, maximum 120 seconds.
#
# - Type: number
# - Required: false
stopTimeout: 30
# Enables remote interactive shell sessions into running containers
#
# - When enabled, you can use `stacktape container:session` command to start an interactive shell session inside a running container
# - Uses AWS ECS Exec and SSM Session Manager under the hood to establish secure connection to the container
# - SSM agent binaries are mounted into your container and the SSM core agent runs alongside your application (using a small amount of CPU/memory)
# - Useful for debugging issues and quickly inspecting deployed containers
#
# - Type: boolean
# - Required: false
enableRemoteSessions: true
# List of volume mounts to attach to the container
#
# - Volumes provide persistent storage for your container
# - Currently supports EFS (Elastic File System) volumes
# - Multiple containers can share the same volume
# - Volumes persist even if the container is stopped or replaced
#
# - Type: array<object (reference)>
# - Required: false
volumeMounts:
- type: efs
# List of helper containers to extend the service
#
# Helper containers provide additional functionality alongside your main service container.
#
# **Container types:**
# - **`run-on-init`** - Runs before the service starts. The service container waits until the run-on-init container exits successfully (exit code 0).
# Use for setup tasks like database migrations, environment preparation, or dependency initialization.
# - **`always-running`** - Runs alongside the service container throughout its lifecycle. Starts before the service and stops when the service stops.
# Use for supporting services like monitoring agents, log collectors, or debugging tools.
#
# **Networking:**
# - always-running containers can communicate with the service container via `localhost:<port>` where `<port>` is the service's exposed port (default: 3000).
#
# - Type: array<object (reference)>
# - Required: false
sideContainers:
- containerType: always-running
name: example-name
essential: true
# Configures access to other resources of your stack (such as databases, buckets, event-buses, etc.) and AWS services
#
# By referencing resources (or services) in the `connectTo` list, Stacktape automatically:
# - configures the compute resource's **IAM role permissions** if needed
# - sets up the correct **security group rules** to allow access if needed
# - **injects relevant environment variables** containing information about the resource you are connecting to into the compute resource's runtime
# - names of environment variables use upper-snake-case and are in the form `STP_[RESOURCE_NAME]_[VARIABLE_NAME]`,
# - examples: `STP_MY_DATABASE_CONNECTION_STRING` or `STP_MY_EVENT_BUS_ARN` (see the runtime sketch after this example),
# - list of injected variables for each resource type can be seen below.
#
#
# Granted permissions and injected environment variables are different depending on resource type:
#
#
# `Bucket`
# - **Permissions:**
# - list objects in a bucket
# - create / get / delete / tag object in a bucket
# - **Injected env variables**: `NAME`, `ARN`
#
#
# `DynamoDB table`
# - **Permissions:**
# - get / put / update / delete item in a table
# - scan / query a table
# - describe table stream
# - **Injected env variables**: `NAME`, `ARN`, `STREAM_ARN`
#
#
# `MongoDB Atlas cluster`
# - **Permissions:**
# - Allows connection to a cluster with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# MongoDB Atlas clusters accessibility modes, refer to
# [MongoDB Atlas cluster docs](https://docs.stacktape.com/3rd-party-resources/mongo-db-atlas-clusters/#accessibility).
# - Creates access "user" associated with compute resource's role to allow for secure credential-less access to the the cluster
# - **Injected env variables**: `CONNECTION_STRING`
#
#
# `Relational(SQL) database`
# - **Permissions:**
# - Allows connection to a relational database with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# relational database accessibility modes, refer to [Relational databases docs](https://docs.stacktape.com/resources/relational-databases#accessibility).
# - **Injected env variables**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`
# (in case of aurora multi instance cluster additionally: `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, `READER_HOST`)
#
#
# `Redis cluster`
# - **Permissions:**
# - Allows connection to a redis cluster with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# redis cluster accessibility modes, refer to [Redis clusters docs](https://docs.stacktape.com/resources/redis-clusters#accessibility).
# - **Injected env variables**: `HOST`, `READER_HOST`, `PORT`
#
#
# `Event bus`
# - **Permissions:**
# - publish events to the specified Event bus
# - **Injected env variables**: `ARN`
#
#
# `Function`
# - **Permissions:**
# - invoke the specified function
# - invoke the specified function via url (if lambda has URL enabled)
# - **Injected env variables**: `ARN`
#
#
# `Batch job`
# - **Permissions:**
# - submit batch-job instance into batch-job queue
# - list submitted job instances in a batch-job queue
# - describe / terminate a batch-job instance
# - list executions of state machine which executes the batch-job according to its strategy
# - start / terminate execution of a state machine which executes the batch-job according to its strategy
# - **Injected env variables**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN`
#
#
# `User auth pool`
# - **Permissions:**
# - full control over the user pool (`cognito-idp:*`)
# - for more information about allowed methods refer to [AWS docs](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncognitouserpools.html)
# - **Injected env variables**: `ID`, `CLIENT_ID`, `ARN`
#
#
#
# `SNS Topic`
# - **Permissions:**
# - confirm/list subscriptions of the topic
# - publish/subscribe to the topic
# - unsubscribe from the topic
# - **Injected env variables**: `ARN`, `NAME`
#
#
#
# `SQS Queue`
# - **Permissions:**
# - send/receive/delete message
# - change visibility of message
# - purge queue
# - **Injected env variables**: `ARN`, `NAME`, `URL`
#
#
# `Upstash Kafka topic`
# - **Injected env variables**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL`
#
#
# `Upstash Redis`
# - **Injected env variables**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL`
#
#
# `Private service`
# - **Injected env variables**: `ADDRESS`
#
#
# `aws:ses`(Macro)
# - **Permissions:**
# - gives full permissions to AWS SES (`ses:*`).
# - for more information about allowed methods refer to [AWS docs](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html)
#
# - Type: array<string>
# - Required: false
connectTo:
- myDatabase
- myBucket
# Raw AWS IAM role statements appended to your resource's role.
#
# - Type: array<object (reference)>
# - Required: false
iamRoleStatements:
- Resource: ["example-value"]
Sid: example-value
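
At runtime, the `environment` entries and the `connectTo` list above surface as plain environment variables. A short sketch of reading them from the worker's TypeScript code; the `STP_` names are derived from the naming convention described above and assume resources named `myDatabase` and `myBucket`:

// Injected via the `environment` list above
const nodeEnv = process.env.NODE_ENV;
const databaseUrl = process.env.DATABASE_URL;

// Injected via `connectTo`, following the STP_[RESOURCE_NAME]_[VARIABLE_NAME]
// convention (the exact variables depend on the resource type, see the list above)
const connectionString = process.env.STP_MY_DATABASE_CONNECTION_STRING;
const bucketName = process.env.STP_MY_BUCKET_NAME;

if (!databaseUrl || !connectionString) {
  throw new Error("Expected injected database environment variables");
}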

Packaging alternatives

stacktape-image-buildpack

This example shows how to configure packaging using stacktape-image-buildpack.

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: stacktape-image-buildpack
# Configures properties for the image automatically built by Stacktape from the source code.
#
# - Type: object
# - Required: true
properties:
# Path to the entry point of your compute resource (relative to the stacktape config file)
#
# - Stacktape tries to bundle all your source code with its dependencies into a single file.
# - If a certain dependency doesn't support static bundling (because it depends on a binary executable, uses dynamic require() calls, etc.),
# Stacktape will install it and copy it to the bundle
#
# - Type: string
# - Required: true
entryfilePath: ./src/index.ts
# Configuration of packaging properties specific to the given language
#
# - Type: union (anyOf)
# - Required: false
# Builds image with support for glibc-based binaries
#
# - You can use this option to add support for glibc-based native dependencies.
# - This means that Stacktape will use different (and significantly larger) base-image for your container.
# - Stacktape uses alpine Docker images by default. These images use musl instead of glibc.
# - Packages with C-based binaries compiled against glibc don't work with musl.
#
# - Type: boolean
# - Required: false
requiresGlibcBinaries: true
# List of commands to be executed during docker image building.
#
# - This property enables you to execute custom commands in your container during image building.
# - Commands are executed using docker `RUN` directive.
# - Commands can be used to install required additional dependencies into your container.
#
# - Type: array<string>
# - Required: false
customDockerBuildCommands:
- apt-get update && apt-get install -y curl
- npm install -g pm2
# Files that should be explicitly included in the deployment package (glob pattern)
#
# - Example glob pattern: `images/*.jpg`
# - The path is relative to the stacktape configuration file location or to `cwd` if configured using `--currentWorkingDirectory` command line option.
#
# - Type: array<string>
# - Required: false
includeFiles:
- public/**/*
- assets/*.png
# Files that should be explicitly excluded from deployment package (glob pattern)
#
# Example glob pattern: `images/*.jpg`
#
# - Type: array<string>
# - Required: false
excludeFiles:
- "*.test.ts"
- node_modules/**
# Dependencies to ignore.
#
# - These dependencies won't be a part of your deployment package.
#
# - Type: array<string>
# - Required: false
excludeDependencies:
- example-value

external-buildpack

This example shows how to configure packaging using external-buildpack.

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: external-buildpack
#
# - Type: object
# - Required: true
properties:
# Path to the directory where the buildpack will be executed
#
# - Type: string
# - Required: true
sourceDirectoryPath: ./
# Buildpack Builder to use
#
# - By default, [paketobuildpacks/builder-jammy-base](https://github.com/paketo-buildpacks/builder-jammy-base) is used.
#
# - Type: string
# - Required: false
# - Default: paketobuildpacks/builder-jammy-base
builder: paketobuildpacks/builder-jammy-base
# Buildpack to use
#
# - By default, buildpacks are detected automatically.
#
# - Type: array<string>
# - Required: false
buildpacks:
- example-value
# Command to be executed when the container starts.
#
# - Example: `['app.py']`.
#
# - Type: array<string>
# - Required: false
command:
- node
- dist/index.js

prebuilt-image

This example shows how to configure packaging using prebuilt-image.

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: prebuilt-image
# Configures properties for the image pre-built by user.
#
# - Type: object
# - Required: true
properties:
# Name or the URL of the image
#
# - Type: string
# - Required: true
image: example-value
# ARN (Amazon resource name) of the secret containing credentials for the private registry containing the image.
#
# - You can create a secret with your credentials using the [stacktape secret:create](https://docs.stacktape.com/resources/secrets/) command.
# - The body of the secret should have the following format: `{"username" : "<<privateRegistryUsername>>", "password" : "<<privateRegistryPassword>>"}`
# - After you create the secret, its ARN can be retrieved using [stacktape secret:get](https://docs.stacktape.com/cli/commands/secret-get/) command
#
# - Type: string
# - Required: false
repositoryCredentialsSecretArn: example-value
# Script to be executed when the container starts. Overrides ENTRYPOINT instruction in the Dockerfile.
#
# - Type: array<string>
# - Required: false
entryPoint:
- /usr/local/bin/docker-entrypoint.sh
# Command to be executed when the container starts. Overrides CMD instruction in the Dockerfile.
#
# - Example: `['app.py']`
#
# - Type: array<string>
# - Required: false
command:
- node
- dist/index.js

custom-dockerfile

This example shows how to configure packaging using custom-dockerfile.

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: custom-dockerfile
# Configures properties for the image built by Stacktape using specified Dockerfile.
#
# - Type: object
# - Required: true
properties:
# Path to directory (relative to stacktape config file) used as build context
#
# - Type: string
# - Required: true
buildContextPath: ./
# Script to be executed when the container starts. Overrides ENTRYPOINT instruction in the Dockerfile.
#
# - Type: array<string>
# - Required: false
entryPoint:
- /usr/local/bin/docker-entrypoint.sh
# Path to Dockerfile (relative to `buildContextPath`) used to build application image.
#
# - Type: string
# - Required: false
dockerfilePath: Dockerfile
# List of arguments passed to the `docker build` command when building the image
#
# - Type: array<object (reference)>
# - Required: false
buildArgs:
- argName: NODE_ENV
value: production
- argName: BUILD_VERSION
value: 1.0.0
# Command to be executed when the container starts. Overrides CMD instruction in the Dockerfile.
#
# - Example: `['app.py']`
#
# - Type: array<string>
# - Required: false
command:
- node
- dist/index.js

nixpacks

This example shows how to configure packaging using nixpacks.

resources:
myWorkerService:
type: worker-service
properties:
# Configures an image used for the service container
#
# - Type: object
# - Required: true
packaging:
#
# - Type: string
# - Required: true
type: nixpacks
#
# - Type: object
# - Required: true
properties:
# Path to the directory where the buildpack will be executed
#
# - Type: string
# - Required: true
sourceDirectoryPath: ./
# Build Image
#
# - The image to use as the base when building the application.
# - To learn more, refer to [nixpacks docs](https://nixpacks.com/docs/configuration/file#build-image)
#
# - Type: string
# - Required: false
buildImage: example-value
# Providers
#
# - A list of provider names used to determine build and runtime environments.
#
# - Type: array<string>
# - Required: false
providers:
- example-value
# Start Command
#
# - The command to execute when starting the application.
# - Overrides default start commands inferred by nixpacks.
#
# - Type: string
# - Required: false
startCmd: example-value
# Start Run Image
#
# - The image to use as the base when running the application.
#
# - Type: string
# - Required: false
startRunImage: example-value
# Start Only Include Files
#
# - A list of file paths to include in the runtime environment.
# - Other files will be excluded.
#
# - Type: array<string>
# - Required: false
startOnlyIncludeFiles:
- example-value
# Phases
#
# - Defines the build phases for the application.
# - Each phase specifies commands, dependencies, and settings.
#
# - Type: array<object (reference)>
# - Required: false
phases:
- name: example-name
cmds: ["example-value"]

LogForwarding alternatives

http-endpoint

This example shows how to configure log forwarding using http-endpoint.

resources:
myWorkerService:
type: worker-service
properties:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: http-endpoint
#
# - Type: object
# - Required: true
properties:
# HTTPS endpoint where logs will be forwarded
#
# - Type: string
# - Required: true
endpointUrl: https://example.com
# Specifies whether to use GZIP compression for the request
#
# - When enabled, Firehose uses GZIP content encoding to compress the request body before sending it to the destination
#
# - Type: boolean
# - Required: false
gzipEncodingEnabled: true
# Parameters included in each call to HTTP endpoint
#
# - Key/Value pairs containing additional metadata you wish to send to the HTTP endpoint.
# - Parameters are delivered within the **X-Amz-Firehose-Common-Attributes** header as a JSON object with the following format: `{"commonAttributes":{"param1":"val1", "param2":"val2"}}`
#
# - Type: object
# - Required: false
# Amount of time spent on retries.
#
# - The total amount of time that Kinesis Data Firehose spends on retries.
# - This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails.
# - Logs that fail to be delivered to the HTTP endpoint even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: number
# - Required: false
retryDuration: 100
# Access key (credentials), needed for authenticating with endpoint
#
# - The access key is carried within the **X-Amz-Firehose-Access-Key** header
# - The configured key is copied verbatim into the value of this header. The contents can be arbitrary and can, for example, represent a JWT token or an ACCESS_KEY.
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your access key.
#
# - Type: string
# - Required: false
accessKey: example-value
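
Because the endpoint must follow the Firehose request/response specifications, a compatible receiver can be sketched with Node.js built-ins alone. This is an illustrative sketch, not part of Stacktape; the port and the access-key check are assumptions:

import { createServer } from "http";
import { gunzipSync } from "zlib";

// Hypothetical: compare against the accessKey configured above
const EXPECTED_ACCESS_KEY = process.env.LOG_ACCESS_KEY;

const server = createServer((req, res) => {
  const chunks: Buffer[] = [];
  req.on("data", (chunk) => chunks.push(chunk));
  req.on("end", () => {
    // Firehose carries the configured access key in this header
    if (EXPECTED_ACCESS_KEY && req.headers["x-amz-firehose-access-key"] !== EXPECTED_ACCESS_KEY) {
      res.writeHead(401).end();
      return;
    }
    let body = Buffer.concat(chunks);
    // The body is gzip-compressed when gzipEncodingEnabled is true
    if (req.headers["content-encoding"] === "gzip") {
      body = gunzipSync(body);
    }
    const { requestId, records } = JSON.parse(body.toString("utf8"));
    // Each record's `data` field is a base64-encoded batch of log events
    for (const record of records ?? []) {
      console.log(Buffer.from(record.data, "base64").toString("utf8"));
    }
    // Firehose expects the request's requestId echoed back in a JSON response
    res.writeHead(200, { "content-type": "application/json" });
    res.end(JSON.stringify({ requestId, timestamp: Date.now() }));
  });
});

server.listen(8080);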

highlight

This example shows how to configure log forwarding using highlight.

resources:
myWorkerService:
type: worker-service
properties:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: highlight
#
# - Type: object
# - Required: true
properties:
# Id of a [highlight.io](https://www.highlight.io/) project.
#
# - You can get the id of your project in your [highlight.io console](https://app.highlight.io/).
#
# - Type: string
# - Required: true
projectId: example-value
# HTTPS endpoint where logs will be forwarded
#
# - By default, Stacktape uses `https://pub.highlight.io/v1/logs/firehose`
#
# - Type: string
# - Required: false
# - Default: https://pub.highlight.io/v1/logs/firehose
endpointUrl: https://pub.highlight.io/v1/logs/firehose

datadog

This example shows how to configure log forwarding using datadog.

resources:
myWorkerService:
type: worker-service
properties:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: datadog
#
# - Type: object
# - Required: true
properties:
# API key required to enable delivery of logs to Datadog
#
# - You can get your Datadog API key in [Datadog console](https://app.datadoghq.com/organization-settings/api-keys)
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your api key.
#
# - Type: string
# - Required: true
apiKey: example-value
# HTTPS endpoint where logs will be forwarded
#
# - By default, Stacktape uses `https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input`
# - If your Datadog site is in the EU, you should probably use `https://aws-kinesis-http-intake.logs.datadoghq.eu/v1/input`
#
# - Type: string
# - Required: false
# - Default: https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input
endpointUrl: https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input
