Next.js Web

This example shows a basic Next.js web configuration.

Next.js web resource

  • Purpose-built Next.js web resource that runs the Next.js server in AWS Lambda or Lambda@Edge
  • Enables seamless integration of your Next.js web with other resources of your infrastructure

Basic example

resources:
myNextjsWeb:
type: nextjs-web
properties:
# Root directory of your nextjs app
#
# - Path to root of your Next.js project (where your `next.config.js/ts` is).
# - Open Next build will be executed inside this directory.
# - Unless you are using monorepo setup, this path is usually `./`
# - If the app directory additionally contains `open-next.config.js/ts` file, the options from this file will be used during the build.
#
# - Type: string
# - Required: true
appDirectory: example-value
# Properties of server lambda running the next server
#
# - Setting properties can be useful if you want to i.e increase the memory or timeout of the function.
#
# - Type: object
# - Required: false
serverLambda:
# Amount of memory (in MB) available to the server function during execution
#
# - Must be between 128 MB and 10,240 MB in 1-MB increments.
# - The amount of CPU power available to the function is also set through the memory property - it is proportional to the amount of available memory.
# - A function with 1797MB of memory has CPU power equal to 1 virtual CPU. A Lambda function can have a maximum of 6 vCPUs (at 10,240 MB of RAM).
#
# - Type: number
# - Required: false
# - Default: 1024
memory: 2048
# Maximum amount of time (in seconds) the lambda function is allowed to run
#
# - Maximum allowed time is 30 seconds.
#
# - Type: number
# - Required: false
# - Default: 30
timeout: 30
# Configures logging behavior for the nextjs server function
#
# - Information about the function invocation and function logs (stdout and stderr)
# are automatically sent to a pre-created CloudWatch log group.
# - By default, logs are retained for 180 days.
# - You can browse your logs in 2 ways:
# - go to the log group page in the AWS CloudWatch console. You can use `stacktape stack-info` command to get a
# direct link.
# - use [stacktape logs command](https://docs.stacktape.com/cli/commands/logs/) to print logs to the console
#
# - Type: object
# - Required: false
logging:
# Disables the collection of function's application logs (stdout and stderr) to CloudWatch
#
# - Type: boolean
# - Required: false
# - Default: false
disabled: false
# Amount of days the logs will be retained in the log group
#
# - Type: enum: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
# - Required: false
# - Default: 180
# - Allowed values: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
retentionDays: 180
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: union (anyOf)
# - Required: false
#
# - Type: object
# - Required: false
logForwarding:
#
# - Type: string
# - Required: true
type: http-endpoint
#
# - Type: object
# - Required: true
properties:
# HTTPS endpoint where logs will be forwarded
#
# - Type: string
# - Required: true
endpointUrl: https://example.com
# Specifies whether to use GZIP compression for the request
#
# - When enabled, Firehose uses the content encoding to compress the body of a request before sending the request to the destination
#
# - Type: boolean
# - Required: false
gzipEncodingEnabled: true
# Parameters included in each call to HTTP endpoint
#
# - Key/Value pairs containing additional metadata you wish to send to the HTTP endpoint.
# - Parameters are delivered within the **X-Amz-Firehose-Common-Attributes** header as a JSON object with the following format: `{"commonAttributes":{"param1":"val1", "param2":"val2"}}`
#
# - Type: object
# - Required: false
# Amount of time spent on retries.
#
# - The total amount of time that Kinesis Data Firehose spends on retries.
# - This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails.
# - Logs that fail to be delivered to the HTTP endpoint even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: number
# - Required: false
retryDuration: 100
# Access key (credentials), needed for authenticating with endpoint
#
# - The access key is carried within the **X-Amz-Firehose-Access-Key** header
# - The configured key is copied verbatim into the value of this header. The contents can be arbitrary and can potentially represent a JWT token or an ACCESS_KEY.
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your access key.
#
# - Type: string
# - Required: false
accessKey: example-value
# Connects the function to the default VPC
#
# > Connecting a function to the VPC makes it lose connection to the internet. (Outbound requests will fail).
# - Functions are NOT connected to the default VPC of your stack by default.
# - To communicate with certain resources inside your VPC, you might need to connect your function to the VPC - most commonly when accessing relational-database or redis-cluster.
# - If your function connects to S3 (**bucket**) or DynamoDB (**dynamo-db-table**) resources using `connectTo`, VPC gateway endpoints are automatically created in the VPC so that your function can access the resources even without internet access.
# - To learn more about VPCs, refer to [VPCs Stacktape documentation](https://docs.stacktape.com/user-guides/vpcs).
#
# - Type: boolean
# - Required: false
joinDefaultVpc: true
# Number of server lambda instances to keep warm
#
# - The server function may experience performance issues due to Lambda cold starts (slower response times).
# - To mitigate cold start effects, a warmer function can periodically invoke the server function to keep the specified number of instances warm.
# - By default, the warmer is disabled.
#
# > At the moment, the warmer does not work when using lambda@edge for the server function (see the `useEdgeLambda` property)
#
# - Type: number
# - Required: false
warmServerInstances: 100
# Configures using lambda@edge for server function
#
# - When using edge lambda functions, your server function is executed at the regional cache locations across the globe.
# This means that the function is executed closer to your user, which results in reduced response time.
# - Downsides of using lambda@edge are:
# - slower deployment times compared to using regular lambda,
# - you cannot use warmer (see `warmServerInstances` property) to keep server function warm.
#
# - Type: boolean
# - Required: false
# - Default: false
useEdgeLambda: false
# Build command used to build your nextjs app
#
# - Allows you to specify a custom build command used for building your Next.js app
#
# - Type: string
# - Required: false
buildCommand: example-value
# File options allow you to manually set headers (such as `cache-control` or `content-type`) of files that match the filter pattern
#
# - Type: array<object (reference)>
# - Required: false
fileOptions:
- includePattern: example-value
excludePattern: example-value
# Environment variables injected to the runtime environment of server function
#
# - Environment variables are often used to inject information about other parts of the infrastructure (such as information about user pool or API gateway).
# - Specified environment variables can be accessed inside the server function
#
# - Type: array<object (reference)>
# - Required: false
environment:
- name: NODE_ENV
value: production
- name: DATABASE_URL
value: $ResourceParam(myDatabase, connectionString)
# Attaches custom domains to your Next.js web
#
# Stacktape allows you to connect your custom domain names to some of your resources
# (Web Service, Nextjs web, HTTP API Gateways, Application Load Balancers and Buckets with CDNs).
#
# Connecting a custom domain to the resource does 2 things:
# - **Creates DNS records**:
# - If you use your custom domain with a resource, Stacktape automatically creates a DNS record (during deploy) pointing the specified domain name to the resource.
# - **Adds TLS certificates**
# - If the origin resource (HTTP API Gateway, Application Load Balancer or CDN) uses HTTPS protocol, Stacktape takes care of
# issuing and attaching correct (free, AWS-managed) certificate to the resource.
# This means, you do not have to deal with TLS termination as it is handled by the connected resource.
# - If you want to use your own certificates, you can configure `customCertificateArns`.
#
# > To manage a custom domain, it first needs to be added to your AWS account.
# > This means that a [hosted zone](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/AboutHZWorkingWith.html)
# > (collection of records managed together for a given domain)
# > for your domain exists in your AWS account and your domain registrar's name servers are pointing to it.
# > To learn more, refer to [Adding a domain guide](https://docs.stacktape.com/other-resources/domains-and-certificates/#adding-domain).
#
# - Type: array<object (reference)>
# - Required: false
customDomains:
- domainName: api.example.com
customCertificateArn: example-value
# Name of the 'web-app-firewall' resource to be used with your CDN
#
# - You can use `web-app-firewall` to protect your resources from common web exploits that could affect application availability, compromise security, or consume excessive resources.
# - Web app firewall protects your application by filtering dangerous requests coming to your app.
# You can read more about the firewall [in our docs](https://docs.stacktape.com/security-resources/web-app-firewalls/).
#
# - Type: string
# - Required: false
useFirewall: example-value
# Enables response streaming.
#
# - Advantages of response streaming:
# - can improve performance for web and mobile applications (lower TTFB - time to first byte)
# - response size can be up to 20MB (compared to Lambda's default 6MB limit) - this is a soft limit and can be increased
#
# > Currently, response streaming is still considered experimental.
# > Response streaming does not work when using edge lambdas.
#
# - Type: boolean
# - Required: false
# - Default: false
streamingEnabled: false
# Configures access to other resources of your stack (such as databases, buckets, event-buses, etc.) and aws services
#
# By referencing resources (or services) in `connectTo` list, Stacktape automatically:
# - configures correct compute resource's **IAM role permissions** if needed
# - sets up correct **security group rules** to allow access if needed
# - **injects relevant environment variables** containing information about resource you are connecting to into the compute resource's runtime
# - names of environment variables use upper-snake-case and are in form `STP_[RESOURCE_NAME]_[VARIABLE_NAME]`,
# - examples: `STP_MY_DATABASE_CONNECTION_STRING` or `STP_MY_EVENT_BUS_ARN`,
# - list of injected variables for each resource type can be seen below.
#
#
# Granted permissions and injected environment variables are different depending on resource type:
#
#
# `Bucket`
# - **Permissions:**
# - list objects in a bucket
# - create / get / delete / tag object in a bucket
# - **Injected env variables**: `NAME`, `ARN`
#
#
# `DynamoDB table`
# - **Permissions:**
# - get / put / update / delete item in a table
# - scan / query a table
# - describe table stream
# - **Injected env variables**: `NAME`, `ARN`, `STREAM_ARN`
#
#
# `MongoDB Atlas cluster`
# - **Permissions:**
# - Allows connection to a cluster with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# MongoDB Atlas clusters accessibility modes, refer to
# [MongoDB Atlas cluster docs](https://docs.stacktape.com/3rd-party-resources/mongo-db-atlas-clusters/#accessibility).
# - Creates access "user" associated with compute resource's role to allow for secure credential-less access to the cluster
# - **Injected env variables**: `CONNECTION_STRING`
#
#
# `Relational(SQL) database`
# - **Permissions:**
# - Allows connection to a relational database with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# relational database accessibility modes, refer to [Relational databases docs](https://docs.stacktape.com/resources/relational-databases#accessibility).
# - **Injected env variables**: `CONNECTION_STRING`, `JDBC_CONNECTION_STRING`, `HOST`, `PORT`
# (in case of aurora multi instance cluster additionally: `READER_CONNECTION_STRING`, `READER_JDBC_CONNECTION_STRING`, `READER_HOST`)
#
#
# `Redis cluster`
# - **Permissions:**
# - Allows connection to a redis cluster with `accessibilityMode` set to `scoping-workloads-in-vpc`. To learn more about
# redis cluster accessibility modes, refer to [Redis clusters docs](https://docs.stacktape.com/resources/redis-clusters#accessibility).
# - **Injected env variables**: `HOST`, `READER_HOST`, `PORT`
#
#
# `Event bus`
# - **Permissions:**
# - publish events to the specified Event bus
# - **Injected env variables**: `ARN`
#
#
# `Function`
# - **Permissions:**
# - invoke the specified function
# - invoke the specified function via url (if lambda has URL enabled)
# - **Injected env variables**: `ARN`
#
#
# `Batch job`
# - **Permissions:**
# - submit batch-job instance into batch-job queue
# - list submitted job instances in a batch-job queue
# - describe / terminate a batch-job instance
# - list executions of state machine which executes the batch-job according to its strategy
# - start / terminate execution of a state machine which executes the batch-job according to its strategy
# - **Injected env variables**: `JOB_DEFINITION_ARN`, `STATE_MACHINE_ARN`
#
#
# `User auth pool`
# - **Permissions:**
# - full control over the user pool (`cognito-idp:*`)
# - for more information about allowed methods refer to [AWS docs](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazoncognitouserpools.html)
# - **Injected env variables**: `ID`, `CLIENT_ID`, `ARN`
#
#
#
# `SNS Topic`
# - **Permissions:**
# - confirm/list subscriptions of the topic
# - publish/subscribe to the topic
# - unsubscribe from the topic
# - **Injected env variables**: `ARN`, `NAME`
#
#
#
# `SQS Queue`
# - **Permissions:**
# - send/receive/delete message
# - change visibility of message
# - purge queue
# - **Injected env variables**: `ARN`, `NAME`, `URL`
#
#
# `Upstash Kafka topic`
# - **Injected env variables**: `TOPIC_NAME`, `TOPIC_ID`, `USERNAME`, `PASSWORD`, `TCP_ENDPOINT`, `REST_URL`
#
#
# `Upstash Redis`
# - **Injected env variables**: `HOST`, `PORT`, `PASSWORD`, `REST_TOKEN`, `REST_URL`, `REDIS_URL`
#
#
# `Private service`
# - **Injected env variables**: `ADDRESS`
#
#
# `aws:ses`(Macro)
# - **Permissions:**
# - gives full permissions to aws ses (`ses:*`).
# - for more information about allowed methods refer to [AWS docs](https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonses.html)
#
# - Type: array<string>
# - Required: false
connectTo:
- myDatabase
- myBucket
# Raw AWS IAM role statements appended to your resource's role.
#
# - Type: array<object (reference)>
# - Required: false
iamRoleStatements:
- Resource: ["example-value"]
Sid: example-value
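
For contrast with the fully annotated example above, a minimal configuration only needs the required `appDirectory` property. The snippet below is an illustrative sketch - the `myBucket` resource stands in for the bucket referenced through `connectTo` above, and all names are placeholders:

resources:
  myBucket:
    type: bucket

  myNextjsWeb:
    type: nextjs-web
    properties:
      # root of the Next.js project (where next.config.js/ts lives)
      appDirectory: ./
      # grants the server function access to the bucket and injects the
      # STP_MY_BUCKET_NAME and STP_MY_BUCKET_ARN environment variables
      connectTo:
        - myBucket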

LogForwarding alternatives

http-endpoint

This example shows how to configure log forwarding using http-endpoint.

resources:
myNextjsWeb:
type: nextjs-web
properties:
serverLambda:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: http-endpoint
#
# - Type: object
# - Required: true
properties:
# HTTPS endpoint where logs will be forwarded
#
# - Type: string
# - Required: true
endpointUrl: https://example.com
# Specifies whether to use GZIP compression for the request
#
# - When enabled, Firehose uses the content encoding to compress the body of a request before sending the request to the destination
#
# - Type: boolean
# - Required: false
gzipEncodingEnabled: true
# Parameters included in each call to HTTP endpoint
#
# - Key/Value pairs containing additional metadata you wish to send to the HTTP endpoint.
# - Parameters are delivered within the **X-Amz-Firehose-Common-Attributes** header as a JSON object with the following format: `{"commonAttributes":{"param1":"val1", "param2":"val2"}}`
#
# - Type: object
# - Required: false
# Amount of time spent on retries.
#
# - The total amount of time that Kinesis Data Firehose spends on retries.
# - This duration starts after the initial attempt to send data to the custom destination via HTTPS endpoint fails.
# - Logs that fail to be delivered to the HTTP endpoint even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: number
# - Required: false
retryDuration: 100
# Access key (credentials), needed for authenticating with endpoint
#
# - The access key is carried within the **X-Amz-Firehose-Access-Key** header
# - The configured key is copied verbatim into the value of this header. The contents can be arbitrary and can potentially represent a JWT token or an ACCESS_KEY.
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your access key.
#
# - Type: string
# - Required: false
accessKey: example-value

highlight

This example shows how to configure log forwarding using highlight.

resources:
myNextjsWeb:
type: nextjs-web
properties:
serverLambda:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: highlight
#
# - Type: object
# - Required: true
properties:
# Id of a [highlight.io](https://www.highlight.io/) project.
#
# - You can get the id of your project in your [highlight.io console](https://app.highlight.io/).
#
# - Type: string
# - Required: true
projectId: example-value
# HTTPS endpoint where logs will be forwarded
#
# - By default Stacktape uses `https://pub.highlight.io/v1/logs/firehose`
#
# - Type: string
# - Required: false
# - Default: https://pub.highlight.io/v1/logs/firehose
endpointUrl: https://pub.highlight.io/v1/logs/firehose

datadog

This example shows how to configure log forwarding using datadog.

resources:
myNextjsWeb:
type: nextjs-web
properties:
serverLambda:
logging:
# Configures forwarding of logs to specified destination
#
# - Log forwarding is done using [Amazon Kinesis Data Firehose](https://aws.amazon.com/kinesis/data-firehose/) delivery stream.
# - When using log forwarding, you will incur costs based on the amount of data being transferred to the destination (~$0.03 per transferred GB).
# Refer to [AWS Kinesis Firehose Pricing](https://aws.amazon.com/kinesis/data-firehose/pricing/?nc=sn&loc=3) page to see details.
# - Currently supported destinations for logs:
# - `http-endpoint`
# - delivers logs to any HTTP endpoint.
# - The endpoint must follow [Firehose request and response specifications](https://docs.aws.amazon.com/firehose/latest/dev/httpdeliveryrequestresponse.html).
# (Many third-party vendors are compliant with these specifications out of the box.)
# - `datadog`
# - delivers logs to [Datadog](https://www.datadoghq.com/).
# - `highlight`
# - delivers logs to [Highlight.io](https://www.highlight.io/) project.
#
# Refer to [our docs](https://docs.stacktape.com/configuration/log-forwarding/) for more information.
#
# > Logs that fail to be delivered to the destination even after multiple retries (time spent on retries can be configured) are put into a bucket named `{stackName}-{resourceName}-logs-{generatedHash}`
#
# - Type: object
# - Required: true
logForwarding:
#
# - Type: string
# - Required: true
type: datadog
#
# - Type: object
# - Required: true
properties:
# API key required to enable delivery of logs to Datadog
#
# - You can get your Datadog API key in [Datadog console](https://app.datadoghq.com/organization-settings/api-keys)
# - It is recommended to use [secret](https://docs.stacktape.com/resources/secrets/) for storing your api key.
#
# - Type: string
# - Required: true
apiKey: example-value
# HTTPS endpoint where logs will be forwarded
#
# - By default Stacktape uses `https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input`
# - If your Datadog site is in the EU, you should probably use `https://aws-kinesis-http-intake.logs.datadoghq.eu/v1/input`
#
# - Type: string
# - Required: false
# - Default: https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input
endpointUrl: https://aws-kinesis-http-intake.logs.datadoghq.com/v1/input
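
Rather than hard-coding the API key in the configuration, the comments above recommend storing it in a Stacktape secret. A minimal sketch of that approach, assuming a secret named `datadog-api-key` has already been created and is referenced with the `$Secret()` directive described in the linked secrets documentation (the secret name and surrounding values are illustrative):

resources:
  myNextjsWeb:
    type: nextjs-web
    properties:
      appDirectory: ./
      serverLambda:
        logging:
          logForwarding:
            type: datadog
            properties:
              # reference to a pre-created secret instead of a hard-coded key
              apiKey: $Secret('datadog-api-key')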
