- 필수 기능
- 시작하기
- Glossary
- 표준 속성
- Guides
- Agent
- 통합
- 개방형텔레메트리
- 개발자
- Administrator's Guide
- API
- Datadog Mobile App
- CoScreen
- Cloudcraft
- 앱 내
- 서비스 관리
- 인프라스트럭처
- 애플리케이션 성능
- APM
- Continuous Profiler
- 스팬 시각화
- 데이터 스트림 모니터링
- 데이터 작업 모니터링
- 디지털 경험
- 소프트웨어 제공
- 보안
- AI Observability
- 로그 관리
- 관리
",t};e.buildCustomizationMenuUi=t;function n(e){let t='
",t}function s(e){let n=e.filter.currentValue||e.filter.defaultValue,t='${e.filter.label}
`,e.filter.options.forEach(s=>{let o=s.id===n;t+=``}),t+="${e.filter.label}
`,t+=`This page describes how to instrument your containerized Linux Azure App Service application with the Datadog Agent.
This document assumes that your application is set up for sidecars according to Azure’s Configure a sidecar container for custom container in Azure App Service tutorial.
If you would prefer not to use the sidecar approach (not recommended), you can instead follow the instructions to Instrument Azure App Service - Linux Container with serverless-init
.
If you haven’t already, install the Datadog-Azure integration to collect metrics and logs.
Instrument your main application with the dd-trace-js
library. See Tracing Node.js applications for instructions.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see Node.js Log Collection. To set up trace log correlation, see Correlating Node.js Logs and Traces.
Instrument your main application with the dd-trace-py
library. See Tracing Python applications for instructions.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see Python Log Collection. To set up trace log correlation, see Correlating Python Logs and Traces.
Instrument your main application with the dd-trace-java
library. See Tracing Java applications for instructions.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see Java Log Collection. To set up trace log correlation, see Correlating Java Logs and Traces.
Instrument your main application with the dd-trace-dotnet
library.
# Create the tracer install directory (DD_DOTNET_TRACER_HOME) and the
# tracer log directory (DD_TRACE_LOG_DIRECTORY) referenced by the
# environment variables configured later in this guide.
RUN mkdir -p /datadog/tracer
RUN mkdir -p /home/LogFiles/dotnet
# Download and unpack the Datadog .NET tracer (v2.51.0) into /datadog/tracer.
ADD https://github.com/DataDog/dd-trace-dotnet/releases/download/v2.51.0/datadog-dotnet-apm-2.51.0.tar.gz /datadog/tracer
RUN cd /datadog/tracer && tar -zxf datadog-dotnet-apm-2.51.0.tar.gz
Full example Dockerfile
# Stage 1: Build the application
FROM mcr.microsoft.com/dotnet/sdk:8.0 AS build
WORKDIR /app
# Copy the project file and restore dependencies
COPY *.csproj ./
RUN dotnet restore
# Copy the remaining source code
COPY . .
# Build the application
RUN dotnet publish -c Release -o out
# Stage 2: Create a runtime image
FROM mcr.microsoft.com/dotnet/aspnet:8.0 AS runtime
WORKDIR /app
# Copy the build output from stage 1
COPY --from=build /app/out ./
# Datadog specific
# /datadog/tracer is the tracer home (DD_DOTNET_TRACER_HOME) and
# /home/LogFiles/dotnet receives tracer logs (DD_TRACE_LOG_DIRECTORY).
RUN mkdir -p /datadog/tracer
RUN mkdir -p /home/LogFiles/dotnet
# Download and unpack the Datadog .NET tracer (v2.51.0) into the image.
ADD https://github.com/DataDog/dd-trace-dotnet/releases/download/v2.51.0/datadog-dotnet-apm-2.51.0.tar.gz /datadog/tracer
RUN cd /datadog/tracer && tar -zxf datadog-dotnet-apm-2.51.0.tar.gz
# Set the entry point for the application
ENTRYPOINT ["dotnet", "<your dotnet app>.dll"]
For more information, see Tracing .NET Applications.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see C# Log Collection. To set up trace log correlation, see Correlating .NET Logs and Traces.
Instrument your main application with the dd-trace-go
library. See Tracing Go applications for instructions.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see Go Log Collection. To set up trace log correlation, see Correlating Go Logs and Traces.
Instrument your main application with the dd-trace-php
library. See Tracing PHP applications for instructions.
Custom metrics are also collected through the tracer. See the code examples.
The Datadog sidecar uses file tailing to collect logs. Datadog recommends writing application logs to /home/LogFiles/
because this directory is persisted across restarts.
You can also create a subdirectory, such as /home/LogFiles/myapp
, if you want more control over what is sent to Datadog. However, if you do not tail all log files in /home/LogFiles
, then Azure App Service application logs related to startups and errors are not collected.
To set up logging in your application, see PHP Log Collection. To set up trace log correlation, see Correlating PHP Logs and Traces.
Instrumentation is done using a sidecar container. This sidecar container collects traces, metrics, and logs from your main application container and sends them to Datadog.
First, install the Datadog CLI and Azure CLI.
Login to your Azure account using the Azure CLI:
az login
Then, run the following command to set up the sidecar container:
export DD_API_KEY=<DATADOG_API_KEY>
export DD_SITE=<DATADOG_SITE>
datadog-ci aas instrument -s <subscription-id> -r <resource-group-name> -n <app-service-name>
Set DD_SITE to your Datadog site. Defaults to
datadoghq.com
.
Note: For .NET applications, add the --dotnet
flag to include the additional environment variables required by the .NET tracer.
Additional flags, like --service
and --env
, can be used to set the service and environment tags. For a full list of options, run datadog-ci aas instrument --help
.
index.docker.io
datadog/serverless-init:latest
In your App settings in Azure, set the following environment variables on both your main container and the sidecar container. Alternatively, set these variables on your main container and enable the Allow access to all app settings option.
DD_API_KEY
: Your Datadog API key
DD_SERVICE
: How you want to tag your service. For example, sidecar-azure
DD_ENV
: How you want to tag your env. For example, prod
DD_SERVERLESS_LOG_PATH
: Where you write your logs. For example, /home/LogFiles/*.log
or /home/LogFiles/myapp/*.log
DD_AAS_INSTANCE_LOGGING_ENABLED
: When true
, log collection is automatically configured for an additional file path: /home/LogFiles/*$COMPUTERNAME*.log
The file path includes the $COMPUTERNAME
variable. This ensures that log tailing does not create duplicated logs from multiple instances reading the same file. If you are setting up monitoring for a .NET application, configure the following required environment variables.
Variable name | Value |
---|---|
DD_DOTNET_TRACER_HOME | /datadog/tracer |
DD_TRACE_LOG_DIRECTORY | /home/LogFiles/dotnet |
CORECLR_ENABLE_PROFILING | 1 |
CORECLR_PROFILER | {846F5F1C-F9AE-4B07-969E-05C26BC060D8} |
CORECLR_PROFILER_PATH | /datadog/tracer/Datadog.Trace.ClrProfiler.Native.so |
The following example contains a single app with tracing, metrics, and logs set up.
// Example Node.js app instrumented for the Datadog sidecar: traces via
// dd-trace, JSON logs via winston to /home/LogFiles (tailed by the sidecar),
// and custom metrics via DogStatsD.
const tracer = require('dd-trace').init({
  logInjection: true, // inject trace/span IDs into log records for correlation
});
const express = require("express");
const app = express();
const { createLogger, format, transports } = require('winston');

// Per-instance log file name ($COMPUTERNAME) so multiple instances do not
// append to — and the sidecar does not double-tail — the same file.
const logger = createLogger({
  level: 'info',
  exitOnError: false,
  format: format.json(),
  transports: [new transports.File({ filename: `/home/LogFiles/app-${process.env.COMPUTERNAME}.log`}),
  ],
});

app.get("/", (_, res) => {
  logger.info("Welcome!");
  res.sendStatus(200);
});

app.get("/hello", (_, res) => {
  logger.info("Hello!");
  // FIX: declare with const — these were implicit globals, which throws
  // a ReferenceError in strict mode (and leaks globals otherwise).
  const metricPrefix = "nodejs-azure-sidecar";
  // Send three unique metrics, just so we're testing more than one single metric
  const metricsToSend = ["sample_metric_1", "sample_metric_2", "sample_metric_3"];
  metricsToSend.forEach((metric) => {
    for (let i = 0; i < 20; i++) {
      tracer.dogstatsd.distribution(`${metricPrefix}.${metric}`, 1);
    }
  });
  res.status(200).json({ msg: "Sending metrics to Datadog" });
});

const port = process.env.PORT || 8080;
app.listen(port);
from flask import Flask, Response
from datadog import initialize, statsd
import os
import ddtrace
import logging

# Patch the logging module so the dd.* fields referenced in FORMAT
# (service, env, version, trace_id, span_id) are injected into records,
# enabling log/trace correlation in Datadog.
ddtrace.patch(logging=True)

FORMAT = ('%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] '
          '[dd.service=%(dd.service)s dd.env=%(dd.env)s dd.version=%(dd.version)s dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] '
          '- %(message)s')
# FIX: "COMPUTERNAME" must be a quoted string — the original passed a bare,
# undefined name to os.getenv, raising NameError at import time. The
# per-instance filename keeps instances from writing to the same file.
logging.basicConfig(filename=f'/home/LogFiles/app-{os.getenv("COMPUTERNAME")}.log', format=FORMAT)
log = logging.getLogger(__name__)
log.level = logging.INFO

# DogStatsD client configuration: the Datadog sidecar listens on
# localhost:8125 for custom metrics.
options = {
    'statsd_host': '127.0.0.1',
    'statsd_port': 8125
}
initialize(**options)

app = Flask(__name__)


@app.route("/")
def home():
    statsd.increment('page.views')  # custom metric via DogStatsD
    log.info('Hello Datadog!!')
    return Response('💜 Hello Datadog!! 💜', status=200, mimetype='application/json')


app.run(host="0.0.0.0", port=8080)
package com.example.springboot;

import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RestController;
import com.timgroup.statsd.NonBlockingStatsDClientBuilder;
import com.timgroup.statsd.StatsDClient;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
 * Example Spring Boot controller instrumented for the Datadog sidecar:
 * increments a custom DogStatsD counter (sidecar listens on localhost:8125)
 * and writes a log line on each request to "/".
 */
@RestController
public class HelloController {

    // FIX: renamed from "Statsd" — Java fields use lowerCamelCase.
    private static final StatsDClient statsd =
            new NonBlockingStatsDClientBuilder().hostname("localhost").port(8125).build();

    private static final Log logger = LogFactory.getLog(HelloController.class);

    @GetMapping("/")
    public String index() {
        statsd.incrementCounter("page.views"); // custom metric via DogStatsD
        logger.info("Hello Azure!");
        return "💜 Hello Azure! 💜";
    }
}
package main
import (
"fmt"
"log"
"net/http"
"os"
"path/filepath"
"github.com/DataDog/datadog-go/v5/statsd"
"github.com/DataDog/dd-trace-go/v2/ddtrace"
"github.com/DataDog/dd-trace-go/v2/ddtrace/tracer"
)
// logDir is where application logs are written; the Datadog sidecar tails
// this directory and it is persisted across App Service restarts.
const logDir = "/home/LogFiles"

// Shared state initialized in main and used by the HTTP handler.
var logFile *os.File
var logCounter int
var dogstatsdClient *statsd.Client
// handler serves "/": starts a span, writes a traced log line to the shared
// log file, increments a DogStatsD counter, and responds.
func handler(w http.ResponseWriter, r *http.Request) {
log.Println("Hello Datadog!")
span := tracer.StartSpan("maincontainer", tracer.ResourceName("/handler"))
defer span.Finish()
// NOTE(review): logCounter is incremented without synchronization;
// concurrent requests may race — confirm whether that matters here.
logCounter++
writeLogsToFile(fmt.Sprintf("received request %d", logCounter), span.Context())
dogstatsdClient.Incr("request.count", []string{}, 1)
fmt.Fprintf(w, "💜 Hello Datadog! 💜")
}
// writeLogsToFile appends logMsg to the shared log file, wrapping the write
// in a child span of context so the file I/O shows up under the request
// trace. Write errors are logged and otherwise ignored (best effort).
func writeLogsToFile(logMsg string, context ddtrace.SpanContext) {
	span := tracer.StartSpan(
		"writeLogToFile",
		tracer.ResourceName("/writeLogsToFile"),
		tracer.ChildOf(context))
	defer span.Finish()
	// FIX: renamed log_msg -> logMsg (Go naming uses mixedCaps, never
	// underscores); error check folded into the idiomatic if-initializer.
	if _, err := logFile.WriteString(logMsg + "\n"); err != nil {
		log.Println("Error writing to log file:", err)
	}
}
// main wires up the log file, the DogStatsD client, and the tracer, then
// serves HTTP on :8080. Any setup failure panics (fail fast at startup).
func main() {
log.Print("Main container started...")
// Ensure the tailed log directory exists.
err := os.MkdirAll(logDir, 0755)
if err != nil {
panic(err)
}
// Per-instance log file name ($COMPUTERNAME) keeps multiple instances from
// appending to the same file, which would duplicate tailed logs.
logFilePath := filepath.Join(logDir, fmt.Sprintf("app-%s.log", os.Getenv("COMPUTERNAME")))
log.Println("Saving logs in ", logFilePath)
logFileLocal, err := os.OpenFile(logFilePath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644)
if err != nil {
panic(err)
}
defer logFileLocal.Close()
logFile = logFileLocal
// DogStatsD client: the Datadog sidecar listens on localhost:8125.
dogstatsdClient, err = statsd.New("localhost:8125")
if err != nil {
panic(err)
}
defer dogstatsdClient.Close()
tracer.Start()
defer tracer.Stop()
http.HandleFunc("/", handler)
log.Fatal(http.ListenAndServe(":8080", nil))
}
<?php
// Example PHP app instrumented for the Datadog sidecar: custom metrics via
// DogStatsD (sidecar listens on localhost:8125) and JSON logs written to
// /home/LogFiles with trace/span IDs appended for log/trace correlation.
require __DIR__ . '/vendor/autoload.php';

use DataDog\DogStatsd;
use Monolog\Logger;
use Monolog\Handler\StreamHandler;
use Monolog\Formatter\JsonFormatter;

$statsd = new DogStatsd(
    array('host' => '127.0.0.1',
          'port' => 8125,
    )
);

// FIX: use the imported class with its canonical casing — "new logger(...)"
// only worked because PHP class-name lookup is case-insensitive.
$log = new Logger('datadog');
$formatter = new JsonFormatter();
// Per-instance log file name ($COMPUTERNAME) keeps multiple instances from
// writing to the same tailed file.
$stream = new StreamHandler('/home/LogFiles/app-'.getenv("COMPUTERNAME").'.log', Logger::DEBUG);
$stream->setFormatter($formatter);
$log->pushHandler($stream);
// Append the active trace/span IDs to every record so Datadog can correlate
// these logs with APM traces.
$log->pushProcessor(function ($record) {
    $record['message'] .= sprintf(
        ' [dd.trace_id=%s dd.span_id=%s]',
        \DDTrace\logs_correlation_trace_id(),
        \dd_trace_peek_span_id()
    );
    return $record;
});

$log->info("Hello Datadog!");
echo '💜 Hello Datadog! 💜';
$log->info("sending a metric");
$statsd->increment('page.views', 1, array('environment'=>'dev'));
?>