Merged
Commits
45 commits
b40cd78
Initial code, and removal of reset credentials
jsoriano Dec 24, 2024
47532c3
Assume 410 status gone is ok for elasticsearch
jsoriano Dec 24, 2024
b9e112f
Refactor client tests so they don't try to use the configured client …
jsoriano Dec 24, 2024
a44469d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 26, 2024
cd980a6
Refactor shellinit
jsoriano Dec 26, 2024
5b41cd9
Use API key in stack clients
jsoriano Dec 26, 2024
12aaebe
Ignore errors when getting logs from a non-local elasticsearch
jsoriano Dec 26, 2024
cce94bd
Share logic to start local services
jsoriano Dec 26, 2024
b3b1e76
Fix spaces in logstash config
jsoriano Dec 27, 2024
3797d20
Prepare interfaces to create policies and get enrollment tokens
jsoriano Dec 27, 2024
04e22d2
Initial enrollment works
jsoriano Dec 27, 2024
8f17940
Tear down
jsoriano Dec 27, 2024
83beb64
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Dec 30, 2024
290c6d9
Fix tear down
jsoriano Dec 30, 2024
be6dd46
Fix system tests
jsoriano Dec 30, 2024
6169e15
Get kibana host directly from the config?
jsoriano Dec 30, 2024
2e12e02
Fix stack up with logstash
jsoriano Dec 30, 2024
f8d1cee
Fix logstash with api keys
jsoriano Dec 30, 2024
9a24380
Better idempotence
jsoriano Dec 30, 2024
c4822eb
Remove unused variable
jsoriano Dec 30, 2024
7295a2e
Revert change in initialization of kibana host
jsoriano Dec 30, 2024
0ec34f2
Implement status for environment provider
jsoriano Dec 31, 2024
5f000c5
Try to support local Fleet Server for remote stacks
jsoriano Jan 2, 2025
0a188b4
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 2, 2025
184209e
Fix certificates on agent deployer
jsoriano Jan 3, 2025
d4d32ac
Fix fleet status when fleet server is locally managed
jsoriano Jan 3, 2025
038549c
Reuse existing fleet server hosts
jsoriano Jan 3, 2025
91f2b2d
Add options for API key in clients
jsoriano Jan 3, 2025
b854ca9
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 3, 2025
0d1a1b2
Merge branch 'api-key-clients' into api-key-support
jsoriano Jan 3, 2025
74f2049
Add host.docker.internal to the local services
jsoriano Jan 3, 2025
bbbc671
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 7, 2025
0095a32
Polish status
jsoriano Jan 7, 2025
f60e15d
Add output id to stack config
jsoriano Jan 7, 2025
0c407a0
Fix error formatting value
jsoriano Jan 7, 2025
f53325d
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 8, 2025
dcc5e0b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 13, 2025
c65452b
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 14, 2025
ffeb24c
Remove unused API keys
jsoriano Jan 15, 2025
1079df7
Fix issues after merge
jsoriano Jan 15, 2025
699623e
Fix kubernetes agent deployer
jsoriano Jan 17, 2025
699cb0f
Add tech preview warning
jsoriano Jan 17, 2025
52ec637
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 17, 2025
aa71071
Merge remote-tracking branch 'origin/main' into api-key-support
jsoriano Jan 20, 2025
d728838
Pass context to call to get enrollment tokens
jsoriano Jan 20, 2025
7 changes: 7 additions & 0 deletions internal/agentdeployer/_static/docker-agent-base.yml.tmpl
@@ -5,6 +5,7 @@
{{- $dockerfile_hash := fact "dockerfile_hash" -}}
{{- $stack_version := fact "stack_version" }}
{{- $agent_image := fact "agent_image" }}
{{- $enrollment_token := fact "enrollment_token" }}
services:
elastic-agent:
hostname: ${AGENT_HOSTNAME}
@@ -40,9 +41,13 @@ services:
- FLEET_ENROLL=1
- FLEET_URL={{ fact "fleet_url" }}
- KIBANA_HOST={{ fact "kibana_host" }}
{{ if eq $enrollment_token "" }}
- FLEET_TOKEN_POLICY_NAME=${FLEET_TOKEN_POLICY_NAME}
- ELASTICSEARCH_USERNAME={{ fact "elasticsearch_username" }}
- ELASTICSEARCH_PASSWORD={{ fact "elasticsearch_password" }}
{{ else }}
- FLEET_ENROLLMENT_TOKEN={{ $enrollment_token }}
{{ end }}
volumes:
- type: bind
source: ${LOCAL_CA_CERT}
@@ -57,3 +62,5 @@ services:
source: ${SERVICE_LOGS_DIR}
target: /run/service_logs/
read_only: false
extra_hosts:
- "host.docker.internal:host-gateway"
internal/agentdeployer/_static/elastic-agent-managed.yaml.tmpl
@@ -44,15 +44,15 @@ spec:
value: {{ .fleetURL }}
# If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed
- name: FLEET_ENROLLMENT_TOKEN
value: ""
value: "{{ .enrollmentToken }}"
- name: FLEET_TOKEN_POLICY_NAME
value: "{{ .elasticAgentTokenPolicyName }}"
- name: KIBANA_HOST
value: {{ .kibanaURL }}
- name: KIBANA_FLEET_USERNAME
value: "elastic"
value: {{ .username }}
- name: KIBANA_FLEET_PASSWORD
value: "changeme"
value: {{ .password }}
- name: SSL_CERT_DIR
value: "/etc/ssl/certs:/etc/ssl/elastic-package"
- name: NODE_NAME
30 changes: 24 additions & 6 deletions internal/agentdeployer/agent.go
@@ -119,7 +119,7 @@ func (d *DockerComposeAgentDeployer) SetUp(ctx context.Context, agentInfo AgentI
fmt.Sprintf("%s=%s", agentHostnameEnv, d.agentHostname()),
)

configDir, err := d.installDockerCompose(agentInfo)
configDir, err := d.installDockerCompose(ctx, agentInfo)
if err != nil {
return nil, fmt.Errorf("could not create resources for custom agent: %w", err)
}
@@ -233,7 +233,7 @@ func (d *DockerComposeAgentDeployer) agentName() string {

// installDockerCompose creates the files needed to run the custom elastic agent and returns
// the directory with these files.
func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (string, error) {
func (d *DockerComposeAgentDeployer) installDockerCompose(ctx context.Context, agentInfo AgentInfo) (string, error) {
customAgentDir, err := CreateDeployerDir(d.profile, fmt.Sprintf("docker-agent-%s-%s", d.agentName(), d.agentRunID))
if err != nil {
return "", fmt.Errorf("failed to create directory for custom agent files: %w", err)
@@ -254,14 +254,31 @@ func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (
if err != nil {
return "", fmt.Errorf("failed to load config from profile: %w", err)
}
enrollmentToken := ""
if config.ElasticsearchAPIKey != "" {
// TODO: Review if this is the correct place to get the enrollment token.
kibanaClient, err := stack.NewKibanaClientFromProfile(d.profile)
if err != nil {
return "", fmt.Errorf("failed to create kibana client: %w", err)
}
enrollmentToken, err = kibanaClient.GetEnrollmentTokenForPolicyID(ctx, agentInfo.Policy.ID)
if err != nil {
return "", fmt.Errorf("failed to get enrollment token for policy %q: %w", agentInfo.Policy.Name, err)
}
}

// TODO: Include these settings more explicitly in `config`.
Review comment (Contributor): It would be good to have those default values in config directly. In any case, I think this could be changed in a follow-up PR.

fleetURL := "https://fleet-server:8220"
kibanaHost := "https://kibana:5601"
stackVersion := d.stackVersion
if config.Provider == stack.ProviderServerless {
fleetURL = config.Parameters[stack.ParamServerlessFleetURL]
if config.Provider != stack.ProviderCompose {
kibanaHost = config.KibanaHost
stackVersion = config.Parameters[stack.ParamServerlessLocalStackVersion]
}
if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
fleetURL = url
}
if version, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
stackVersion = version
}

agentImage, err := selectElasticAgentImage(stackVersion, agentInfo.Agent.BaseImage)
@@ -280,9 +297,10 @@ func (d *DockerComposeAgentDeployer) installDockerCompose(agentInfo AgentInfo) (
"dockerfile_hash": hex.EncodeToString(hashDockerfile),
"stack_version": stackVersion,
"fleet_url": fleetURL,
"kibana_host": kibanaHost,
"kibana_host": stack.DockerInternalHost(kibanaHost),
"elasticsearch_username": config.ElasticsearchUsername,
"elasticsearch_password": config.ElasticsearchPassword,
"enrollment_token": enrollmentToken,
})

resourceManager.RegisterProvider("file", &resource.FileProvider{
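
The enrollment-token lookup added above is repeated almost verbatim in internal/agentdeployer/kubernetes.go below. A sketch of how both call sites could share one helper; the helper name and import paths are hypothetical, while LoadConfig, NewKibanaClientFromProfile, and GetEnrollmentTokenForPolicyID are the calls this PR already uses:

package agentdeployer

import (
	"context"
	"fmt"

	"github.com/elastic/elastic-package/internal/profile"
	"github.com/elastic/elastic-package/internal/stack"
)

// enrollmentTokenForPolicy returns an enrollment token for the given policy
// when the stack is configured with an API key, and "" otherwise, so callers
// can keep falling back to username/password enrollment.
func enrollmentTokenForPolicy(ctx context.Context, p *profile.Profile, policyID, policyName string) (string, error) {
	config, err := stack.LoadConfig(p)
	if err != nil {
		return "", fmt.Errorf("failed to load config from profile: %w", err)
	}
	if config.ElasticsearchAPIKey == "" {
		return "", nil // credential-based enrollment, no token needed
	}
	kibanaClient, err := stack.NewKibanaClientFromProfile(p)
	if err != nil {
		return "", fmt.Errorf("failed to create kibana client: %w", err)
	}
	token, err := kibanaClient.GetEnrollmentTokenForPolicyID(ctx, policyID)
	if err != nil {
		return "", fmt.Errorf("failed to get enrollment token for policy %q: %w", policyName, err)
	}
	return token, nil
}
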
3 changes: 0 additions & 3 deletions internal/agentdeployer/info.go
@@ -107,7 +107,4 @@ type AgentInfo struct {

AgentSettings
}

// CustomProperties store additional data used to boot up the service, e.g. AWS credentials.
CustomProperties map[string]interface{}
}
47 changes: 39 additions & 8 deletions internal/agentdeployer/kubernetes.go
@@ -57,7 +57,7 @@ type kubernetesDeployedAgent struct {
}

func (s kubernetesDeployedAgent) TearDown(ctx context.Context) error {
elasticAgentManagedYaml, err := getElasticAgentYAML(s.profile, s.stackVersion, s.agentInfo.Policy.Name, s.agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(ctx, s.profile, s.agentInfo, s.stackVersion, s.agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
@@ -123,7 +123,7 @@ func (ksd *KubernetesAgentDeployer) SetUp(ctx context.Context, agentInfo AgentIn
if ksd.runTearDown || ksd.runTestsOnly {
logger.Debug("Skip install Elastic Agent in cluster")
} else {
err = installElasticAgentInCluster(ctx, ksd.profile, ksd.stackVersion, agentInfo.Policy.Name, agentName)
err = installElasticAgentInCluster(ctx, ksd.profile, agentInfo, ksd.stackVersion, agentName)
if err != nil {
return nil, fmt.Errorf("can't install Elastic-Agent in the Kubernetes cluster: %w", err)
}
@@ -155,10 +155,10 @@ func (ksd *KubernetesAgentDeployer) agentName() string {

var _ AgentDeployer = new(KubernetesAgentDeployer)

func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, stackVersion, policyName, agentName string) error {
func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) error {
logger.Debug("install Elastic Agent in the Kubernetes cluster")

elasticAgentManagedYaml, err := getElasticAgentYAML(profile, stackVersion, policyName, agentName)
elasticAgentManagedYaml, err := getElasticAgentYAML(ctx, profile, agentInfo, stackVersion, agentName)
if err != nil {
return fmt.Errorf("can't retrieve Kubernetes file for Elastic Agent: %w", err)
}
@@ -176,8 +176,36 @@ func installElasticAgentInCluster(ctx context.Context, profile *profile.Profile,
//go:embed _static/elastic-agent-managed.yaml.tmpl
var elasticAgentManagedYamlTmpl string

func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, agentName string) ([]byte, error) {
func getElasticAgentYAML(ctx context.Context, profile *profile.Profile, agentInfo AgentInfo, stackVersion, agentName string) ([]byte, error) {
logger.Debugf("Prepare YAML definition for Elastic Agent running in stack v%s", stackVersion)
config, err := stack.LoadConfig(profile)
if err != nil {
return nil, fmt.Errorf("failed to load config from profile: %w", err)
}
fleetURL := "https://fleet-server:8220"
kibanaURL := "https://kibana:5601"
if config.Provider != stack.ProviderCompose {
kibanaURL = config.KibanaHost
}
if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
fleetURL = url
}
if version, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
stackVersion = version
}

enrollmentToken := ""
if config.ElasticsearchAPIKey != "" {
// TODO: Review if this is the correct place to get the enrollment token.
kibanaClient, err := stack.NewKibanaClientFromProfile(profile)
if err != nil {
return nil, fmt.Errorf("failed to create kibana client: %w", err)
}
enrollmentToken, err = kibanaClient.GetEnrollmentTokenForPolicyID(ctx, agentInfo.Policy.ID)
if err != nil {
return nil, fmt.Errorf("failed to get enrollment token for policy %q: %w", agentInfo.Policy.Name, err)
}
}

appConfig, err := install.Configuration(install.OptionWithStackVersion(stackVersion))
if err != nil {
@@ -193,11 +221,14 @@ func getElasticAgentYAML(profile *profile.Profile, stackVersion, policyName, age

var elasticAgentYaml bytes.Buffer
err = tmpl.Execute(&elasticAgentYaml, map[string]string{
"fleetURL": "https://fleet-server:8220",
"kibanaURL": "https://kibana:5601",
"fleetURL": fleetURL,
"kibanaURL": kibanaURL,
"username": config.ElasticsearchUsername,
"password": config.ElasticsearchPassword,
"enrollmentToken": enrollmentToken,
"caCertPem": caCert,
"elasticAgentImage": appConfig.StackImageRefs().ElasticAgent,
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, policyName),
"elasticAgentTokenPolicyName": getTokenPolicyName(stackVersion, agentInfo.Policy.Name),
"agentName": agentName,
})
if err != nil {
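
installDockerCompose and getElasticAgentYAML now resolve the Fleet URL, Kibana URL, and stack version with the same fallback chain, which is the duplication behind the review comment above about moving defaults into config. A sketch of that follow-up; the function is hypothetical and the stack.Config type name is assumed from how LoadConfig is used:

package agentdeployer

import "github.com/elastic/elastic-package/internal/stack"

// stackEndpoints mirrors the resolution duplicated in both deployers:
// compose stacks use the in-network service addresses, other providers use
// the configured Kibana host, and serverless parameters override the rest.
func stackEndpoints(config stack.Config, stackVersion string) (fleetURL, kibanaURL, version string) {
	fleetURL = "https://fleet-server:8220"
	kibanaURL = "https://kibana:5601"
	version = stackVersion
	if config.Provider != stack.ProviderCompose {
		kibanaURL = config.KibanaHost
	}
	if url, ok := config.Parameters[stack.ParamServerlessFleetURL]; ok {
		fleetURL = url
	}
	if v, ok := config.Parameters[stack.ParamServerlessLocalStackVersion]; ok {
		version = v
	}
	return fleetURL, kibanaURL, version
}
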
2 changes: 2 additions & 0 deletions internal/kubectl/kubectl_apply.go
@@ -135,6 +135,8 @@ func waitForReadyResources(resources []resource) error {
// be unavailable (DaemonSet.spec.updateStrategy.rollingUpdate.maxUnavailable defaults to 1).
// daemonSetReady will return true regardless of the pod not being ready yet.
// Can be solved with multi-node clusters.
// TODO: Support context cancelation in this wait. We rely on a helm waiter
// that doesn't support it.
err := kubeClient.Wait(resList, readinessTimeout)
if err != nil {
return fmt.Errorf("waiter failed: %w", err)
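
For the TODO above, a common workaround is to race the blocking wait against the context from a goroutine. The underlying helm waiter keeps running until its own timeout, so this only stops waiting for it rather than canceling the work; a sketch, not code from this PR:

package kubectl

import "context"

// waitWithContext runs a blocking wait function and returns early if the
// context is canceled first. The wait function itself is not interrupted.
func waitWithContext(ctx context.Context, wait func() error) error {
	done := make(chan error, 1)
	go func() { done <- wait() }()
	select {
	case err := <-done:
		return err
	case <-ctx.Done():
		return ctx.Err()
	}
}

It would be called as waitWithContext(ctx, func() error { return kubeClient.Wait(resList, readinessTimeout) }).
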
6 changes: 3 additions & 3 deletions internal/serverless/project.go
@@ -31,8 +31,8 @@ type Project struct {
Region string `json:"region_id"`

Credentials struct {
Username string `json:"username"`
Password string `json:"password"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
} `json:"credentials"`

Endpoints struct {
@@ -150,7 +150,7 @@ func (p *Project) getFleetHealth(ctx context.Context) error {

if status.Status != "HEALTHY" {
return fmt.Errorf("fleet status %s", status.Status)

}

return nil
}
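
The omitempty tags matter because projects accessed with an API key may carry no username or password; without omitempty, re-serializing a Project would emit empty credential fields wherever the descriptor is persisted or logged. A standalone illustration of the standard encoding/json behavior (not repository code):

package main

import (
	"encoding/json"
	"fmt"
)

type credentials struct {
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
}

func main() {
	withCreds, _ := json.Marshal(credentials{Username: "elastic", Password: "changeme"})
	noCreds, _ := json.Marshal(credentials{})
	fmt.Println(string(withCreds)) // {"username":"elastic","password":"changeme"}
	fmt.Println(string(noCreds))   // {}
}
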
9 changes: 7 additions & 2 deletions internal/stack/_static/elastic-agent.env.tmpl
@@ -3,8 +3,13 @@ FLEET_ENROLL=1
FLEET_URL={{ fact "fleet_url" }}
KIBANA_FLEET_HOST={{ fact "kibana_host" }}
KIBANA_HOST={{ fact "kibana_host" }}
{{- $enrollment_token := fact "enrollment_token" }}
{{- if eq $enrollment_token "" }}
ELASTICSEARCH_USERNAME={{ fact "username" }}
ELASTICSEARCH_PASSWORD={{ fact "password" }}
{{ if not (semverLessThan $version "8.0.0") }}
{{- if not (semverLessThan $version "8.0.0") }}
FLEET_TOKEN_POLICY_NAME=Elastic-Agent (elastic-package)
{{ end }}
{{- end }}
{{- else }}
FLEET_ENROLLMENT_TOKEN={{ $enrollment_token }}
{{- end }}
2 changes: 1 addition & 1 deletion internal/stack/_static/fleet-server-healthcheck.sh
@@ -6,7 +6,7 @@ NUMBER_SUCCESSES="$1"
WAITING_TIME="$2"

healthcheck() {
curl -s --cacert /etc/ssl/elastic-agent/ca-cert.pem -f https://localhost:8220/api/status | grep -i healthy 2>&1 >/dev/null
curl -s --cacert /etc/ssl/certs/elastic-package.pem -f https://localhost:8220/api/status | grep -i healthy 2>&1 >/dev/null
}

# Fleet Server can restart after announcing to be healthy, agents connecting during this restart will
116 changes: 116 additions & 0 deletions internal/stack/_static/local-services-docker-compose.yml.tmpl
@@ -0,0 +1,116 @@
services:
{{- $fleet_server_managed := fact "fleet_server_managed" }}
{{- if eq $fleet_server_managed "true" }}
{{- $fleet_healthcheck_success_checks := 3 -}}
{{- $fleet_healthcheck_waiting_time := 1 -}}
{{- $version := fact "agent_version" -}}
{{- if semverLessThan $version "8.0.0" -}}
{{- $fleet_healthcheck_success_checks = 10 -}}
{{- $fleet_healthcheck_waiting_time = 2 -}}
{{- end }}
fleet-server:
image: "{{ fact "agent_image" }}"
healthcheck:
test: "bash /healthcheck.sh {{ $fleet_healthcheck_success_checks }} {{ $fleet_healthcheck_waiting_time }}"
start_period: 60s
interval: 5s
hostname: docker-fleet-server
environment:
- "ELASTICSEARCH_HOST={{ fact "elasticsearch_host" }}"
- "FLEET_SERVER_CERT=/etc/ssl/fleet-server/cert.pem"
- "FLEET_SERVER_CERT_KEY=/etc/ssl/fleet-server/key.pem"
- "FLEET_SERVER_ELASTICSEARCH_HOST={{ fact "elasticsearch_host" }}"
- "FLEET_SERVER_ENABLE=1"
- "FLEET_SERVER_HOST=0.0.0.0"
- "FLEET_SERVER_SERVICE_TOKEN={{ fact "fleet_service_token" }}"
- "FLEET_SERVER_POLICY={{ fact "fleet_server_policy" }}"
- "FLEET_URL={{ fact "fleet_url" }}"
- "KIBANA_FLEET_HOST={{ fact "kibana_host" }}"
- "KIBANA_FLEET_SERVICE_TOKEN={{ fact "fleet_service_token" }}"
- "KIBANA_FLEET_SERVER_POLICY={{ fact "fleet_server_policy" }}"
- "KIBANA_FLEET_SETUP=1"
- "KIBANA_HOST={{ fact "kibana_host" }}"
volumes:
- "../certs/ca-cert.pem:/etc/ssl/certs/elastic-package.pem:ro"
- "../certs/fleet-server:/etc/ssl/fleet-server:ro"
- "./fleet-server-healthcheck.sh:/healthcheck.sh:ro"
ports:
- "127.0.0.1:8220:8220"
extra_hosts:
- "host.docker.internal:host-gateway"

fleet-server_is_ready:
image: tianon/true:multiarch
depends_on:
fleet-server:
condition: service_healthy
{{- end }}

elastic-agent:
image: "{{ fact "agent_image" }}"
{{- if eq $fleet_server_managed "true" }}
depends_on:
fleet-server:
condition: service_healthy
{{- end }}
healthcheck:
test: "elastic-agent status"
timeout: 2s
start_period: 360s
retries: 180
interval: 5s
hostname: docker-fleet-agent
env_file: "./elastic-agent.env"
cap_drop:
- ALL
volumes:
- type: bind
source: ../../../tmp/service_logs/
target: /tmp/service_logs/
# Mount service_logs under /run too as a testing workaround for the journald input (see elastic-package#1235).
- type: bind
source: ../../../tmp/service_logs/
target: /run/service_logs/
- "../certs/ca-cert.pem:/etc/ssl/certs/elastic-package.pem"
extra_hosts:
- "host.docker.internal:host-gateway"

elastic-agent_is_ready:
image: tianon/true:multiarch
depends_on:
elastic-agent:
condition: service_healthy

{{ $logstash_enabled := fact "logstash_enabled" }}
{{ if eq $logstash_enabled "true" }}
logstash:
build:
dockerfile: "./Dockerfile.logstash"
args:
IMAGE: "{{ fact "logstash_image" }}"
healthcheck:
test: bin/logstash -t
start_period: 120s
interval: 60s
timeout: 60s
retries: 5
volumes:
- "../certs/logstash:/usr/share/logstash/config/certs"
ports:
- "127.0.0.1:5044:5044"
- "127.0.0.1:9600:9600"
environment:
- XPACK_MONITORING_ENABLED=false
- ELASTIC_API_KEY={{ fact "api_key" }}
- ELASTIC_USER={{ fact "username" }}
- ELASTIC_PASSWORD={{ fact "password" }}
- ELASTIC_HOSTS={{ fact "elasticsearch_host" }}
extra_hosts:
- "host.docker.internal:host-gateway"

logstash_is_ready:
image: tianon/true:multiarch
depends_on:
logstash:
condition: service_healthy
{{ end }}
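
The fleet-server healthcheck above is version-gated: stacks older than 8.0.0 get 10 success checks and a waiting time of 2 instead of 3 and 1. The same gate in plain Go, using golang.org/x/mod/semver as an assumed stand-in for the template's semverLessThan helper:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// healthcheckParams returns the success-check count and waiting time the
// compose template selects for a given agent version.
func healthcheckParams(agentVersion string) (successChecks, waitingTime int) {
	successChecks, waitingTime = 3, 1
	if semver.Compare("v"+agentVersion, "v8.0.0") < 0 { // x/mod/semver needs the "v" prefix
		successChecks, waitingTime = 10, 2
	}
	return successChecks, waitingTime
}

func main() {
	fmt.Println(healthcheckParams("7.17.0")) // 10 2
	fmt.Println(healthcheckParams("8.12.0")) // 3 1
}
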