1,198 changes: 616 additions & 582 deletions Cargo.lock

Large diffs are not rendered by default.

12 changes: 6 additions & 6 deletions Cargo.toml
@@ -11,20 +11,20 @@ repository = "https://github.com/stackabletech/hdfs-operator"
 
 [workspace.dependencies]
 anyhow = "1.0"
-built = { version = "0.6", features = ["chrono", "git2"] }
+built = { version = "0.7", features = ["chrono", "git2"] }
 clap = "4.3"
 futures = { version = "0.3", features = ["compat"] }
 indoc = "2.0"
-rstest = "0.18"
+rstest = "0.19"
 semver = "1.0"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 serde_yaml = "0.9"
-snafu = "0.7"
-stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "0.64.0" }
+snafu = "0.8"
+stackable-operator = { git = "https://github.com/stackabletech/operator-rs.git", tag = "stackable-operator-0.67.0" }
 product-config = { git = "https://github.com/stackabletech/product-config.git", tag = "0.6.0" }
-strum = { version = "0.25", features = ["derive"] }
-tokio = { version = "1.29", features = ["full"] }
+strum = { version = "0.26", features = ["derive"] }
+tokio = { version = "1.37", features = ["full"] }
 tracing = "0.1"
 tracing-futures = { version = "0.2", features = ["futures-03"] }
1,574 changes: 1,105 additions & 469 deletions deploy/helm/hdfs-operator/crds/crds.yaml

Large diffs are not rendered by default.

2 changes: 2 additions & 0 deletions rust/crd/src/affinity.rs
@@ -93,6 +93,7 @@ spec:
                     namespace_selector: None,
                     namespaces: None,
                     topology_key: "kubernetes.io/hostname".to_string(),
+                    ..PodAffinityTerm::default()
                 },
                 weight: 20
             }
@@ -120,6 +121,7 @@ spec:
                     namespace_selector: None,
                     namespaces: None,
                     topology_key: "kubernetes.io/hostname".to_string(),
+                    ..PodAffinityTerm::default()
                 },
                 weight: 70
             }
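
Note: the added `..PodAffinityTerm::default()` is Rust's struct-update syntax; it keeps these test fixtures compiling as k8s-openapi grows the struct (Kubernetes 1.29 added new optional fields such as `match_label_keys`). A minimal sketch of the pattern, assuming only the `PodAffinityTerm` re-export used elsewhere in this diff:

```rust
use stackable_operator::k8s_openapi::api::core::v1::PodAffinityTerm;

// Struct-update syntax: every field not listed explicitly falls back to its
// Default value (None for the optional fields), so fields newly added to
// PodAffinityTerm no longer break these literals.
fn hostname_affinity_term() -> PodAffinityTerm {
    PodAffinityTerm {
        topology_key: "kubernetes.io/hostname".to_string(),
        ..PodAffinityTerm::default()
    }
}
```
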
4 changes: 2 additions & 2 deletions rust/crd/src/lib.rs
@@ -33,7 +33,7 @@ use stackable_operator::{
     },
     kube::{runtime::reflector::ObjectRef, CustomResource, ResourceExt},
     kvp::{LabelError, Labels},
-    product_config_utils::{ConfigError, Configuration},
+    product_config_utils::{Configuration, Error as ConfigError},
     product_logging,
     product_logging::spec::{ContainerLogConfig, Logging},
     role_utils::{GenericRoleConfig, Role, RoleGroup, RoleGroupRef},
@@ -79,7 +79,7 @@ pub enum Error {
 
     #[snafu(display("unable to get {listener} (for {pod})"))]
     GetPodListener {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::client::Error,
         listener: ObjectRef<Listener>,
         pod: ObjectRef<Pod>,
     },
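
These one-line `source` changes reflect the operator-rs 0.67 move from a single crate-wide `stackable_operator::error::Error` to per-module error types such as `client::Error`. A minimal sketch of the resulting snafu pattern, using hypothetical stand-in types rather than the real operator-rs definitions:

```rust
use snafu::{ResultExt, Snafu};

// Stand-in for a module-specific error like stackable_operator::client::Error.
#[derive(Debug, Snafu)]
pub enum ClientError {
    #[snafu(display("request to the apiserver failed"))]
    Request,
}

// The operator's own error enum wraps the module error as `source`.
#[derive(Debug, Snafu)]
pub enum Error {
    #[snafu(display("unable to get listener for pod"))]
    GetPodListener { source: ClientError },
}

fn get_listener() -> Result<(), ClientError> {
    Err(ClientError::Request)
}

fn reconcile() -> Result<(), Error> {
    // context() converts the module error into our variant while keeping
    // the full source chain for error reporting.
    get_listener().context(GetPodListenerSnafu)?;
    Ok(())
}

fn main() {
    if let Err(err) = reconcile() {
        eprintln!("{err:?}");
    }
}
```
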
11 changes: 6 additions & 5 deletions rust/crd/src/storage.rs
@@ -182,10 +182,11 @@ impl HdfsStorageType {
 mod test {
     use std::collections::BTreeMap;
 
+    use stackable_operator::k8s_openapi::api::core::v1::VolumeResourceRequirements;
     use stackable_operator::{
         commons::resources::PvcConfig,
         k8s_openapi::{
-            api::core::v1::{PersistentVolumeClaimSpec, ResourceRequirements},
+            api::core::v1::PersistentVolumeClaimSpec,
             apimachinery::pkg::{api::resource::Quantity, apis::meta::v1::LabelSelector},
         },
     };
@@ -217,12 +218,12 @@ mod test {
         assert_eq!(
             pvcs[0].spec,
             Some(PersistentVolumeClaimSpec {
-                resources: Some(ResourceRequirements {
+                resources: Some(VolumeResourceRequirements {
                     requests: Some(BTreeMap::from([(
                         "storage".to_string(),
                         Quantity("5Gi".to_string())
                     )])),
-                    ..ResourceRequirements::default()
+                    ..VolumeResourceRequirements::default()
                 }),
                 access_modes: Some(vec!["ReadWriteOnce".to_string()]),
                 storage_class_name: None,
@@ -276,12 +277,12 @@ mod test {
         assert_eq!(
             pvcs[0].spec,
             Some(PersistentVolumeClaimSpec {
-                resources: Some(ResourceRequirements {
+                resources: Some(VolumeResourceRequirements {
                     requests: Some(BTreeMap::from([(
                         "storage".to_string(),
                         Quantity("12Ti".to_string())
                     )])),
-                    ..ResourceRequirements::default()
+                    ..VolumeResourceRequirements::default()
                 }),
                 access_modes: Some(vec!["ReadWriteOnce".to_string()]),
                 storage_class_name: Some("hdd-storage-class".to_string()),
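
`VolumeResourceRequirements` comes with k8s-openapi's Kubernetes 1.29 types: PVCs now get their own requirements struct instead of sharing the container-oriented `ResourceRequirements` (which additionally carries a `claims` field). A minimal sketch of building a PVC spec against the new type, assuming the same re-export path as this diff:

```rust
use std::collections::BTreeMap;

use stackable_operator::k8s_openapi::{
    api::core::v1::{PersistentVolumeClaimSpec, VolumeResourceRequirements},
    apimachinery::pkg::api::resource::Quantity,
};

// PVCs only support limits/requests, which is exactly what
// VolumeResourceRequirements models; the container-only `claims` field of
// ResourceRequirements is gone.
fn pvc_spec(size: &str) -> PersistentVolumeClaimSpec {
    PersistentVolumeClaimSpec {
        access_modes: Some(vec!["ReadWriteOnce".to_string()]),
        resources: Some(VolumeResourceRequirements {
            requests: Some(BTreeMap::from([(
                "storage".to_string(),
                Quantity(size.to_string()),
            )])),
            ..VolumeResourceRequirements::default()
        }),
        ..PersistentVolumeClaimSpec::default()
    }
}
```
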
12 changes: 1 addition & 11 deletions rust/operator-binary/build.rs
@@ -1,13 +1,3 @@
-use std::path::PathBuf;
-
 fn main() {
-    let out_dir = PathBuf::from(std::env::var("OUT_DIR").expect("OUT_DIR is required"));
-    built::write_built_file_with_opts(
-        // built's env module depends on a whole bunch of variables that crate2nix doesn't provide
-        // so we grab the specific env variables that we care about out ourselves instead.
-        built::Options::default().set_env(false),
-        "Cargo.toml".as_ref(),
-        &out_dir.join("built.rs"),
-    )
-    .unwrap();
+    built::write_built_file().unwrap();
 }
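
With built 0.7 the per-call `Options` configuration is gone (selection moved into Cargo features), so the plain `write_built_file()` call is the whole build script. A sketch of how the generated constants are then consumed; the `built_info` module name follows this PR's `crate::built_info::PKG_VERSION` references, the rest is assumed boilerplate from built's documentation:

```rust
// In the crate root (e.g. main.rs): include the file that build.rs
// generated into $OUT_DIR.
pub mod built_info {
    include!(concat!(env!("OUT_DIR"), "/built.rs"));
}

// PKG_VERSION replaces the CARGO_PKG_VERSION constant that the now-removed
// env-scraping mode used to emit; see the hdfs_controller.rs change below.
fn operator_version() -> &'static str {
    built_info::PKG_VERSION
}
```
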
49 changes: 28 additions & 21 deletions rust/operator-binary/src/container.rs
@@ -9,30 +9,24 @@
 //! - Set resources
 //! - Add tcp probes and container ports (to the main containers)
 //!
+use crate::DATANODE_ROOT_DATA_DIR_PREFIX;
+use crate::JVM_SECURITY_PROPERTIES_FILE;
+use crate::LOG4J_PROPERTIES;
+use stackable_operator::utils::COMMON_BASH_TRAP_FUNCTIONS;
 use std::{collections::BTreeMap, str::FromStr};
 
 use indoc::formatdoc;
 use snafu::{OptionExt, ResultExt, Snafu};
-use stackable_hdfs_crd::{
-    constants::{
-        DATANODE_ROOT_DATA_DIR_PREFIX, DEFAULT_DATA_NODE_METRICS_PORT,
-        DEFAULT_JOURNAL_NODE_METRICS_PORT, DEFAULT_NAME_NODE_METRICS_PORT,
-        JVM_SECURITY_PROPERTIES_FILE, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
-        LIVENESS_PROBE_FAILURE_THRESHOLD, LIVENESS_PROBE_INITIAL_DELAY_SECONDS,
-        LIVENESS_PROBE_PERIOD_SECONDS, LOG4J_PROPERTIES, NAMENODE_ROOT_DATA_DIR,
-        READINESS_PROBE_FAILURE_THRESHOLD, READINESS_PROBE_INITIAL_DELAY_SECONDS,
-        READINESS_PROBE_PERIOD_SECONDS, SERVICE_PORT_NAME_HTTP, SERVICE_PORT_NAME_HTTPS,
-        SERVICE_PORT_NAME_IPC, SERVICE_PORT_NAME_RPC, STACKABLE_ROOT_DATA_DIR,
-    },
-    storage::DataNodeStorageConfig,
-    AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, NameNodeContainer,
-};
 use stackable_operator::{
     builder::{
-        resources::ResourceRequirementsBuilder, ContainerBuilder,
-        ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError,
-        ListenerReference, PodBuilder, SecretFormat, SecretOperatorVolumeSourceBuilder,
-        SecretOperatorVolumeSourceBuilderError, VolumeBuilder, VolumeMountBuilder,
+        pod::container::ContainerBuilder,
+        pod::resources::ResourceRequirementsBuilder,
+        pod::volume::{
+            ListenerOperatorVolumeSourceBuilder, ListenerOperatorVolumeSourceBuilderError,
+            ListenerReference, SecretFormat, SecretOperatorVolumeSourceBuilder,
+            SecretOperatorVolumeSourceBuilderError, VolumeBuilder, VolumeMountBuilder,
+        },
+        pod::PodBuilder,
     },
     commons::product_image_selection::ResolvedProductImage,
     k8s_openapi::{
@@ -56,10 +50,23 @@ use stackable_operator::{
             CustomContainerLogConfig,
         },
     },
-    utils::COMMON_BASH_TRAP_FUNCTIONS,
 };
 use strum::{Display, EnumDiscriminants, IntoStaticStr};
 
+use stackable_hdfs_crd::{
+    constants::{
+        DEFAULT_DATA_NODE_METRICS_PORT, DEFAULT_JOURNAL_NODE_METRICS_PORT,
+        DEFAULT_NAME_NODE_METRICS_PORT, LISTENER_VOLUME_DIR, LISTENER_VOLUME_NAME,
+        LIVENESS_PROBE_FAILURE_THRESHOLD, LIVENESS_PROBE_INITIAL_DELAY_SECONDS,
+        LIVENESS_PROBE_PERIOD_SECONDS, NAMENODE_ROOT_DATA_DIR, READINESS_PROBE_FAILURE_THRESHOLD,
+        READINESS_PROBE_INITIAL_DELAY_SECONDS, READINESS_PROBE_PERIOD_SECONDS,
+        SERVICE_PORT_NAME_HTTP, SERVICE_PORT_NAME_HTTPS, SERVICE_PORT_NAME_IPC,
+        SERVICE_PORT_NAME_RPC, STACKABLE_ROOT_DATA_DIR,
+    },
+    storage::DataNodeStorageConfig,
+    AnyNodeConfig, DataNodeContainer, HdfsCluster, HdfsPodRef, HdfsRole, NameNodeContainer,
+};
+
 use crate::product_logging::{
     FORMAT_NAMENODES_LOG4J_CONFIG_FILE, FORMAT_ZOOKEEPER_LOG4J_CONFIG_FILE, HDFS_LOG4J_CONFIG_FILE,
     MAX_FORMAT_NAMENODE_LOG_FILE_SIZE, MAX_FORMAT_ZOOKEEPER_LOG_FILE_SIZE, MAX_HDFS_LOG_FILE_SIZE,
@@ -82,7 +89,7 @@ pub enum Error {
 
     #[snafu(display("invalid java heap config for {role:?}"))]
     InvalidJavaHeapConfig {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::memory::Error,
         role: String,
     },
 
@@ -91,7 +98,7 @@ pub enum Error {
 
     #[snafu(display("invalid container name {name:?}"))]
     InvalidContainerName {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::builder::pod::container::Error,
         name: String,
     },
 
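
Most of the churn above is operator-rs splitting its flat `builder` module into `builder::pod::{container, resources, volume}` submodules, each with a local error type; `PodBuilder` and `ContainerBuilder` keep their names but move. A rough sketch of a call site under the new paths (the fallible `ContainerBuilder::new` signature is inferred from the `InvalidContainerName` variant above, not checked against the 0.67 docs):

```rust
use stackable_operator::builder::pod::container::{
    ContainerBuilder, Error as ContainerBuilderError,
};

// The constructor validates the container name and fails with the
// module-local error type that InvalidContainerName wraps as `source`.
fn datanode_container() -> Result<ContainerBuilder, ContainerBuilderError> {
    ContainerBuilder::new("datanode")
}
```
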
11 changes: 7 additions & 4 deletions rust/operator-binary/src/discovery.rs
@@ -4,7 +4,8 @@ use stackable_hdfs_crd::{
     HdfsCluster, HdfsPodRef, HdfsRole,
 };
 use stackable_operator::{
-    builder::{ConfigMapBuilder, ObjectMetaBuilder, ObjectMetaBuilderError},
+    builder::configmap::ConfigMapBuilder,
+    builder::meta::ObjectMetaBuilder,
     commons::product_image_selection::ResolvedProductImage,
     k8s_openapi::api::core::v1::ConfigMap,
     kube::{runtime::reflector::ObjectRef, ResourceExt},
@@ -23,17 +24,19 @@ type Result<T, E = Error> = std::result::Result<T, E>;
 pub enum Error {
     #[snafu(display("object {hdfs} is missing metadata to build owner reference"))]
     ObjectMissingMetadataForOwnerRef {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::builder::meta::Error,
         hdfs: ObjectRef<HdfsCluster>,
     },
 
     #[snafu(display("failed to build ConfigMap"))]
     BuildConfigMap {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::builder::configmap::Error,
     },
 
     #[snafu(display("failed to build object meta data"))]
-    ObjectMeta { source: ObjectMetaBuilderError },
+    ObjectMeta {
+        source: stackable_operator::builder::meta::Error,
+    },
 
     #[snafu(display("failed to build security discovery config map"))]
     BuildSecurityDiscoveryConfigMap { source: kerberos::Error },
46 changes: 25 additions & 21 deletions rust/operator-binary/src/hdfs_controller.rs
@@ -10,13 +10,11 @@ use product_config::{
     ProductConfigManager,
 };
 use snafu::{OptionExt, ResultExt, Snafu};
-use stackable_hdfs_crd::{
-    constants::*, AnyNodeConfig, HdfsCluster, HdfsClusterStatus, HdfsPodRef, HdfsRole,
-};
 use stackable_operator::{
     builder::{
-        ConfigMapBuilder, ObjectMetaBuilder, ObjectMetaBuilderError, PodBuilder,
-        PodSecurityContextBuilder,
+        configmap::ConfigMapBuilder,
+        meta::ObjectMetaBuilder,
+        pod::{security::PodSecurityContextBuilder, PodBuilder},
     },
     client::Client,
     cluster_resources::{ClusterResourceApplyStrategy, ClusterResources},
@@ -49,6 +47,10 @@
 };
 use strum::{EnumDiscriminants, IntoStaticStr};
 
+use stackable_hdfs_crd::{
+    constants::*, AnyNodeConfig, HdfsCluster, HdfsClusterStatus, HdfsPodRef, HdfsRole,
+};
+
 use crate::{
     build_recommended_labels,
     config::{CoreSiteConfigBuilder, HdfsSiteConfigBuilder},
@@ -73,41 +75,41 @@ const DOCKER_IMAGE_BASE_NAME: &str = "hadoop";
 pub enum Error {
     #[snafu(display("invalid role configuration"))]
     InvalidRoleConfig {
-        source: stackable_operator::product_config_utils::ConfigError,
+        source: stackable_operator::product_config_utils::Error,
     },
 
     #[snafu(display("invalid product configuration"))]
     InvalidProductConfig {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::product_config_utils::Error,
     },
 
     #[snafu(display("cannot create rolegroup service {name:?}"))]
     ApplyRoleGroupService {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
         name: String,
     },
 
     #[snafu(display("cannot create role group config map {name:?}"))]
     ApplyRoleGroupConfigMap {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
         name: String,
     },
 
     #[snafu(display("cannot create role group stateful set {name:?}"))]
     ApplyRoleGroupStatefulSet {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
         name: String,
     },
 
     #[snafu(display("cannot create discovery config map {name:?}"))]
     ApplyDiscoveryConfigMap {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::client::Error,
         name: String,
     },
 
     #[snafu(display("no metadata for {obj_ref:?}"))]
     ObjectMissingMetadataForOwnerRef {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::builder::meta::Error,
         obj_ref: ObjectRef<HdfsCluster>,
     },
 
@@ -122,7 +124,7 @@ pub enum Error {
 
     #[snafu(display("cannot build config map for role {role:?} and role group {role_group:?}"))]
     BuildRoleGroupConfigMap {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::builder::configmap::Error,
         role: String,
         role_group: String,
     },
@@ -135,22 +137,22 @@ pub enum Error {
 
     #[snafu(display("failed to patch service account"))]
     ApplyServiceAccount {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
     },
 
     #[snafu(display("failed to patch role binding"))]
     ApplyRoleBinding {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
     },
 
     #[snafu(display("failed to create cluster resources"))]
     CreateClusterResources {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
     },
 
     #[snafu(display("failed to delete orphaned resources"))]
     DeleteOrphanedResources {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::cluster_resources::Error,
     },
 
     #[snafu(display("failed to create pod references"))]
@@ -186,12 +188,12 @@ pub enum Error {
 
     #[snafu(display("failed to update status"))]
     ApplyStatus {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::client::Error,
     },
 
     #[snafu(display("failed to build RBAC resources"))]
     BuildRbacResources {
-        source: stackable_operator::error::Error,
+        source: stackable_operator::commons::rbac::Error,
     },
 
     #[snafu(display(
@@ -222,7 +224,9 @@ pub enum Error {
     BuildRoleGroupVolumeClaimTemplates { source: container::Error },
 
     #[snafu(display("failed to build object meta data"))]
-    ObjectMeta { source: ObjectMetaBuilderError },
+    ObjectMeta {
+        source: stackable_operator::builder::meta::Error,
+    },
 
     #[snafu(display("failed to build security config"))]
     BuildSecurityConfig { source: kerberos::Error },
@@ -251,7 +255,7 @@ pub async fn reconcile_hdfs(hdfs: Arc<HdfsCluster>, ctx: Arc<Ctx>) -> HdfsOperat
     let resolved_product_image = hdfs
         .spec
         .image
-        .resolve(DOCKER_IMAGE_BASE_NAME, crate::built_info::CARGO_PKG_VERSION);
+        .resolve(DOCKER_IMAGE_BASE_NAME, crate::built_info::PKG_VERSION);
 
     let vector_aggregator_address = resolve_vector_aggregator_address(&hdfs, client)
         .await