fix: Include hdfs principal names in discovery ConfigMap #424
@@ -17,7 +17,7 @@ use crate::product_logging::{
 };
 
 use indoc::formatdoc;
-use snafu::{OptionExt, ResultExt, Snafu};
+use snafu::{ResultExt, Snafu};
 use stackable_hdfs_crd::{
     constants::{
         DATANODE_ROOT_DATA_DIR_PREFIX, DEFAULT_DATA_NODE_METRICS_PORT,
@@ -151,6 +151,8 @@ impl ContainerConfig {
     pub fn add_containers_and_volumes(
         pb: &mut PodBuilder,
         hdfs: &HdfsCluster,
+        hdfs_name: &str,
+        hdfs_namespace: &str,
         role: &HdfsRole,
         resolved_product_image: &ResolvedProductImage,
         merged_config: &(dyn MergedConfig + Send + 'static),
@@ -164,6 +166,8 @@
         pb.add_volumes(main_container_config.volumes(merged_config, object_name));
         pb.add_container(main_container_config.main_container(
             hdfs,
+            hdfs_name,
+            hdfs_namespace,
             role,
             resolved_product_image,
             zk_config_map_name,
@@ -209,7 +213,7 @@
                     SecretOperatorVolumeSourceBuilder::new(
                         &authentication_config.kerberos.secret_class,
                     )
-                    .with_service_scope(hdfs.name_any())
+                    .with_service_scope(hdfs_name)
                     .with_kerberos_service_name(role.kerberos_service_name())
                     .with_kerberos_service_name("HTTP")
                     .build(),
@@ -226,6 +230,8 @@
         pb.add_volumes(zkfc_container_config.volumes(merged_config, object_name));
         pb.add_container(zkfc_container_config.main_container(
             hdfs,
+            hdfs_name,
+            hdfs_namespace,
             role,
             resolved_product_image,
             zk_config_map_name,
@@ -241,6 +247,8 @@
         );
         pb.add_init_container(format_namenodes_container_config.init_container(
             hdfs,
+            hdfs_name,
+            hdfs_namespace,
             role,
             resolved_product_image,
             zk_config_map_name,
@@ -257,6 +265,8 @@
         );
         pb.add_init_container(format_zookeeper_container_config.init_container(
             hdfs,
+            hdfs_name,
+            hdfs_namespace,
             role,
             resolved_product_image,
             zk_config_map_name,
@@ -274,6 +284,8 @@
         );
         pb.add_init_container(wait_for_namenodes_container_config.init_container(
             hdfs,
+            hdfs_name,
+            hdfs_namespace,
             role,
             resolved_product_image,
             zk_config_map_name,
@@ -317,9 +329,13 @@
     /// - Namenode ZooKeeper fail over controller (ZKFC)
     /// - Datanode main process
     /// - Journalnode main process
+
+    #[allow(clippy::too_many_arguments)]
     fn main_container(
         &self,
         hdfs: &HdfsCluster,
+        hdfs_name: &str,
+        hdfs_namespace: &str,

Review comment on the new parameters:

> Do we really want to add extra parameters for something I can get from the

Reply:

> Created #451 based on this review comment, please have a look there

         role: &HdfsRole,
         resolved_product_image: &ResolvedProductImage,
         zookeeper_config_map_name: &str,
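Regarding the review question above: both values can also be read from the `HdfsCluster` object itself, which is what the code did before this change via `name_any()` and `namespace()`; the namespace lookup is the part that can fail and is what previously forced `args` to return a `Result`. A minimal sketch of that alternative using kube-rs's `ResourceExt` (the helper name, signature, and `Option`-based error handling are illustrative, not taken from this PR):

```rust
use kube::ResourceExt; // requires the `kube` crate

/// Hypothetical helper: derive the Kubernetes service host from any namespaced
/// resource instead of threading `hdfs_name` / `hdfs_namespace` through every call.
/// Returns `None` when the resource has no namespace set, which is exactly the
/// failure case this PR hoists out of `args()`.
fn service_host(resource: &impl ResourceExt) -> Option<String> {
    let name = resource.name_any();
    let namespace = resource.namespace()?;
    Some(format!("{name}.{namespace}.svc.cluster.local"))
}
```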
@@ -335,7 +351,7 @@
 
         cb.image_from_product_image(resolved_product_image)
             .command(Self::command())
-            .args(self.args(hdfs, role, merged_config, &[])?)
+            .args(self.args(hdfs, hdfs_name, hdfs_namespace, role, merged_config, &[]))
             .add_env_vars(self.env(
                 hdfs,
                 zookeeper_config_map_name,
@@ -364,6 +380,8 @@
     fn init_container(
         &self,
         hdfs: &HdfsCluster,
+        hdfs_name: &str,
+        hdfs_namespace: &str,
         role: &HdfsRole,
         resolved_product_image: &ResolvedProductImage,
         zookeeper_config_map_name: &str,
@@ -376,7 +394,14 @@
 
         cb.image_from_product_image(resolved_product_image)
             .command(Self::command())
-            .args(self.args(hdfs, role, merged_config, namenode_podrefs)?)
+            .args(self.args(
+                hdfs,
+                hdfs_name,
+                hdfs_namespace,
+                role,
+                merged_config,
+                namenode_podrefs,
+            ))
             .add_env_vars(self.env(hdfs, zookeeper_config_map_name, env_overrides, None))
             .add_volume_mounts(self.volume_mounts(hdfs, merged_config));
 
@@ -427,10 +452,12 @@
     fn args(
         &self,
         hdfs: &HdfsCluster,
+        hdfs_name: &str,
+        hdfs_namespace: &str,
         role: &HdfsRole,
         merged_config: &(dyn MergedConfig + Send + 'static),
         namenode_podrefs: &[HdfsPodRef],
-    ) -> Result<Vec<String>, Error> {
+    ) -> Vec<String> {
         let mut args = String::new();
         args.push_str(&self.create_config_directory_cmd());
         args.push_str(&self.copy_config_xml_cmd());
@@ -489,7 +516,7 @@ wait_for_termination $!
                 // If there is no active namenode, the current pod is not formatted we format as
                 // active namenode. Otherwise as standby node.
                 if hdfs.has_kerberos_enabled() {
-                    args.push_str(&Self::get_kerberos_ticket(hdfs, role)?);
+                    args.push_str(&Self::get_kerberos_ticket(hdfs_name, hdfs_namespace, role));
                 }
                 args.push_str(&formatdoc!(
                     r###"
@@ -570,7 +597,7 @@ wait_for_termination $!
                 ));
             }
             if hdfs.has_kerberos_enabled() {
-                args.push_str(&Self::get_kerberos_ticket(hdfs, role)?);
+                args.push_str(&Self::get_kerberos_ticket(hdfs_name, hdfs_namespace, role));
             }
             args.push_str(&formatdoc!(
                 r###"
@@ -610,7 +637,7 @@ wait_for_termination $!
                 ));
             }
         }
-        Ok(vec![args])
+        vec![args]
     }
 
     // Command to export `KERBEROS_REALM` env var to default real from krb5.conf, e.g. `CLUSTER.LOCAL`
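The return-type change in the hunk above follows from the kerberos change below: once `get_kerberos_ticket` no longer looks up the namespace from the cluster object, nothing in `args` can fail any more, so the `Result` wrapper and the `?` at the call sites go away. A tiny standalone sketch of that hoisting pattern (the names, the principal string, and the error type are illustrative, not from the operator):

```rust
// Before: the helper resolved the namespace itself and could fail, so every
// caller had to propagate the error with `?`.
fn ticket_cmd_fallible(namespace: Option<&str>) -> Result<String, String> {
    let namespace = namespace.ok_or_else(|| "object has no namespace".to_string())?;
    // "nn/hdfs..." is a made-up principal, used only for this sketch.
    Ok(format!("kinit \"nn/hdfs.{namespace}.svc.cluster.local\""))
}

// After: the caller resolves the namespace once, up front, and the helper
// becomes infallible.
fn ticket_cmd(namespace: &str) -> String {
    format!("kinit \"nn/hdfs.{namespace}.svc.cluster.local\"")
}

fn main() {
    // The `?` disappears at the call site.
    let _ = ticket_cmd_fallible(Some("default")).unwrap();
    let _ = ticket_cmd("default");
}
```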
@@ -622,19 +649,19 @@ wait_for_termination $!
     /// Command to `kinit` a ticket using the principal created for the specified hdfs role
     /// Needs the KERBEROS_REALM env var, which will be written with `export_kerberos_real_env_var_command`
     /// Needs the POD_NAME env var to be present, which will be provided by the PodSpec
-    fn get_kerberos_ticket(hdfs: &HdfsCluster, role: &HdfsRole) -> Result<String, Error> {
+    fn get_kerberos_ticket(hdfs_name: &str, hdfs_namespace: &str, role: &HdfsRole) -> String {
+        // Watch out, this is a bash substitution from a bash env var,
+        // not the substitution hdfs is doing.
         let principal = format!(
-            "{service_name}/{hdfs_name}.{namespace}.svc.cluster.local@${{KERBEROS_REALM}}",
+            "{service_name}/{hdfs_name}.{hdfs_namespace}.svc.cluster.local@${{KERBEROS_REALM}}",
             service_name = role.kerberos_service_name(),
-            hdfs_name = hdfs.name_any(),
-            namespace = hdfs.namespace().context(ObjectHasNoNamespaceSnafu)?,
         );
-        Ok(formatdoc!(
+        formatdoc!(
             r###"
             echo "Getting ticket for {principal}" from /stackable/kerberos/keytab
             kinit "{principal}" -kt /stackable/kerberos/keytab
             "###,
-        ))
+        )
     }
 
     fn get_namenode_service_state_command() -> String {