diff --git a/cgroups/src/common.rs b/cgroups/src/common.rs
index d28c3e42..650d03d3 100644
--- a/cgroups/src/common.rs
+++ b/cgroups/src/common.rs
@@ -364,11 +364,7 @@ pub(crate) fn delete_with_retry, L: Into>>(
     let mut attempts = 0;
     let mut delay = Duration::from_millis(10);
     let path = path.as_ref();
-    let limit = if let Some(backoff) = limit_backoff.into() {
-        backoff
-    } else {
-        Duration::MAX
-    };
+    let limit = limit_backoff.into().unwrap_or(Duration::MAX);

     while attempts < retries {
         if fs::remove_dir(path).is_ok() {
diff --git a/cgroups/src/stats.rs b/cgroups/src/stats.rs
index a9102beb..b8974df5 100644
--- a/cgroups/src/stats.rs
+++ b/cgroups/src/stats.rs
@@ -284,20 +284,21 @@ pub fn supported_page_sizes() -> Result> {
 }

 fn extract_page_size(dir_name: &str) -> Result {
-    if let Some(name_stripped) = dir_name.strip_prefix("hugepages-") {
-        if let Some(size) = name_stripped.strip_suffix("kB") {
-            let size: u64 = parse_value(size)?;
+    if let Some(size) = dir_name
+        .strip_prefix("hugepages-")
+        .and_then(|name_stripped| name_stripped.strip_suffix("kB"))
+    {
+        let size: u64 = parse_value(size)?;

-            let size_moniker = if size >= (1 << 20) {
-                (size >> 20).to_string() + "GB"
-            } else if size >= (1 << 10) {
-                (size >> 10).to_string() + "MB"
-            } else {
-                size.to_string() + "KB"
-            };
+        let size_moniker = if size >= (1 << 20) {
+            (size >> 20).to_string() + "GB"
+        } else if size >= (1 << 10) {
+            (size >> 10).to_string() + "MB"
+        } else {
+            size.to_string() + "KB"
+        };

-            return Ok(size_moniker);
-        }
+        return Ok(size_moniker);
     }

     bail!("failed to determine page size from {}", dir_name);
diff --git a/cgroups/src/v1/blkio.rs b/cgroups/src/v1/blkio.rs
index 4ed01d6d..effaa4b9 100644
--- a/cgroups/src/v1/blkio.rs
+++ b/cgroups/src/v1/blkio.rs
@@ -85,11 +85,7 @@ impl Controller for Blkio {
     }

     fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource> {
-        if let Some(blkio) = &controller_opt.resources.block_io() {
-            return Some(blkio);
-        }
-
-        None
+        controller_opt.resources.block_io().as_ref()
     }
 }
diff --git a/cgroups/src/v1/cpuacct.rs b/cgroups/src/v1/cpuacct.rs
index 1ba4066f..99706219 100644
--- a/cgroups/src/v1/cpuacct.rs
+++ b/cgroups/src/v1/cpuacct.rs
@@ -206,17 +206,17 @@ mod tests {

         assert_eq!(
             stats.per_core_usage_user,
-            &[5838999815217, 4139072325517, 4175712075766, 4021385867300]
+            [5838999815217, 4139072325517, 4175712075766, 4021385867300]
         );

         assert_eq!(
             stats.per_core_usage_kernel,
-            &[295316023007, 325194619244, 323435639997, 304269989810]
+            [295316023007, 325194619244, 323435639997, 304269989810]
         );

         assert_eq!(
             stats.per_core_usage_total,
-            &[989683000640, 4409567860144, 4439880333849, 4273328034121]
+            [989683000640, 4409567860144, 4439880333849, 4273328034121]
         );
     }
 }
diff --git a/cgroups/src/v1/freezer.rs b/cgroups/src/v1/freezer.rs
index d266e4c3..58926192 100644
--- a/cgroups/src/v1/freezer.rs
+++ b/cgroups/src/v1/freezer.rs
@@ -33,11 +33,7 @@ impl Controller for Freezer {
     }

     fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource> {
-        if let Some(freezer_state) = &controller_opt.freezer_state {
-            return Some(freezer_state);
-        }
-
-        None
+        controller_opt.freezer_state.as_ref()
     }
 }
diff --git a/cgroups/src/v1/hugetlb.rs b/cgroups/src/v1/hugetlb.rs
index f611a52a..07833a78 100644
--- a/cgroups/src/v1/hugetlb.rs
+++ b/cgroups/src/v1/hugetlb.rs
@@ -29,7 +29,7 @@ impl Controller for HugeTlb {
     }

     fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource> {
-        if let Some(hugepage_limits) = controller_opt.resources.hugepage_limits().as_ref() {
+        if let Some(hugepage_limits) = controller_opt.resources.hugepage_limits() {
             if !hugepage_limits.is_empty() {
                 return controller_opt.resources.hugepage_limits().as_ref();
             }
diff --git a/cgroups/src/v1/memory.rs b/cgroups/src/v1/memory.rs
index 155003f1..18c5ebe9 100644
--- a/cgroups/src/v1/memory.rs
+++ b/cgroups/src/v1/memory.rs
@@ -51,7 +51,7 @@ impl Controller for Memory {
     fn apply(controller_opt: &ControllerOpt, cgroup_root: &Path) -> Result<()> {
         log::debug!("Apply Memory cgroup config");

-        if let Some(memory) = controller_opt.resources.memory().as_ref() {
+        if let Some(memory) = &controller_opt.resources.memory() {
             let reservation = memory.reservation().unwrap_or(0);

             Self::apply(memory, cgroup_root)?;
diff --git a/cgroups/src/v1/network_classifier.rs b/cgroups/src/v1/network_classifier.rs
index 5c016308..6c27d2ab 100644
--- a/cgroups/src/v1/network_classifier.rs
+++ b/cgroups/src/v1/network_classifier.rs
@@ -23,11 +23,7 @@ impl Controller for NetworkClassifier {
     }

     fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource> {
-        if let Some(network) = controller_opt.resources.network().as_ref() {
-            return Some(network);
-        }
-
-        None
+        controller_opt.resources.network().as_ref()
     }
 }
diff --git a/cgroups/src/v1/network_priority.rs b/cgroups/src/v1/network_priority.rs
index eee0fbe6..c5553337 100644
--- a/cgroups/src/v1/network_priority.rs
+++ b/cgroups/src/v1/network_priority.rs
@@ -23,17 +23,13 @@ impl Controller for NetworkPriority {
     }

     fn needs_to_handle<'a>(controller_opt: &'a ControllerOpt) -> Option<&'a Self::Resource> {
-        if let Some(network) = &controller_opt.resources.network() {
-            return Some(network);
-        }
-
-        None
+        controller_opt.resources.network().as_ref()
     }
 }

 impl NetworkPriority {
     fn apply(root_path: &Path, network: &LinuxNetwork) -> Result<()> {
-        if let Some(ni_priorities) = network.priorities().as_ref() {
+        if let Some(ni_priorities) = network.priorities() {
             let priorities: String = ni_priorities.iter().map(|p| p.to_string()).collect();
             common::write_cgroup_file_str(root_path.join("net_prio.ifpriomap"), priorities.trim())?;
         }
diff --git a/cgroups/src/v2/hugetlb.rs b/cgroups/src/v2/hugetlb.rs
index 659bb8a2..d3b1dc5d 100644
--- a/cgroups/src/v2/hugetlb.rs
+++ b/cgroups/src/v2/hugetlb.rs
@@ -14,7 +14,7 @@ pub struct HugeTlb {}
 impl Controller for HugeTlb {
     fn apply(controller_opt: &ControllerOpt, cgroup_root: &std::path::Path) -> Result<()> {
         log::debug!("Apply hugetlb cgroup v2 config");
-        if let Some(hugepage_limits) = controller_opt.resources.hugepage_limits().as_ref() {
+        if let Some(hugepage_limits) = controller_opt.resources.hugepage_limits() {
             for hugetlb in hugepage_limits {
                 Self::apply(cgroup_root, hugetlb)
                     .context("failed to apply hugetlb resource restrictions")?
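Editor's note: the extract_page_size change above replaces nested if-let blocks with a chain of Option combinators. The following standalone sketch (an illustration only, not part of the patch; page_size_moniker is an invented name and plain str::parse stands in for youki's parse_value helper) shows the same chaining and the monikers it produces.

// Minimal sketch of strip_prefix/strip_suffix chaining with and_then, assuming
// directory names such as "hugepages-2048kB" from /sys/kernel/mm/hugepages.
fn page_size_moniker(dir_name: &str) -> Option<String> {
    dir_name
        .strip_prefix("hugepages-")                     // "2048kB"
        .and_then(|stripped| stripped.strip_suffix("kB")) // "2048"
        .and_then(|size| size.parse::<u64>().ok())        // 2048
        .map(|size| {
            if size >= (1 << 20) {
                (size >> 20).to_string() + "GB"
            } else if size >= (1 << 10) {
                (size >> 10).to_string() + "MB"
            } else {
                size.to_string() + "KB"
            }
        })
}

fn main() {
    assert_eq!(page_size_moniker("hugepages-2048kB").as_deref(), Some("2MB"));
    assert_eq!(page_size_moniker("hugepages-1048576kB").as_deref(), Some("1GB"));
    assert_eq!(page_size_moniker("not-a-hugepage-dir"), None);
}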
diff --git a/cgroups/src/v2/io.rs b/cgroups/src/v2/io.rs
index 0cc48e5e..8fe6d6c5 100644
--- a/cgroups/src/v2/io.rs
+++ b/cgroups/src/v2/io.rs
@@ -85,7 +85,7 @@ impl Io {

     // linux kernel doc: https://www.kernel.org/doc/html/latest/admin-guide/cgroup-v2.html#io
     fn apply(root_path: &Path, blkio: &LinuxBlockIo) -> Result<()> {
-        if let Some(weight_device) = blkio.weight_device().as_ref() {
+        if let Some(weight_device) = blkio.weight_device() {
             for wd in weight_device {
                 common::write_cgroup_file(
                     root_path.join(CGROUP_BFQ_IO_WEIGHT),
@@ -107,7 +107,7 @@ impl Io {
             }
         }

-        if let Some(throttle_read_bps_device) = blkio.throttle_read_bps_device().as_ref() {
+        if let Some(throttle_read_bps_device) = blkio.throttle_read_bps_device() {
             for trbd in throttle_read_bps_device {
                 common::write_cgroup_file(
                     Self::io_max_path(root_path),
@@ -116,7 +116,7 @@ impl Io {
             }
         }

-        if let Some(throttle_write_bps_device) = blkio.throttle_write_bps_device().as_ref() {
+        if let Some(throttle_write_bps_device) = blkio.throttle_write_bps_device() {
             for twbd in throttle_write_bps_device {
                 common::write_cgroup_file(
                     Self::io_max_path(root_path),
@@ -125,7 +125,7 @@ impl Io {
             }
         }

-        if let Some(throttle_read_iops_device) = blkio.throttle_read_iops_device().as_ref() {
+        if let Some(throttle_read_iops_device) = blkio.throttle_read_iops_device() {
             for trid in throttle_read_iops_device {
                 common::write_cgroup_file(
                     Self::io_max_path(root_path),
@@ -134,7 +134,7 @@ impl Io {
             }
         }

-        if let Some(throttle_write_iops_device) = blkio.throttle_write_iops_device().as_ref() {
+        if let Some(throttle_write_iops_device) = blkio.throttle_write_iops_device() {
             for twid in throttle_write_iops_device {
                 common::write_cgroup_file(
                     Self::io_max_path(root_path),
diff --git a/src/capabilities.rs b/src/capabilities.rs
index d9e409f1..0ccda4d6 100644
--- a/src/capabilities.rs
+++ b/src/capabilities.rs
@@ -133,23 +133,23 @@ pub fn reset_effective(syscall: &S) -> Result<()> {
 /// Drop any extra granted capabilities, and reset to defaults which are in oci specification
 pub fn drop_privileges(cs: &LinuxCapabilities, syscall: &S) -> Result<()> {
     log::debug!("dropping bounding capabilities to {:?}", cs.bounding());
-    if let Some(bounding) = cs.bounding().as_ref() {
+    if let Some(bounding) = cs.bounding() {
         syscall.set_capability(CapSet::Bounding, &to_set(bounding))?;
     }

-    if let Some(effective) = cs.effective().as_ref() {
+    if let Some(effective) = cs.effective() {
         syscall.set_capability(CapSet::Effective, &to_set(effective))?;
     }

-    if let Some(permitted) = cs.permitted().as_ref() {
+    if let Some(permitted) = cs.permitted() {
         syscall.set_capability(CapSet::Permitted, &to_set(permitted))?;
     }

-    if let Some(inheritable) = cs.inheritable().as_ref() {
+    if let Some(inheritable) = cs.inheritable() {
         syscall.set_capability(CapSet::Inheritable, &to_set(inheritable))?;
     }

-    if let Some(ambient) = cs.ambient().as_ref() {
+    if let Some(ambient) = cs.ambient() {
         // check specifically for ambient, as those might not always be available
         if let Err(e) = syscall.set_capability(CapSet::Ambient, &to_set(ambient)) {
             log::error!("failed to set ambient capabilities: {}", e);
diff --git a/src/commands/delete.rs b/src/commands/delete.rs
index 0c9c214b..931342b9 100644
--- a/src/commands/delete.rs
+++ b/src/commands/delete.rs
@@ -16,7 +16,7 @@ pub struct Delete {
 impl Delete {
     pub fn exec(&self, root_path: PathBuf) -> Result<()> {
         log::debug!("start deleting {}", self.container_id);
-        let mut container = load_container(root_path, self.container_id.as_str())?;
+        let mut container = load_container(root_path, &self.container_id)?;
         container
             .delete(self.force)
             .with_context(|| format!("failed to delete container {}", self.container_id))
diff --git a/src/commands/info.rs b/src/commands/info.rs
index 22fae04a..625cfb50 100644
--- a/src/commands/info.rs
+++ b/src/commands/info.rs
@@ -64,7 +64,7 @@ fn try_read_os_from>(path: P) -> Option {
     let name = find_parameter(&release_content, "NAME");
     let version = find_parameter(&release_content, "VERSION");

-    if let (Some(name), Some(version)) = (name, version) {
+    if let Some((name, version)) = name.zip(version) {
         return Some(format!(
             "{} {}",
             name.trim_matches('"'),
@@ -78,16 +78,10 @@ fn try_read_os_from>(path: P) -> Option {

 /// Helper function to find keyword values in OS info string
 fn find_parameter<'a>(content: &'a str, param_name: &str) -> Option<&'a str> {
-    let param_value = content
+    content
         .lines()
         .find(|l| l.starts_with(param_name))
-        .map(|l| l.split_terminator('=').last());
-
-    if let Some(Some(value)) = param_value {
-        return Some(value);
-    }
-
-    None
+        .and_then(|l| l.split_terminator('=').last())
 }

 /// Print Hardware information of system
diff --git a/src/commands/list.rs b/src/commands/list.rs
index 4fa4d409..a51e9591 100644
--- a/src/commands/list.rs
+++ b/src/commands/list.rs
@@ -1,5 +1,4 @@
 //! Contains Functionality of list container command
-use std::ffi::OsString;
 use std::fs;
 use std::io;
 use std::io::Write;
@@ -37,11 +36,7 @@ impl List {
                 "".to_owned()
             };

-            let user_name = if let Some(creator) = container.creator() {
-                creator
-            } else {
-                OsString::new()
-            };
+            let user_name = container.creator().unwrap_or_default();

             let created = if let Some(utc) = container.created() {
                 let local: DateTime = DateTime::from(utc);
diff --git a/src/container/builder_impl.rs b/src/container/builder_impl.rs
index 7db9e425..0d986659 100644
--- a/src/container/builder_impl.rs
+++ b/src/container/builder_impl.rs
@@ -62,7 +62,7 @@ impl<'a> ContainerBuilderImpl<'a> {
         let process = self.spec.process().as_ref().context("No process in spec")?;

         if self.init {
-            if let Some(hooks) = self.spec.hooks().as_ref() {
+            if let Some(hooks) = self.spec.hooks() {
                 hooks::run_hooks(hooks.create_runtime().as_ref(), self.container.as_ref())?
             }
         }
@@ -146,7 +146,7 @@ impl<'a> ContainerBuilderImpl<'a> {
         // If creating a rootless container, the intermediate process will ask
         // the main process to set up uid and gid mapping, once the intermediate
         // process enters into a new user namespace.
-        if let Some(rootless) = self.rootless.as_ref() {
+        if let Some(rootless) = &self.rootless {
             receiver_from_intermediate.wait_for_mapping_request()?;
             setup_mapping(rootless, intermediate_pid)?;
             sender_to_intermediate.mapping_written()?;
@@ -156,7 +156,7 @@ impl<'a> ContainerBuilderImpl<'a> {
         log::debug!("init pid is {:?}", init_pid);

         if self.rootless.is_none() && linux.resources().is_some() && self.init {
-            if let Some(resources) = linux.resources().as_ref() {
+            if let Some(resources) = linux.resources() {
                 apply_cgroups(resources, init_pid, cmanager.as_ref())?;
             }
         }
@@ -283,9 +283,9 @@ mod tests {
         let line = fs::read_to_string(format!("/proc/{}/uid_map", child.as_raw()))?;
         let line_splited = line.split_whitespace();
         for (act, expect) in line_splited.zip([
-            uid_mapping.container_id().to_string().as_str(),
-            uid_mapping.host_id().to_string().as_str(),
-            uid_mapping.size().to_string().as_str(),
+            uid_mapping.container_id().to_string(),
+            uid_mapping.host_id().to_string(),
+            uid_mapping.size().to_string(),
         ]) {
             assert_eq!(act, expect);
         }
@@ -328,9 +328,9 @@ mod tests {
         let line = fs::read_to_string(format!("/proc/{}/gid_map", child.as_raw()))?;
         let line_splited = line.split_whitespace();
         for (act, expect) in line_splited.zip([
-            gid_mapping.container_id().to_string().as_str(),
-            gid_mapping.host_id().to_string().as_str(),
-            gid_mapping.size().to_string().as_str(),
+            gid_mapping.container_id().to_string(),
+            gid_mapping.host_id().to_string(),
+            gid_mapping.size().to_string(),
         ]) {
             assert_eq!(act, expect);
         }
diff --git a/src/container/container.rs b/src/container/container.rs
index 0a13febd..78247884 100644
--- a/src/container/container.rs
+++ b/src/container/container.rs
@@ -50,7 +50,7 @@ impl Container {
     }

     pub fn id(&self) -> &str {
-        self.state.id.as_str()
+        &self.state.id
     }

     pub fn can_start(&self) -> bool {
diff --git a/src/container/container_delete.rs b/src/container/container_delete.rs
index 19c26a8c..ec608504 100644
--- a/src/container/container_delete.rs
+++ b/src/container/container_delete.rs
@@ -67,7 +67,7 @@ impl Container {
                 format!("failed to remove cgroup {}", cgroups_path.display())
             })?;

-            if let Some(hooks) = spec.hooks().as_ref() {
+            if let Some(hooks) = spec.hooks() {
                 hooks::run_hooks(hooks.poststop().as_ref(), Some(self))
                     .with_context(|| "failed to run post stop hooks")?;
             }
diff --git a/src/container/container_start.rs b/src/container/container_start.rs
index e6b90080..9eaf50d5 100644
--- a/src/container/container_start.rs
+++ b/src/container/container_start.rs
@@ -42,7 +42,7 @@ impl Container {
         let spec = self
             .spec()
             .with_context(|| format!("failed to load runtime spec for container {}", self.id()))?;
-        if let Some(hooks) = spec.hooks().as_ref() {
+        if let Some(hooks) = spec.hooks() {
             // While prestart is marked as deprecated in the OCI spec, the docker and integration test still
             // uses it.
             #[allow(deprecated)]
@@ -60,7 +60,7 @@ impl Container {

         // Run post start hooks. It runs after the container process is started.
         // It is called in the runtime namespace.
-        if let Some(hooks) = spec.hooks().as_ref() {
+        if let Some(hooks) = spec.hooks() {
             hooks::run_hooks(hooks.poststart().as_ref(), Some(self))
                 .with_context(|| "failed to run post start hooks")?;
         }
diff --git a/src/container/init_builder.rs b/src/container/init_builder.rs
index 73361b57..221f72af 100644
--- a/src/container/init_builder.rs
+++ b/src/container/init_builder.rs
@@ -48,7 +48,7 @@ impl<'a> InitContainerBuilder<'a> {
             .set_systemd(self.use_systemd)
             .set_annotations(spec.annotations().clone());

-        unistd::chdir(&*container_dir)?;
+        unistd::chdir(&container_dir)?;
         let notify_path = container_dir.join(NOTIFY_FILE);
         // convert path of root file system of the container to absolute path
         let rootfs = fs::canonicalize(&spec.root().as_ref().context("no root in spec")?.path())?;
@@ -116,8 +116,8 @@ impl<'a> InitContainerBuilder<'a> {
             );
         }

-        if let Some(process) = &spec.process() {
-            if let Some(profile) = &process.apparmor_profile() {
+        if let Some(process) = spec.process() {
+            if let Some(profile) = process.apparmor_profile() {
                 if !apparmor::is_enabled()? {
                     bail!(
                         "apparmor profile {} is specified in runtime spec, \
@@ -142,7 +142,7 @@ impl<'a> InitContainerBuilder<'a> {
             &self.base.container_id,
             ContainerStatus::Creating,
             None,
-            self.bundle.as_path(),
+            &self.bundle,
             container_dir,
         )?;
         container.save()?;
diff --git a/src/container/tenant_builder.rs b/src/container/tenant_builder.rs
index 2078b69d..11f623bc 100644
--- a/src/container/tenant_builder.rs
+++ b/src/container/tenant_builder.rs
@@ -97,7 +97,7 @@ impl<'a> TenantContainerBuilder<'a> {

         log::debug!("{:#?}", spec);

-        unistd::chdir(&*container_dir)?;
+        unistd::chdir(&container_dir)?;
         let notify_path = Self::setup_notify_listener(&container_dir)?;
         // convert path of root file system of the container to absolute path
         let rootfs = fs::canonicalize(&spec.root().as_ref().context("no root in spec")?.path())?;
@@ -160,7 +160,7 @@ impl<'a> TenantContainerBuilder<'a> {
     }

     fn adapt_spec_for_tenant(&self, spec: &Spec, container: &Container) -> Result {
-        let process = if let Some(ref process) = self.process {
+        let process = if let Some(process) = &self.process {
             self.set_process(process)?
         } else {
             let mut process_builder = ProcessBuilder::default();
@@ -238,7 +238,7 @@ impl<'a> TenantContainerBuilder<'a> {
     }

     fn set_working_dir(&self) -> Result> {
-        if let Some(ref cwd) = self.cwd {
+        if let Some(cwd) = &self.cwd {
             if cwd.is_relative() {
                 bail!(
                     "Current working directory must be an absolute path, but is {}",
@@ -280,7 +280,7 @@ impl<'a> TenantContainerBuilder<'a> {
             let caps: SpecCapabilities =
                 caps.iter().map(|c| SpecCapability::from_cap(*c)).collect();

-            if let Some(ref spec_caps) = spec
+            if let Some(spec_caps) = spec
                 .process()
                 .as_ref()
                 .context("no process in spec")?
@@ -345,8 +345,8 @@ impl<'a> TenantContainerBuilder<'a> {
     fn set_namespaces(&self, init_namespaces: Vec) -> Result> {
         let mut tenant_namespaces = Vec::with_capacity(init_namespaces.len());

-        for ns_type in NAMESPACE_TYPES.iter().copied() {
-            if let Some(init_ns) = init_namespaces.iter().find(|n| n.ns_type.eq(ns_type)) {
+        for &ns_type in NAMESPACE_TYPES {
+            if let Some(init_ns) = init_namespaces.iter().find(|n| n.ns_type == ns_type) {
                 let tenant_ns = LinuxNamespaceType::try_from(ns_type)?;
                 tenant_namespaces.push(
                     LinuxNamespaceBuilder::default()
diff --git a/src/hooks.rs b/src/hooks.rs
index d9c8881c..848ef1a1 100644
--- a/src/hooks.rs
+++ b/src/hooks.rs
@@ -34,14 +34,14 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re
             // command differenciates arg0 from args, where rust command arg
             // doesn't include arg0. So we have to make the split arg0 from the
             // rest of args.
-            if let Some((arg0, args)) = hook.args().as_ref().map(|a| a.split_first()).flatten() {
+            if let Some((arg0, args)) = hook.args().as_ref().and_then(|a| a.split_first()) {
                 log::debug!("run_hooks arg0: {:?}, args: {:?}", arg0, args);
                 hook_command.arg0(arg0).args(args)
             } else {
-                hook_command.arg0(&hook.path().as_path().display().to_string())
+                hook_command.arg0(&hook.path().display().to_string())
             };

-            let envs: HashMap = if let Some(env) = hook.env().as_ref() {
+            let envs: HashMap = if let Some(env) = hook.env() {
                 utils::parse_env(env)
             } else {
                 HashMap::new()
@@ -57,7 +57,7 @@ pub fn run_hooks(hooks: Option<&Vec>, container: Option<&Container>) -> Re
             let hook_process_pid = Pid::from_raw(hook_process.id() as i32);
             // Based on the OCI spec, we need to pipe the container state into
             // the hook command through stdin.
-            if let Some(mut stdin) = hook_process.stdin.as_ref() {
+            if let Some(stdin) = &mut hook_process.stdin {
                 // We want to ignore BrokenPipe here. A BrokenPipe indicates
                 // either the hook is crashed/errored or it ran successfully.
                // Either way, this is an indication that the hook command
diff --git a/src/main.rs b/src/main.rs
index b6f8ded9..1516176e 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -137,8 +137,7 @@ fn determine_root_path(root_path: Option) -> Result {
     }

     if let Ok(path) = std::env::var("HOME") {
-        let home = PathBuf::from(path);
-        if let Ok(resolved) = fs::canonicalize(home) {
+        if let Ok(resolved) = fs::canonicalize(path) {
             let run_dir = resolved.join(".youki/run");
             if create_dir_all_with_mode(&run_dir, uid, Mode::S_IRWXU).is_ok() {
                 return Ok(run_dir);
diff --git a/src/notify_socket.rs b/src/notify_socket.rs
index 7a541055..7b32a72d 100644
--- a/src/notify_socket.rs
+++ b/src/notify_socket.rs
@@ -68,11 +68,11 @@ impl NotifySocket {
     pub fn notify_container_start(&mut self) -> Result<()> {
         log::debug!("notify container start");
         let cwd = env::current_dir()?;
-        unistd::chdir(&*self.path.parent().unwrap())?;
+        unistd::chdir(self.path.parent().unwrap())?;
         let mut stream = UnixStream::connect(&self.path.file_name().unwrap())?;
         stream.write_all(b"start container")?;
         log::debug!("notify finished");
-        unistd::chdir(&*cwd)?;
+        unistd::chdir(&cwd)?;
         Ok(())
     }

diff --git a/src/process/init.rs b/src/process/init.rs
index 0b5327a7..b13af793 100644
--- a/src/process/init.rs
+++ b/src/process/init.rs
@@ -197,7 +197,7 @@ pub fn container_init(
     // Only set the host name if entering into a new uts namespace
     if let Some(uts_namespace) = namespaces.get(LinuxNamespaceType::Uts) {
         if uts_namespace.path().is_none() {
-            if let Some(hostname) = spec.hostname().as_ref() {
+            if let Some(hostname) = spec.hostname() {
                 command.set_hostname(hostname)?;
             }
         }
@@ -237,13 +237,13 @@ pub fn container_init(
         rootfs::adjust_root_mount_propagation(linux)
             .context("Failed to set propagation type of root mount")?;

-        if let Some(kernel_params) = &linux.sysctl() {
+        if let Some(kernel_params) = linux.sysctl() {
             sysctl(kernel_params)
                 .with_context(|| format!("Failed to sysctl: {:?}", kernel_params))?;
         }
     }

-    if let Some(profile) = &proc.apparmor_profile() {
+    if let Some(profile) = proc.apparmor_profile() {
         apparmor::apply_profile(profile)
             .with_context(|| format!("failed to apply apparmor profile {}", profile))?;
     }
@@ -258,14 +258,14 @@ pub fn container_init(
         )?
     }

-    if let Some(paths) = &linux.readonly_paths() {
+    if let Some(paths) = linux.readonly_paths() {
         // mount readonly path
         for path in paths {
             readonly_path(path).context("Failed to set read only path")?;
         }
     }

-    if let Some(paths) = &linux.masked_paths() {
+    if let Some(paths) = linux.masked_paths() {
         // mount masked path
         for path in paths {
             masked_path(path, linux.mount_label()).context("Failed to set masked path")?;
@@ -279,7 +279,7 @@ pub fn container_init(
         // This chdir must run before setting up the user.
        // This may allow the user running youki to access directories
        // that the container user cannot access.
-        match unistd::chdir(&*proc.cwd()) {
+        match unistd::chdir(proc.cwd()) {
             Ok(_) => false,
             Err(nix::Error::EPERM) => true,
             Err(e) => bail!("Failed to chdir: {}", e),
@@ -305,7 +305,7 @@ pub fn container_init(
     }

     capabilities::reset_effective(command).context("Failed to reset effective capabilities")?;
-    if let Some(caps) = &proc.capabilities() {
+    if let Some(caps) = proc.capabilities() {
         capabilities::drop_privileges(caps, command).context("Failed to drop capabilities")?;
     }

@@ -354,7 +354,7 @@ pub fn container_init(

     // change directory to process.cwd if process.cwd is not empty
     if do_chdir {
-        unistd::chdir(&*proc.cwd())
+        unistd::chdir(proc.cwd())
             .with_context(|| format!("Failed to chdir {}", proc.cwd().display()))?;
     }

@@ -389,7 +389,7 @@ pub fn container_init(
             .context("Failed to execute seccomp")?;
     }

-    if let Some(args) = proc.args().as_ref() {
+    if let Some(args) = proc.args() {
         utils::do_exec(&args[0], args)?;
     } else {
         bail!("On non-Windows, at least one process arg entry is required.")
@@ -425,7 +425,7 @@ pub fn container_init(
 // Privileged user starting a normal container: Just add the supplementary groups.
 //
 fn set_supplementary_gids(user: &User, rootless: &Option) -> Result<()> {
-    if let Some(additional_gids) = &user.additional_gids() {
+    if let Some(additional_gids) = user.additional_gids() {
         if additional_gids.is_empty() {
             return Ok(());
         }
diff --git a/src/process/intermediate.rs b/src/process/intermediate.rs
index dcd7b154..b7e82438 100644
--- a/src/process/intermediate.rs
+++ b/src/process/intermediate.rs
@@ -47,8 +47,8 @@ pub fn container_intermediate(

     // set limits and namespaces to the process
     let proc = spec.process().as_ref().context("no process in spec")?;
-    if let Some(rlimits) = proc.rlimits().as_ref() {
-        for rlimit in rlimits.iter() {
+    if let Some(rlimits) = proc.rlimits() {
+        for rlimit in rlimits {
             command.set_rlimit(rlimit).context("failed to set rlimit")?;
         }
     }
diff --git a/src/rootfs.rs b/src/rootfs.rs
index 5e21d64b..5cdca84b 100644
--- a/src/rootfs.rs
+++ b/src/rootfs.rs
@@ -23,16 +23,12 @@ pub fn prepare_rootfs(spec: &Spec, rootfs: &Path, bind_devices: bool) -> Result<
     log::debug!("Prepare rootfs: {:?}", rootfs);
     let mut flags = MsFlags::MS_REC;
     let linux = spec.linux().as_ref().context("no linux in spec")?;
-    if let Some(roofs_propagation) = linux.rootfs_propagation().as_ref() {
-        match roofs_propagation.as_str() {
-            "shared" => flags |= MsFlags::MS_SHARED,
-            "private" => flags |= MsFlags::MS_PRIVATE,
-            "slave" => flags |= MsFlags::MS_SLAVE,
-            "unbindable" => flags |= MsFlags::MS_SLAVE, // set unbindable after pivot_root
-            uknown => bail!("unknown rootfs_propagation: {}", uknown),
-        }
-    } else {
-        flags |= MsFlags::MS_SLAVE;
+
+    match linux.rootfs_propagation().as_deref() {
+        Some("shared") => flags |= MsFlags::MS_SHARED,
+        Some("private") => flags |= MsFlags::MS_PRIVATE,
+        Some("slave" | "unbindable") | None => flags |= MsFlags::MS_SLAVE,
+        Some(uknown) => bail!("unknown rootfs_propagation: {}", uknown),
     }

     nix_mount(None::<&str>, "/", None::<&str>, flags, None::<&str>)
@@ -49,8 +45,8 @@ pub fn prepare_rootfs(spec: &Spec, rootfs: &Path, bind_devices: bool) -> Result<
         None::<&str>,
     )?;

-    if let Some(mounts) = spec.mounts().as_ref() {
-        for mount in mounts.iter() {
+    if let Some(mounts) = spec.mounts() {
+        for mount in mounts {
             log::debug!("Mount... {:?}", mount);
             let (flags, data) = parse_mount(mount);
             let mount_label = linux.mount_label().as_ref();
@@ -74,14 +70,14 @@ pub fn prepare_rootfs(spec: &Spec, rootfs: &Path, bind_devices: bool) -> Result<
     }

     setup_default_symlinks(rootfs).context("Failed to setup default symlinks")?;
-    if let Some(added_devices) = linux.devices().as_ref() {
+    if let Some(added_devices) = linux.devices() {
         create_devices(
             rootfs,
             default_devices().iter().chain(added_devices),
             bind_devices,
         )
     } else {
-        create_devices(rootfs, default_devices().iter(), bind_devices)
+        create_devices(rootfs, &default_devices(), bind_devices)
     }?;

     setup_ptmx(rootfs)?;
@@ -110,7 +106,7 @@ fn setup_default_symlinks(rootfs: &Path) -> Result<()> {
         ("/proc/self/fd/1", "dev/stdout"),
         ("/proc/self/fd/2", "dev/stderr"),
     ];
-    for &(src, dst) in defaults.iter() {
+    for (src, dst) in defaults {
         symlink(src, rootfs.join(dst)).context("Fail to symlink defaults")?;
     }

@@ -172,11 +168,12 @@ pub fn default_devices() -> Vec {

 fn create_devices<'a, I>(rootfs: &Path, devices: I, bind: bool) -> Result<()>
 where
-    I: Iterator,
+    I: IntoIterator,
 {
     let old_mode = umask(Mode::from_bits_truncate(0o000));
     if bind {
         let _ = devices
+            .into_iter()
             .map(|dev| {
                 if !dev.path().starts_with("/dev") {
                     panic!("{} is not a valid device path", dev.path().display());
@@ -187,6 +184,7 @@ where
             .collect::>>()?;
     } else {
         devices
+            .into_iter()
             .map(|dev| {
                 if !dev.path().starts_with("/dev") {
                     panic!("{} is not a valid device path", dev.path().display());
@@ -323,8 +321,8 @@ fn mount_to_container(
     ) {
         nix_mount(
-            Some(&*dest),
-            &*dest,
+            Some(dest),
+            dest,
             None::<&str>,
             flags | MsFlags::MS_REMOUNT,
             None::<&str>,
diff --git a/src/rootless.rs b/src/rootless.rs
index 30fd0275..7b86846c 100644
--- a/src/rootless.rs
+++ b/src/rootless.rs
@@ -102,11 +102,7 @@ pub fn rootless_required() -> bool {
         return true;
     }

-    if let Ok("true") = std::env::var("YOUKI_USE_ROOTLESS").as_deref() {
-        return true;
-    }
-
-    false
+    matches!(std::env::var("YOUKI_USE_ROOTLESS").as_deref(), Ok("true"))
 }

 /// Validates that the spec contains the required information for
@@ -141,27 +137,29 @@ fn validate(spec: &Spec) -> Result<()> {
         gid_mappings,
     )?;

-    if let Some(process) = &spec.process() {
-        if let Some(additional_gids) = &process.user().additional_gids() {
-            let privileged = nix::unistd::geteuid().is_root();
+    if let Some(additional_gids) = spec
+        .process()
+        .as_ref()
+        .and_then(|process| process.user().additional_gids().as_ref())
+    {
+        let privileged = nix::unistd::geteuid().is_root();

-            match (privileged, additional_gids.is_empty()) {
-                (true, false) => {
-                    for gid in additional_gids {
-                        if !is_id_mapped(*gid, gid_mappings) {
-                            bail!("gid {} is specified as supplementary group, but is not mapped in the user namespace", gid);
-                        }
+        match (privileged, additional_gids.is_empty()) {
+            (true, false) => {
+                for gid in additional_gids {
+                    if !is_id_mapped(*gid, gid_mappings) {
+                        bail!("gid {} is specified as supplementary group, but is not mapped in the user namespace", gid);
                     }
                 }
-                (false, false) => {
-                    bail!(
-                        "user is {} (unprivileged). Supplementary groups cannot be set in \
-                        a rootless container for this user due to CVE-2014-8989",
-                        nix::unistd::geteuid()
-                    )
-                }
-                _ => {}
             }
+            (false, false) => {
+                bail!(
+                    "user is {} (unprivileged). Supplementary groups cannot be set in \
+                    a rootless container for this user due to CVE-2014-8989",
+                    nix::unistd::geteuid()
+                )
+            }
+            _ => {}
         }
     }

@@ -174,7 +172,7 @@ fn validate_mounts(
     gid_mappings: &[LinuxIdMapping],
 ) -> Result<()> {
     for mount in mounts {
-        if let Some(options) = &mount.options() {
+        if let Some(options) = mount.options() {
             for opt in options {
                 if opt.starts_with("uid=") && !is_id_mapped(opt[4..].parse()?, uid_mappings) {
                     bail!("Mount {:?} specifies option {} which is not mapped inside the rootless container", mount, opt);
@@ -199,7 +197,7 @@ fn is_id_mapped(id: u32, mappings: &[LinuxIdMapping]) -> bool {

 /// Looks up the location of the newuidmap and newgidmap binaries which
 /// are required to write multiple user/group mappings
 pub fn lookup_map_binaries(spec: &Linux) -> Result> {
-    if let Some(uid_mappings) = spec.uid_mappings().as_ref() {
+    if let Some(uid_mappings) = spec.uid_mappings() {
         if uid_mappings.len() == 1 && uid_mappings.len() == 1 {
             return Ok(None);
         }
diff --git a/src/seccomp/mod.rs b/src/seccomp/mod.rs
index 048b1e2f..cbc9429b 100644
--- a/src/seccomp/mod.rs
+++ b/src/seccomp/mod.rs
@@ -47,7 +47,7 @@ impl Compare {
     }

     pub fn build(self) -> Result {
-        if let (Some(op), Some(datum_a)) = (self.op, self.datum_a) {
+        if let Some((op, datum_a)) = self.op.zip(self.datum_a) {
             Ok(scmp_arg_cmp {
                 arg: self.arg,
                 op,
@@ -109,7 +109,7 @@ impl FilterContext {
                 rule.action,
                 rule.syscall_nr,
                 rule.comparators.len() as u32,
-                rule.comparators.as_slice().as_ptr(),
+                rule.comparators.as_ptr(),
             )
         },
     };
@@ -210,7 +210,7 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result<()> {
     let default_action = translate_action(&seccomp.default_action(), None);
     let mut ctx = FilterContext::default(default_action)?;

-    if let Some(architectures) = seccomp.architectures().as_ref() {
+    if let Some(architectures) = seccomp.architectures() {
         for arch in architectures {
             let arch_token = translate_arch(arch);
             ctx.add_arch(arch_token as u32)
@@ -233,7 +233,7 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result<()> {
         );
     }

-    if let Some(syscalls) = seccomp.syscalls().as_ref() {
+    if let Some(syscalls) = seccomp.syscalls() {
         for syscall in syscalls {
             let action = translate_action(&syscall.action(), syscall.errno_ret());
             if action == default_action {
@@ -246,7 +246,7 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result<()> {
                 continue;
             }

-            for name in syscall.names().iter() {
+            for name in syscall.names() {
                 let syscall_number = match translate_syscall(name) {
                     Ok(x) => x,
                     Err(_) => {
@@ -262,7 +262,7 @@ pub fn initialize_seccomp(seccomp: &LinuxSeccomp) -> Result<()> {
                 // Not clear why but if there are multiple arg attached to one
                 // syscall rule, we have to add them seperatly. add_rule will
                 // return EINVAL. runc does the same but doesn't explain why.
-                match syscall.args().as_ref() {
+                match syscall.args() {
                     Some(args) => {
                         for arg in args {
                             let mut rule = Rule::new(action, syscall_number);
diff --git a/src/tty.rs b/src/tty.rs
index a4d935b9..5eaa081b 100644
--- a/src/tty.rs
+++ b/src/tty.rs
@@ -34,7 +34,7 @@ pub fn setup_console_socket(
     )?;
     csocketfd = match socket::connect(
         csocketfd,
-        &socket::SockAddr::Unix(socket::UnixAddr::new(&*socket_name)?),
+        &socket::SockAddr::Unix(socket::UnixAddr::new(socket_name)?),
     ) {
         Err(errno) => {
             if !matches!(errno, Errno::ENOENT) {
diff --git a/src/utils.rs b/src/utils.rs
index 75ebdaf6..653ec84d 100644
--- a/src/utils.rs
+++ b/src/utils.rs
@@ -47,12 +47,10 @@ pub fn parse_env(envs: &[String]) -> HashMap {
         .filter_map(|e| {
             let mut split = e.split('=');

-            if let Some(key) = split.next() {
-                let value: String = split.collect::>().join("=");
-                Some((String::from(key), value))
-            } else {
-                None
-            }
+            split.next().map(|key| {
+                let value = split.collect::>().join("=");
+                (key.into(), value)
+            })
         })
         .collect()
 }
diff --git a/test_framework/src/test_manager.rs b/test_framework/src/test_manager.rs
index 3daa8946..5fcc95b5 100644
--- a/test_framework/src/test_manager.rs
+++ b/test_framework/src/test_manager.rs
@@ -56,14 +56,14 @@ impl<'a> TestManager<'a> {

     /// Run all tests from all tests group
     pub fn run_all(&self) {
-        for (name, tg) in self.test_groups.iter() {
+        for (name, tg) in &self.test_groups {
             self.run_test_group(name, *tg);
         }
     }

     /// Run only selected tests
     pub fn run_selected(&self, tests: Vec<(&str, Option>)>) {
-        for (test_group_name, tests) in tests.iter() {
+        for (test_group_name, tests) in &tests {
             if let Some(tg) = self.test_groups.get(test_group_name) {
                 match tests {
                     None => self.run_test_group(test_group_name, *tg),
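Editor's note: taken together, the per-file changes above apply a handful of recurring Option idioms. The self-contained sketch below (an editor's illustration, not code from this patch; the Resources struct and its memory_limit field are invented for the example) condenses them: as_ref in place of an "if let ... return Some(..); None" block, unwrap_or in place of an if/else fallback, zip in place of matching a tuple of Options, and matches! in place of a one-armed if let.

// Illustrative only; standard-library calls (Option::as_ref, unwrap_or, zip, matches!).
#[derive(Default)]
struct Resources {
    memory_limit: Option<u64>, // hypothetical field standing in for a cgroup resource
}

fn needs_to_handle(resources: &Resources) -> Option<&u64> {
    // Before: if let Some(limit) = &resources.memory_limit { return Some(limit); } None
    resources.memory_limit.as_ref()
}

fn main() {
    let resources = Resources { memory_limit: Some(512) };
    assert_eq!(needs_to_handle(&resources), Some(&512));

    // unwrap_or replaces an if let / else that only picks a fallback value.
    let limit = resources.memory_limit.unwrap_or(u64::MAX);
    assert_eq!(limit, 512);

    // zip turns two Options into one Option of a pair, as in the name/version case in info.rs.
    let name = Some("Ubuntu");
    let version = Some("20.04");
    if let Some((name, version)) = name.zip(version) {
        assert_eq!(format!("{} {}", name, version), "Ubuntu 20.04");
    }

    // matches! replaces `if let Ok("true") = ... { return true; } false`, as in rootless_required.
    let flag = std::env::var("YOUKI_USE_ROOTLESS");
    let _rootless = matches!(flag.as_deref(), Ok("true"));
}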