diff --git a/c/blake3.c b/c/blake3.c
index 2f224a1..58a199e 100644
--- a/c/blake3.c
+++ b/c/blake3.c
@@ -425,8 +425,8 @@ INLINE void hasher_merge_cv_stack(blake3_hasher *self, uint64_t total_len) {
 // compress_subtree_to_parent_node(). That function always returns the top
 // *two* chaining values of the subtree it's compressing. We then do lazy
 // merging with each of them separately, so that the second CV will always
-// remain unmerged. (The compress_subtree_to_parent_node also helps us support
-// extendable output when we're hashing an input all-at-once.)
+// remain unmerged. (That also helps us support extendable output when we're
+// hashing an input all-at-once.)
 INLINE void hasher_push_cv(blake3_hasher *self, uint8_t new_cv[BLAKE3_OUT_LEN],
                            uint64_t chunk_counter) {
   hasher_merge_cv_stack(self, chunk_counter);
diff --git a/src/lib.rs b/src/lib.rs
index 53e9239..fef3ac8 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -921,8 +921,8 @@ impl Hasher {
     // compress_subtree_to_parent_node(). That function always returns the top
     // *two* chaining values of the subtree it's compressing. We then do lazy
     // merging with each of them separately, so that the second CV will always
-    // remain unmerged. (The compress_subtree_to_parent_node also helps us
-    // support extendable output when we're hashing an input all-at-once.)
+    // remain unmerged. (That also helps us support extendable output when
+    // we're hashing an input all-at-once.)
     fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
         self.merge_cv_stack(chunk_counter);
         self.cv_stack.push(*new_cv);
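For context (not part of the patch): the comment being edited describes the lazy CV-stack merging that `push_cv`/`merge_cv_stack` perform. Below is a minimal, self-contained Rust sketch of that bookkeeping under stated assumptions. The names `Sketch`, `Cv`, and `parent_cv` are illustrative stand-ins, not the real BLAKE3 types, and `parent_cv` is a placeholder for the actual parent-node compression; only the merge-then-push order and the popcount-based stack depth mirror the code touched by this diff.

```rust
// Illustrative sketch only; not the BLAKE3 implementation.
type Cv = [u8; 32];

// Hypothetical stand-in for the real parent-node compression.
fn parent_cv(left: &Cv, right: &Cv) -> Cv {
    let mut out = [0u8; 32];
    for i in 0..32 {
        out[i] = left[i] ^ right[i]; // placeholder combine, NOT the real hash
    }
    out
}

struct Sketch {
    cv_stack: Vec<Cv>,
}

impl Sketch {
    // Merge the stack down to popcount(total_chunks) entries, one CV per
    // completed subtree in the binary representation of the chunk count.
    fn merge_cv_stack(&mut self, total_chunks: u64) {
        let post_merge_len = total_chunks.count_ones() as usize;
        while self.cv_stack.len() > post_merge_len {
            let right = self.cv_stack.pop().unwrap();
            let left = self.cv_stack.pop().unwrap();
            self.cv_stack.push(parent_cv(&left, &right));
        }
    }

    // Lazy merging: merge using the counter *before* the new CV, then push,
    // so the most recently pushed CV always remains unmerged for now.
    fn push_cv(&mut self, new_cv: Cv, chunk_counter: u64) {
        self.merge_cv_stack(chunk_counter);
        self.cv_stack.push(new_cv);
    }
}

fn main() {
    let mut h = Sketch { cv_stack: Vec::new() };
    for chunk_counter in 0..8u64 {
        h.push_cv([chunk_counter as u8; 32], chunk_counter);
        println!(
            "after {} chunks: stack depth {}",
            chunk_counter + 1,
            h.cv_stack.len()
        );
    }
}
```

Running the sketch shows the stack depth staying at popcount(chunk_counter) + 1 after each push, which is the lazy-merging invariant the edited comment refers to: the top entry is merged only once a later sibling arrives.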