From: Eric Biggers <ebiggers@google.com>
The generic MORUS implementations all fail the improved AEAD tests because they produce the wrong result with some data layouts. Fix them.
Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/morus1280.c | 13 +++++++------
 crypto/morus640.c  | 13 +++++++------
 2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 3889c188f266..b83576b4eb55 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
 					   const struct morus1280_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }

diff --git a/crypto/morus640.c b/crypto/morus640.c
index da06ec2f6a80..b6a477444f6d 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
 					  const struct morus640_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
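To make the effect of the rounding concrete, here is a small standalone sketch of the chunking arithmetic in plain userspace C. The per-step sizes are invented, the carry variable only roughly models how skcipher_walk re-presents bytes reported back through skcipher_walk_done(), and the 32-byte chunk size is assumed to match MORUS1280's block size:

/*
 * Illustrative sketch only: invented step sizes, a simplified stand-in for
 * the kernel's round_down(), and an approximation of how the walker hands
 * back bytes that were reported as unprocessed.
 */
#include <stdio.h>

#define CHUNK 32u	/* assumed walk.stride / MORUS1280 block size */

static unsigned int round_down_to(unsigned int x, unsigned int m)
{
	return x - (x % m);
}

int main(void)
{
	unsigned int seg[] = { 50, 30, 20 };	/* hypothetical walk steps */
	unsigned int total = 100, processed = 0, carry = 0;

	for (unsigned int i = 0; i < sizeof(seg) / sizeof(seg[0]); i++) {
		unsigned int avail = carry + seg[i];	/* like walk.nbytes */
		unsigned int nbytes = avail;

		if (nbytes < total - processed)		/* not the final step */
			nbytes = round_down_to(nbytes, CHUNK);

		printf("step %u: %u bytes available, crypt_chunk() gets %u\n",
		       i, avail, nbytes);

		processed += nbytes;
		carry = avail - nbytes;	/* returned via skcipher_walk_done() */
	}
	printf("processed %u of %u bytes\n", processed, total);
	return 0;
}

Every call except the final one now lands on a 32-byte boundary, which is what the generic crypt_chunk helpers expect for anything but the tail of the message; previously they were handed the raw step sizes, so their partial-block handling could run in the middle of the stream.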
Hi Eric,
On Wed, Jan 23, 2019 at 11:52 PM Eric Biggers <ebiggers@kernel.org> wrote:
From: Eric Biggers <ebiggers@google.com>
The generic MORUS implementations all fail the improved AEAD tests because they produce the wrong result with some data layouts. Fix them.
Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/morus1280.c | 13 +++++++------
 crypto/morus640.c  | 13 +++++++------
 2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 3889c188f266..b83576b4eb55 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
 					   const struct morus1280_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
Hm... I assume the problem was that skcipher_walk may give you nbytes that is not rounded to the algorithm's walksize (aka walk.stride) even in the middle of the stream, right? I must have misread the code in skcipher.c and assumed that it automatically makes every step of a round size, with the exception of the very last one, which may include a final partial block. Thinking about it now it makes sense to me that this isn't the case, since for stream ciphers it would mean some unnecessary memcpy'ing in the skcipher_walk code...
Maybe you could explain the problem in one or two sentences in the commit message(s), since the contract of skcipher_walk doesn't seem to be documented anywhere and so it is not obvious why the change is necessary.
Thank you for all your work on this - the improved testmgr tests were long needed!
diff --git a/crypto/morus640.c b/crypto/morus640.c
index da06ec2f6a80..b6a477444f6d 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -365,18 +365,19 @@ static void crypto_morus640_process_crypt(struct morus640_state *state,
 					  const struct morus640_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
 }
--
2.20.1.321.g9e740568ce-goog
--
Ondrej Mosnacek <omosnace at redhat dot com>
Associate Software Engineer, Security Technologies
Red Hat, Inc.
On Thu, Jan 31, 2019 at 10:05:17AM +0100, Ondrej Mosnacek wrote:
Hi Eric,
On Wed, Jan 23, 2019 at 11:52 PM Eric Biggers <ebiggers@kernel.org> wrote:
From: Eric Biggers <ebiggers@google.com>
The generic MORUS implementations all fail the improved AEAD tests because they produce the wrong result with some data layouts. Fix them.
Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
Cc: <stable@vger.kernel.org> # v4.18+
Cc: Ondrej Mosnacek <omosnace@redhat.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
---
 crypto/morus1280.c | 13 +++++++------
 crypto/morus640.c  | 13 +++++++------
 2 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index 3889c188f266..b83576b4eb55 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -366,18 +366,19 @@ static void crypto_morus1280_process_crypt(struct morus1280_state *state,
 					   const struct morus1280_ops *ops)
 {
 	struct skcipher_walk walk;
-	u8 *dst;
-	const u8 *src;
 
 	ops->skcipher_walk_init(&walk, req, false);
 
 	while (walk.nbytes) {
-		src = walk.src.virt.addr;
-		dst = walk.dst.virt.addr;
+		unsigned int nbytes = walk.nbytes;
 
-		ops->crypt_chunk(state, dst, src, walk.nbytes);
+		if (nbytes < walk.total)
+			nbytes = round_down(nbytes, walk.stride);
 
-		skcipher_walk_done(&walk, 0);
+		ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
+				 nbytes);
+
+		skcipher_walk_done(&walk, walk.nbytes - nbytes);
 	}
Hm... I assume the problem was that skcipher_walk may give you nbytes that is not rounded to the algorithm's walksize (aka walk.stride) even in the middle of the stream, right? I must have misread the code in skcipher.c and assumed that it automatically makes every step of a round size, with the exception of the very last one, which may include a final partial block. Thinking about it now it makes sense to me that this isn't the case, since for stream ciphers it would mean some unnecessary memcpy'ing in the skcipher_walk code...
Maybe you could explain the problem in one or two sentences in the commit message(s), since the contract of skcipher_walk doesn't seem to be documented anywhere and so it is not obvious why the change is necessary.
Thank you for all your work on this - the improved testmgr tests were long needed!
Yes, that's correct. I'll add an explanation to the commit messages. I'm actually not convinced that this behavior should be the default (vs. allowing the caller to opt-in to it if needed), but this is how it works now...
A while back I also started work on a documentation patch for the skcipher_walk functions, but it's still pretty incomplete. I'll see if I can get it in a state to send out.
- Eric
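In the meantime, the calling convention that both fixes follow can be sketched generically as below. This is only an illustration of the pattern discussed in this thread: example_aead_do_crypt(), struct example_state and process_chunk() are made-up stand-ins for an algorithm's own state and per-chunk routine, not anything in the tree.

#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>

/* Hypothetical per-algorithm state and chunk routine, for illustration. */
struct example_state;
void process_chunk(struct example_state *state, u8 *dst, const u8 *src,
		   unsigned int nbytes);

static int example_aead_do_crypt(struct example_state *state,
				 struct aead_request *req, bool enc)
{
	struct skcipher_walk walk;
	int err;

	err = enc ? skcipher_walk_aead_encrypt(&walk, req, false)
		  : skcipher_walk_aead_decrypt(&walk, req, false);
	if (err)
		return err;

	while (walk.nbytes) {
		unsigned int nbytes = walk.nbytes;

		/*
		 * A step in the middle of the walk is not guaranteed to be a
		 * multiple of the chunk size, so only process whole chunks
		 * unless this is the final step.
		 */
		if (nbytes < walk.total)
			nbytes = round_down(nbytes, walk.stride);

		process_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes);

		/* Hand the unprocessed tail back to the walker. */
		err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
		if (err)
			return err;
	}

	return 0;
}

The key points are the round_down() to walk.stride on every step except the last, and the non-zero remainder passed to skcipher_walk_done(), which asks the walker to present those bytes again on a later iteration.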