ubuntu-linux-kernel/fs/crypto/keyinfo.c

// SPDX-License-Identifier: GPL-2.0
/*
 * key management facility for FS encryption support.
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption key functions.
 *
 * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
 */

#include <keys/user-type.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "fscrypt_private.h"

static struct crypto_shash *essiv_hash_tfm;

/**
 * derive_key_aes() - Derive a key using AES-128-ECB
 * @deriving_key: Encryption key used for derivation.
 * @source_key: Source key to which to apply derivation.
 * @derived_raw_key: Buffer that receives the derived raw key.
 *
 * Return: Zero on success; non-zero otherwise.
 */
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
				const struct fscrypt_key *source_key,
				u8 derived_raw_key[FS_MAX_KEY_SIZE])
{
	int res = 0;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist src_sg, dst_sg;
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);

	if (IS_ERR(tfm)) {
		res = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		res = -ENOMEM;
		goto out;
	}
	skcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			crypto_req_done, &wait);
	res = crypto_skcipher_setkey(tfm, deriving_key,
					FS_AES_128_ECB_KEY_SIZE);
	if (res < 0)
		goto out;

	sg_init_one(&src_sg, source_key->raw, source_key->size);
	sg_init_one(&dst_sg, derived_raw_key, source_key->size);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, source_key->size,
				   NULL);
	res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return res;
}

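/*
 * Find the master key in the keyring (a logon key named
 * "<prefix><key descriptor>"), validate its payload, and use it together
 * with the per-inode nonce to derive the raw encryption key via
 * derive_key_aes().
 */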
static int validate_user_key(struct fscrypt_info *crypt_info,
			     struct fscrypt_context *ctx, u8 *raw_key,
			     const char *prefix, int min_keysize)
{
	char *description;
	struct key *keyring_key;
	struct fscrypt_key *master_key;
	const struct user_key_payload *ukp;
	int res;

	description = kasprintf(GFP_NOFS, "%s%*phN", prefix,
				FS_KEY_DESCRIPTOR_SIZE,
				ctx->master_key_descriptor);
	if (!description)
		return -ENOMEM;

	keyring_key = request_key(&key_type_logon, description, NULL);
	kfree(description);
	if (IS_ERR(keyring_key))
		return PTR_ERR(keyring_key);
	down_read(&keyring_key->sem);

	if (keyring_key->type != &key_type_logon) {
		printk_once(KERN_WARNING
				"%s: key type must be logon\n", __func__);
		res = -ENOKEY;
		goto out;
	}
	ukp = user_key_payload_locked(keyring_key);
	if (!ukp) {
		/* key was revoked before we acquired its semaphore */
		res = -EKEYREVOKED;
		goto out;
	}
	if (ukp->datalen != sizeof(struct fscrypt_key)) {
		res = -EINVAL;
		goto out;
	}
	master_key = (struct fscrypt_key *)ukp->data;
	BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE);

	if (master_key->size < min_keysize || master_key->size > FS_MAX_KEY_SIZE
	    || master_key->size % AES_BLOCK_SIZE != 0) {
		printk_once(KERN_WARNING
				"%s: key size incorrect: %d\n",
				__func__, master_key->size);
		res = -ENOKEY;
		goto out;
	}
	res = derive_key_aes(ctx->nonce, master_key, raw_key);
out:
	up_read(&keyring_key->sem);
	key_put(keyring_key);
	return res;
}

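/*
 * Map each fscrypt encryption mode to its crypto API cipher name and
 * key size.
 */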
static const struct {
	const char *cipher_str;
	int keysize;
} available_modes[] = {
	[FS_ENCRYPTION_MODE_AES_256_XTS] = { "xts(aes)",
					     FS_AES_256_XTS_KEY_SIZE },
	[FS_ENCRYPTION_MODE_AES_256_CTS] = { "cts(cbc(aes))",
					     FS_AES_256_CTS_KEY_SIZE },
	[FS_ENCRYPTION_MODE_AES_128_CBC] = { "cbc(aes)",
					     FS_AES_128_CBC_KEY_SIZE },
	[FS_ENCRYPTION_MODE_AES_128_CTS] = { "cts(cbc(aes))",
					     FS_AES_128_CTS_KEY_SIZE },
};

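/*
 * Pick the crypto API cipher name and key size for this inode: regular
 * files use the contents mode, while directories and symlinks use the
 * filenames mode. Other file types cannot be encrypted.
 */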
static int determine_cipher_type(struct fscrypt_info *ci, struct inode *inode,
				 const char **cipher_str_ret, int *keysize_ret)
{
	u32 mode;

	if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
		pr_warn_ratelimited("fscrypt: inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)\n",
				    inode->i_ino,
				    ci->ci_data_mode, ci->ci_filename_mode);
		return -EINVAL;
	}

	if (S_ISREG(inode->i_mode)) {
		mode = ci->ci_data_mode;
	} else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) {
		mode = ci->ci_filename_mode;
	} else {
		WARN_ONCE(1, "fscrypt: filesystem tried to load encryption info for inode %lu, which is not encryptable (file type %d)\n",
			  inode->i_ino, (inode->i_mode & S_IFMT));
		return -EINVAL;
	}

	*cipher_str_ret = available_modes[mode].cipher_str;
	*keysize_ret = available_modes[mode].keysize;
	return 0;
}

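/* Free a fscrypt_info and the crypto transforms it holds. */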
static void put_crypt_info(struct fscrypt_info *ci)
{
	if (!ci)
		return;

	crypto_free_skcipher(ci->ci_ctfm);
	crypto_free_cipher(ci->ci_essiv_tfm);
	kmem_cache_free(fscrypt_info_cachep, ci);
}

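/*
 * Compute the ESSIV salt as the SHA-256 hash of the derived key. The
 * SHA-256 transform is allocated lazily on first use and then shared.
 */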
static int derive_essiv_salt(const u8 *key, int keysize, u8 *salt)
{
	struct crypto_shash *tfm = READ_ONCE(essiv_hash_tfm);

	/* init hash transform on demand */
	if (unlikely(!tfm)) {
		struct crypto_shash *prev_tfm;

		tfm = crypto_alloc_shash("sha256", 0, 0);
		if (IS_ERR(tfm)) {
			pr_warn_ratelimited("fscrypt: error allocating SHA-256 transform: %ld\n",
					    PTR_ERR(tfm));
			return PTR_ERR(tfm);
		}
		prev_tfm = cmpxchg(&essiv_hash_tfm, NULL, tfm);
		if (prev_tfm) {
			crypto_free_shash(tfm);
			tfm = prev_tfm;
		}
	}

	{
		SHASH_DESC_ON_STACK(desc, tfm);
		desc->tfm = tfm;
		desc->flags = 0;

		return crypto_shash_digest(desc, key, keysize, salt);
	}
}

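/*
 * Allocate the AES cipher used for ESSIV IV generation and key it with
 * the SHA-256 hash of the derived contents encryption key.
 */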
static int init_essiv_generator(struct fscrypt_info *ci, const u8 *raw_key,
				int keysize)
{
	int err;
	struct crypto_cipher *essiv_tfm;
	u8 salt[SHA256_DIGEST_SIZE];

	essiv_tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(essiv_tfm))
		return PTR_ERR(essiv_tfm);

	ci->ci_essiv_tfm = essiv_tfm;

	err = derive_essiv_salt(raw_key, keysize, salt);
	if (err)
		goto out;

	/*
	 * Using SHA256 to derive the salt/key will result in AES-256 being
	 * used for IV generation. File contents encryption will still use the
	 * configured keysize (AES-128) nevertheless.
	 */
	err = crypto_cipher_setkey(essiv_tfm, salt, sizeof(salt));
	if (err)
		goto out;

out:
	memzero_explicit(salt, sizeof(salt));
	return err;
}

void __exit fscrypt_essiv_cleanup(void)
{
	crypto_free_shash(essiv_hash_tfm);
}

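/*
 * Set up the inode's encryption key and transform(s) from its on-disk
 * encryption context. If no context is found and the filesystem has dummy
 * encryption enabled, a fake context is used instead. A missing master key
 * (-ENOKEY) is not reported as an error; the inode is simply left without
 * i_crypt_info.
 */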
int fscrypt_get_encryption_info(struct inode *inode)
{
	struct fscrypt_info *crypt_info;
	struct fscrypt_context ctx;
	struct crypto_skcipher *ctfm;
	const char *cipher_str;
	int keysize;
	u8 *raw_key = NULL;
	int res;

	if (inode->i_crypt_info)
		return 0;

	res = fscrypt_initialize(inode->i_sb->s_cop->flags);
	if (res)
		return res;

	res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx));
	if (res < 0) {
		if (!fscrypt_dummy_context_enabled(inode) ||
		    IS_ENCRYPTED(inode))
			return res;
		/* Fake up a context for an unencrypted directory */
		memset(&ctx, 0, sizeof(ctx));
		ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
		ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
		ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
		memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
	} else if (res != sizeof(ctx)) {
		return -EINVAL;
	}

	if (ctx.format != FS_ENCRYPTION_CONTEXT_FORMAT_V1)
		return -EINVAL;

	if (ctx.flags & ~FS_POLICY_FLAGS_VALID)
		return -EINVAL;

	crypt_info = kmem_cache_alloc(fscrypt_info_cachep, GFP_NOFS);
	if (!crypt_info)
		return -ENOMEM;

	crypt_info->ci_flags = ctx.flags;
	crypt_info->ci_data_mode = ctx.contents_encryption_mode;
	crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
	crypt_info->ci_ctfm = NULL;
	crypt_info->ci_essiv_tfm = NULL;
	memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
				sizeof(crypt_info->ci_master_key));

	res = determine_cipher_type(crypt_info, inode, &cipher_str, &keysize);
	if (res)
		goto out;

	/*
	 * This cannot be a stack buffer because it is passed to the scatterlist
	 * crypto API as part of key derivation.
	 */
	res = -ENOMEM;
	raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS);
	if (!raw_key)
		goto out;

	res = validate_user_key(crypt_info, &ctx, raw_key, FS_KEY_DESC_PREFIX,
				keysize);
	if (res && inode->i_sb->s_cop->key_prefix) {
		int res2 = validate_user_key(crypt_info, &ctx, raw_key,
					     inode->i_sb->s_cop->key_prefix,
					     keysize);
		if (res2) {
			if (res2 == -ENOKEY)
				res = -ENOKEY;
			goto out;
		}
	} else if (res) {
		goto out;
	}
	ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
	if (!ctfm || IS_ERR(ctfm)) {
		res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
		pr_debug("%s: error %d (inode %lu) allocating crypto tfm\n",
			 __func__, res, inode->i_ino);
		goto out;
	}
	crypt_info->ci_ctfm = ctfm;
	crypto_skcipher_clear_flags(ctfm, ~0);
	crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
	/*
	 * if the provided key is longer than keysize, we use the first
	 * keysize bytes of the derived key only
	 */
	res = crypto_skcipher_setkey(ctfm, raw_key, keysize);
	if (res)
		goto out;

	if (S_ISREG(inode->i_mode) &&
	    crypt_info->ci_data_mode == FS_ENCRYPTION_MODE_AES_128_CBC) {
		res = init_essiv_generator(crypt_info, raw_key, keysize);
		if (res) {
			pr_debug("%s: error %d (inode %lu) allocating essiv tfm\n",
				 __func__, res, inode->i_ino);
			goto out;
		}
	}
	if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
		crypt_info = NULL;
out:
	if (res == -ENOKEY)
		res = 0;
	put_crypt_info(crypt_info);
	kzfree(raw_key);
	return res;
}
EXPORT_SYMBOL(fscrypt_get_encryption_info);

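/*
 * Detach and free the inode's fscrypt_info, if any. The cmpxchg() ensures
 * that concurrent callers free the structure only once.
 */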
void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci)
{
	struct fscrypt_info *prev;

	if (ci == NULL)
		ci = READ_ONCE(inode->i_crypt_info);
	if (ci == NULL)
		return;

	prev = cmpxchg(&inode->i_crypt_info, ci, NULL);
	if (prev != ci)
		return;

	put_crypt_info(ci);
}
EXPORT_SYMBOL(fscrypt_put_encryption_info);