/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/mbedtls.h"
#ifdef GIT_MBEDTLS
#include <ctype.h>
#include "runtime.h"
#include "stream.h"
#include "streams/socket.h"
#include "netops.h"
#include "git2/transport.h"
#include "util.h"
#ifndef GIT_DEFAULT_CERT_LOCATION
#define GIT_DEFAULT_CERT_LOCATION NULL
#endif
/* Work around C90-conformance issues */
#if !defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L)
# if defined(_MSC_VER)
# define inline __inline
# elif defined(__GNUC__)
# define inline __inline__
# else
# define inline
# endif
#endif
#include <mbedtls/config.h>
#include <mbedtls/ssl.h>
#include <mbedtls/error.h>
#include <mbedtls/entropy.h>
#include <mbedtls/ctr_drbg.h>
#undef inline
#define GIT_SSL_DEFAULT_CIPHERS "TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256:TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256:TLS-ECDHE-ECDSA-WITH-AES-256-GCM-SHA384:TLS-ECDHE-RSA-WITH-AES-256-GCM-SHA384:TLS-DHE-RSA-WITH-AES-128-GCM-SHA256:TLS-DHE-DSS-WITH-AES-128-GCM-SHA256:TLS-DHE-RSA-WITH-AES-256-GCM-SHA384:TLS-DHE-DSS-WITH-AES-256-GCM-SHA384:TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA:TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA:TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA384:TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA384:TLS-ECDHE-ECDSA-WITH-AES-256-CBC-SHA:TLS-ECDHE-RSA-WITH-AES-256-CBC-SHA:TLS-DHE-RSA-WITH-AES-128-CBC-SHA256:TLS-DHE-RSA-WITH-AES-256-CBC-SHA256:TLS-DHE-RSA-WITH-AES-128-CBC-SHA:TLS-DHE-RSA-WITH-AES-256-CBC-SHA:TLS-DHE-DSS-WITH-AES-128-CBC-SHA256:TLS-DHE-DSS-WITH-AES-256-CBC-SHA256:TLS-DHE-DSS-WITH-AES-128-CBC-SHA:TLS-DHE-DSS-WITH-AES-256-CBC-SHA:TLS-RSA-WITH-AES-128-GCM-SHA256:TLS-RSA-WITH-AES-256-GCM-SHA384:TLS-RSA-WITH-AES-128-CBC-SHA256:TLS-RSA-WITH-AES-256-CBC-SHA256:TLS-RSA-WITH-AES-128-CBC-SHA:TLS-RSA-WITH-AES-256-CBC-SHA"
#define GIT_SSL_DEFAULT_CIPHERS_COUNT 30
static mbedtls_ssl_config *git__ssl_conf;
static int ciphers_list[GIT_SSL_DEFAULT_CIPHERS_COUNT];
static mbedtls_entropy_context *mbedtls_entropy;
/**
 * Clean up the SSL context which we allocated.
 */
static void shutdown_ssl(void)
{
if (git__ssl_conf) {
mbedtls_x509_crt_free(git__ssl_conf->ca_chain);
git__free(git__ssl_conf->ca_chain);
mbedtls_ctr_drbg_free(git__ssl_conf->p_rng);
git__free(git__ssl_conf->p_rng);
mbedtls_ssl_config_free(git__ssl_conf);
git__free(git__ssl_conf);
git__ssl_conf = NULL;
}
if (mbedtls_entropy) {
mbedtls_entropy_free(mbedtls_entropy);
git__free(mbedtls_entropy);
mbedtls_entropy = NULL;
}
}
int git_mbedtls_stream_global_init(void)
{
int loaded = 0;
char *crtpath = GIT_DEFAULT_CERT_LOCATION;
struct stat statbuf;
mbedtls_ctr_drbg_context *ctr_drbg = NULL;
size_t ciphers_known = 0;
char *cipher_name = NULL;
char *cipher_string = NULL;
char *cipher_string_tmp = NULL;
git__ssl_conf = git__malloc(sizeof(mbedtls_ssl_config));
GIT_ERROR_CHECK_ALLOC(git__ssl_conf);
mbedtls_ssl_config_init(git__ssl_conf);
if (mbedtls_ssl_config_defaults(git__ssl_conf,
MBEDTLS_SSL_IS_CLIENT,
MBEDTLS_SSL_TRANSPORT_STREAM,
MBEDTLS_SSL_PRESET_DEFAULT) != 0) {
git_error_set(GIT_ERROR_SSL, "failed to initialize mbedTLS");
goto cleanup;
}
/* configure TLSv1 */
mbedtls_ssl_conf_min_version(git__ssl_conf, MBEDTLS_SSL_MAJOR_VERSION_3, MBEDTLS_SSL_MINOR_VERSION_1);
/* verify_server_cert is responsible for making the check.
* OPTIONAL because REQUIRED drops the certificate as soon as the check
* is made, so we can never see the certificate and override it. */
mbedtls_ssl_conf_authmode(git__ssl_conf, MBEDTLS_SSL_VERIFY_OPTIONAL);
/* set the list of allowed ciphersuites */
ciphers_known = 0;
cipher_string = cipher_string_tmp = git__strdup(GIT_SSL_DEFAULT_CIPHERS);
GIT_ERROR_CHECK_ALLOC(cipher_string);
while ((cipher_name = git__strtok(&cipher_string_tmp, ":")) != NULL) {
int cipherid = mbedtls_ssl_get_ciphersuite_id(cipher_name);
if (cipherid == 0) continue;
if (ciphers_known >= ARRAY_SIZE(ciphers_list)) {
git_error_set(GIT_ERROR_SSL, "out of cipher list space");
goto cleanup;
}
ciphers_list[ciphers_known++] = cipherid;
}
git__free(cipher_string);
if (!ciphers_known) {
git_error_set(GIT_ERROR_SSL, "no cipher could be enabled");
goto cleanup;
}
mbedtls_ssl_conf_ciphersuites(git__ssl_conf, ciphers_list);
/* Seeding the random number generator */
mbedtls_entropy = git__malloc(sizeof(mbedtls_entropy_context));
GIT_ERROR_CHECK_ALLOC(mbedtls_entropy);
mbedtls_entropy_init(mbedtls_entropy);
ctr_drbg = git__malloc(sizeof(mbedtls_ctr_drbg_context));
GIT_ERROR_CHECK_ALLOC(ctr_drbg);
mbedtls_ctr_drbg_init(ctr_drbg);
if (mbedtls_ctr_drbg_seed(ctr_drbg,
mbedtls_entropy_func,
mbedtls_entropy, NULL, 0) != 0) {
git_error_set(GIT_ERROR_SSL, "failed to initialize mbedTLS entropy pool");
goto cleanup;
}
mbedtls_ssl_conf_rng(git__ssl_conf, mbedtls_ctr_drbg_random, ctr_drbg);
/* load default certificates */
if (crtpath != NULL && stat(crtpath, &statbuf) == 0 && S_ISREG(statbuf.st_mode))
loaded = (git_mbedtls__set_cert_location(crtpath, NULL) == 0);
if (!loaded && crtpath != NULL && stat(crtpath, &statbuf) == 0 && S_ISDIR(statbuf.st_mode))
loaded = (git_mbedtls__set_cert_location(NULL, crtpath) == 0);
return git_runtime_shutdown_register(shutdown_ssl);
cleanup:
mbedtls_ctr_drbg_free(ctr_drbg);
git__free(ctr_drbg);
mbedtls_ssl_config_free(git__ssl_conf);
git__free(git__ssl_conf);
git__ssl_conf = NULL;
return -1;
}
static int bio_read(void *b, unsigned char *buf, size_t len)
{
git_stream *io = (git_stream *) b;
return (int) git_stream_read(io, buf, min(len, INT_MAX));
}
static int bio_write(void *b, const unsigned char *buf, size_t len)
{
git_stream *io = (git_stream *) b;
return (int) git_stream_write(io, (const char *)buf, min(len, INT_MAX), 0);
}
static int ssl_set_error(mbedtls_ssl_context *ssl, int error)
{
char errbuf[512];
int ret = -1;
GIT_ASSERT(error != MBEDTLS_ERR_SSL_WANT_READ);
GIT_ASSERT(error != MBEDTLS_ERR_SSL_WANT_WRITE);
if (error != 0)
mbedtls_strerror( error, errbuf, 512 );
switch(error) {
case 0:
git_error_set(GIT_ERROR_SSL, "SSL error: unknown error");
break;
case MBEDTLS_ERR_X509_CERT_VERIFY_FAILED:
git_error_set(GIT_ERROR_SSL, "SSL error: %#04x [%x] - %s", error, ssl->session_negotiate->verify_result, errbuf);
ret = GIT_ECERTIFICATE;
break;
default:
git_error_set(GIT_ERROR_SSL, "SSL error: %#04x - %s", error, errbuf);
}
return ret;
}
static int ssl_teardown(mbedtls_ssl_context *ssl)
{
int ret = 0;
ret = mbedtls_ssl_close_notify(ssl);
if (ret < 0)
ret = ssl_set_error(ssl, ret);
mbedtls_ssl_free(ssl);
return ret;
}
static int verify_server_cert(mbedtls_ssl_context *ssl)
{
int ret = -1;
if ((ret = mbedtls_ssl_get_verify_result(ssl)) != 0) {
char vrfy_buf[512];
int len = mbedtls_x509_crt_verify_info(vrfy_buf, sizeof(vrfy_buf), "", ret);
if (len >= 1) vrfy_buf[len - 1] = '\0'; /* Remove trailing \n */
git_error_set(GIT_ERROR_SSL, "the SSL certificate is invalid: %#04x - %s", ret, vrfy_buf);
return GIT_ECERTIFICATE;
}
return 0;
}
typedef struct {
git_stream parent;
git_stream *io;
int owned;
bool connected;
char *host;
mbedtls_ssl_context *ssl;
git_cert_x509 cert_info;
} mbedtls_stream;
static int mbedtls_connect(git_stream *stream)
{
int ret;
mbedtls_stream *st = (mbedtls_stream *) stream;
if (st->owned && (ret = git_stream_connect(st->io)) < 0)
return ret;
st->connected = true;
mbedtls_ssl_set_hostname(st->ssl, st->host);
mbedtls_ssl_set_bio(st->ssl, st->io, bio_write, bio_read, NULL);
if ((ret = mbedtls_ssl_handshake(st->ssl)) != 0)
return ssl_set_error(st->ssl, ret);
return verify_server_cert(st->ssl);
}
static int mbedtls_certificate(git_cert **out, git_stream *stream)
{
unsigned char *encoded_cert;
mbedtls_stream *st = (mbedtls_stream *) stream;
const mbedtls_x509_crt *cert = mbedtls_ssl_get_peer_cert(st->ssl);
if (!cert) {
git_error_set(GIT_ERROR_SSL, "the server did not provide a certificate");
return -1;
}
/* Retrieve the length of the certificate first */
if (cert->raw.len == 0) {
git_error_set(GIT_ERROR_NET, "failed to retrieve certificate information");
return -1;
}
encoded_cert = git__malloc(cert->raw.len);
GIT_ERROR_CHECK_ALLOC(encoded_cert);
memcpy(encoded_cert, cert->raw.p, cert->raw.len);
st->cert_info.parent.cert_type = GIT_CERT_X509;
st->cert_info.data = encoded_cert;
st->cert_info.len = cert->raw.len;
*out = &st->cert_info.parent;
return 0;
}
static int mbedtls_set_proxy(git_stream *stream, const git_proxy_options *proxy_options)
{
mbedtls_stream *st = (mbedtls_stream *) stream;
return git_stream_set_proxy(st->io, proxy_options);
}
static ssize_t mbedtls_stream_write(git_stream *stream, const char *data, size_t len, int flags)
{
mbedtls_stream *st = (mbedtls_stream *) stream;
int written;
GIT_UNUSED(flags);
/*
* `mbedtls_ssl_write` can only represent INT_MAX bytes
* written via its return value. We thus need to clamp
* the maximum number of bytes written.
*/
len = min(len, INT_MAX);
if ((written = mbedtls_ssl_write(st->ssl, (const unsigned char *)data, len)) <= 0)
return ssl_set_error(st->ssl, written);
return written;
}
static ssize_t mbedtls_stream_read(git_stream *stream, void *data, size_t len)
{
mbedtls_stream *st = (mbedtls_stream *) stream;
int ret;
if ((ret = mbedtls_ssl_read(st->ssl, (unsigned char *)data, len)) <= 0)
ssl_set_error(st->ssl, ret);
return ret;
}
static int mbedtls_stream_close(git_stream *stream)
{
mbedtls_stream *st = (mbedtls_stream *) stream;
int ret = 0;
if (st->connected && (ret = ssl_teardown(st->ssl)) != 0)
return -1;
st->connected = false;
return st->owned ? git_stream_close(st->io) : 0;
}
static void mbedtls_stream_free(git_stream *stream)
{
mbedtls_stream *st = (mbedtls_stream *) stream;
if (st->owned)
git_stream_free(st->io);
git__free(st->host);
git__free(st->cert_info.data);
mbedtls_ssl_free(st->ssl);
git__free(st->ssl);
git__free(st);
}
static int mbedtls_stream_wrap(
git_stream **out,
git_stream *in,
const char *host,
int owned)
{
mbedtls_stream *st;
int error;
st = git__calloc(1, sizeof(mbedtls_stream));
GIT_ERROR_CHECK_ALLOC(st);
st->io = in;
st->owned = owned;
st->ssl = git__malloc(sizeof(mbedtls_ssl_context));
GIT_ERROR_CHECK_ALLOC(st->ssl);
mbedtls_ssl_init(st->ssl);
if (mbedtls_ssl_setup(st->ssl, git__ssl_conf)) {
git_error_set(GIT_ERROR_SSL, "failed to create ssl object");
error = -1;
goto out_err;
}
st->host = git__strdup(host);
GIT_ERROR_CHECK_ALLOC(st->host);
st->parent.version = GIT_STREAM_VERSION;
st->parent.encrypted = 1;
st->parent.proxy_support = git_stream_supports_proxy(st->io);
st->parent.connect = mbedtls_connect;
st->parent.certificate = mbedtls_certificate;
st->parent.set_proxy = mbedtls_set_proxy;
st->parent.read = mbedtls_stream_read;
st->parent.write = mbedtls_stream_write;
st->parent.close = mbedtls_stream_close;
st->parent.free = mbedtls_stream_free;
*out = (git_stream *) st;
return 0;
out_err:
mbedtls_ssl_free(st->ssl);
git_stream_close(st->io);
git_stream_free(st->io);
git__free(st);
return error;
}
int git_mbedtls_stream_wrap(
git_stream **out,
git_stream *in,
const char *host)
{
return mbedtls_stream_wrap(out, in, host, 0);
}
int git_mbedtls_stream_new(
git_stream **out,
const char *host,
const char *port)
{
git_stream *stream;
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
GIT_ASSERT_ARG(port);
if ((error = git_socket_stream_new(&stream, host, port)) < 0)
return error;
if ((error = mbedtls_stream_wrap(out, stream, host, 1)) < 0) {
git_stream_close(stream);
git_stream_free(stream);
}
return error;
}
int git_mbedtls__set_cert_location(const char *file, const char *path)
{
int ret = 0;
char errbuf[512];
mbedtls_x509_crt *cacert;
GIT_ASSERT_ARG(file || path);
cacert = git__malloc(sizeof(mbedtls_x509_crt));
GIT_ERROR_CHECK_ALLOC(cacert);
mbedtls_x509_crt_init(cacert);
if (file)
ret = mbedtls_x509_crt_parse_file(cacert, file);
if (ret >= 0 && path)
ret = mbedtls_x509_crt_parse_path(cacert, path);
/* mbedtls_x509_crt_parse_path returns the number of invalid certs on success */
if (ret < 0) {
mbedtls_x509_crt_free(cacert);
git__free(cacert);
mbedtls_strerror( ret, errbuf, 512 );
git_error_set(GIT_ERROR_SSL, "failed to load CA certificates: %#04x - %s", ret, errbuf);
return -1;
}
mbedtls_x509_crt_free(git__ssl_conf->ca_chain);
git__free(git__ssl_conf->ca_chain);
mbedtls_ssl_conf_ca_chain(git__ssl_conf, cacert, NULL);
return 0;
}
#else
#include "stream.h"
int git_mbedtls_stream_global_init(void)
{
return 0;
}
#endif
/* end of file: src/libgit2/streams/mbedtls.c (libgit2-main) */
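/*
 * The vtable wired up in mbedtls_stream_wrap above means callers never touch
 * mbedTLS directly; they drive the connection through the generic
 * git_stream_* entry points. A minimal sketch of that lifecycle against the
 * internal API (hypothetical caller, assumes a build with GIT_MBEDTLS;
 * error handling abbreviated):
 */
#include "streams/mbedtls.h"
#include "stream.h"

/* Hypothetical caller: open a TLS connection, write a request, read a reply. */
static int fetch_banner(const char *host, const char *port)
{
	git_stream *stream;
	char buf[1024];
	int error;

	/* Builds a socket stream and wraps it in a TLS stream (owned = 1). */
	if ((error = git_mbedtls_stream_new(&stream, host, port)) < 0)
		return error;

	/* Dispatches to mbedtls_connect: TCP connect, handshake, cert check. */
	if ((error = git_stream_connect(stream)) < 0)
		goto done;

	if (git_stream_write(stream, "HEAD / HTTP/1.0\r\n\r\n", 19, 0) < 0 ||
	    git_stream_read(stream, buf, sizeof(buf)) < 0)
		error = -1;

done:
	git_stream_close(stream); /* sends close_notify, then closes the socket */
	git_stream_free(stream);  /* also frees the owned socket stream */
	return error;
}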
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/openssl.h"
#include "streams/openssl_dynamic.h"
#if defined(GIT_OPENSSL) && defined(GIT_OPENSSL_DYNAMIC)
#include "runtime.h"
#include <dlfcn.h>
unsigned char *(*ASN1_STRING_data)(ASN1_STRING *x);
const unsigned char *(*ASN1_STRING_get0_data)(const ASN1_STRING *x);
int (*ASN1_STRING_length)(const ASN1_STRING *x);
int (*ASN1_STRING_to_UTF8)(unsigned char **out, const ASN1_STRING *in);
int (*ASN1_STRING_type)(const ASN1_STRING *x);
void *(*BIO_get_data)(BIO *a);
int (*BIO_get_new_index)(void);
int (*OPENSSL_init_ssl)(uint64_t opts, const void *settings);
void (*BIO_meth_free)(BIO_METHOD *biom);
int (*BIO_meth_set_create)(BIO_METHOD *biom, int (*create) (BIO *));
int (*BIO_meth_set_ctrl)(BIO_METHOD *biom, long (*ctrl) (BIO *, int, long, void *));
int (*BIO_meth_set_destroy)(BIO_METHOD *biom, int (*destroy) (BIO *));
int (*BIO_meth_set_gets)(BIO_METHOD *biom, int (*gets) (BIO *, char *, int));
int (*BIO_meth_set_puts)(BIO_METHOD *biom, int (*puts) (BIO *, const char *));
int (*BIO_meth_set_read)(BIO_METHOD *biom, int (*read) (BIO *, char *, int));
int (*BIO_meth_set_write)(BIO_METHOD *biom, int (*write) (BIO *, const char *, int));
BIO_METHOD *(*BIO_meth_new)(int type, const char *name);
BIO *(*BIO_new)(const BIO_METHOD *type);
void (*BIO_set_data)(BIO *a, void *ptr);
void (*BIO_set_init)(BIO *a, int init);
void (*CRYPTO_free)(void *ptr, const char *file, int line);
void *(*CRYPTO_malloc)(size_t num, const char *file, int line);
int (*CRYPTO_num_locks)(void);
void (*CRYPTO_set_locking_callback)(void (*func)(int mode, int type, const char *file, int line));
int (*CRYPTO_set_mem_functions)(void *(*m)(size_t bytes), void *(*r)(void *mem, size_t size), void (*f)(void *mem));
int (*CRYPTO_THREADID_set_callback)(void (*func)(CRYPTO_THREADID *id));
void (*CRYPTO_THREADID_set_numeric)(CRYPTO_THREADID *id, unsigned long val);
char *(*ERR_error_string)(unsigned long e, char *buf);
void (*ERR_error_string_n)(unsigned long e, char *buf, size_t len);
unsigned long (*ERR_get_error)(void);
int (*SSL_connect)(SSL *ssl);
long (*SSL_ctrl)(SSL *ssl, int cmd, long arg, void *parg);
void (*SSL_free)(SSL *ssl);
int (*SSL_get_error)(SSL *ssl, int ret);
X509 *(*SSL_get_peer_certificate)(const SSL *ssl);
long (*SSL_get_verify_result)(const SSL *ssl);
int (*SSL_library_init)(void);
void (*SSL_load_error_strings)(void);
SSL *(*SSL_new)(SSL_CTX *ctx);
int (*SSL_read)(SSL *ssl, const void *buf, int num);
void (*SSL_set_bio)(SSL *ssl, BIO *rbio, BIO *wbio);
int (*SSL_shutdown)(SSL *ssl);
int (*SSL_write)(SSL *ssl, const void *buf, int num);
long (*SSL_CTX_ctrl)(SSL_CTX *ctx, int cmd, long larg, void *parg);
void (*SSL_CTX_free)(SSL_CTX *ctx);
SSL_CTX *(*SSL_CTX_new)(const SSL_METHOD *method);
int (*SSL_CTX_set_cipher_list)(SSL_CTX *ctx, const char *str);
int (*SSL_CTX_set_default_verify_paths)(SSL_CTX *ctx);
long (*SSL_CTX_set_options)(SSL_CTX *ctx, long options);
void (*SSL_CTX_set_verify)(SSL_CTX *ctx, int mode, int (*verify_callback)(int, X509_STORE_CTX *));
int (*SSL_CTX_load_verify_locations)(SSL_CTX *ctx, const char *CAfile, const char *CApath);
const SSL_METHOD *(*SSLv23_method)(void);
const SSL_METHOD *(*TLS_method)(void);
ASN1_STRING *(*X509_NAME_ENTRY_get_data)(const X509_NAME_ENTRY *ne);
X509_NAME_ENTRY *(*X509_NAME_get_entry)(X509_NAME *name, int loc);
int (*X509_NAME_get_index_by_NID)(X509_NAME *name, int nid, int lastpos);
void (*X509_free)(X509 *a);
void *(*X509_get_ext_d2i)(const X509 *x, int nid, int *crit, int *idx);
X509_NAME *(*X509_get_subject_name)(const X509 *x);
int (*i2d_X509)(X509 *a, unsigned char **ppout);
int (*OPENSSL_sk_num)(const void *sk);
void *(*OPENSSL_sk_value)(const void *sk, int i);
void (*OPENSSL_sk_free)(void *sk);
int (*sk_num)(const void *sk);
void *(*sk_value)(const void *sk, int i);
void (*sk_free)(void *sk);
void *openssl_handle;
GIT_INLINE(void *) openssl_sym(int *err, const char *name, bool required)
{
void *symbol;
/* if we've seen an err, noop to retain it */
if (*err)
return NULL;
if ((symbol = dlsym(openssl_handle, name)) == NULL && required) {
const char *msg = dlerror();
git_error_set(GIT_ERROR_SSL, "could not load ssl function '%s': %s", name, msg ? msg : "unknown error");
*err = -1;
}
return symbol;
}
static void dynamic_shutdown(void)
{
dlclose(openssl_handle);
openssl_handle = NULL;
}
int git_openssl_stream_dynamic_init(void)
{
int err = 0;
if ((openssl_handle = dlopen("libssl.so.1.1", RTLD_NOW)) == NULL &&
(openssl_handle = dlopen("libssl.1.1.dylib", RTLD_NOW)) == NULL &&
(openssl_handle = dlopen("libssl.so.1.0.0", RTLD_NOW)) == NULL &&
(openssl_handle = dlopen("libssl.1.0.0.dylib", RTLD_NOW)) == NULL &&
(openssl_handle = dlopen("libssl.so.10", RTLD_NOW)) == NULL) {
git_error_set(GIT_ERROR_SSL, "could not load ssl libraries");
return -1;
}
ASN1_STRING_data = (unsigned char *(*)(ASN1_STRING *x))openssl_sym(&err, "ASN1_STRING_data", false);
ASN1_STRING_get0_data = (const unsigned char *(*)(const ASN1_STRING *x))openssl_sym(&err, "ASN1_STRING_get0_data", false);
ASN1_STRING_length = (int (*)(const ASN1_STRING *))openssl_sym(&err, "ASN1_STRING_length", true);
ASN1_STRING_to_UTF8 = (int (*)(unsigned char **, const ASN1_STRING *))openssl_sym(&err, "ASN1_STRING_to_UTF8", true);
ASN1_STRING_type = (int (*)(const ASN1_STRING *))openssl_sym(&err, "ASN1_STRING_type", true);
BIO_get_data = (void *(*)(BIO *))openssl_sym(&err, "BIO_get_data", false);
BIO_get_new_index = (int (*)(void))openssl_sym(&err, "BIO_get_new_index", false);
BIO_meth_free = (void (*)(BIO_METHOD *))openssl_sym(&err, "BIO_meth_free", false);
BIO_meth_new = (BIO_METHOD *(*)(int, const char *))openssl_sym(&err, "BIO_meth_new", false);
BIO_meth_set_create = (int (*)(BIO_METHOD *, int (*)(BIO *)))openssl_sym(&err, "BIO_meth_set_create", false);
BIO_meth_set_ctrl = (int (*)(BIO_METHOD *, long (*)(BIO *, int, long, void *)))openssl_sym(&err, "BIO_meth_set_ctrl", false);
BIO_meth_set_destroy = (int (*)(BIO_METHOD *, int (*)(BIO *)))openssl_sym(&err, "BIO_meth_set_destroy", false);
BIO_meth_set_gets = (int (*)(BIO_METHOD *, int (*)(BIO *, char *, int)))openssl_sym(&err, "BIO_meth_set_gets", false);
BIO_meth_set_puts = (int (*)(BIO_METHOD *, int (*)(BIO *, const char *)))openssl_sym(&err, "BIO_meth_set_puts", false);
BIO_meth_set_read = (int (*)(BIO_METHOD *, int (*)(BIO *, char *, int)))openssl_sym(&err, "BIO_meth_set_read", false);
BIO_meth_set_write = (int (*)(BIO_METHOD *, int (*)(BIO *, const char *, int)))openssl_sym(&err, "BIO_meth_set_write", false);
BIO_new = (BIO *(*)(const BIO_METHOD *))openssl_sym(&err, "BIO_new", true);
BIO_set_data = (void (*)(BIO *a, void *))openssl_sym(&err, "BIO_set_data", false);
BIO_set_init = (void (*)(BIO *a, int))openssl_sym(&err, "BIO_set_init", false);
CRYPTO_free = (void (*)(void *, const char *, int))openssl_sym(&err, "CRYPTO_free", true);
CRYPTO_malloc = (void *(*)(size_t, const char *, int))openssl_sym(&err, "CRYPTO_malloc", true);
CRYPTO_num_locks = (int (*)(void))openssl_sym(&err, "CRYPTO_num_locks", false);
CRYPTO_set_locking_callback = (void (*)(void (*)(int, int, const char *, int)))openssl_sym(&err, "CRYPTO_set_locking_callback", false);
CRYPTO_set_mem_functions = (int (*)(void *(*)(size_t), void *(*)(void *, size_t), void (*f)(void *)))openssl_sym(&err, "CRYPTO_set_mem_functions", true);
CRYPTO_THREADID_set_callback = (int (*)(void (*)(CRYPTO_THREADID *)))openssl_sym(&err, "CRYPTO_THREADID_set_callback", false);
CRYPTO_THREADID_set_numeric = (void (*)(CRYPTO_THREADID *, unsigned long))openssl_sym(&err, "CRYPTO_THREADID_set_numeric", false);
ERR_error_string = (char *(*)(unsigned long, char *))openssl_sym(&err, "ERR_error_string", true);
ERR_error_string_n = (void (*)(unsigned long, char *, size_t))openssl_sym(&err, "ERR_error_string_n", true);
ERR_get_error = (unsigned long (*)(void))openssl_sym(&err, "ERR_get_error", true);
OPENSSL_init_ssl = (int (*)(uint64_t opts, const void *settings))openssl_sym(&err, "OPENSSL_init_ssl", false);
OPENSSL_sk_num = (int (*)(const void *))openssl_sym(&err, "OPENSSL_sk_num", false);
OPENSSL_sk_value = (void *(*)(const void *sk, int i))openssl_sym(&err, "OPENSSL_sk_value", false);
OPENSSL_sk_free = (void (*)(void *))openssl_sym(&err, "OPENSSL_sk_free", false);
sk_num = (int (*)(const void *))openssl_sym(&err, "sk_num", false);
sk_value = (void *(*)(const void *sk, int i))openssl_sym(&err, "sk_value", false);
sk_free = (void (*)(void *))openssl_sym(&err, "sk_free", false);
SSL_connect = (int (*)(SSL *))openssl_sym(&err, "SSL_connect", true);
SSL_ctrl = (long (*)(SSL *, int, long, void *))openssl_sym(&err, "SSL_ctrl", true);
SSL_get_peer_certificate = (X509 *(*)(const SSL *))openssl_sym(&err, "SSL_get_peer_certificate", true);
SSL_library_init = (int (*)(void))openssl_sym(&err, "SSL_library_init", false);
SSL_free = (void (*)(SSL *))openssl_sym(&err, "SSL_free", true);
SSL_get_error = (int (*)(SSL *, int))openssl_sym(&err, "SSL_get_error", true);
SSL_get_verify_result = (long (*)(const SSL *ssl))openssl_sym(&err, "SSL_get_verify_result", true);
SSL_load_error_strings = (void (*)(void))openssl_sym(&err, "SSL_load_error_strings", false);
SSL_new = (SSL *(*)(SSL_CTX *))openssl_sym(&err, "SSL_new", true);
SSL_read = (int (*)(SSL *, const void *, int))openssl_sym(&err, "SSL_read", true);
SSL_set_bio = (void (*)(SSL *, BIO *, BIO *))openssl_sym(&err, "SSL_set_bio", true);
SSL_shutdown = (int (*)(SSL *ssl))openssl_sym(&err, "SSL_shutdown", true);
SSL_write = (int (*)(SSL *, const void *, int))openssl_sym(&err, "SSL_write", true);
SSL_CTX_ctrl = (long (*)(SSL_CTX *, int, long, void *))openssl_sym(&err, "SSL_CTX_ctrl", true);
SSL_CTX_free = (void (*)(SSL_CTX *))openssl_sym(&err, "SSL_CTX_free", true);
SSL_CTX_new = (SSL_CTX *(*)(const SSL_METHOD *))openssl_sym(&err, "SSL_CTX_new", true);
SSL_CTX_set_cipher_list = (int (*)(SSL_CTX *, const char *))openssl_sym(&err, "SSL_CTX_set_cipher_list", true);
SSL_CTX_set_default_verify_paths = (int (*)(SSL_CTX *ctx))openssl_sym(&err, "SSL_CTX_set_default_verify_paths", true);
SSL_CTX_set_options = (long (*)(SSL_CTX *, long))openssl_sym(&err, "SSL_CTX_set_options", false);
SSL_CTX_set_verify = (void (*)(SSL_CTX *, int, int (*)(int, X509_STORE_CTX *)))openssl_sym(&err, "SSL_CTX_set_verify", true);
SSL_CTX_load_verify_locations = (int (*)(SSL_CTX *, const char *, const char *))openssl_sym(&err, "SSL_CTX_load_verify_locations", true);
SSLv23_method = (const SSL_METHOD *(*)(void))openssl_sym(&err, "SSLv23_method", false);
TLS_method = (const SSL_METHOD *(*)(void))openssl_sym(&err, "TLS_method", false);
X509_NAME_ENTRY_get_data = (ASN1_STRING *(*)(const X509_NAME_ENTRY *))openssl_sym(&err, "X509_NAME_ENTRY_get_data", true);
X509_NAME_get_entry = (X509_NAME_ENTRY *(*)(X509_NAME *, int))openssl_sym(&err, "X509_NAME_get_entry", true);
X509_NAME_get_index_by_NID = (int (*)(X509_NAME *, int, int))openssl_sym(&err, "X509_NAME_get_index_by_NID", true);
X509_free = (void (*)(X509 *))openssl_sym(&err, "X509_free", true);
X509_get_ext_d2i = (void *(*)(const X509 *x, int nid, int *crit, int *idx))openssl_sym(&err, "X509_get_ext_d2i", true);
X509_get_subject_name = (X509_NAME *(*)(const X509 *))openssl_sym(&err, "X509_get_subject_name", true);
i2d_X509 = (int (*)(X509 *a, unsigned char **ppout))openssl_sym(&err, "i2d_X509", true);
if (err)
goto on_error;
/* Add legacy functionality */
if (!OPENSSL_init_ssl) {
OPENSSL_init_ssl = OPENSSL_init_ssl__legacy;
if (!SSL_library_init ||
!SSL_load_error_strings ||
!CRYPTO_num_locks ||
!CRYPTO_set_locking_callback ||
!CRYPTO_THREADID_set_callback ||
!CRYPTO_THREADID_set_numeric) {
git_error_set(GIT_ERROR_SSL, "could not load legacy openssl initialization functions");
goto on_error;
}
}
if (!SSL_CTX_set_options)
SSL_CTX_set_options = SSL_CTX_set_options__legacy;
if (TLS_method)
SSLv23_method = TLS_method;
if (!BIO_meth_new) {
BIO_meth_new = BIO_meth_new__legacy;
BIO_meth_free = BIO_meth_free__legacy;
BIO_meth_set_write = BIO_meth_set_write__legacy;
BIO_meth_set_read = BIO_meth_set_read__legacy;
BIO_meth_set_puts = BIO_meth_set_puts__legacy;
BIO_meth_set_gets = BIO_meth_set_gets__legacy;
BIO_meth_set_ctrl = BIO_meth_set_ctrl__legacy;
BIO_meth_set_create = BIO_meth_set_create__legacy;
BIO_meth_set_destroy = BIO_meth_set_destroy__legacy;
BIO_get_new_index = BIO_get_new_index__legacy;
BIO_set_data = BIO_set_data__legacy;
BIO_set_init = BIO_set_init__legacy;
BIO_get_data = BIO_get_data__legacy;
}
if (!ASN1_STRING_get0_data) {
if (!ASN1_STRING_data) {
git_error_set(GIT_ERROR_SSL, "could not load legacy openssl string function");
goto on_error;
}
ASN1_STRING_get0_data = ASN1_STRING_get0_data__legacy;
}
if ((!OPENSSL_sk_num && !sk_num) ||
(!OPENSSL_sk_value && !sk_value) ||
(!OPENSSL_sk_free && !sk_free)) {
git_error_set(GIT_ERROR_SSL, "could not load legacy openssl stack functions");
goto on_error;
}
if (git_runtime_shutdown_register(dynamic_shutdown) != 0)
goto on_error;
return 0;
on_error:
dlclose(openssl_handle);
return -1;
}
int sk_GENERAL_NAME_num(const GENERAL_NAME *sk)
{
if (OPENSSL_sk_num)
return OPENSSL_sk_num(sk);
else if (sk_num)
return sk_num(sk);
GIT_ASSERT_WITH_RETVAL(false, 0);
return 0;
}
GENERAL_NAME *sk_GENERAL_NAME_value(const GENERAL_NAME *sk, int i)
{
if (OPENSSL_sk_value)
return OPENSSL_sk_value(sk, i);
else if (sk_value)
return sk_value(sk, i);
GIT_ASSERT_WITH_RETVAL(false, NULL);
return NULL;
}
void GENERAL_NAMES_free(GENERAL_NAME *sk)
{
if (OPENSSL_sk_free)
OPENSSL_sk_free(sk);
else if (sk_free)
sk_free(sk);
}
#endif /* GIT_OPENSSL && GIT_OPENSSL_DYNAMIC */
/* end of file: src/libgit2/streams/openssl_dynamic.c (libgit2-main) */
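/*
 * openssl_sym latches the first failure in *err and turns every later call
 * into a no-op, so the long run of assignments above can be written
 * straight-line and checked once at the end. A standalone sketch of the same
 * pattern; the library path and symbol names here are illustrative
 * (glibc-specific "libm.so.6"; link with -ldl on older systems):
 */
#include <dlfcn.h>
#include <stdio.h>

static void *sym(void *handle, int *err, const char *name, int required)
{
	void *s;

	if (*err) /* an earlier lookup failed; keep that error */
		return NULL;

	if ((s = dlsym(handle, name)) == NULL && required) {
		fprintf(stderr, "missing symbol '%s'\n", name);
		*err = -1;
	}
	return s;
}

int main(void)
{
	int err = 0;
	double (*cos_fn)(double);
	void *handle = dlopen("libm.so.6", RTLD_NOW);

	if (!handle)
		return 1;

	/* Straight-line loading; a single error check afterwards. */
	cos_fn = (double (*)(double))sym(handle, &err, "cos", 1);
	(void)sym(handle, &err, "does_not_exist", 0); /* optional: no error */

	if (!err)
		printf("cos(0) = %f\n", cos_fn(0.0));

	dlclose(handle);
	return err ? 1 : 0;
}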
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/socket.h"
#include "posix.h"
#include "netops.h"
#include "registry.h"
#include "stream.h"
#ifndef _WIN32
# include <sys/types.h>
# include <sys/socket.h>
# include <sys/select.h>
# include <sys/time.h>
# include <netdb.h>
# include <netinet/in.h>
# include <arpa/inet.h>
#else
# include <winsock2.h>
# include <ws2tcpip.h>
# ifdef _MSC_VER
# pragma comment(lib, "ws2_32")
# endif
#endif
#ifdef GIT_WIN32
static void net_set_error(const char *str)
{
int error = WSAGetLastError();
char * win32_error = git_win32_get_error_message(error);
if (win32_error) {
git_error_set(GIT_ERROR_NET, "%s: %s", str, win32_error);
git__free(win32_error);
} else {
git_error_set(GIT_ERROR_NET, "%s", str);
}
}
#else
static void net_set_error(const char *str)
{
git_error_set(GIT_ERROR_NET, "%s: %s", str, strerror(errno));
}
#endif
static int close_socket(GIT_SOCKET s)
{
if (s == INVALID_SOCKET)
return 0;
#ifdef GIT_WIN32
if (SOCKET_ERROR == closesocket(s))
return -1;
if (0 != WSACleanup()) {
git_error_set(GIT_ERROR_OS, "winsock cleanup failed");
return -1;
}
return 0;
#else
return close(s);
#endif
}
static int socket_connect(git_stream *stream)
{
struct addrinfo *info = NULL, *p;
struct addrinfo hints;
git_socket_stream *st = (git_socket_stream *) stream;
GIT_SOCKET s = INVALID_SOCKET;
int ret;
#ifdef GIT_WIN32
/* on win32, the WSA context needs to be initialized
* before any socket calls can be performed */
WSADATA wsd;
if (WSAStartup(MAKEWORD(2,2), &wsd) != 0) {
git_error_set(GIT_ERROR_OS, "winsock init failed");
return -1;
}
if (LOBYTE(wsd.wVersion) != 2 || HIBYTE(wsd.wVersion) != 2) {
WSACleanup();
git_error_set(GIT_ERROR_OS, "winsock init failed");
return -1;
}
#endif
memset(&hints, 0x0, sizeof(struct addrinfo));
hints.ai_socktype = SOCK_STREAM;
hints.ai_family = AF_UNSPEC;
if ((ret = p_getaddrinfo(st->host, st->port, &hints, &info)) != 0) {
git_error_set(GIT_ERROR_NET,
"failed to resolve address for %s: %s", st->host, p_gai_strerror(ret));
return -1;
}
for (p = info; p != NULL; p = p->ai_next) {
s = socket(p->ai_family, p->ai_socktype | SOCK_CLOEXEC, p->ai_protocol);
if (s == INVALID_SOCKET)
continue;
if (connect(s, p->ai_addr, (socklen_t)p->ai_addrlen) == 0)
break;
/* If we can't connect, try the next one */
close_socket(s);
s = INVALID_SOCKET;
}
/* Oops, we couldn't connect to any address */
if (s == INVALID_SOCKET && p == NULL) {
git_error_set(GIT_ERROR_OS, "failed to connect to %s", st->host);
p_freeaddrinfo(info);
return -1;
}
st->s = s;
p_freeaddrinfo(info);
return 0;
}
static ssize_t socket_write(git_stream *stream, const char *data, size_t len, int flags)
{
git_socket_stream *st = (git_socket_stream *) stream;
ssize_t written;
errno = 0;
if ((written = p_send(st->s, data, len, flags)) < 0) {
net_set_error("error sending data");
return -1;
}
return written;
}
static ssize_t socket_read(git_stream *stream, void *data, size_t len)
{
ssize_t ret;
git_socket_stream *st = (git_socket_stream *) stream;
if ((ret = p_recv(st->s, data, len, 0)) < 0)
net_set_error("error receiving socket data");
return ret;
}
static int socket_close(git_stream *stream)
{
git_socket_stream *st = (git_socket_stream *) stream;
int error;
error = close_socket(st->s);
st->s = INVALID_SOCKET;
return error;
}
static void socket_free(git_stream *stream)
{
git_socket_stream *st = (git_socket_stream *) stream;
git__free(st->host);
git__free(st->port);
git__free(st);
}
static int default_socket_stream_new(
git_stream **out,
const char *host,
const char *port)
{
git_socket_stream *st;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
GIT_ASSERT_ARG(port);
st = git__calloc(1, sizeof(git_socket_stream));
GIT_ERROR_CHECK_ALLOC(st);
st->host = git__strdup(host);
GIT_ERROR_CHECK_ALLOC(st->host);
if (port) {
st->port = git__strdup(port);
GIT_ERROR_CHECK_ALLOC(st->port);
}
st->parent.version = GIT_STREAM_VERSION;
st->parent.connect = socket_connect;
st->parent.write = socket_write;
st->parent.read = socket_read;
st->parent.close = socket_close;
st->parent.free = socket_free;
st->s = INVALID_SOCKET;
*out = (git_stream *) st;
return 0;
}
int git_socket_stream_new(
git_stream **out,
const char *host,
const char *port)
{
int (*init)(git_stream **, const char *, const char *) = NULL;
git_stream_registration custom = {0};
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
GIT_ASSERT_ARG(port);
if ((error = git_stream_registry_lookup(&custom, GIT_STREAM_STANDARD)) == 0)
init = custom.init;
else if (error == GIT_ENOTFOUND)
init = default_socket_stream_new;
else
return error;
if (!init) {
git_error_set(GIT_ERROR_NET, "there is no socket stream available");
return -1;
}
return init(out, host, port);
}
/* end of file: src/libgit2/streams/socket.c (libgit2-main) */
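/*
 * socket_connect above walks the whole getaddrinfo() result list and keeps
 * the first address that accepts a connection, which is the standard
 * dual-stack pattern. The same loop as a self-contained POSIX sketch, with
 * no libgit2 types and condensed error handling:
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <string.h>
#include <unistd.h>

/* Returns a connected fd, or -1 after trying every resolved address. */
static int tcp_connect(const char *host, const char *port)
{
	struct addrinfo hints, *info, *p;
	int fd = -1;

	memset(&hints, 0, sizeof(hints));
	hints.ai_socktype = SOCK_STREAM; /* TCP */
	hints.ai_family = AF_UNSPEC;     /* IPv4 or IPv6 */

	if (getaddrinfo(host, port, &hints, &info) != 0)
		return -1;

	for (p = info; p != NULL; p = p->ai_next) {
		if ((fd = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) < 0)
			continue;
		if (connect(fd, p->ai_addr, p->ai_addrlen) == 0)
			break; /* connected; stop trying */
		close(fd);
		fd = -1;       /* this address failed; try the next */
	}

	freeaddrinfo(info);
	return fd;
}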
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/openssl.h"
#include "streams/openssl_legacy.h"
#include "runtime.h"
#include "git2/sys/openssl.h"
#if defined(GIT_OPENSSL) && !defined(GIT_OPENSSL_DYNAMIC)
# include <openssl/ssl.h>
# include <openssl/err.h>
# include <openssl/x509v3.h>
# include <openssl/bio.h>
#endif
#if defined(GIT_OPENSSL_LEGACY) || defined(GIT_OPENSSL_DYNAMIC)
/*
* OpenSSL 1.1 made BIO opaque so we have to use functions to interact with it
* which do not exist in previous versions. We define these inline functions so
* we can program against the interface instead of littering the implementation
* with ifdefs. We do the same for OPENSSL_init_ssl.
*/
int OPENSSL_init_ssl__legacy(uint64_t opts, const void *settings)
{
GIT_UNUSED(opts);
GIT_UNUSED(settings);
SSL_load_error_strings();
SSL_library_init();
return 0;
}
BIO_METHOD *BIO_meth_new__legacy(int type, const char *name)
{
BIO_METHOD *meth = git__calloc(1, sizeof(BIO_METHOD));
if (!meth) {
return NULL;
}
meth->type = type;
meth->name = name;
return meth;
}
void BIO_meth_free__legacy(BIO_METHOD *biom)
{
git__free(biom);
}
int BIO_meth_set_write__legacy(BIO_METHOD *biom, int (*write) (BIO *, const char *, int))
{
biom->bwrite = write;
return 1;
}
int BIO_meth_set_read__legacy(BIO_METHOD *biom, int (*read) (BIO *, char *, int))
{
biom->bread = read;
return 1;
}
int BIO_meth_set_puts__legacy(BIO_METHOD *biom, int (*puts) (BIO *, const char *))
{
biom->bputs = puts;
return 1;
}
int BIO_meth_set_gets__legacy(BIO_METHOD *biom, int (*gets) (BIO *, char *, int))
{
biom->bgets = gets;
return 1;
}
int BIO_meth_set_ctrl__legacy(BIO_METHOD *biom, long (*ctrl) (BIO *, int, long, void *))
{
biom->ctrl = ctrl;
return 1;
}
int BIO_meth_set_create__legacy(BIO_METHOD *biom, int (*create) (BIO *))
{
biom->create = create;
return 1;
}
int BIO_meth_set_destroy__legacy(BIO_METHOD *biom, int (*destroy) (BIO *))
{
biom->destroy = destroy;
return 1;
}
int BIO_get_new_index__legacy(void)
{
/* This exists as of 1.1 so before we'd just have 0 */
return 0;
}
void BIO_set_init__legacy(BIO *b, int init)
{
b->init = init;
}
void BIO_set_data__legacy(BIO *a, void *ptr)
{
a->ptr = ptr;
}
void *BIO_get_data__legacy(BIO *a)
{
return a->ptr;
}
const unsigned char *ASN1_STRING_get0_data__legacy(const ASN1_STRING *x)
{
return ASN1_STRING_data((ASN1_STRING *)x);
}
long SSL_CTX_set_options__legacy(SSL_CTX *ctx, long op)
{
return SSL_CTX_ctrl(ctx, SSL_CTRL_OPTIONS, op, NULL);
}
# if defined(GIT_THREADS)
static git_mutex *openssl_locks;
static void openssl_locking_function(int mode, int n, const char *file, int line)
{
int lock;
GIT_UNUSED(file);
GIT_UNUSED(line);
lock = mode & CRYPTO_LOCK;
if (lock)
(void)git_mutex_lock(&openssl_locks[n]);
else
git_mutex_unlock(&openssl_locks[n]);
}
static void shutdown_ssl_locking(void)
{
int num_locks, i;
num_locks = CRYPTO_num_locks();
CRYPTO_set_locking_callback(NULL);
for (i = 0; i < num_locks; ++i)
git_mutex_free(&openssl_locks[i]);
git__free(openssl_locks);
}
static void threadid_cb(CRYPTO_THREADID *threadid)
{
GIT_UNUSED(threadid);
CRYPTO_THREADID_set_numeric(threadid, git_thread_currentid());
}
int git_openssl_set_locking(void)
{
int num_locks, i;
#ifndef GIT_THREADS
git_error_set(GIT_ERROR_THREAD, "libgit2 was not built with threads");
return -1;
#endif
#ifdef GIT_OPENSSL_DYNAMIC
/*
* This function is required on legacy versions of OpenSSL; when building
* with dynamically-loaded OpenSSL, we detect whether we loaded it or not.
*/
if (!CRYPTO_set_locking_callback)
return 0;
#endif
CRYPTO_THREADID_set_callback(threadid_cb);
num_locks = CRYPTO_num_locks();
openssl_locks = git__calloc(num_locks, sizeof(git_mutex));
GIT_ERROR_CHECK_ALLOC(openssl_locks);
for (i = 0; i < num_locks; i++) {
if (git_mutex_init(&openssl_locks[i]) != 0) {
git_error_set(GIT_ERROR_SSL, "failed to initialize openssl locks");
return -1;
}
}
CRYPTO_set_locking_callback(openssl_locking_function);
return git_runtime_shutdown_register(shutdown_ssl_locking);
}
#endif /* GIT_THREADS */
#endif /* GIT_OPENSSL_LEGACY || GIT_OPENSSL_DYNAMIC */
/* end of file: src/libgit2/streams/openssl_legacy.c (libgit2-main) */
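/*
 * git_openssl_set_locking is public API (declared in git2/sys/openssl.h,
 * included above): applications linking a pre-1.1 OpenSSL must call it before
 * spawning threads, because those versions require process-wide locking
 * callbacks. A hedged usage sketch, assuming a libgit2 built with OpenSSL
 * support:
 */
#include <git2.h>
#include <git2/sys/openssl.h>

static int init_for_threads(void)
{
	if (git_libgit2_init() < 0)
		return -1;

	/* Effectively a no-op with OpenSSL >= 1.1; required with legacy
	 * OpenSSL. Call once, from a single thread, before any other thread
	 * uses libgit2. */
	return git_openssl_set_locking();
}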
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "git2/errors.h"
#include "common.h"
#include "streams/registry.h"
#include "streams/tls.h"
#include "streams/mbedtls.h"
#include "streams/openssl.h"
#include "streams/stransport.h"
int git_tls_stream_new(git_stream **out, const char *host, const char *port)
{
int (*init)(git_stream **, const char *, const char *) = NULL;
git_stream_registration custom = {0};
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
GIT_ASSERT_ARG(port);
if ((error = git_stream_registry_lookup(&custom, GIT_STREAM_TLS)) == 0) {
init = custom.init;
} else if (error == GIT_ENOTFOUND) {
#ifdef GIT_SECURE_TRANSPORT
init = git_stransport_stream_new;
#elif defined(GIT_OPENSSL)
init = git_openssl_stream_new;
#elif defined(GIT_MBEDTLS)
init = git_mbedtls_stream_new;
#endif
} else {
return error;
}
if (!init) {
git_error_set(GIT_ERROR_SSL, "there is no TLS stream available");
return -1;
}
return init(out, host, port);
}
int git_tls_stream_wrap(git_stream **out, git_stream *in, const char *host)
{
int (*wrap)(git_stream **, git_stream *, const char *) = NULL;
git_stream_registration custom = {0};
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(in);
if (git_stream_registry_lookup(&custom, GIT_STREAM_TLS) == 0) {
wrap = custom.wrap;
} else {
#ifdef GIT_SECURE_TRANSPORT
wrap = git_stransport_stream_wrap;
#elif defined(GIT_OPENSSL)
wrap = git_openssl_stream_wrap;
#elif defined(GIT_MBEDTLS)
wrap = git_mbedtls_stream_wrap;
#endif
}
if (!wrap) {
git_error_set(GIT_ERROR_SSL, "there is no TLS stream available");
return -1;
}
return wrap(out, in, host);
}
/* end of file: src/libgit2/streams/tls.c (libgit2-main) */
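/*
 * The wrap path in tls.c exists so an already-connected stream (for example,
 * one tunneled through an HTTP CONNECT proxy) can be upgraded to TLS in
 * place. A sketch against the internal API; note that a wrapped stream is
 * not owned by the TLS stream (owned = 0 in the wrappers above), so the
 * caller still frees the plain stream after freeing the TLS one:
 */
#include "streams/tls.h"
#include "stream.h"

static int upgrade_to_tls(git_stream **out, git_stream *plain, const char *host)
{
	int error;

	if ((error = git_tls_stream_wrap(out, plain, host)) < 0)
		return error;

	/* With owned = 0 the underlying connect is skipped; this call only
	 * performs the TLS handshake and certificate verification. */
	return git_stream_connect(*out);
}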
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/openssl.h"
#include "streams/openssl_legacy.h"
#include "streams/openssl_dynamic.h"
#ifdef GIT_OPENSSL
#include <ctype.h>
#include "common.h"
#include "runtime.h"
#include "settings.h"
#include "posix.h"
#include "stream.h"
#include "streams/socket.h"
#include "netops.h"
#include "git2/transport.h"
#include "git2/sys/openssl.h"
#ifndef GIT_WIN32
# include <sys/types.h>
# include <sys/socket.h>
# include <netinet/in.h>
#endif
#ifndef GIT_OPENSSL_DYNAMIC
# include <openssl/ssl.h>
# include <openssl/err.h>
# include <openssl/x509v3.h>
# include <openssl/bio.h>
#endif
SSL_CTX *git__ssl_ctx;
#define GIT_SSL_DEFAULT_CIPHERS "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA:DHE-DSS-AES128-SHA256:DHE-DSS-AES256-SHA256:DHE-DSS-AES128-SHA:DHE-DSS-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA"
static BIO_METHOD *git_stream_bio_method;
static int init_bio_method(void);
/**
 * Clean up the SSL context which we allocated.
 */
static void shutdown_ssl(void)
{
if (git_stream_bio_method) {
BIO_meth_free(git_stream_bio_method);
git_stream_bio_method = NULL;
}
if (git__ssl_ctx) {
SSL_CTX_free(git__ssl_ctx);
git__ssl_ctx = NULL;
}
}
#ifdef VALGRIND
# if !defined(GIT_OPENSSL_LEGACY) && !defined(GIT_OPENSSL_DYNAMIC)
static void *git_openssl_malloc(size_t bytes, const char *file, int line)
{
GIT_UNUSED(file);
GIT_UNUSED(line);
return git__calloc(1, bytes);
}
static void *git_openssl_realloc(void *mem, size_t size, const char *file, int line)
{
GIT_UNUSED(file);
GIT_UNUSED(line);
return git__realloc(mem, size);
}
static void git_openssl_free(void *mem, const char *file, int line)
{
GIT_UNUSED(file);
GIT_UNUSED(line);
git__free(mem);
}
# else /* !GIT_OPENSSL_LEGACY && !GIT_OPENSSL_DYNAMIC */
static void *git_openssl_malloc(size_t bytes)
{
return git__calloc(1, bytes);
}
static void *git_openssl_realloc(void *mem, size_t size)
{
return git__realloc(mem, size);
}
static void git_openssl_free(void *mem)
{
git__free(mem);
}
# endif /* !GIT_OPENSSL_LEGACY && !GIT_OPENSSL_DYNAMIC */
#endif /* VALGRIND */
static int openssl_init(void)
{
long ssl_opts = SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3;
const char *ciphers = git_libgit2__ssl_ciphers();
#ifdef VALGRIND
static bool allocators_initialized = false;
#endif
/* Older OpenSSL and MacOS OpenSSL doesn't have this */
#ifdef SSL_OP_NO_COMPRESSION
ssl_opts |= SSL_OP_NO_COMPRESSION;
#endif
#ifdef VALGRIND
/*
* Swap in our own allocator functions that initialize
* allocated memory to avoid spurious valgrind warnings.
* Don't error on failure; many builds of OpenSSL do not
* allow you to set these functions.
*/
if (!allocators_initialized) {
CRYPTO_set_mem_functions(git_openssl_malloc,
git_openssl_realloc,
git_openssl_free);
allocators_initialized = true;
}
#endif
OPENSSL_init_ssl(0, NULL);
/*
* Load SSLv{2,3} and TLSv1 so that we can talk with servers
* which use the SSL hellos, which are often used for
* compatibility. We then disable SSL so we only allow OpenSSL
* to speak TLSv1 to perform the encryption itself.
*/
if (!(git__ssl_ctx = SSL_CTX_new(SSLv23_method())))
goto error;
SSL_CTX_set_options(git__ssl_ctx, ssl_opts);
SSL_CTX_set_mode(git__ssl_ctx, SSL_MODE_AUTO_RETRY);
SSL_CTX_set_verify(git__ssl_ctx, SSL_VERIFY_NONE, NULL);
if (!SSL_CTX_set_default_verify_paths(git__ssl_ctx))
goto error;
if (!ciphers)
ciphers = GIT_SSL_DEFAULT_CIPHERS;
if(!SSL_CTX_set_cipher_list(git__ssl_ctx, ciphers))
goto error;
if (init_bio_method() < 0)
goto error;
return git_runtime_shutdown_register(shutdown_ssl);
error:
git_error_set(GIT_ERROR_NET, "could not initialize openssl: %s",
ERR_error_string(ERR_get_error(), NULL));
SSL_CTX_free(git__ssl_ctx);
git__ssl_ctx = NULL;
return -1;
}
/*
* When we use dynamic loading, we defer OpenSSL initialization until
* it's first used. `openssl_ensure_initialized` will do the work
* under a mutex.
*/
git_mutex openssl_mutex;
bool openssl_initialized;
int git_openssl_stream_global_init(void)
{
#ifndef GIT_OPENSSL_DYNAMIC
return openssl_init();
#else
if (git_mutex_init(&openssl_mutex) != 0)
return -1;
return 0;
#endif
}
static int openssl_ensure_initialized(void)
{
#ifdef GIT_OPENSSL_DYNAMIC
int error = 0;
if (git_mutex_lock(&openssl_mutex) != 0)
return -1;
if (!openssl_initialized) {
if ((error = git_openssl_stream_dynamic_init()) == 0)
error = openssl_init();
openssl_initialized = true;
}
error |= git_mutex_unlock(&openssl_mutex);
return error;
#else
return 0;
#endif
}
#if !defined(GIT_OPENSSL_LEGACY) && !defined(GIT_OPENSSL_DYNAMIC)
int git_openssl_set_locking(void)
{
# ifdef GIT_THREADS
return 0;
# else
git_error_set(GIT_ERROR_THREAD, "libgit2 was not built with threads");
return -1;
# endif
}
#endif
static int bio_create(BIO *b)
{
BIO_set_init(b, 1);
BIO_set_data(b, NULL);
return 1;
}
static int bio_destroy(BIO *b)
{
if (!b)
return 0;
BIO_set_data(b, NULL);
return 1;
}
static int bio_read(BIO *b, char *buf, int len)
{
git_stream *io = (git_stream *) BIO_get_data(b);
return (int) git_stream_read(io, buf, len);
}
static int bio_write(BIO *b, const char *buf, int len)
{
git_stream *io = (git_stream *) BIO_get_data(b);
return (int) git_stream_write(io, buf, len, 0);
}
static long bio_ctrl(BIO *b, int cmd, long num, void *ptr)
{
GIT_UNUSED(b);
GIT_UNUSED(num);
GIT_UNUSED(ptr);
if (cmd == BIO_CTRL_FLUSH)
return 1;
return 0;
}
static int bio_gets(BIO *b, char *buf, int len)
{
GIT_UNUSED(b);
GIT_UNUSED(buf);
GIT_UNUSED(len);
return -1;
}
static int bio_puts(BIO *b, const char *str)
{
return bio_write(b, str, strlen(str));
}
static int init_bio_method(void)
{
/* Set up the BIO_METHOD we use for wrapping our own stream implementations */
git_stream_bio_method = BIO_meth_new(BIO_TYPE_SOURCE_SINK | BIO_get_new_index(), "git_stream");
GIT_ERROR_CHECK_ALLOC(git_stream_bio_method);
BIO_meth_set_write(git_stream_bio_method, bio_write);
BIO_meth_set_read(git_stream_bio_method, bio_read);
BIO_meth_set_puts(git_stream_bio_method, bio_puts);
BIO_meth_set_gets(git_stream_bio_method, bio_gets);
BIO_meth_set_ctrl(git_stream_bio_method, bio_ctrl);
BIO_meth_set_create(git_stream_bio_method, bio_create);
BIO_meth_set_destroy(git_stream_bio_method, bio_destroy);
return 0;
}
static int ssl_set_error(SSL *ssl, int error)
{
int err;
unsigned long e;
err = SSL_get_error(ssl, error);
GIT_ASSERT(err != SSL_ERROR_WANT_READ);
GIT_ASSERT(err != SSL_ERROR_WANT_WRITE);
switch (err) {
case SSL_ERROR_WANT_CONNECT:
case SSL_ERROR_WANT_ACCEPT:
git_error_set(GIT_ERROR_SSL, "SSL error: connection failure");
break;
case SSL_ERROR_WANT_X509_LOOKUP:
git_error_set(GIT_ERROR_SSL, "SSL error: x509 error");
break;
case SSL_ERROR_SYSCALL:
e = ERR_get_error();
if (e > 0) {
char errmsg[256];
ERR_error_string_n(e, errmsg, sizeof(errmsg));
git_error_set(GIT_ERROR_NET, "SSL error: %s", errmsg);
break;
} else if (error < 0) {
git_error_set(GIT_ERROR_OS, "SSL error: syscall failure");
break;
}
git_error_set(GIT_ERROR_SSL, "SSL error: received early EOF");
return GIT_EEOF;
break;
case SSL_ERROR_SSL:
{
char errmsg[256];
e = ERR_get_error();
ERR_error_string_n(e, errmsg, sizeof(errmsg));
git_error_set(GIT_ERROR_SSL, "SSL error: %s", errmsg);
break;
}
case SSL_ERROR_NONE:
case SSL_ERROR_ZERO_RETURN:
default:
git_error_set(GIT_ERROR_SSL, "SSL error: unknown error");
break;
}
return -1;
}
static int ssl_teardown(SSL *ssl)
{
int ret;
ret = SSL_shutdown(ssl);
if (ret < 0)
ret = ssl_set_error(ssl, ret);
else
ret = 0;
return ret;
}
static int check_host_name(const char *name, const char *host)
{
if (!strcasecmp(name, host))
return 0;
if (gitno__match_host(name, host) < 0)
return -1;
return 0;
}
static int verify_server_cert(SSL *ssl, const char *host)
{
X509 *cert = NULL;
X509_NAME *peer_name;
ASN1_STRING *str;
unsigned char *peer_cn = NULL;
int matched = -1, type = GEN_DNS;
GENERAL_NAMES *alts;
struct in6_addr addr6;
struct in_addr addr4;
void *addr = NULL;
int i = -1, j, error = 0;
if (SSL_get_verify_result(ssl) != X509_V_OK) {
git_error_set(GIT_ERROR_SSL, "the SSL certificate is invalid");
return GIT_ECERTIFICATE;
}
/* Try to parse the host as an IP address to see if it is */
if (p_inet_pton(AF_INET, host, &addr4)) {
type = GEN_IPADD;
addr = &addr4;
} else {
if (p_inet_pton(AF_INET6, host, &addr6)) {
type = GEN_IPADD;
addr = &addr6;
}
}
cert = SSL_get_peer_certificate(ssl);
if (!cert) {
error = -1;
git_error_set(GIT_ERROR_SSL, "the server did not provide a certificate");
goto cleanup;
}
/* Check the alternative names */
alts = X509_get_ext_d2i(cert, NID_subject_alt_name, NULL, NULL);
if (alts) {
int num;
num = sk_GENERAL_NAME_num(alts);
for (i = 0; i < num && matched != 1; i++) {
const GENERAL_NAME *gn = sk_GENERAL_NAME_value(alts, i);
const char *name = (char *) ASN1_STRING_get0_data(gn->d.ia5);
size_t namelen = (size_t) ASN1_STRING_length(gn->d.ia5);
/* Skip any names of a type we're not looking for */
if (gn->type != type)
continue;
if (type == GEN_DNS) {
/* If it contains embedded NULs, don't even try */
if (memchr(name, '\0', namelen))
continue;
if (check_host_name(name, host) < 0)
matched = 0;
else
matched = 1;
} else if (type == GEN_IPADD) {
/* Here name isn't so much a name but a binary representation of the IP */
/* memcmp() == 0 means the binary addresses are equal */
matched = addr && memcmp(name, addr, namelen) == 0;
}
}
}
GENERAL_NAMES_free(alts);
if (matched == 0)
goto cert_fail_name;
if (matched == 1) {
goto cleanup;
}
/* If no alternative names are available, check the common name */
peer_name = X509_get_subject_name(cert);
if (peer_name == NULL)
goto on_error;
if (peer_name) {
/* Get the index of the last CN entry */
while ((j = X509_NAME_get_index_by_NID(peer_name, NID_commonName, i)) >= 0)
i = j;
}
if (i < 0)
goto on_error;
str = X509_NAME_ENTRY_get_data(X509_NAME_get_entry(peer_name, i));
if (str == NULL)
goto on_error;
/* Work around a bug in OpenSSL whereby ASN1_STRING_to_UTF8 fails if it's already in utf-8 */
if (ASN1_STRING_type(str) == V_ASN1_UTF8STRING) {
int size = ASN1_STRING_length(str);
if (size > 0) {
peer_cn = OPENSSL_malloc(size + 1);
GIT_ERROR_CHECK_ALLOC(peer_cn);
memcpy(peer_cn, ASN1_STRING_get0_data(str), size);
peer_cn[size] = '\0';
} else {
goto cert_fail_name;
}
} else {
int size = ASN1_STRING_to_UTF8(&peer_cn, str);
GIT_ERROR_CHECK_ALLOC(peer_cn);
if (memchr(peer_cn, '\0', size))
goto cert_fail_name;
}
if (check_host_name((char *)peer_cn, host) < 0)
goto cert_fail_name;
goto cleanup;
cert_fail_name:
error = GIT_ECERTIFICATE;
git_error_set(GIT_ERROR_SSL, "hostname does not match certificate");
goto cleanup;
on_error:
error = ssl_set_error(ssl, 0);
goto cleanup;
cleanup:
X509_free(cert);
OPENSSL_free(peer_cn);
return error;
}
typedef struct {
git_stream parent;
git_stream *io;
int owned;
bool connected;
char *host;
SSL *ssl;
git_cert_x509 cert_info;
} openssl_stream;
static int openssl_connect(git_stream *stream)
{
int ret;
BIO *bio;
openssl_stream *st = (openssl_stream *) stream;
if (st->owned && (ret = git_stream_connect(st->io)) < 0)
return ret;
bio = BIO_new(git_stream_bio_method);
GIT_ERROR_CHECK_ALLOC(bio);
BIO_set_data(bio, st->io);
SSL_set_bio(st->ssl, bio, bio);
/* specify the host in case SNI is needed */
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
SSL_set_tlsext_host_name(st->ssl, st->host);
#endif
if ((ret = SSL_connect(st->ssl)) <= 0)
return ssl_set_error(st->ssl, ret);
st->connected = true;
return verify_server_cert(st->ssl, st->host);
}
static int openssl_certificate(git_cert **out, git_stream *stream)
{
openssl_stream *st = (openssl_stream *) stream;
X509 *cert = SSL_get_peer_certificate(st->ssl);
unsigned char *guard, *encoded_cert = NULL;
int error, len;
/* Retrieve the length of the certificate first */
len = i2d_X509(cert, NULL);
if (len < 0) {
git_error_set(GIT_ERROR_NET, "failed to retrieve certificate information");
error = -1;
goto out;
}
encoded_cert = git__malloc(len);
GIT_ERROR_CHECK_ALLOC(encoded_cert);
/* i2d_X509 makes 'guard' point to just after the data */
guard = encoded_cert;
len = i2d_X509(cert, &guard);
if (len < 0) {
git_error_set(GIT_ERROR_NET, "failed to retrieve certificate information");
error = -1;
goto out;
}
st->cert_info.parent.cert_type = GIT_CERT_X509;
st->cert_info.data = encoded_cert;
st->cert_info.len = len;
encoded_cert = NULL;
*out = &st->cert_info.parent;
error = 0;
out:
git__free(encoded_cert);
X509_free(cert);
return error;
}
static int openssl_set_proxy(git_stream *stream, const git_proxy_options *proxy_opts)
{
openssl_stream *st = (openssl_stream *) stream;
return git_stream_set_proxy(st->io, proxy_opts);
}
static ssize_t openssl_write(git_stream *stream, const char *data, size_t data_len, int flags)
{
openssl_stream *st = (openssl_stream *) stream;
int ret, len = min(data_len, INT_MAX);
GIT_UNUSED(flags);
if ((ret = SSL_write(st->ssl, data, len)) <= 0)
return ssl_set_error(st->ssl, ret);
return ret;
}
static ssize_t openssl_read(git_stream *stream, void *data, size_t len)
{
openssl_stream *st = (openssl_stream *) stream;
int ret;
if ((ret = SSL_read(st->ssl, data, len)) <= 0)
return ssl_set_error(st->ssl, ret);
return ret;
}
static int openssl_close(git_stream *stream)
{
openssl_stream *st = (openssl_stream *) stream;
int ret;
if (st->connected && (ret = ssl_teardown(st->ssl)) < 0)
return -1;
st->connected = false;
return st->owned ? git_stream_close(st->io) : 0;
}
static void openssl_free(git_stream *stream)
{
openssl_stream *st = (openssl_stream *) stream;
if (st->owned)
git_stream_free(st->io);
SSL_free(st->ssl);
git__free(st->host);
git__free(st->cert_info.data);
git__free(st);
}
static int openssl_stream_wrap(
git_stream **out,
git_stream *in,
const char *host,
int owned)
{
openssl_stream *st;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(in);
GIT_ASSERT_ARG(host);
st = git__calloc(1, sizeof(openssl_stream));
GIT_ERROR_CHECK_ALLOC(st);
st->io = in;
st->owned = owned;
st->ssl = SSL_new(git__ssl_ctx);
if (st->ssl == NULL) {
git_error_set(GIT_ERROR_SSL, "failed to create ssl object");
git__free(st);
return -1;
}
st->host = git__strdup(host);
GIT_ERROR_CHECK_ALLOC(st->host);
st->parent.version = GIT_STREAM_VERSION;
st->parent.encrypted = 1;
st->parent.proxy_support = git_stream_supports_proxy(st->io);
st->parent.connect = openssl_connect;
st->parent.certificate = openssl_certificate;
st->parent.set_proxy = openssl_set_proxy;
st->parent.read = openssl_read;
st->parent.write = openssl_write;
st->parent.close = openssl_close;
st->parent.free = openssl_free;
*out = (git_stream *) st;
return 0;
}
int git_openssl_stream_wrap(git_stream **out, git_stream *in, const char *host)
{
if (openssl_ensure_initialized() < 0)
return -1;
return openssl_stream_wrap(out, in, host, 0);
}
int git_openssl_stream_new(git_stream **out, const char *host, const char *port)
{
git_stream *stream = NULL;
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
GIT_ASSERT_ARG(port);
if (openssl_ensure_initialized() < 0)
return -1;
if ((error = git_socket_stream_new(&stream, host, port)) < 0)
return error;
if ((error = openssl_stream_wrap(out, stream, host, 1)) < 0) {
git_stream_close(stream);
git_stream_free(stream);
}
return error;
}
int git_openssl__set_cert_location(const char *file, const char *path)
{
if (openssl_ensure_initialized() < 0)
return -1;
if (SSL_CTX_load_verify_locations(git__ssl_ctx, file, path) == 0) {
char errmsg[256];
ERR_error_string_n(ERR_get_error(), errmsg, sizeof(errmsg));
git_error_set(GIT_ERROR_SSL, "OpenSSL error: failed to load certificates: %s",
errmsg);
return -1;
}
return 0;
}
#else
#include "stream.h"
#include "git2/sys/openssl.h"
int git_openssl_stream_global_init(void)
{
return 0;
}
int git_openssl_set_locking(void)
{
git_error_set(GIT_ERROR_SSL, "libgit2 was not built with OpenSSL support");
return -1;
}
#endif
/* end of file: src/libgit2/streams/openssl.c (libgit2-main) */
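/*
 * openssl_certificate above relies on the standard OpenSSL two-call i2d_*
 * idiom: pass NULL to learn the encoded length, allocate, then pass a
 * pointer that i2d_X509 advances past the written data. The idiom in
 * isolation (plain OpenSSL API; malloc instead of git__malloc):
 */
#include <openssl/x509.h>
#include <stdlib.h>

/* DER-encode a certificate; caller frees *out. Returns length or -1. */
static int encode_cert_der(X509 *cert, unsigned char **out)
{
	unsigned char *buf, *p;
	int len;

	if ((len = i2d_X509(cert, NULL)) < 0) /* first call: length only */
		return -1;

	if ((buf = malloc(len)) == NULL)
		return -1;

	p = buf; /* i2d_X509 advances 'p'; keep 'buf' for the caller */
	if ((len = i2d_X509(cert, &p)) < 0) {
		free(buf);
		return -1;
	}

	*out = buf;
	return len;
}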
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "streams/registry.h"
#include "runtime.h"
#include "streams/tls.h"
#include "streams/mbedtls.h"
#include "streams/openssl.h"
#include "streams/stransport.h"
struct stream_registry {
git_rwlock lock;
git_stream_registration callbacks;
git_stream_registration tls_callbacks;
};
static struct stream_registry stream_registry;
static void shutdown_stream_registry(void)
{
git_rwlock_free(&stream_registry.lock);
}
int git_stream_registry_global_init(void)
{
if (git_rwlock_init(&stream_registry.lock) < 0)
return -1;
return git_runtime_shutdown_register(shutdown_stream_registry);
}
GIT_INLINE(void) stream_registration_cpy(
git_stream_registration *target,
git_stream_registration *src)
{
if (src)
memcpy(target, src, sizeof(git_stream_registration));
else
memset(target, 0, sizeof(git_stream_registration));
}
int git_stream_registry_lookup(git_stream_registration *out, git_stream_t type)
{
git_stream_registration *target;
int error = GIT_ENOTFOUND;
GIT_ASSERT_ARG(out);
switch(type) {
case GIT_STREAM_STANDARD:
target = &stream_registry.callbacks;
break;
case GIT_STREAM_TLS:
target = &stream_registry.tls_callbacks;
break;
default:
git_error_set(GIT_ERROR_INVALID, "invalid stream type");
return -1;
}
if (git_rwlock_rdlock(&stream_registry.lock) < 0) {
git_error_set(GIT_ERROR_OS, "failed to lock stream registry");
return -1;
}
if (target->init) {
stream_registration_cpy(out, target);
error = 0;
}
git_rwlock_rdunlock(&stream_registry.lock);
return error;
}
int git_stream_register(git_stream_t type, git_stream_registration *registration)
{
GIT_ASSERT(!registration || registration->init);
GIT_ERROR_CHECK_VERSION(registration, GIT_STREAM_VERSION, "stream_registration");
if (git_rwlock_wrlock(&stream_registry.lock) < 0) {
git_error_set(GIT_ERROR_OS, "failed to lock stream registry");
return -1;
}
if ((type & GIT_STREAM_STANDARD) == GIT_STREAM_STANDARD)
stream_registration_cpy(&stream_registry.callbacks, registration);
if ((type & GIT_STREAM_TLS) == GIT_STREAM_TLS)
stream_registration_cpy(&stream_registry.tls_callbacks, registration);
git_rwlock_wrunlock(&stream_registry.lock);
return 0;
}
#ifndef GIT_DEPRECATE_HARD
int git_stream_register_tls(
int GIT_CALLBACK(ctor)(git_stream **out, const char *host, const char *port))
{
git_stream_registration registration = {0};
if (ctor) {
registration.version = GIT_STREAM_VERSION;
registration.init = ctor;
registration.wrap = NULL;
return git_stream_register(GIT_STREAM_TLS, &registration);
} else {
return git_stream_register(GIT_STREAM_TLS, NULL);
}
}
#endif
/* end of file: src/libgit2/streams/registry.c (libgit2-main) */
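/*
 * git_stream_register is how an application swaps in its own transport: fill
 * a git_stream_registration with an init (and optionally wrap) constructor
 * and register it for the stream types it should handle. my_tls_init and
 * my_tls_wrap below are hypothetical application-provided constructors:
 */
#include <git2.h>
#include <git2/sys/stream.h>

extern int my_tls_init(git_stream **out, const char *host, const char *port);
extern int my_tls_wrap(git_stream **out, git_stream *in, const char *host);

static int use_custom_tls(void)
{
	git_stream_registration reg = {0};

	reg.version = GIT_STREAM_VERSION;
	reg.init = my_tls_init;
	reg.wrap = my_tls_wrap;

	/* All TLS streams now come from the application's implementation;
	 * registering NULL later restores the built-in behavior. */
	return git_stream_register(GIT_STREAM_TLS, &reg);
}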
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "streams/stransport.h"
#ifdef GIT_SECURE_TRANSPORT
#include <CoreFoundation/CoreFoundation.h>
#include <Security/SecureTransport.h>
#include <Security/SecCertificate.h>
#include "git2/transport.h"
#include "streams/socket.h"
static int stransport_error(OSStatus ret)
{
CFStringRef message;
if (ret == noErr || ret == errSSLClosedGraceful) {
git_error_clear();
return 0;
}
#if !TARGET_OS_IPHONE
message = SecCopyErrorMessageString(ret, NULL);
GIT_ERROR_CHECK_ALLOC(message);
git_error_set(GIT_ERROR_NET, "SecureTransport error: %s", CFStringGetCStringPtr(message, kCFStringEncodingUTF8));
CFRelease(message);
#else
git_error_set(GIT_ERROR_NET, "SecureTransport error: OSStatus %d", (unsigned int)ret);
GIT_UNUSED(message);
#endif
return -1;
}
typedef struct {
git_stream parent;
git_stream *io;
int owned;
SSLContextRef ctx;
CFDataRef der_data;
git_cert_x509 cert_info;
} stransport_stream;
static int stransport_connect(git_stream *stream)
{
stransport_stream *st = (stransport_stream *) stream;
int error;
SecTrustRef trust = NULL;
SecTrustResultType sec_res;
OSStatus ret;
if (st->owned && (error = git_stream_connect(st->io)) < 0)
return error;
ret = SSLHandshake(st->ctx);
if (ret != errSSLServerAuthCompleted) {
git_error_set(GIT_ERROR_SSL, "unexpected return value from ssl handshake %d", (int)ret);
return -1;
}
if ((ret = SSLCopyPeerTrust(st->ctx, &trust)) != noErr)
goto on_error;
if (!trust)
return GIT_ECERTIFICATE;
if ((ret = SecTrustEvaluate(trust, &sec_res)) != noErr)
goto on_error;
CFRelease(trust);
if (sec_res == kSecTrustResultInvalid || sec_res == kSecTrustResultOtherError) {
git_error_set(GIT_ERROR_SSL, "internal security trust error");
return -1;
}
if (sec_res == kSecTrustResultDeny || sec_res == kSecTrustResultRecoverableTrustFailure ||
sec_res == kSecTrustResultFatalTrustFailure) {
git_error_set(GIT_ERROR_SSL, "untrusted connection error");
return GIT_ECERTIFICATE;
}
return 0;
on_error:
if (trust)
CFRelease(trust);
return stransport_error(ret);
}
static int stransport_certificate(git_cert **out, git_stream *stream)
{
stransport_stream *st = (stransport_stream *) stream;
SecTrustRef trust = NULL;
SecCertificateRef sec_cert;
OSStatus ret;
if ((ret = SSLCopyPeerTrust(st->ctx, &trust)) != noErr)
return stransport_error(ret);
sec_cert = SecTrustGetCertificateAtIndex(trust, 0);
st->der_data = SecCertificateCopyData(sec_cert);
CFRelease(trust);
if (st->der_data == NULL) {
git_error_set(GIT_ERROR_SSL, "retrieved invalid certificate data");
return -1;
}
st->cert_info.parent.cert_type = GIT_CERT_X509;
st->cert_info.data = (void *) CFDataGetBytePtr(st->der_data);
st->cert_info.len = CFDataGetLength(st->der_data);
*out = (git_cert *)&st->cert_info;
return 0;
}
static int stransport_set_proxy(
git_stream *stream,
const git_proxy_options *proxy_opts)
{
stransport_stream *st = (stransport_stream *) stream;
return git_stream_set_proxy(st->io, proxy_opts);
}
/*
* Contrary to typical network IO callbacks, the Secure Transport write callback is
* expected to write *all* passed data, not just as much as it can, and any
* other case would be considered a failure.
*
* This behavior is actually not specified in the Apple documentation, but is
* required for things to work correctly (and incidentally, that's also how
* Apple implements it in its projects at opensource.apple.com).
*
* Libgit2 streams already have exactly this behavior, so this is a
* simple passthrough.
*/
static OSStatus write_cb(SSLConnectionRef conn, const void *data, size_t *len)
{
git_stream *io = (git_stream *) conn;
if (git_stream__write_full(io, data, *len, 0) < 0)
return -36; /* "ioErr" from MacErrors.h which is not available on iOS */
return noErr;
}
static ssize_t stransport_write(git_stream *stream, const char *data, size_t len, int flags)
{
stransport_stream *st = (stransport_stream *) stream;
size_t data_len, processed;
OSStatus ret;
GIT_UNUSED(flags);
data_len = min(len, SSIZE_MAX);
if ((ret = SSLWrite(st->ctx, data, data_len, &processed)) != noErr)
return stransport_error(ret);
GIT_ASSERT(processed < SSIZE_MAX);
return (ssize_t)processed;
}
/*
* Contrary to typical network IO callbacks, the Secure Transport read callback is
* expected to read *exactly* the requested number of bytes, not just as much
* as it can, and any other case would be considered a failure.
*
* This behavior is actually not specified in the Apple documentation, but is
* required for things to work correctly (and incidentally, that's also how
* Apple implements it in its projects at opensource.apple.com).
*/
static OSStatus read_cb(SSLConnectionRef conn, void *data, size_t *len)
{
git_stream *io = (git_stream *) conn;
OSStatus error = noErr;
size_t off = 0;
ssize_t ret;
do {
ret = git_stream_read(io, data + off, *len - off);
if (ret < 0) {
error = -36; /* "ioErr" from MacErrors.h which is not available on iOS */
break;
}
if (ret == 0) {
error = errSSLClosedGraceful;
break;
}
off += ret;
} while (off < *len);
*len = off;
return error;
}
static ssize_t stransport_read(git_stream *stream, void *data, size_t len)
{
stransport_stream *st = (stransport_stream *) stream;
size_t processed;
OSStatus ret;
if ((ret = SSLRead(st->ctx, data, len, &processed)) != noErr)
return stransport_error(ret);
return processed;
}
static int stransport_close(git_stream *stream)
{
stransport_stream *st = (stransport_stream *) stream;
OSStatus ret;
ret = SSLClose(st->ctx);
if (ret != noErr && ret != errSSLClosedGraceful)
return stransport_error(ret);
return st->owned ? git_stream_close(st->io) : 0;
}
static void stransport_free(git_stream *stream)
{
stransport_stream *st = (stransport_stream *) stream;
if (st->owned)
git_stream_free(st->io);
CFRelease(st->ctx);
if (st->der_data)
CFRelease(st->der_data);
git__free(st);
}
static int stransport_wrap(
git_stream **out,
git_stream *in,
const char *host,
int owned)
{
stransport_stream *st;
OSStatus ret;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(in);
GIT_ASSERT_ARG(host);
st = git__calloc(1, sizeof(stransport_stream));
GIT_ERROR_CHECK_ALLOC(st);
st->io = in;
st->owned = owned;
st->ctx = SSLCreateContext(NULL, kSSLClientSide, kSSLStreamType);
if (!st->ctx) {
git_error_set(GIT_ERROR_NET, "failed to create SSL context");
git__free(st);
return -1;
}
if ((ret = SSLSetIOFuncs(st->ctx, read_cb, write_cb)) != noErr ||
(ret = SSLSetConnection(st->ctx, st->io)) != noErr ||
(ret = SSLSetSessionOption(st->ctx, kSSLSessionOptionBreakOnServerAuth, true)) != noErr ||
(ret = SSLSetProtocolVersionMin(st->ctx, kTLSProtocol1)) != noErr ||
(ret = SSLSetProtocolVersionMax(st->ctx, kTLSProtocol12)) != noErr ||
(ret = SSLSetPeerDomainName(st->ctx, host, strlen(host))) != noErr) {
CFRelease(st->ctx);
git__free(st);
return stransport_error(ret);
}
st->parent.version = GIT_STREAM_VERSION;
st->parent.encrypted = 1;
st->parent.proxy_support = git_stream_supports_proxy(st->io);
st->parent.connect = stransport_connect;
st->parent.certificate = stransport_certificate;
st->parent.set_proxy = stransport_set_proxy;
st->parent.read = stransport_read;
st->parent.write = stransport_write;
st->parent.close = stransport_close;
st->parent.free = stransport_free;
*out = (git_stream *) st;
return 0;
}
int git_stransport_stream_wrap(
git_stream **out,
git_stream *in,
const char *host)
{
return stransport_wrap(out, in, host, 0);
}
int git_stransport_stream_new(git_stream **out, const char *host, const char *port)
{
git_stream *stream = NULL;
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(host);
error = git_socket_stream_new(&stream, host, port);
if (!error)
error = stransport_wrap(out, stream, host, 1);
if (error < 0 && stream) {
git_stream_close(stream);
git_stream_free(stream);
}
return error;
}
#endif
| libgit2-main | src/libgit2/streams/stransport.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "ssh.h"
#ifdef GIT_SSH
#include <libssh2.h>
#endif
#include "runtime.h"
#include "net.h"
#include "netops.h"
#include "smart.h"
#include "streams/socket.h"
#include "git2/credential.h"
#include "git2/sys/credential.h"
#ifdef GIT_SSH
#define OWNING_SUBTRANSPORT(s) ((ssh_subtransport *)(s)->parent.subtransport)
static const char cmd_uploadpack[] = "git-upload-pack";
static const char cmd_receivepack[] = "git-receive-pack";
typedef struct {
git_smart_subtransport_stream parent;
git_stream *io;
LIBSSH2_SESSION *session;
LIBSSH2_CHANNEL *channel;
const char *cmd;
git_net_url url;
unsigned sent_command : 1;
} ssh_stream;
typedef struct {
git_smart_subtransport parent;
transport_smart *owner;
ssh_stream *current_stream;
git_credential *cred;
char *cmd_uploadpack;
char *cmd_receivepack;
} ssh_subtransport;
static int list_auth_methods(int *out, LIBSSH2_SESSION *session, const char *username);
static void ssh_error(LIBSSH2_SESSION *session, const char *errmsg)
{
char *ssherr;
libssh2_session_last_error(session, &ssherr, NULL, 0);
git_error_set(GIT_ERROR_SSH, "%s: %s", errmsg, ssherr);
}
/*
* Create a git protocol request.
*
* For example: git-upload-pack '/libgit2/libgit2'
*/
static int gen_proto(git_str *request, const char *cmd, git_net_url *url)
{
const char *repo;
repo = url->path;
if (repo && repo[0] == '/' && repo[1] == '~')
repo++;
if (!repo || !repo[0]) {
git_error_set(GIT_ERROR_NET, "malformed git protocol URL");
return -1;
}
git_str_puts(request, cmd);
git_str_puts(request, " '");
git_str_puts(request, repo);
git_str_puts(request, "'");
if (git_str_oom(request))
return -1;
return 0;
}
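/*
 * For instance (illustrative), the URL path "/libgit2/libgit2" produces
 * the request string:
 *
 *     git-upload-pack '/libgit2/libgit2'
 *
 * while a home-relative path such as "/~user/repo" drops the leading
 * slash and produces:
 *
 *     git-upload-pack '~user/repo'
 */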
static int send_command(ssh_stream *s)
{
int error;
git_str request = GIT_STR_INIT;
error = gen_proto(&request, s->cmd, &s->url);
if (error < 0)
goto cleanup;
error = libssh2_channel_exec(s->channel, request.ptr);
if (error < LIBSSH2_ERROR_NONE) {
ssh_error(s->session, "SSH could not execute request");
goto cleanup;
}
s->sent_command = 1;
cleanup:
git_str_dispose(&request);
return error;
}
static int ssh_stream_read(
git_smart_subtransport_stream *stream,
char *buffer,
size_t buf_size,
size_t *bytes_read)
{
int rc;
ssh_stream *s = GIT_CONTAINER_OF(stream, ssh_stream, parent);
*bytes_read = 0;
if (!s->sent_command && send_command(s) < 0)
return -1;
if ((rc = libssh2_channel_read(s->channel, buffer, buf_size)) < LIBSSH2_ERROR_NONE) {
ssh_error(s->session, "SSH could not read data");
return -1;
}
/*
* If we can't get anything out of stdout, it's typically a
* not-found error, so read from stderr and signal EOF on
* stderr.
*/
if (rc == 0) {
if ((rc = libssh2_channel_read_stderr(s->channel, buffer, buf_size)) > 0) {
git_error_set(GIT_ERROR_SSH, "%*s", rc, buffer);
return GIT_EEOF;
} else if (rc < LIBSSH2_ERROR_NONE) {
ssh_error(s->session, "SSH could not read stderr");
return -1;
}
}
*bytes_read = rc;
return 0;
}
static int ssh_stream_write(
git_smart_subtransport_stream *stream,
const char *buffer,
size_t len)
{
ssh_stream *s = GIT_CONTAINER_OF(stream, ssh_stream, parent);
size_t off = 0;
ssize_t ret = 0;
if (!s->sent_command && send_command(s) < 0)
return -1;
do {
ret = libssh2_channel_write(s->channel, buffer + off, len - off);
if (ret < 0)
break;
off += ret;
} while (off < len);
if (ret < 0) {
ssh_error(s->session, "SSH could not write data");
return -1;
}
return 0;
}
static void ssh_stream_free(git_smart_subtransport_stream *stream)
{
ssh_stream *s = GIT_CONTAINER_OF(stream, ssh_stream, parent);
ssh_subtransport *t;
if (!stream)
return;
t = OWNING_SUBTRANSPORT(s);
t->current_stream = NULL;
if (s->channel) {
libssh2_channel_close(s->channel);
libssh2_channel_free(s->channel);
s->channel = NULL;
}
if (s->session) {
libssh2_session_disconnect(s->session, "closing transport");
libssh2_session_free(s->session);
s->session = NULL;
}
if (s->io) {
git_stream_close(s->io);
git_stream_free(s->io);
s->io = NULL;
}
git_net_url_dispose(&s->url);
git__free(s);
}
static int ssh_stream_alloc(
ssh_subtransport *t,
const char *cmd,
git_smart_subtransport_stream **stream)
{
ssh_stream *s;
GIT_ASSERT_ARG(stream);
s = git__calloc(sizeof(ssh_stream), 1);
GIT_ERROR_CHECK_ALLOC(s);
s->parent.subtransport = &t->parent;
s->parent.read = ssh_stream_read;
s->parent.write = ssh_stream_write;
s->parent.free = ssh_stream_free;
s->cmd = cmd;
*stream = &s->parent;
return 0;
}
static int ssh_agent_auth(LIBSSH2_SESSION *session, git_credential_ssh_key *c) {
int rc = LIBSSH2_ERROR_NONE;
struct libssh2_agent_publickey *curr, *prev = NULL;
LIBSSH2_AGENT *agent = libssh2_agent_init(session);
if (agent == NULL)
return -1;
rc = libssh2_agent_connect(agent);
if (rc != LIBSSH2_ERROR_NONE)
goto shutdown;
rc = libssh2_agent_list_identities(agent);
if (rc != LIBSSH2_ERROR_NONE)
goto shutdown;
while (1) {
rc = libssh2_agent_get_identity(agent, &curr, prev);
if (rc < 0)
goto shutdown;
/* rc is set to 1 whenever the ssh agent has run out of keys to check.
* Set the error code to authentication failure rather than erroring
* out with an untranslatable error code.
*/
if (rc == 1) {
rc = LIBSSH2_ERROR_AUTHENTICATION_FAILED;
goto shutdown;
}
rc = libssh2_agent_userauth(agent, c->username, curr);
if (rc == 0)
break;
prev = curr;
}
shutdown:
if (rc != LIBSSH2_ERROR_NONE)
ssh_error(session, "error authenticating");
libssh2_agent_disconnect(agent);
libssh2_agent_free(agent);
return rc;
}
static int _git_ssh_authenticate_session(
LIBSSH2_SESSION *session,
git_credential *cred)
{
int rc;
do {
git_error_clear();
switch (cred->credtype) {
case GIT_CREDENTIAL_USERPASS_PLAINTEXT: {
git_credential_userpass_plaintext *c = (git_credential_userpass_plaintext *)cred;
rc = libssh2_userauth_password(session, c->username, c->password);
break;
}
case GIT_CREDENTIAL_SSH_KEY: {
git_credential_ssh_key *c = (git_credential_ssh_key *)cred;
if (c->privatekey)
rc = libssh2_userauth_publickey_fromfile(
session, c->username, c->publickey,
c->privatekey, c->passphrase);
else
rc = ssh_agent_auth(session, c);
break;
}
case GIT_CREDENTIAL_SSH_CUSTOM: {
git_credential_ssh_custom *c = (git_credential_ssh_custom *)cred;
rc = libssh2_userauth_publickey(
session, c->username, (const unsigned char *)c->publickey,
c->publickey_len, c->sign_callback, &c->payload);
break;
}
case GIT_CREDENTIAL_SSH_INTERACTIVE: {
void **abstract = libssh2_session_abstract(session);
git_credential_ssh_interactive *c = (git_credential_ssh_interactive *)cred;
/* ideally, we should be able to set this by calling
* libssh2_session_init_ex() instead of libssh2_session_init().
* libssh2's API is inconsistent here, i.e. libssh2_userauth_publickey()
* allows you to pass the `abstract` as part of the call, whereas
* libssh2_userauth_keyboard_interactive() does not!
*
* The only way to set the `abstract` pointer is by calling
* libssh2_session_abstract(), which will replace the existing
* pointer as is done below. This is safe for now (at time of writing),
* but may not be valid in future.
*/
*abstract = c->payload;
rc = libssh2_userauth_keyboard_interactive(
session, c->username, c->prompt_callback);
break;
}
#ifdef GIT_SSH_MEMORY_CREDENTIALS
case GIT_CREDENTIAL_SSH_MEMORY: {
git_credential_ssh_key *c = (git_credential_ssh_key *)cred;
GIT_ASSERT(c->username);
GIT_ASSERT(c->privatekey);
rc = libssh2_userauth_publickey_frommemory(
session,
c->username,
strlen(c->username),
c->publickey,
c->publickey ? strlen(c->publickey) : 0,
c->privatekey,
strlen(c->privatekey),
c->passphrase);
break;
}
#endif
default:
rc = LIBSSH2_ERROR_AUTHENTICATION_FAILED;
}
} while (LIBSSH2_ERROR_EAGAIN == rc || LIBSSH2_ERROR_TIMEOUT == rc);
if (rc == LIBSSH2_ERROR_PASSWORD_EXPIRED ||
rc == LIBSSH2_ERROR_AUTHENTICATION_FAILED ||
rc == LIBSSH2_ERROR_PUBLICKEY_UNVERIFIED)
return GIT_EAUTH;
if (rc != LIBSSH2_ERROR_NONE) {
if (!git_error_last())
ssh_error(session, "Failed to authenticate SSH session");
return -1;
}
return 0;
}
static int request_creds(git_credential **out, ssh_subtransport *t, const char *user, int auth_methods)
{
int error, no_callback = 0;
git_credential *cred = NULL;
if (!t->owner->connect_opts.callbacks.credentials) {
no_callback = 1;
} else {
error = t->owner->connect_opts.callbacks.credentials(
&cred,
t->owner->url,
user,
auth_methods,
t->owner->connect_opts.callbacks.payload);
if (error == GIT_PASSTHROUGH) {
no_callback = 1;
} else if (error < 0) {
return error;
} else if (!cred) {
git_error_set(GIT_ERROR_SSH, "callback failed to initialize SSH credentials");
return -1;
}
}
if (no_callback) {
git_error_set(GIT_ERROR_SSH, "authentication required but no callback set");
return GIT_EAUTH;
}
if (!(cred->credtype & auth_methods)) {
cred->free(cred);
git_error_set(GIT_ERROR_SSH, "authentication callback returned unsupported credentials type");
return GIT_EAUTH;
}
*out = cred;
return 0;
}
static int _git_ssh_session_create(
LIBSSH2_SESSION **session,
git_stream *io)
{
int rc = 0;
LIBSSH2_SESSION *s;
git_socket_stream *socket = GIT_CONTAINER_OF(io, git_socket_stream, parent);
GIT_ASSERT_ARG(session);
s = libssh2_session_init();
if (!s) {
git_error_set(GIT_ERROR_NET, "failed to initialize SSH session");
return -1;
}
do {
rc = libssh2_session_handshake(s, socket->s);
} while (LIBSSH2_ERROR_EAGAIN == rc || LIBSSH2_ERROR_TIMEOUT == rc);
if (rc != LIBSSH2_ERROR_NONE) {
ssh_error(s, "failed to start SSH session");
libssh2_session_free(s);
return -1;
}
libssh2_session_set_blocking(s, 1);
*session = s;
return 0;
}
#define SSH_DEFAULT_PORT "22"
static int _git_ssh_setup_conn(
ssh_subtransport *t,
const char *url,
const char *cmd,
git_smart_subtransport_stream **stream)
{
int auth_methods, error = 0;
ssh_stream *s;
git_credential *cred = NULL;
LIBSSH2_SESSION *session=NULL;
LIBSSH2_CHANNEL *channel=NULL;
t->current_stream = NULL;
*stream = NULL;
if (ssh_stream_alloc(t, cmd, stream) < 0)
return -1;
s = (ssh_stream *)*stream;
s->session = NULL;
s->channel = NULL;
if (git_net_str_is_url(url))
error = git_net_url_parse(&s->url, url);
else
error = git_net_url_parse_scp(&s->url, url);
if (error < 0)
goto done;
if ((error = git_socket_stream_new(&s->io, s->url.host, s->url.port)) < 0 ||
(error = git_stream_connect(s->io)) < 0)
goto done;
if ((error = _git_ssh_session_create(&session, s->io)) < 0)
goto done;
if (t->owner->connect_opts.callbacks.certificate_check != NULL) {
git_cert_hostkey cert = {{ 0 }}, *cert_ptr;
const char *key;
size_t cert_len;
int cert_type;
cert.parent.cert_type = GIT_CERT_HOSTKEY_LIBSSH2;
key = libssh2_session_hostkey(session, &cert_len, &cert_type);
if (key != NULL) {
cert.type |= GIT_CERT_SSH_RAW;
cert.hostkey = key;
cert.hostkey_len = cert_len;
switch (cert_type) {
case LIBSSH2_HOSTKEY_TYPE_RSA:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_RSA;
break;
case LIBSSH2_HOSTKEY_TYPE_DSS:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_DSS;
break;
#ifdef LIBSSH2_HOSTKEY_TYPE_ECDSA_256
case LIBSSH2_HOSTKEY_TYPE_ECDSA_256:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_256;
break;
case LIBSSH2_HOSTKEY_TYPE_ECDSA_384:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_384;
break;
case LIBSSH2_HOSTKEY_TYPE_ECDSA_521:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_KEY_ECDSA_521;
break;
#endif
#ifdef LIBSSH2_HOSTKEY_TYPE_ED25519
case LIBSSH2_HOSTKEY_TYPE_ED25519:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_KEY_ED25519;
break;
#endif
default:
cert.raw_type = GIT_CERT_SSH_RAW_TYPE_UNKNOWN;
}
}
#ifdef LIBSSH2_HOSTKEY_HASH_SHA256
key = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA256);
if (key != NULL) {
cert.type |= GIT_CERT_SSH_SHA256;
memcpy(&cert.hash_sha256, key, 32);
}
#endif
key = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_SHA1);
if (key != NULL) {
cert.type |= GIT_CERT_SSH_SHA1;
memcpy(&cert.hash_sha1, key, 20);
}
key = libssh2_hostkey_hash(session, LIBSSH2_HOSTKEY_HASH_MD5);
if (key != NULL) {
cert.type |= GIT_CERT_SSH_MD5;
memcpy(&cert.hash_md5, key, 16);
}
if (cert.type == 0) {
git_error_set(GIT_ERROR_SSH, "unable to get the host key");
error = -1;
goto done;
}
/* We don't currently trust any hostkeys */
git_error_clear();
cert_ptr = &cert;
error = t->owner->connect_opts.callbacks.certificate_check(
(git_cert *)cert_ptr,
0,
s->url.host,
t->owner->connect_opts.callbacks.payload);
if (error < 0 && error != GIT_PASSTHROUGH) {
if (!git_error_last())
git_error_set(GIT_ERROR_NET, "user cancelled hostkey check");
goto done;
}
}
/* we need the username to ask for auth methods */
if (!s->url.username) {
if ((error = request_creds(&cred, t, NULL, GIT_CREDENTIAL_USERNAME)) < 0)
goto done;
s->url.username = git__strdup(((git_credential_username *) cred)->username);
cred->free(cred);
cred = NULL;
if (!s->url.username) {
error = -1;
goto done;
}
} else if (s->url.username && s->url.password) {
if ((error = git_credential_userpass_plaintext_new(&cred, s->url.username, s->url.password)) < 0)
goto done;
}
if ((error = list_auth_methods(&auth_methods, session, s->url.username)) < 0)
goto done;
error = GIT_EAUTH;
/* if we already have something to try */
if (cred && auth_methods & cred->credtype)
error = _git_ssh_authenticate_session(session, cred);
while (error == GIT_EAUTH) {
if (cred) {
cred->free(cred);
cred = NULL;
}
if ((error = request_creds(&cred, t, s->url.username, auth_methods)) < 0)
goto done;
if (strcmp(s->url.username, git_credential_get_username(cred))) {
git_error_set(GIT_ERROR_SSH, "username does not match previous request");
error = -1;
goto done;
}
error = _git_ssh_authenticate_session(session, cred);
if (error == GIT_EAUTH) {
/* refresh auth methods */
if ((error = list_auth_methods(&auth_methods, session, s->url.username)) < 0)
goto done;
else
error = GIT_EAUTH;
}
}
if (error < 0)
goto done;
channel = libssh2_channel_open_session(session);
if (!channel) {
error = -1;
ssh_error(session, "Failed to open SSH channel");
goto done;
}
libssh2_channel_set_blocking(channel, 1);
s->session = session;
s->channel = channel;
t->current_stream = s;
done:
if (error < 0) {
ssh_stream_free(*stream);
if (session)
libssh2_session_free(session);
}
if (cred)
cred->free(cred);
return error;
}
static int ssh_uploadpack_ls(
ssh_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
const char *cmd = t->cmd_uploadpack ? t->cmd_uploadpack : cmd_uploadpack;
return _git_ssh_setup_conn(t, url, cmd, stream);
}
static int ssh_uploadpack(
ssh_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
GIT_UNUSED(url);
if (t->current_stream) {
*stream = &t->current_stream->parent;
return 0;
}
git_error_set(GIT_ERROR_NET, "must call UPLOADPACK_LS before UPLOADPACK");
return -1;
}
static int ssh_receivepack_ls(
ssh_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
const char *cmd = t->cmd_receivepack ? t->cmd_receivepack : cmd_receivepack;
return _git_ssh_setup_conn(t, url, cmd, stream);
}
static int ssh_receivepack(
ssh_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
GIT_UNUSED(url);
if (t->current_stream) {
*stream = &t->current_stream->parent;
return 0;
}
git_error_set(GIT_ERROR_NET, "must call RECEIVEPACK_LS before RECEIVEPACK");
return -1;
}
static int _ssh_action(
git_smart_subtransport_stream **stream,
git_smart_subtransport *subtransport,
const char *url,
git_smart_service_t action)
{
ssh_subtransport *t = GIT_CONTAINER_OF(subtransport, ssh_subtransport, parent);
switch (action) {
case GIT_SERVICE_UPLOADPACK_LS:
return ssh_uploadpack_ls(t, url, stream);
case GIT_SERVICE_UPLOADPACK:
return ssh_uploadpack(t, url, stream);
case GIT_SERVICE_RECEIVEPACK_LS:
return ssh_receivepack_ls(t, url, stream);
case GIT_SERVICE_RECEIVEPACK:
return ssh_receivepack(t, url, stream);
}
*stream = NULL;
return -1;
}
static int _ssh_close(git_smart_subtransport *subtransport)
{
ssh_subtransport *t = GIT_CONTAINER_OF(subtransport, ssh_subtransport, parent);
GIT_ASSERT(!t->current_stream);
GIT_UNUSED(t);
return 0;
}
static void _ssh_free(git_smart_subtransport *subtransport)
{
ssh_subtransport *t = GIT_CONTAINER_OF(subtransport, ssh_subtransport, parent);
git__free(t->cmd_uploadpack);
git__free(t->cmd_receivepack);
git__free(t);
}
#define SSH_AUTH_PUBLICKEY "publickey"
#define SSH_AUTH_PASSWORD "password"
#define SSH_AUTH_KEYBOARD_INTERACTIVE "keyboard-interactive"
static int list_auth_methods(int *out, LIBSSH2_SESSION *session, const char *username)
{
const char *list, *ptr;
*out = 0;
list = libssh2_userauth_list(session, username, strlen(username));
/* Either an error occurred, or the remote accepts NONE auth (which is bizarre); either way, punt. */
if (list == NULL && !libssh2_userauth_authenticated(session)) {
ssh_error(session, "Failed to retrieve list of SSH authentication methods");
return GIT_EAUTH;
}
ptr = list;
while (ptr) {
if (*ptr == ',')
ptr++;
if (!git__prefixcmp(ptr, SSH_AUTH_PUBLICKEY)) {
*out |= GIT_CREDENTIAL_SSH_KEY;
*out |= GIT_CREDENTIAL_SSH_CUSTOM;
#ifdef GIT_SSH_MEMORY_CREDENTIALS
*out |= GIT_CREDENTIAL_SSH_MEMORY;
#endif
ptr += strlen(SSH_AUTH_PUBLICKEY);
continue;
}
if (!git__prefixcmp(ptr, SSH_AUTH_PASSWORD)) {
*out |= GIT_CREDENTIAL_USERPASS_PLAINTEXT;
ptr += strlen(SSH_AUTH_PASSWORD);
continue;
}
if (!git__prefixcmp(ptr, SSH_AUTH_KEYBOARD_INTERACTIVE)) {
*out |= GIT_CREDENTIAL_SSH_INTERACTIVE;
ptr += strlen(SSH_AUTH_KEYBOARD_INTERACTIVE);
continue;
}
/* Skip it if we don't know it */
ptr = strchr(ptr, ',');
}
return 0;
}
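/*
 * For example (illustrative), a server advertising
 * "publickey,keyboard-interactive" causes *out to be set to
 * GIT_CREDENTIAL_SSH_KEY | GIT_CREDENTIAL_SSH_CUSTOM |
 * GIT_CREDENTIAL_SSH_INTERACTIVE (plus GIT_CREDENTIAL_SSH_MEMORY when
 * built with GIT_SSH_MEMORY_CREDENTIALS), while an unrecognized entry
 * such as "hostbased" is simply skipped.
 */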
#endif
int git_smart_subtransport_ssh(
git_smart_subtransport **out, git_transport *owner, void *param)
{
#ifdef GIT_SSH
ssh_subtransport *t;
GIT_ASSERT_ARG(out);
GIT_UNUSED(param);
t = git__calloc(sizeof(ssh_subtransport), 1);
GIT_ERROR_CHECK_ALLOC(t);
t->owner = (transport_smart *)owner;
t->parent.action = _ssh_action;
t->parent.close = _ssh_close;
t->parent.free = _ssh_free;
*out = (git_smart_subtransport *) t;
return 0;
#else
GIT_UNUSED(owner);
GIT_UNUSED(param);
GIT_ASSERT_ARG(out);
*out = NULL;
git_error_set(GIT_ERROR_INVALID, "cannot create SSH transport. Library was built without SSH support");
return -1;
#endif
}
int git_transport_ssh_with_paths(git_transport **out, git_remote *owner, void *payload)
{
#ifdef GIT_SSH
git_strarray *paths = (git_strarray *) payload;
git_transport *transport;
transport_smart *smart;
ssh_subtransport *t;
int error;
git_smart_subtransport_definition ssh_definition = {
git_smart_subtransport_ssh,
0, /* no RPC */
NULL,
};
if (paths->count != 2) {
git_error_set(GIT_ERROR_SSH, "invalid ssh paths, must be two strings");
return GIT_EINVALIDSPEC;
}
if ((error = git_transport_smart(&transport, owner, &ssh_definition)) < 0)
return error;
smart = (transport_smart *) transport;
t = (ssh_subtransport *) smart->wrapped;
t->cmd_uploadpack = git__strdup(paths->strings[0]);
GIT_ERROR_CHECK_ALLOC(t->cmd_uploadpack);
t->cmd_receivepack = git__strdup(paths->strings[1]);
GIT_ERROR_CHECK_ALLOC(t->cmd_receivepack);
*out = transport;
return 0;
#else
GIT_UNUSED(owner);
GIT_UNUSED(payload);
GIT_ASSERT_ARG(out);
*out = NULL;
git_error_set(GIT_ERROR_INVALID, "cannot create SSH transport. Library was built without SSH support");
return -1;
#endif
}
#ifdef GIT_SSH
static void shutdown_ssh(void)
{
libssh2_exit();
}
#endif
int git_transport_ssh_global_init(void)
{
#ifdef GIT_SSH
if (libssh2_init(0) < 0) {
git_error_set(GIT_ERROR_SSH, "unable to initialize libssh2");
return -1;
}
return git_runtime_shutdown_register(shutdown_ssh);
#else
/* Nothing to initialize */
return 0;
#endif
}
| libgit2-main | src/libgit2/transports/ssh.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "git2.h"
#include "http_parser.h"
#include "vector.h"
#include "trace.h"
#include "httpclient.h"
#include "http.h"
#include "auth.h"
#include "auth_negotiate.h"
#include "auth_ntlm.h"
#include "git2/sys/credential.h"
#include "net.h"
#include "stream.h"
#include "streams/socket.h"
#include "streams/tls.h"
#include "auth.h"
static git_http_auth_scheme auth_schemes[] = {
{ GIT_HTTP_AUTH_NEGOTIATE, "Negotiate", GIT_CREDENTIAL_DEFAULT, git_http_auth_negotiate },
{ GIT_HTTP_AUTH_NTLM, "NTLM", GIT_CREDENTIAL_USERPASS_PLAINTEXT, git_http_auth_ntlm },
{ GIT_HTTP_AUTH_BASIC, "Basic", GIT_CREDENTIAL_USERPASS_PLAINTEXT, git_http_auth_basic },
};
/*
* Use a 16kb read buffer to match the maximum size of a TLS packet. This
* is critical for compatibility with SecureTransport, which will always do
* a network read on every call, even if it has data buffered to return to
* you. That buffered data may be the _end_ of a keep-alive response, so
* if SecureTransport performs another network read, it will wait until the
* server ultimately times out before it returns that buffered data to you.
* Since SecureTransport only reads a single TLS packet at a time, by
* calling it with a read buffer that is the maximum size of a TLS packet,
* we ensure that it will never buffer.
*/
#define GIT_READ_BUFFER_SIZE (16 * 1024)
typedef struct {
git_net_url url;
git_stream *stream;
git_vector auth_challenges;
git_http_auth_context *auth_context;
} git_http_server;
typedef enum {
PROXY = 1,
SERVER
} git_http_server_t;
typedef enum {
NONE = 0,
SENDING_REQUEST,
SENDING_BODY,
SENT_REQUEST,
HAS_EARLY_RESPONSE,
READING_RESPONSE,
READING_BODY,
DONE
} http_client_state;
/* Parser state */
typedef enum {
PARSE_HEADER_NONE = 0,
PARSE_HEADER_NAME,
PARSE_HEADER_VALUE,
PARSE_HEADER_COMPLETE
} parse_header_state;
typedef enum {
PARSE_STATUS_OK,
PARSE_STATUS_NO_OUTPUT,
PARSE_STATUS_ERROR
} parse_status;
typedef struct {
git_http_client *client;
git_http_response *response;
/* Temporary buffers to avoid extra mallocs */
git_str parse_header_name;
git_str parse_header_value;
/* Parser state */
int error;
parse_status parse_status;
/* Headers parsing */
parse_header_state parse_header_state;
/* Body parsing */
char *output_buf; /* Caller's output buffer */
size_t output_size; /* Size of caller's output buffer */
size_t output_written; /* Bytes we've written to output buffer */
} http_parser_context;
/* HTTP client connection */
struct git_http_client {
git_http_client_options opts;
/* Are we writing to the proxy or server, and state of the client. */
git_http_server_t current_server;
http_client_state state;
http_parser parser;
git_http_server server;
git_http_server proxy;
unsigned request_count;
unsigned connected : 1,
proxy_connected : 1,
keepalive : 1,
request_chunked : 1;
/* Temporary buffers to avoid extra mallocs */
git_str request_msg;
git_str read_buf;
/* A subset of information from the request */
size_t request_body_len,
request_body_remain;
/*
* When state == HAS_EARLY_RESPONSE, a response (e.g. from our
* proxy, or to an expect/continue probe) that we have buffered
* and will deliver during read_response.
*/
git_http_response early_response;
};
bool git_http_response_is_redirect(git_http_response *response)
{
return (response->status == GIT_HTTP_MOVED_PERMANENTLY ||
response->status == GIT_HTTP_FOUND ||
response->status == GIT_HTTP_SEE_OTHER ||
response->status == GIT_HTTP_TEMPORARY_REDIRECT ||
response->status == GIT_HTTP_PERMANENT_REDIRECT);
}
void git_http_response_dispose(git_http_response *response)
{
if (!response)
return;
git__free(response->content_type);
git__free(response->location);
memset(response, 0, sizeof(git_http_response));
}
static int on_header_complete(http_parser *parser)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
git_http_client *client = ctx->client;
git_http_response *response = ctx->response;
git_str *name = &ctx->parse_header_name;
git_str *value = &ctx->parse_header_value;
if (!strcasecmp("Content-Type", name->ptr)) {
if (response->content_type) {
git_error_set(GIT_ERROR_HTTP,
"multiple content-type headers");
return -1;
}
response->content_type =
git__strndup(value->ptr, value->size);
GIT_ERROR_CHECK_ALLOC(ctx->response->content_type);
} else if (!strcasecmp("Content-Length", name->ptr)) {
int64_t len;
if (response->content_length) {
git_error_set(GIT_ERROR_HTTP,
"multiple content-length headers");
return -1;
}
if (git__strntol64(&len, value->ptr, value->size,
NULL, 10) < 0 || len < 0) {
git_error_set(GIT_ERROR_HTTP,
"invalid content-length");
return -1;
}
response->content_length = (size_t)len;
} else if (!strcasecmp("Transfer-Encoding", name->ptr) &&
!strcasecmp("chunked", value->ptr)) {
ctx->response->chunked = 1;
} else if (!strcasecmp("Proxy-Authenticate", git_str_cstr(name))) {
char *dup = git__strndup(value->ptr, value->size);
GIT_ERROR_CHECK_ALLOC(dup);
if (git_vector_insert(&client->proxy.auth_challenges, dup) < 0)
return -1;
} else if (!strcasecmp("WWW-Authenticate", name->ptr)) {
char *dup = git__strndup(value->ptr, value->size);
GIT_ERROR_CHECK_ALLOC(dup);
if (git_vector_insert(&client->server.auth_challenges, dup) < 0)
return -1;
} else if (!strcasecmp("Location", name->ptr)) {
if (response->location) {
git_error_set(GIT_ERROR_HTTP,
"multiple location headers");
return -1;
}
response->location = git__strndup(value->ptr, value->size);
GIT_ERROR_CHECK_ALLOC(response->location);
}
return 0;
}
static int on_header_field(http_parser *parser, const char *str, size_t len)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
switch (ctx->parse_header_state) {
/*
* We last saw a header value, process the name/value pair and
* get ready to handle this new name.
*/
case PARSE_HEADER_VALUE:
if (on_header_complete(parser) < 0)
return ctx->parse_status = PARSE_STATUS_ERROR;
git_str_clear(&ctx->parse_header_name);
git_str_clear(&ctx->parse_header_value);
/* Fall through */
case PARSE_HEADER_NONE:
case PARSE_HEADER_NAME:
ctx->parse_header_state = PARSE_HEADER_NAME;
if (git_str_put(&ctx->parse_header_name, str, len) < 0)
return ctx->parse_status = PARSE_STATUS_ERROR;
break;
default:
git_error_set(GIT_ERROR_HTTP,
"header name seen at unexpected time");
return ctx->parse_status = PARSE_STATUS_ERROR;
}
return 0;
}
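/*
 * Note that http_parser may deliver one header in several callback
 * invocations when it straddles a read boundary.  For example
 * (illustrative), the bytes "Content-Le" and "ngth: 42\r\n" arriving in
 * two reads yield two on_header_field calls ("Content-Le", then "ngth")
 * followed by an on_header_value call ("42"); the state machine above
 * reassembles the pieces in parse_header_name and parse_header_value.
 */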
static int on_header_value(http_parser *parser, const char *str, size_t len)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
switch (ctx->parse_header_state) {
case PARSE_HEADER_NAME:
case PARSE_HEADER_VALUE:
ctx->parse_header_state = PARSE_HEADER_VALUE;
if (git_str_put(&ctx->parse_header_value, str, len) < 0)
return ctx->parse_status = PARSE_STATUS_ERROR;
break;
default:
git_error_set(GIT_ERROR_HTTP,
"header value seen at unexpected time");
return ctx->parse_status = PARSE_STATUS_ERROR;
}
return 0;
}
GIT_INLINE(bool) challenge_matches_scheme(
const char *challenge,
git_http_auth_scheme *scheme)
{
const char *scheme_name = scheme->name;
size_t scheme_len = strlen(scheme_name);
if (!strncasecmp(challenge, scheme_name, scheme_len) &&
(challenge[scheme_len] == '\0' || challenge[scheme_len] == ' '))
return true;
return false;
}
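/*
 * For example (illustrative), the challenge `Basic realm="libgit2"`
 * matches the "Basic" scheme, and the bare challenge "Basic" matches as
 * well, but "BasicX ..." does not: the scheme name must be followed by
 * either a space or the end of the string.
 */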
static git_http_auth_scheme *scheme_for_challenge(const char *challenge)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) {
if (challenge_matches_scheme(challenge, &auth_schemes[i]))
return &auth_schemes[i];
}
return NULL;
}
GIT_INLINE(void) collect_authinfo(
unsigned int *schemetypes,
unsigned int *credtypes,
git_vector *challenges)
{
git_http_auth_scheme *scheme;
const char *challenge;
size_t i;
*schemetypes = 0;
*credtypes = 0;
git_vector_foreach(challenges, i, challenge) {
if ((scheme = scheme_for_challenge(challenge)) != NULL) {
*schemetypes |= scheme->type;
*credtypes |= scheme->credtypes;
}
}
}
static int resend_needed(git_http_client *client, git_http_response *response)
{
git_http_auth_context *auth_context;
if (response->status == GIT_HTTP_STATUS_UNAUTHORIZED &&
(auth_context = client->server.auth_context) &&
auth_context->is_complete &&
!auth_context->is_complete(auth_context))
return 1;
if (response->status == GIT_HTTP_STATUS_PROXY_AUTHENTICATION_REQUIRED &&
(auth_context = client->proxy.auth_context) &&
auth_context->is_complete &&
!auth_context->is_complete(auth_context))
return 1;
return 0;
}
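/*
 * For example (illustrative): with NTLM, the first 401 response carries
 * a challenge for an auth context that is not yet complete, so
 * resend_needed() returns 1 and the caller replays the request with the
 * next NTLM token on the same connection.
 */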
static int on_headers_complete(http_parser *parser)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
/* Finalize the last seen header */
switch (ctx->parse_header_state) {
case PARSE_HEADER_VALUE:
if (on_header_complete(parser) < 0)
return ctx->parse_status = PARSE_STATUS_ERROR;
/* Fall through */
case PARSE_HEADER_NONE:
ctx->parse_header_state = PARSE_HEADER_COMPLETE;
break;
default:
git_error_set(GIT_ERROR_HTTP,
"header completion at unexpected time");
return ctx->parse_status = PARSE_STATUS_ERROR;
}
ctx->response->status = parser->status_code;
ctx->client->keepalive = http_should_keep_alive(parser);
/* Prepare for authentication */
collect_authinfo(&ctx->response->server_auth_schemetypes,
&ctx->response->server_auth_credtypes,
&ctx->client->server.auth_challenges);
collect_authinfo(&ctx->response->proxy_auth_schemetypes,
&ctx->response->proxy_auth_credtypes,
&ctx->client->proxy.auth_challenges);
ctx->response->resend_credentials = resend_needed(ctx->client,
ctx->response);
/* Stop parsing. */
http_parser_pause(parser, 1);
if (ctx->response->content_type || ctx->response->chunked)
ctx->client->state = READING_BODY;
else
ctx->client->state = DONE;
return 0;
}
static int on_body(http_parser *parser, const char *buf, size_t len)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
size_t max_len;
/* Saw data when we expected not to (e.g., in consume_response_body) */
if (ctx->output_buf == NULL || ctx->output_size == 0) {
ctx->parse_status = PARSE_STATUS_NO_OUTPUT;
return 0;
}
GIT_ASSERT(ctx->output_size >= ctx->output_written);
max_len = min(ctx->output_size - ctx->output_written, len);
max_len = min(max_len, INT_MAX);
memcpy(ctx->output_buf + ctx->output_written, buf, max_len);
ctx->output_written += max_len;
return 0;
}
static int on_message_complete(http_parser *parser)
{
http_parser_context *ctx = (http_parser_context *) parser->data;
ctx->client->state = DONE;
return 0;
}
GIT_INLINE(int) stream_write(
git_http_server *server,
const char *data,
size_t len)
{
git_trace(GIT_TRACE_TRACE,
"Sending request:\n%.*s", (int)len, data);
return git_stream__write_full(server->stream, data, len, 0);
}
GIT_INLINE(int) client_write_request(git_http_client *client)
{
git_stream *stream = client->current_server == PROXY ?
client->proxy.stream : client->server.stream;
git_trace(GIT_TRACE_TRACE,
"Sending request:\n%.*s",
(int)client->request_msg.size, client->request_msg.ptr);
return git_stream__write_full(stream,
client->request_msg.ptr,
client->request_msg.size,
0);
}
static const char *name_for_method(git_http_method method)
{
switch (method) {
case GIT_HTTP_METHOD_GET:
return "GET";
case GIT_HTTP_METHOD_POST:
return "POST";
case GIT_HTTP_METHOD_CONNECT:
return "CONNECT";
}
return NULL;
}
/*
* Find the scheme that is suitable for the given credentials, based on the
* server's auth challenges.
*/
static bool best_scheme_and_challenge(
git_http_auth_scheme **scheme_out,
const char **challenge_out,
git_vector *challenges,
git_credential *credentials)
{
const char *challenge;
size_t i, j;
for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) {
git_vector_foreach(challenges, j, challenge) {
git_http_auth_scheme *scheme = &auth_schemes[i];
if (challenge_matches_scheme(challenge, scheme) &&
(scheme->credtypes & credentials->credtype)) {
*scheme_out = scheme;
*challenge_out = challenge;
return true;
}
}
}
return false;
}
/*
* Find the challenge from the server for our current auth context.
*/
static const char *challenge_for_context(
git_vector *challenges,
git_http_auth_context *auth_ctx)
{
const char *challenge;
size_t i, j;
for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) {
if (auth_schemes[i].type == auth_ctx->type) {
git_http_auth_scheme *scheme = &auth_schemes[i];
git_vector_foreach(challenges, j, challenge) {
if (challenge_matches_scheme(challenge, scheme))
return challenge;
}
}
}
return NULL;
}
static const char *init_auth_context(
git_http_server *server,
git_vector *challenges,
git_credential *credentials)
{
git_http_auth_scheme *scheme;
const char *challenge;
int error;
if (!best_scheme_and_challenge(&scheme, &challenge, challenges, credentials)) {
git_error_set(GIT_ERROR_HTTP, "could not find appropriate mechanism for credentials");
return NULL;
}
error = scheme->init_context(&server->auth_context, &server->url);
if (error == GIT_PASSTHROUGH) {
git_error_set(GIT_ERROR_HTTP, "'%s' authentication is not supported", scheme->name);
return NULL;
}
return challenge;
}
static void free_auth_context(git_http_server *server)
{
if (!server->auth_context)
return;
if (server->auth_context->free)
server->auth_context->free(server->auth_context);
server->auth_context = NULL;
}
static int apply_credentials(
git_str *buf,
git_http_server *server,
const char *header_name,
git_credential *credentials)
{
git_http_auth_context *auth = server->auth_context;
git_vector *challenges = &server->auth_challenges;
const char *challenge;
git_str token = GIT_STR_INIT;
int error = 0;
/* We've started a new request without creds; free the context. */
if (auth && !credentials) {
free_auth_context(server);
return 0;
}
/* We haven't authenticated, nor were we asked to. Nothing to do. */
if (!auth && !git_vector_length(challenges))
return 0;
if (!auth) {
challenge = init_auth_context(server, challenges, credentials);
auth = server->auth_context;
if (!challenge || !auth) {
error = -1;
goto done;
}
} else if (auth->set_challenge) {
challenge = challenge_for_context(challenges, auth);
}
if (auth->set_challenge && challenge &&
(error = auth->set_challenge(auth, challenge)) < 0)
goto done;
if ((error = auth->next_token(&token, auth, credentials)) < 0)
goto done;
if (auth->is_complete && auth->is_complete(auth)) {
/*
* If we're done with an auth mechanism with connection affinity,
* we don't need to send any more headers and can dispose the context.
*/
if (auth->connection_affinity)
free_auth_context(server);
} else if (!token.size) {
git_error_set(GIT_ERROR_HTTP, "failed to respond to authentication challenge");
error = GIT_EAUTH;
goto done;
}
if (token.size > 0)
error = git_str_printf(buf, "%s: %s\r\n", header_name, token.ptr);
done:
git_str_dispose(&token);
return error;
}
GIT_INLINE(int) apply_server_credentials(
git_str *buf,
git_http_client *client,
git_http_request *request)
{
return apply_credentials(buf,
&client->server,
"Authorization",
request->credentials);
}
GIT_INLINE(int) apply_proxy_credentials(
git_str *buf,
git_http_client *client,
git_http_request *request)
{
return apply_credentials(buf,
&client->proxy,
"Proxy-Authorization",
request->proxy_credentials);
}
static int puts_host_and_port(git_str *buf, git_net_url *url, bool force_port)
{
bool ipv6 = git_net_url_is_ipv6(url);
if (ipv6)
git_str_putc(buf, '[');
git_str_puts(buf, url->host);
if (ipv6)
git_str_putc(buf, ']');
if (force_port || !git_net_url_is_default_port(url)) {
git_str_putc(buf, ':');
git_str_puts(buf, url->port);
}
return git_str_oom(buf) ? -1 : 0;
}
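/*
 * For example (illustrative): an "https" URL for host "example.com" on
 * port "443" emits "example.com" (or "example.com:443" when force_port
 * is set), while the IPv6 host "::1" on port "8080" emits "[::1]:8080".
 */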
static int generate_connect_request(
git_http_client *client,
git_http_request *request)
{
git_str *buf;
int error;
git_str_clear(&client->request_msg);
buf = &client->request_msg;
git_str_puts(buf, "CONNECT ");
puts_host_and_port(buf, &client->server.url, true);
git_str_puts(buf, " HTTP/1.1\r\n");
git_str_puts(buf, "User-Agent: ");
git_http__user_agent(buf);
git_str_puts(buf, "\r\n");
git_str_puts(buf, "Host: ");
puts_host_and_port(buf, &client->server.url, true);
git_str_puts(buf, "\r\n");
if ((error = apply_proxy_credentials(buf, client, request)) < 0)
return error;
git_str_puts(buf, "\r\n");
return git_str_oom(buf) ? -1 : 0;
}
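/*
 * The generated request looks like this (illustrative; the User-Agent
 * value depends on the build, and a Proxy-Authorization header is added
 * once credentials have been negotiated):
 *
 *     CONNECT example.com:443 HTTP/1.1
 *     User-Agent: git/2.0 (libgit2 <version>)
 *     Host: example.com:443
 *
 */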
static bool use_connect_proxy(git_http_client *client)
{
return client->proxy.url.host && !strcmp(client->server.url.scheme, "https");
}
static int generate_request(
git_http_client *client,
git_http_request *request)
{
git_str *buf;
size_t i;
int error;
GIT_ASSERT_ARG(client);
GIT_ASSERT_ARG(request);
git_str_clear(&client->request_msg);
buf = &client->request_msg;
/* GET|POST path HTTP/1.1 */
git_str_puts(buf, name_for_method(request->method));
git_str_putc(buf, ' ');
if (request->proxy && strcmp(request->url->scheme, "https"))
git_net_url_fmt(buf, request->url);
else
git_net_url_fmt_path(buf, request->url);
git_str_puts(buf, " HTTP/1.1\r\n");
git_str_puts(buf, "User-Agent: ");
git_http__user_agent(buf);
git_str_puts(buf, "\r\n");
git_str_puts(buf, "Host: ");
puts_host_and_port(buf, request->url, false);
git_str_puts(buf, "\r\n");
if (request->accept)
git_str_printf(buf, "Accept: %s\r\n", request->accept);
else
git_str_puts(buf, "Accept: */*\r\n");
if (request->content_type)
git_str_printf(buf, "Content-Type: %s\r\n",
request->content_type);
if (request->chunked)
git_str_puts(buf, "Transfer-Encoding: chunked\r\n");
if (request->content_length > 0)
git_str_printf(buf, "Content-Length: %"PRIuZ "\r\n",
request->content_length);
if (request->expect_continue)
git_str_printf(buf, "Expect: 100-continue\r\n");
if ((error = apply_server_credentials(buf, client, request)) < 0 ||
(!use_connect_proxy(client) &&
(error = apply_proxy_credentials(buf, client, request)) < 0))
return error;
if (request->custom_headers) {
for (i = 0; i < request->custom_headers->count; i++) {
const char *hdr = request->custom_headers->strings[i];
if (hdr)
git_str_printf(buf, "%s\r\n", hdr);
}
}
git_str_puts(buf, "\r\n");
if (git_str_oom(buf))
return -1;
return 0;
}
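/*
 * A typical generated request (illustrative) for a smart-protocol POST
 * looks like:
 *
 *     POST /libgit2/libgit2.git/git-upload-pack HTTP/1.1
 *     User-Agent: git/2.0 (libgit2 <version>)
 *     Host: example.com
 *     Accept: application/x-git-upload-pack-result
 *     Content-Type: application/x-git-upload-pack-request
 *     Content-Length: 123
 *
 * followed by a blank line and then the request body.
 */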
static int check_certificate(
git_stream *stream,
git_net_url *url,
int is_valid,
git_transport_certificate_check_cb cert_cb,
void *cert_cb_payload)
{
git_cert *cert;
git_error_state last_error = {0};
int error;
if ((error = git_stream_certificate(&cert, stream)) < 0)
return error;
git_error_state_capture(&last_error, GIT_ECERTIFICATE);
error = cert_cb(cert, is_valid, url->host, cert_cb_payload);
if (error == GIT_PASSTHROUGH && !is_valid)
return git_error_state_restore(&last_error);
else if (error == GIT_PASSTHROUGH)
error = 0;
else if (error && !git_error_last())
git_error_set(GIT_ERROR_HTTP,
"user rejected certificate for %s", url->host);
git_error_state_free(&last_error);
return error;
}
static int server_connect_stream(
git_http_server *server,
git_transport_certificate_check_cb cert_cb,
void *cb_payload)
{
int error;
GIT_ERROR_CHECK_VERSION(server->stream, GIT_STREAM_VERSION, "git_stream");
error = git_stream_connect(server->stream);
if (error && error != GIT_ECERTIFICATE)
return error;
if (git_stream_is_encrypted(server->stream) && cert_cb != NULL)
error = check_certificate(server->stream, &server->url, !error,
cert_cb, cb_payload);
return error;
}
static void reset_auth_connection(git_http_server *server)
{
/*
* If we've authenticated and we're doing "normal"
* authentication with a request affinity (Basic, Digest)
* then we want to _keep_ our context, since authentication
* survives even through non-keep-alive connections. If
* we've authenticated and we're doing connection-based
* authentication (NTLM, Negotiate) - indicated by the
* `connection_affinity` flag on the context - then we need to restart
* authentication on a new connection.
*/
if (server->auth_context &&
server->auth_context->connection_affinity)
free_auth_context(server);
}
/*
* Updates the server data structure with the new URL; returns 1 if the server
* has changed and we need to reconnect, returns 0 otherwise.
*/
GIT_INLINE(int) server_setup_from_url(
git_http_server *server,
git_net_url *url)
{
if (!server->url.scheme || strcmp(server->url.scheme, url->scheme) ||
!server->url.host || strcmp(server->url.host, url->host) ||
!server->url.port || strcmp(server->url.port, url->port)) {
git__free(server->url.scheme);
git__free(server->url.host);
git__free(server->url.port);
server->url.scheme = git__strdup(url->scheme);
GIT_ERROR_CHECK_ALLOC(server->url.scheme);
server->url.host = git__strdup(url->host);
GIT_ERROR_CHECK_ALLOC(server->url.host);
server->url.port = git__strdup(url->port);
GIT_ERROR_CHECK_ALLOC(server->url.port);
return 1;
}
return 0;
}
static void reset_parser(git_http_client *client)
{
http_parser_init(&client->parser, HTTP_RESPONSE);
}
static int setup_hosts(
git_http_client *client,
git_http_request *request)
{
int ret, diff = 0;
GIT_ASSERT_ARG(client);
GIT_ASSERT_ARG(request);
GIT_ASSERT(request->url);
if ((ret = server_setup_from_url(&client->server, request->url)) < 0)
return ret;
diff |= ret;
if (request->proxy &&
(ret = server_setup_from_url(&client->proxy, request->proxy)) < 0)
return ret;
diff |= ret;
if (diff) {
free_auth_context(&client->server);
free_auth_context(&client->proxy);
client->connected = 0;
}
return 0;
}
GIT_INLINE(int) server_create_stream(git_http_server *server)
{
git_net_url *url = &server->url;
if (strcasecmp(url->scheme, "https") == 0)
return git_tls_stream_new(&server->stream, url->host, url->port);
else if (strcasecmp(url->scheme, "http") == 0)
return git_socket_stream_new(&server->stream, url->host, url->port);
git_error_set(GIT_ERROR_HTTP, "unknown http scheme '%s'", url->scheme);
return -1;
}
GIT_INLINE(void) save_early_response(
git_http_client *client,
git_http_response *response)
{
/* Buffer the response so we can return it in read_response */
client->state = HAS_EARLY_RESPONSE;
memcpy(&client->early_response, response, sizeof(git_http_response));
memset(response, 0, sizeof(git_http_response));
}
static int proxy_connect(
git_http_client *client,
git_http_request *request)
{
git_http_response response = {0};
int error;
if (!client->proxy_connected || !client->keepalive) {
git_trace(GIT_TRACE_DEBUG, "Connecting to proxy %s port %s",
client->proxy.url.host, client->proxy.url.port);
if ((error = server_create_stream(&client->proxy)) < 0 ||
(error = server_connect_stream(&client->proxy,
client->opts.proxy_certificate_check_cb,
client->opts.proxy_certificate_check_payload)) < 0)
goto done;
client->proxy_connected = 1;
}
client->current_server = PROXY;
client->state = SENDING_REQUEST;
if ((error = generate_connect_request(client, request)) < 0 ||
(error = client_write_request(client)) < 0)
goto done;
client->state = SENT_REQUEST;
if ((error = git_http_client_read_response(&response, client)) < 0 ||
(error = git_http_client_skip_body(client)) < 0)
goto done;
GIT_ASSERT(client->state == DONE);
if (response.status == GIT_HTTP_STATUS_PROXY_AUTHENTICATION_REQUIRED) {
save_early_response(client, &response);
error = GIT_RETRY;
goto done;
} else if (response.status != GIT_HTTP_STATUS_OK) {
git_error_set(GIT_ERROR_HTTP, "proxy returned unexpected status: %d", response.status);
error = -1;
goto done;
}
reset_parser(client);
client->state = NONE;
done:
git_http_response_dispose(&response);
return error;
}
static int server_connect(git_http_client *client)
{
git_net_url *url = &client->server.url;
git_transport_certificate_check_cb cert_cb;
void *cert_payload;
int error;
client->current_server = SERVER;
if (client->proxy.stream)
error = git_tls_stream_wrap(&client->server.stream, client->proxy.stream, url->host);
else
error = server_create_stream(&client->server);
if (error < 0)
goto done;
cert_cb = client->opts.server_certificate_check_cb;
cert_payload = client->opts.server_certificate_check_payload;
error = server_connect_stream(&client->server, cert_cb, cert_payload);
done:
return error;
}
GIT_INLINE(void) close_stream(git_http_server *server)
{
if (server->stream) {
git_stream_close(server->stream);
git_stream_free(server->stream);
server->stream = NULL;
}
}
static int http_client_connect(
git_http_client *client,
git_http_request *request)
{
bool use_proxy = false;
int error;
if ((error = setup_hosts(client, request)) < 0)
goto on_error;
/* We're connected to our destination server; no need to reconnect */
if (client->connected && client->keepalive &&
(client->state == NONE || client->state == DONE))
return 0;
client->connected = 0;
client->request_count = 0;
close_stream(&client->server);
reset_auth_connection(&client->server);
reset_parser(client);
/* Reconnect to the proxy if necessary. */
use_proxy = use_connect_proxy(client);
if (use_proxy) {
if (!client->proxy_connected || !client->keepalive ||
(client->state != NONE && client->state != DONE)) {
close_stream(&client->proxy);
reset_auth_connection(&client->proxy);
client->proxy_connected = 0;
}
if ((error = proxy_connect(client, request)) < 0)
goto on_error;
}
git_trace(GIT_TRACE_DEBUG, "Connecting to remote %s port %s",
client->server.url.host, client->server.url.port);
if ((error = server_connect(client)) < 0)
goto on_error;
client->connected = 1;
return error;
on_error:
if (error != GIT_RETRY)
close_stream(&client->proxy);
close_stream(&client->server);
return error;
}
GIT_INLINE(int) client_read(git_http_client *client)
{
http_parser_context *parser_context = client->parser.data;
git_stream *stream;
char *buf = client->read_buf.ptr + client->read_buf.size;
size_t max_len;
ssize_t read_len;
stream = client->current_server == PROXY ?
client->proxy.stream : client->server.stream;
/*
* We use a git_str for convenience, but statically allocate it and
* don't resize. Limit our consumption to INT_MAX since calling
* functions use an int return type to return number of bytes read.
*/
max_len = client->read_buf.asize - client->read_buf.size;
max_len = min(max_len, INT_MAX);
if (parser_context->output_size)
max_len = min(max_len, parser_context->output_size);
if (max_len == 0) {
git_error_set(GIT_ERROR_HTTP, "no room in output buffer");
return -1;
}
read_len = git_stream_read(stream, buf, max_len);
if (read_len >= 0) {
client->read_buf.size += read_len;
git_trace(GIT_TRACE_TRACE, "Received:\n%.*s",
(int)read_len, buf);
}
return (int)read_len;
}
static bool parser_settings_initialized;
static http_parser_settings parser_settings;
GIT_INLINE(http_parser_settings *) http_client_parser_settings(void)
{
if (!parser_settings_initialized) {
parser_settings.on_header_field = on_header_field;
parser_settings.on_header_value = on_header_value;
parser_settings.on_headers_complete = on_headers_complete;
parser_settings.on_body = on_body;
parser_settings.on_message_complete = on_message_complete;
parser_settings_initialized = true;
}
return &parser_settings;
}
GIT_INLINE(int) client_read_and_parse(git_http_client *client)
{
http_parser *parser = &client->parser;
http_parser_context *ctx = (http_parser_context *) parser->data;
unsigned char http_errno;
int read_len;
size_t parsed_len;
/*
* If we have data in our read buffer, that means we stopped early
* when parsing headers. Use the data in the read buffer instead of
* reading more from the socket.
*/
if (!client->read_buf.size && (read_len = client_read(client)) < 0)
return read_len;
parsed_len = http_parser_execute(parser,
http_client_parser_settings(),
client->read_buf.ptr,
client->read_buf.size);
http_errno = client->parser.http_errno;
if (parsed_len > INT_MAX) {
git_error_set(GIT_ERROR_HTTP, "unexpectedly large parse");
return -1;
}
if (ctx->parse_status == PARSE_STATUS_ERROR) {
client->connected = 0;
return ctx->error ? ctx->error : -1;
}
/*
* If we finished reading the headers or body, we paused parsing.
* Otherwise the parser will start filling the body, or even parse
* a new response if the server pipelined us multiple responses.
* (This can happen in response to an expect/continue request,
* where the server gives you a 100 and 200 simultaneously.)
*/
if (http_errno == HPE_PAUSED) {
/*
* http-parser has a "feature" where it will not deliver the
* final byte when paused in a callback. Consume that byte.
* https://github.com/nodejs/http-parser/issues/97
*/
GIT_ASSERT(client->read_buf.size > parsed_len);
http_parser_pause(parser, 0);
parsed_len += http_parser_execute(parser,
http_client_parser_settings(),
client->read_buf.ptr + parsed_len,
1);
}
/* Most failures will be reported in http_errno */
else if (parser->http_errno != HPE_OK) {
git_error_set(GIT_ERROR_HTTP, "http parser error: %s",
http_errno_description(http_errno));
return -1;
}
/* Otherwise we should have consumed the entire buffer. */
else if (parsed_len != client->read_buf.size) {
git_error_set(GIT_ERROR_HTTP,
"http parser did not consume entire buffer: %s",
http_errno_description(http_errno));
return -1;
}
/* recv returned 0, the server hung up on us */
else if (!parsed_len) {
git_error_set(GIT_ERROR_HTTP, "unexpected EOF");
return -1;
}
git_str_consume_bytes(&client->read_buf, parsed_len);
return (int)parsed_len;
}
/*
* See if we've consumed the entire response body. If the client was
* reading the body but did not consume it entirely, it's possible that
* they knew that the stream had finished (in a git response, seeing a
* final flush) and stopped reading. But if the response was chunked,
* we may have not consumed the final chunk marker. Consume it to
* ensure that we don't have it waiting in our socket. If there's
* more than just a chunk marker, close the connection.
*/
static void complete_response_body(git_http_client *client)
{
http_parser_context parser_context = {0};
/* If we're not keeping alive, don't bother. */
if (!client->keepalive) {
client->connected = 0;
goto done;
}
parser_context.client = client;
client->parser.data = &parser_context;
/* If there was an error, just close the connection. */
if (client_read_and_parse(client) < 0 ||
parser_context.error != HPE_OK ||
(parser_context.parse_status != PARSE_STATUS_OK &&
parser_context.parse_status != PARSE_STATUS_NO_OUTPUT)) {
git_error_clear();
client->connected = 0;
}
done:
git_str_clear(&client->read_buf);
}
int git_http_client_send_request(
git_http_client *client,
git_http_request *request)
{
git_http_response response = {0};
int error = -1;
GIT_ASSERT_ARG(client);
GIT_ASSERT_ARG(request);
/* If the client did not finish reading, clean up the stream. */
if (client->state == READING_BODY)
complete_response_body(client);
/* If we're waiting for proxy auth, don't send any more requests. */
if (client->state == HAS_EARLY_RESPONSE)
return 0;
if (git_trace_level() >= GIT_TRACE_DEBUG) {
git_str url = GIT_STR_INIT;
git_net_url_fmt(&url, request->url);
git_trace(GIT_TRACE_DEBUG, "Sending %s request to %s",
name_for_method(request->method),
url.ptr ? url.ptr : "<invalid>");
git_str_dispose(&url);
}
if ((error = http_client_connect(client, request)) < 0 ||
(error = generate_request(client, request)) < 0 ||
(error = client_write_request(client)) < 0)
goto done;
client->state = SENT_REQUEST;
if (request->expect_continue) {
if ((error = git_http_client_read_response(&response, client)) < 0 ||
(error = git_http_client_skip_body(client)) < 0)
goto done;
error = 0;
if (response.status != GIT_HTTP_STATUS_CONTINUE) {
save_early_response(client, &response);
goto done;
}
}
if (request->content_length || request->chunked) {
client->state = SENDING_BODY;
client->request_body_len = request->content_length;
client->request_body_remain = request->content_length;
client->request_chunked = request->chunked;
}
reset_parser(client);
done:
if (error == GIT_RETRY)
error = 0;
git_http_response_dispose(&response);
return error;
}
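/*
 * Example (sketch, not library code): the expect/continue handling
 * above corresponds to this exchange on the wire. The client sends
 * its headers with an "Expect" header and waits for an interim
 * response before transmitting the body:
 *
 *   C: POST /git-upload-pack HTTP/1.1
 *   C: Expect: 100-continue
 *   C: Content-Length: 123
 *
 *   S: HTTP/1.1 100 Continue      (client may now send the body)
 *
 * Any status other than 100 is saved via save_early_response() and
 * handed back to the caller instead.
 */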
bool git_http_client_has_response(git_http_client *client)
{
return (client->state == HAS_EARLY_RESPONSE ||
client->state > SENT_REQUEST);
}
int git_http_client_send_body(
git_http_client *client,
const char *buffer,
size_t buffer_len)
{
git_http_server *server;
git_str hdr = GIT_STR_INIT;
int error;
GIT_ASSERT_ARG(client);
/* If we're waiting for proxy auth, don't send any more requests. */
if (client->state == HAS_EARLY_RESPONSE)
return 0;
if (client->state != SENDING_BODY) {
git_error_set(GIT_ERROR_HTTP, "client is in invalid state");
return -1;
}
if (!buffer_len)
return 0;
server = &client->server;
if (client->request_body_len) {
GIT_ASSERT(buffer_len <= client->request_body_remain);
if ((error = stream_write(server, buffer, buffer_len)) < 0)
goto done;
client->request_body_remain -= buffer_len;
} else {
if ((error = git_str_printf(&hdr, "%" PRIxZ "\r\n", buffer_len)) < 0 ||
(error = stream_write(server, hdr.ptr, hdr.size)) < 0 ||
(error = stream_write(server, buffer, buffer_len)) < 0 ||
(error = stream_write(server, "\r\n", 2)) < 0)
goto done;
}
done:
git_str_dispose(&hdr);
return error;
}
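/*
 * Example (sketch): with no content-length set, the branch above
 * emits HTTP/1.1 chunked framing. A call with buffer_len == 5 and
 * data "hello" writes the hex length, the data, and a trailer:
 *
 *   5\r\n
 *   hello\r\n
 *
 * complete_request() later terminates the stream with the final
 * zero-length chunk "0\r\n\r\n".
 */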
static int complete_request(git_http_client *client)
{
int error = 0;
GIT_ASSERT_ARG(client);
GIT_ASSERT(client->state == SENDING_BODY);
if (client->request_body_len && client->request_body_remain) {
git_error_set(GIT_ERROR_HTTP, "truncated write");
error = -1;
} else if (client->request_chunked) {
error = stream_write(&client->server, "0\r\n\r\n", 5);
}
client->state = SENT_REQUEST;
return error;
}
int git_http_client_read_response(
git_http_response *response,
git_http_client *client)
{
http_parser_context parser_context = {0};
int error;
GIT_ASSERT_ARG(response);
GIT_ASSERT_ARG(client);
if (client->state == SENDING_BODY) {
if ((error = complete_request(client)) < 0)
goto done;
}
if (client->state == HAS_EARLY_RESPONSE) {
memcpy(response, &client->early_response, sizeof(git_http_response));
memset(&client->early_response, 0, sizeof(git_http_response));
client->state = DONE;
return 0;
}
if (client->state != SENT_REQUEST) {
git_error_set(GIT_ERROR_HTTP, "client is in invalid state");
error = -1;
goto done;
}
git_http_response_dispose(response);
if (client->current_server == PROXY) {
git_vector_free_deep(&client->proxy.auth_challenges);
} else if (client->current_server == SERVER) {
git_vector_free_deep(&client->server.auth_challenges);
}
client->state = READING_RESPONSE;
client->keepalive = 0;
client->parser.data = &parser_context;
parser_context.client = client;
parser_context.response = response;
while (client->state == READING_RESPONSE) {
if ((error = client_read_and_parse(client)) < 0)
goto done;
}
GIT_ASSERT(client->state == READING_BODY || client->state == DONE);
done:
git_str_dispose(&parser_context.parse_header_name);
git_str_dispose(&parser_context.parse_header_value);
return error;
}
int git_http_client_read_body(
git_http_client *client,
char *buffer,
size_t buffer_size)
{
http_parser_context parser_context = {0};
int error = 0;
if (client->state == DONE)
return 0;
if (client->state != READING_BODY) {
git_error_set(GIT_ERROR_HTTP, "client is in invalid state");
return -1;
}
/*
* Now we'll read from the socket and http_parser will pipeline the
* data directly to the client.
*/
parser_context.client = client;
parser_context.output_buf = buffer;
parser_context.output_size = buffer_size;
client->parser.data = &parser_context;
/*
* Clients expect to get a non-zero amount of data from us, so we
* block until we have data to return, we hit EOF, or an error
* occurs. Do this in a loop, since we
* may end up reading only some stream metadata (like chunk
* information).
*/
while (!parser_context.output_written) {
error = client_read_and_parse(client);
if (error <= 0)
goto done;
if (client->state == DONE)
break;
}
GIT_ASSERT(parser_context.output_written <= INT_MAX);
error = (int)parser_context.output_written;
done:
if (error < 0)
client->connected = 0;
return error;
}
int git_http_client_skip_body(git_http_client *client)
{
http_parser_context parser_context = {0};
int error;
if (client->state == DONE)
return 0;
if (client->state != READING_BODY) {
git_error_set(GIT_ERROR_HTTP, "client is in invalid state");
return -1;
}
parser_context.client = client;
client->parser.data = &parser_context;
do {
error = client_read_and_parse(client);
if (parser_context.error != HPE_OK ||
(parser_context.parse_status != PARSE_STATUS_OK &&
parser_context.parse_status != PARSE_STATUS_NO_OUTPUT)) {
git_error_set(GIT_ERROR_HTTP,
"unexpected data handled in callback");
error = -1;
}
} while (error >= 0 && client->state != DONE);
if (error < 0)
client->connected = 0;
return error;
}
/*
* Create an http_client capable of communicating with the given remote
* host.
*/
int git_http_client_new(
git_http_client **out,
git_http_client_options *opts)
{
git_http_client *client;
GIT_ASSERT_ARG(out);
client = git__calloc(1, sizeof(git_http_client));
GIT_ERROR_CHECK_ALLOC(client);
git_str_init(&client->read_buf, GIT_READ_BUFFER_SIZE);
GIT_ERROR_CHECK_ALLOC(client->read_buf.ptr);
if (opts)
memcpy(&client->opts, opts, sizeof(git_http_client_options));
*out = client;
return 0;
}
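/*
 * Example (sketch, error handling omitted; request fields assumed as
 * declared in httpclient.h): a typical caller drives one round-trip
 * through the client:
 *
 *   git_http_client *client;
 *   git_http_request req = {0};
 *   git_http_response resp = {0};
 *   char buf[1024];
 *   int len;
 *
 *   git_http_client_new(&client, NULL);
 *   req.method = GIT_HTTP_METHOD_GET;
 *   req.url = url; // a parsed git_net_url
 *   git_http_client_send_request(client, &req);
 *   git_http_client_read_response(&resp, client);
 *   while ((len = git_http_client_read_body(client, buf, sizeof(buf))) > 0)
 *       ; // consume the body
 *   git_http_response_dispose(&resp);
 *   git_http_client_free(client);
 */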
GIT_INLINE(void) http_server_close(git_http_server *server)
{
if (server->stream) {
git_stream_close(server->stream);
git_stream_free(server->stream);
server->stream = NULL;
}
git_net_url_dispose(&server->url);
git_vector_free_deep(&server->auth_challenges);
free_auth_context(server);
}
static void http_client_close(git_http_client *client)
{
http_server_close(&client->server);
http_server_close(&client->proxy);
git_str_dispose(&client->request_msg);
client->state = 0;
client->request_count = 0;
client->connected = 0;
client->keepalive = 0;
}
void git_http_client_free(git_http_client *client)
{
if (!client)
return;
http_client_close(client);
git_str_dispose(&client->read_buf);
git__free(client);
}
| libgit2-main | src/libgit2/transports/httpclient.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "auth.h"
#include "git2/sys/credential.h"
static int basic_next_token(
git_str *out,
git_http_auth_context *ctx,
git_credential *c)
{
git_credential_userpass_plaintext *cred;
git_str raw = GIT_STR_INIT;
int error = GIT_EAUTH;
GIT_UNUSED(ctx);
if (c->credtype != GIT_CREDENTIAL_USERPASS_PLAINTEXT) {
git_error_set(GIT_ERROR_INVALID, "invalid credential type for basic auth");
goto on_error;
}
cred = (git_credential_userpass_plaintext *)c;
git_str_printf(&raw, "%s:%s", cred->username, cred->password);
if (git_str_oom(&raw) ||
git_str_puts(out, "Basic ") < 0 ||
git_str_encode_base64(out, git_str_cstr(&raw), raw.size) < 0)
goto on_error;
error = 0;
on_error:
if (raw.size)
git__memzero(raw.ptr, raw.size);
git_str_dispose(&raw);
return error;
}
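/*
 * Example: for username "user" and password "pass", the token built
 * above is the base64 encoding of "user:pass", yielding the header
 *
 *   Authorization: Basic dXNlcjpwYXNz
 */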
static git_http_auth_context basic_context = {
GIT_HTTP_AUTH_BASIC,
GIT_CREDENTIAL_USERPASS_PLAINTEXT,
0,
NULL,
basic_next_token,
NULL,
NULL
};
int git_http_auth_basic(
git_http_auth_context **out, const git_net_url *url)
{
GIT_UNUSED(url);
*out = &basic_context;
return 0;
}
int git_http_auth_dummy(
git_http_auth_context **out, const git_net_url *url)
{
GIT_UNUSED(url);
*out = NULL;
return GIT_PASSTHROUGH;
}
| libgit2-main | src/libgit2/transports/auth.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "auth_negotiate.h"
#if defined(GIT_GSSAPI) || defined(GIT_GSSFRAMEWORK)
#include "git2.h"
#include "auth.h"
#include "git2/sys/credential.h"
#ifdef GIT_GSSFRAMEWORK
#import <GSS/GSS.h>
#elif defined(GIT_GSSAPI)
#include <gssapi.h>
#include <krb5.h>
#endif
static gss_OID_desc negotiate_oid_spnego =
{ 6, (void *) "\x2b\x06\x01\x05\x05\x02" };
static gss_OID_desc negotiate_oid_krb5 =
{ 9, (void *) "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02" };
static gss_OID negotiate_oids[] =
{ &negotiate_oid_spnego, &negotiate_oid_krb5, NULL };
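/*
 * Note: these are the DER encodings of the mechanism OIDs. SPNEGO is
 * 1.3.6.1.5.5.2 and Kerberos 5 is 1.2.840.113554.1.2.2; in DER the
 * first two arcs pack into one byte (40 * arc1 + arc2, so "1.3"
 * becomes 0x2b) and each remaining arc follows in base-128.
 */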
typedef struct {
git_http_auth_context parent;
unsigned configured : 1,
complete : 1;
git_str target;
char *challenge;
gss_ctx_id_t gss_context;
gss_OID oid;
} http_auth_negotiate_context;
static void negotiate_err_set(
OM_uint32 status_major,
OM_uint32 status_minor,
const char *message)
{
gss_buffer_desc buffer = GSS_C_EMPTY_BUFFER;
OM_uint32 status_display, context = 0;
if (gss_display_status(&status_display, status_major, GSS_C_GSS_CODE,
GSS_C_NO_OID, &context, &buffer) == GSS_S_COMPLETE) {
git_error_set(GIT_ERROR_NET, "%s: %.*s (%d.%d)",
message, (int)buffer.length, (const char *)buffer.value,
status_major, status_minor);
gss_release_buffer(&status_minor, &buffer);
} else {
git_error_set(GIT_ERROR_NET, "%s: unknown negotiate error (%d.%d)",
message, status_major, status_minor);
}
}
static int negotiate_set_challenge(
git_http_auth_context *c,
const char *challenge)
{
http_auth_negotiate_context *ctx = (http_auth_negotiate_context *)c;
GIT_ASSERT_ARG(ctx);
GIT_ASSERT_ARG(challenge);
GIT_ASSERT(ctx->configured);
git__free(ctx->challenge);
ctx->challenge = git__strdup(challenge);
GIT_ERROR_CHECK_ALLOC(ctx->challenge);
return 0;
}
static void negotiate_context_dispose(http_auth_negotiate_context *ctx)
{
OM_uint32 status_minor;
if (ctx->gss_context != GSS_C_NO_CONTEXT) {
gss_delete_sec_context(
&status_minor, &ctx->gss_context, GSS_C_NO_BUFFER);
ctx->gss_context = GSS_C_NO_CONTEXT;
}
git_str_dispose(&ctx->target);
git__free(ctx->challenge);
ctx->challenge = NULL;
}
static int negotiate_next_token(
git_str *buf,
git_http_auth_context *c,
git_credential *cred)
{
http_auth_negotiate_context *ctx = (http_auth_negotiate_context *)c;
OM_uint32 status_major, status_minor;
gss_buffer_desc target_buffer = GSS_C_EMPTY_BUFFER,
input_token = GSS_C_EMPTY_BUFFER,
output_token = GSS_C_EMPTY_BUFFER;
gss_buffer_t input_token_ptr = GSS_C_NO_BUFFER;
git_str input_buf = GIT_STR_INIT;
gss_name_t server = NULL;
gss_OID mech;
size_t challenge_len;
int error = 0;
GIT_ASSERT_ARG(buf);
GIT_ASSERT_ARG(ctx);
GIT_ASSERT_ARG(cred);
GIT_ASSERT(ctx->configured);
GIT_ASSERT(cred->credtype == GIT_CREDENTIAL_DEFAULT);
if (ctx->complete)
return 0;
target_buffer.value = (void *)ctx->target.ptr;
target_buffer.length = ctx->target.size;
status_major = gss_import_name(&status_minor, &target_buffer,
GSS_C_NT_HOSTBASED_SERVICE, &server);
if (GSS_ERROR(status_major)) {
negotiate_err_set(status_major, status_minor,
"could not parse principal");
error = -1;
goto done;
}
challenge_len = ctx->challenge ? strlen(ctx->challenge) : 0;
if (challenge_len < 9 || memcmp(ctx->challenge, "Negotiate", 9) != 0) {
git_error_set(GIT_ERROR_NET, "server did not request negotiate");
error = -1;
goto done;
}
if (challenge_len > 9) {
if (git_str_decode_base64(&input_buf,
ctx->challenge + 10, challenge_len - 10) < 0) {
git_error_set(GIT_ERROR_NET, "invalid negotiate challenge from server");
error = -1;
goto done;
}
input_token.value = input_buf.ptr;
input_token.length = input_buf.size;
input_token_ptr = &input_token;
} else if (ctx->gss_context != GSS_C_NO_CONTEXT) {
negotiate_context_dispose(ctx);
}
mech = &negotiate_oid_spnego;
status_major = gss_init_sec_context(
&status_minor,
GSS_C_NO_CREDENTIAL,
&ctx->gss_context,
server,
mech,
GSS_C_DELEG_FLAG | GSS_C_MUTUAL_FLAG,
GSS_C_INDEFINITE,
GSS_C_NO_CHANNEL_BINDINGS,
input_token_ptr,
NULL,
&output_token,
NULL,
NULL);
if (GSS_ERROR(status_major)) {
negotiate_err_set(status_major, status_minor, "negotiate failure");
error = -1;
goto done;
}
/* This message merely told us auth was complete; we do not respond. */
if (status_major == GSS_S_COMPLETE) {
negotiate_context_dispose(ctx);
ctx->complete = 1;
goto done;
}
if (output_token.length == 0) {
git_error_set(GIT_ERROR_NET, "GSSAPI did not return token");
error = -1;
goto done;
}
git_str_puts(buf, "Negotiate ");
git_str_encode_base64(buf, output_token.value, output_token.length);
if (git_str_oom(buf))
error = -1;
done:
gss_release_name(&status_minor, &server);
gss_release_buffer(&status_minor, (gss_buffer_t) &output_token);
git_str_dispose(&input_buf);
return error;
}
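/*
 * Example (sketch): over HTTP, the token exchange driven by
 * negotiate_next_token() typically looks like:
 *
 *   S: HTTP/1.1 401 Unauthorized
 *   S: WWW-Authenticate: Negotiate
 *   C: Authorization: Negotiate <base64 SPNEGO token>
 *   S: WWW-Authenticate: Negotiate <base64 continuation token>
 *   C: Authorization: Negotiate <base64 response token>
 *   S: HTTP/1.1 200 OK
 *
 * A bare "Negotiate" challenge starts a fresh context; a challenge
 * carrying a payload is base64-decoded and fed back into
 * gss_init_sec_context() as the input token.
 */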
static int negotiate_is_complete(git_http_auth_context *c)
{
http_auth_negotiate_context *ctx = (http_auth_negotiate_context *)c;
GIT_ASSERT_ARG(ctx);
return (ctx->complete == 1);
}
static void negotiate_context_free(git_http_auth_context *c)
{
http_auth_negotiate_context *ctx = (http_auth_negotiate_context *)c;
negotiate_context_dispose(ctx);
ctx->configured = 0;
ctx->complete = 0;
ctx->oid = NULL;
git__free(ctx);
}
static int negotiate_init_context(
http_auth_negotiate_context *ctx,
const git_net_url *url)
{
OM_uint32 status_major, status_minor;
gss_OID item, *oid;
gss_OID_set mechanism_list;
size_t i;
/* Query supported mechanisms, looking for SPNEGO */
status_major = gss_indicate_mechs(&status_minor, &mechanism_list);
if (GSS_ERROR(status_major)) {
negotiate_err_set(status_major, status_minor,
"could not query mechanisms");
return -1;
}
if (mechanism_list) {
for (oid = negotiate_oids; *oid; oid++) {
for (i = 0; i < mechanism_list->count; i++) {
item = &mechanism_list->elements[i];
if (item->length == (*oid)->length &&
memcmp(item->elements, (*oid)->elements, item->length) == 0) {
ctx->oid = *oid;
break;
}
}
if (ctx->oid)
break;
}
}
gss_release_oid_set(&status_minor, &mechanism_list);
if (!ctx->oid) {
git_error_set(GIT_ERROR_NET, "negotiate authentication is not supported");
return GIT_EAUTH;
}
git_str_puts(&ctx->target, "HTTP@");
git_str_puts(&ctx->target, url->host);
if (git_str_oom(&ctx->target))
return -1;
ctx->gss_context = GSS_C_NO_CONTEXT;
ctx->configured = 1;
return 0;
}
int git_http_auth_negotiate(
git_http_auth_context **out,
const git_net_url *url)
{
http_auth_negotiate_context *ctx;
*out = NULL;
ctx = git__calloc(1, sizeof(http_auth_negotiate_context));
GIT_ERROR_CHECK_ALLOC(ctx);
if (negotiate_init_context(ctx, url) < 0) {
git__free(ctx);
return -1;
}
ctx->parent.type = GIT_HTTP_AUTH_NEGOTIATE;
ctx->parent.credtypes = GIT_CREDENTIAL_DEFAULT;
ctx->parent.connection_affinity = 1;
ctx->parent.set_challenge = negotiate_set_challenge;
ctx->parent.next_token = negotiate_next_token;
ctx->parent.is_complete = negotiate_is_complete;
ctx->parent.free = negotiate_context_free;
*out = (git_http_auth_context *)ctx;
return 0;
}
#endif /* GIT_GSSAPI */
| libgit2-main | src/libgit2/transports/auth_negotiate.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "git2/credential.h"
#include "git2/sys/credential.h"
#include "git2/credential_helpers.h"
static int git_credential_ssh_key_type_new(
git_credential **cred,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase,
git_credential_t credtype);
int git_credential_has_username(git_credential *cred)
{
if (cred->credtype == GIT_CREDENTIAL_DEFAULT)
return 0;
return 1;
}
const char *git_credential_get_username(git_credential *cred)
{
switch (cred->credtype) {
case GIT_CREDENTIAL_USERNAME:
{
git_credential_username *c = (git_credential_username *) cred;
return c->username;
}
case GIT_CREDENTIAL_USERPASS_PLAINTEXT:
{
git_credential_userpass_plaintext *c = (git_credential_userpass_plaintext *) cred;
return c->username;
}
case GIT_CREDENTIAL_SSH_KEY:
case GIT_CREDENTIAL_SSH_MEMORY:
{
git_credential_ssh_key *c = (git_credential_ssh_key *) cred;
return c->username;
}
case GIT_CREDENTIAL_SSH_CUSTOM:
{
git_credential_ssh_custom *c = (git_credential_ssh_custom *) cred;
return c->username;
}
case GIT_CREDENTIAL_SSH_INTERACTIVE:
{
git_credential_ssh_interactive *c = (git_credential_ssh_interactive *) cred;
return c->username;
}
default:
return NULL;
}
}
static void plaintext_free(struct git_credential *cred)
{
git_credential_userpass_plaintext *c = (git_credential_userpass_plaintext *)cred;
git__free(c->username);
/* Zero the memory which previously held the password */
if (c->password) {
size_t pass_len = strlen(c->password);
git__memzero(c->password, pass_len);
git__free(c->password);
}
git__free(c);
}
int git_credential_userpass_plaintext_new(
git_credential **cred,
const char *username,
const char *password)
{
git_credential_userpass_plaintext *c;
GIT_ASSERT_ARG(cred);
GIT_ASSERT_ARG(username);
GIT_ASSERT_ARG(password);
c = git__malloc(sizeof(git_credential_userpass_plaintext));
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = GIT_CREDENTIAL_USERPASS_PLAINTEXT;
c->parent.free = plaintext_free;
c->username = git__strdup(username);
if (!c->username) {
git__free(c);
return -1;
}
c->password = git__strdup(password);
if (!c->password) {
git__free(c->username);
git__free(c);
return -1;
}
*cred = &c->parent;
return 0;
}
static void ssh_key_free(struct git_credential *cred)
{
git_credential_ssh_key *c =
(git_credential_ssh_key *)cred;
git__free(c->username);
if (c->privatekey) {
/* Zero the memory which previously held the private key */
size_t key_len = strlen(c->privatekey);
git__memzero(c->privatekey, key_len);
git__free(c->privatekey);
}
if (c->passphrase) {
/* Zero the memory which previously held the passphrase */
size_t pass_len = strlen(c->passphrase);
git__memzero(c->passphrase, pass_len);
git__free(c->passphrase);
}
if (c->publickey) {
/* Zero the memory which previously held the public key */
size_t key_len = strlen(c->publickey);
git__memzero(c->publickey, key_len);
git__free(c->publickey);
}
git__free(c);
}
static void ssh_interactive_free(struct git_credential *cred)
{
git_credential_ssh_interactive *c = (git_credential_ssh_interactive *)cred;
git__free(c->username);
git__free(c);
}
static void ssh_custom_free(struct git_credential *cred)
{
git_credential_ssh_custom *c = (git_credential_ssh_custom *)cred;
git__free(c->username);
if (c->publickey) {
/* Zero the memory which previously held the publickey */
size_t key_len = strlen(c->publickey);
git__memzero(c->publickey, key_len);
git__free(c->publickey);
}
git__free(c);
}
static void default_free(struct git_credential *cred)
{
git_credential_default *c = (git_credential_default *)cred;
git__free(c);
}
static void username_free(struct git_credential *cred)
{
git__free(cred);
}
int git_credential_ssh_key_new(
git_credential **cred,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase)
{
return git_credential_ssh_key_type_new(
cred,
username,
publickey,
privatekey,
passphrase,
GIT_CREDENTIAL_SSH_KEY);
}
int git_credential_ssh_key_memory_new(
git_credential **cred,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase)
{
#ifdef GIT_SSH_MEMORY_CREDENTIALS
return git_credential_ssh_key_type_new(
cred,
username,
publickey,
privatekey,
passphrase,
GIT_CREDENTIAL_SSH_MEMORY);
#else
GIT_UNUSED(cred);
GIT_UNUSED(username);
GIT_UNUSED(publickey);
GIT_UNUSED(privatekey);
GIT_UNUSED(passphrase);
git_error_set(GIT_ERROR_INVALID,
"this version of libgit2 was not built with ssh memory credentials.");
return -1;
#endif
}
static int git_credential_ssh_key_type_new(
git_credential **cred,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase,
git_credential_t credtype)
{
git_credential_ssh_key *c;
GIT_ASSERT_ARG(username);
GIT_ASSERT_ARG(cred);
GIT_ASSERT_ARG(privatekey);
c = git__calloc(1, sizeof(git_credential_ssh_key));
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = credtype;
c->parent.free = ssh_key_free;
c->username = git__strdup(username);
GIT_ERROR_CHECK_ALLOC(c->username);
c->privatekey = git__strdup(privatekey);
GIT_ERROR_CHECK_ALLOC(c->privatekey);
if (publickey) {
c->publickey = git__strdup(publickey);
GIT_ERROR_CHECK_ALLOC(c->publickey);
}
if (passphrase) {
c->passphrase = git__strdup(passphrase);
GIT_ERROR_CHECK_ALLOC(c->passphrase);
}
*cred = &c->parent;
return 0;
}
int git_credential_ssh_interactive_new(
git_credential **out,
const char *username,
git_credential_ssh_interactive_cb prompt_callback,
void *payload)
{
git_credential_ssh_interactive *c;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(username);
GIT_ASSERT_ARG(prompt_callback);
c = git__calloc(1, sizeof(git_credential_ssh_interactive));
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = GIT_CREDENTIAL_SSH_INTERACTIVE;
c->parent.free = ssh_interactive_free;
c->username = git__strdup(username);
GIT_ERROR_CHECK_ALLOC(c->username);
c->prompt_callback = prompt_callback;
c->payload = payload;
*out = &c->parent;
return 0;
}
int git_credential_ssh_key_from_agent(git_credential **cred, const char *username) {
git_credential_ssh_key *c;
GIT_ASSERT_ARG(username);
GIT_ASSERT_ARG(cred);
c = git__calloc(1, sizeof(git_credential_ssh_key));
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = GIT_CREDENTIAL_SSH_KEY;
c->parent.free = ssh_key_free;
c->username = git__strdup(username);
GIT_ERROR_CHECK_ALLOC(c->username);
c->privatekey = NULL;
*cred = &c->parent;
return 0;
}
int git_credential_ssh_custom_new(
git_credential **cred,
const char *username,
const char *publickey,
size_t publickey_len,
git_credential_sign_cb sign_callback,
void *payload)
{
git_credential_ssh_custom *c;
GIT_ASSERT_ARG(username);
GIT_ASSERT_ARG(cred);
c = git__calloc(1, sizeof(git_credential_ssh_custom));
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = GIT_CREDENTIAL_SSH_CUSTOM;
c->parent.free = ssh_custom_free;
c->username = git__strdup(username);
GIT_ERROR_CHECK_ALLOC(c->username);
if (publickey_len > 0) {
c->publickey = git__malloc(publickey_len);
GIT_ERROR_CHECK_ALLOC(c->publickey);
memcpy(c->publickey, publickey, publickey_len);
}
c->publickey_len = publickey_len;
c->sign_callback = sign_callback;
c->payload = payload;
*cred = &c->parent;
return 0;
}
int git_credential_default_new(git_credential **cred)
{
git_credential_default *c;
GIT_ASSERT_ARG(cred);
c = git__calloc(1, sizeof(git_credential_default));
GIT_ERROR_CHECK_ALLOC(c);
c->credtype = GIT_CREDENTIAL_DEFAULT;
c->free = default_free;
*cred = c;
return 0;
}
int git_credential_username_new(git_credential **cred, const char *username)
{
git_credential_username *c;
size_t len, allocsize;
GIT_ASSERT_ARG(cred);
len = strlen(username);
GIT_ERROR_CHECK_ALLOC_ADD(&allocsize, sizeof(git_credential_username), len);
GIT_ERROR_CHECK_ALLOC_ADD(&allocsize, allocsize, 1);
c = git__malloc(allocsize);
GIT_ERROR_CHECK_ALLOC(c);
c->parent.credtype = GIT_CREDENTIAL_USERNAME;
c->parent.free = username_free;
memcpy(c->username, username, len + 1);
*cred = (git_credential *) c;
return 0;
}
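/*
 * Layout note: git_credential_username stores the name inline at the
 * end of the struct, so the code above makes one allocation covering
 * both the header and the NUL-terminated string:
 *
 *   allocsize = sizeof(git_credential_username) + strlen(username) + 1
 *
 * which is why username_free() needs only a single git__free().
 */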
void git_credential_free(git_credential *cred)
{
if (!cred)
return;
cred->free(cred);
}
/* Deprecated credential functions */
#ifndef GIT_DEPRECATE_HARD
int git_cred_has_username(git_credential *cred)
{
return git_credential_has_username(cred);
}
const char *git_cred_get_username(git_credential *cred)
{
return git_credential_get_username(cred);
}
int git_cred_userpass_plaintext_new(
git_credential **out,
const char *username,
const char *password)
{
return git_credential_userpass_plaintext_new(out, username, password);
}
int git_cred_default_new(git_credential **out)
{
return git_credential_default_new(out);
}
int git_cred_username_new(git_credential **out, const char *username)
{
return git_credential_username_new(out, username);
}
int git_cred_ssh_key_new(
git_credential **out,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase)
{
return git_credential_ssh_key_new(out, username,
publickey, privatekey, passphrase);
}
int git_cred_ssh_key_memory_new(
git_credential **out,
const char *username,
const char *publickey,
const char *privatekey,
const char *passphrase)
{
return git_credential_ssh_key_memory_new(out, username,
publickey, privatekey, passphrase);
}
int git_cred_ssh_interactive_new(
git_credential **out,
const char *username,
git_credential_ssh_interactive_cb prompt_callback,
void *payload)
{
return git_credential_ssh_interactive_new(out, username,
prompt_callback, payload);
}
int git_cred_ssh_key_from_agent(
git_credential **out,
const char *username)
{
return git_credential_ssh_key_from_agent(out, username);
}
int git_cred_ssh_custom_new(
git_credential **out,
const char *username,
const char *publickey,
size_t publickey_len,
git_credential_sign_cb sign_callback,
void *payload)
{
return git_credential_ssh_custom_new(out, username,
publickey, publickey_len, sign_callback, payload);
}
void git_cred_free(git_credential *cred)
{
git_credential_free(cred);
}
#endif
| libgit2-main | src/libgit2/transports/credential.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "pack-objects.h"
#include "refs.h"
#include "posix.h"
#include "fs_path.h"
#include "repository.h"
#include "odb.h"
#include "push.h"
#include "remote.h"
#include "proxy.h"
#include "git2/types.h"
#include "git2/net.h"
#include "git2/repository.h"
#include "git2/object.h"
#include "git2/tag.h"
#include "git2/transport.h"
#include "git2/revwalk.h"
#include "git2/odb_backend.h"
#include "git2/pack.h"
#include "git2/commit.h"
#include "git2/revparse.h"
#include "git2/sys/remote.h"
typedef struct {
git_transport parent;
git_remote *owner;
char *url;
int direction;
git_atomic32 cancelled;
git_repository *repo;
git_remote_connect_options connect_opts;
git_vector refs;
unsigned connected : 1,
have_refs : 1;
} transport_local;
static void free_head(git_remote_head *head)
{
git__free(head->name);
git__free(head->symref_target);
git__free(head);
}
static void free_heads(git_vector *heads)
{
git_remote_head *head;
size_t i;
git_vector_foreach(heads, i, head)
free_head(head);
git_vector_free(heads);
}
static int add_ref(transport_local *t, const char *name)
{
const char peeled[] = "^{}";
git_reference *ref, *resolved;
git_remote_head *head;
git_oid obj_id;
git_object *obj = NULL, *target = NULL;
git_str buf = GIT_STR_INIT;
int error;
if ((error = git_reference_lookup(&ref, t->repo, name)) < 0)
return error;
error = git_reference_resolve(&resolved, ref);
if (error < 0) {
git_reference_free(ref);
if (!strcmp(name, GIT_HEAD_FILE) && error == GIT_ENOTFOUND) {
/* This is actually okay. Empty repos often have a HEAD that
* points to a nonexistent "refs/heads/master". */
git_error_clear();
return 0;
}
return error;
}
git_oid_cpy(&obj_id, git_reference_target(resolved));
git_reference_free(resolved);
head = git__calloc(1, sizeof(git_remote_head));
GIT_ERROR_CHECK_ALLOC(head);
head->name = git__strdup(name);
GIT_ERROR_CHECK_ALLOC(head->name);
git_oid_cpy(&head->oid, &obj_id);
if (git_reference_type(ref) == GIT_REFERENCE_SYMBOLIC) {
head->symref_target = git__strdup(git_reference_symbolic_target(ref));
GIT_ERROR_CHECK_ALLOC(head->symref_target);
}
git_reference_free(ref);
if ((error = git_vector_insert(&t->refs, head)) < 0) {
free_head(head);
return error;
}
/* If it's not a tag, we don't need to try to peel it */
if (git__prefixcmp(name, GIT_REFS_TAGS_DIR))
return 0;
if ((error = git_object_lookup(&obj, t->repo, &head->oid, GIT_OBJECT_ANY)) < 0)
return error;
head = NULL;
/* If it's not an annotated tag, or if we're mocking
* git-receive-pack, just get out */
if (git_object_type(obj) != GIT_OBJECT_TAG ||
t->direction != GIT_DIRECTION_FETCH) {
git_object_free(obj);
return 0;
}
/* And if it's a tag, peel it, and add it to the list */
head = git__calloc(1, sizeof(git_remote_head));
GIT_ERROR_CHECK_ALLOC(head);
if (git_str_join(&buf, 0, name, peeled) < 0) {
free_head(head);
return -1;
}
head->name = git_str_detach(&buf);
if (!(error = git_tag_peel(&target, (git_tag *)obj))) {
git_oid_cpy(&head->oid, git_object_id(target));
if ((error = git_vector_insert(&t->refs, head)) < 0) {
free_head(head);
}
}
git_object_free(obj);
git_object_free(target);
return error;
}
static int store_refs(transport_local *t)
{
size_t i;
git_remote_head *head;
git_strarray ref_names = {0};
GIT_ASSERT_ARG(t);
if (git_reference_list(&ref_names, t->repo) < 0)
goto on_error;
/* Clear all heads we might have fetched in a previous connect */
git_vector_foreach(&t->refs, i, head) {
git__free(head->name);
git__free(head);
}
/* Clear the vector so we can reuse it */
git_vector_clear(&t->refs);
/* Sort the references first */
git__tsort((void **)ref_names.strings, ref_names.count, &git__strcmp_cb);
/* Add HEAD iff direction is fetch */
if (t->direction == GIT_DIRECTION_FETCH && add_ref(t, GIT_HEAD_FILE) < 0)
goto on_error;
for (i = 0; i < ref_names.count; ++i) {
if (add_ref(t, ref_names.strings[i]) < 0)
goto on_error;
}
t->have_refs = 1;
git_strarray_dispose(&ref_names);
return 0;
on_error:
git_vector_free(&t->refs);
git_strarray_dispose(&ref_names);
return -1;
}
/*
* Try to open the url as a git directory. The direction doesn't
* matter in this case because we're calculating the heads ourselves.
*/
static int local_connect(
git_transport *transport,
const char *url,
int direction,
const git_remote_connect_options *connect_opts)
{
git_repository *repo;
int error;
transport_local *t = (transport_local *)transport;
const char *path;
git_str buf = GIT_STR_INIT;
if (t->connected)
return 0;
if (git_remote_connect_options_normalize(&t->connect_opts, t->owner->repo, connect_opts) < 0)
return -1;
free_heads(&t->refs);
t->url = git__strdup(url);
GIT_ERROR_CHECK_ALLOC(t->url);
t->direction = direction;
/* 'url' may be a url or path; convert to a path */
if ((error = git_fs_path_from_url_or_path(&buf, url)) < 0) {
git_str_dispose(&buf);
return error;
}
path = git_str_cstr(&buf);
error = git_repository_open(&repo, path);
git_str_dispose(&buf);
if (error < 0)
return -1;
t->repo = repo;
if (store_refs(t) < 0)
return -1;
t->connected = 1;
return 0;
}
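/*
 * Note: git_fs_path_from_url_or_path() accepts either form, so both
 * "file:///tmp/repo" and the plain path "/tmp/repo" reach
 * git_repository_open() above as a local path.
 */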
static int local_set_connect_opts(
git_transport *transport,
const git_remote_connect_options *connect_opts)
{
transport_local *t = (transport_local *)transport;
if (!t->connected) {
git_error_set(GIT_ERROR_NET, "cannot reconfigure a transport that is not connected");
return -1;
}
return git_remote_connect_options_normalize(&t->connect_opts, t->owner->repo, connect_opts);
}
static int local_capabilities(unsigned int *capabilities, git_transport *transport)
{
GIT_UNUSED(transport);
*capabilities = GIT_REMOTE_CAPABILITY_TIP_OID |
GIT_REMOTE_CAPABILITY_REACHABLE_OID;
return 0;
}
static int local_ls(const git_remote_head ***out, size_t *size, git_transport *transport)
{
transport_local *t = (transport_local *)transport;
if (!t->have_refs) {
git_error_set(GIT_ERROR_NET, "the transport has not yet loaded the refs");
return -1;
}
*out = (const git_remote_head **)t->refs.contents;
*size = t->refs.length;
return 0;
}
static int local_negotiate_fetch(
git_transport *transport,
git_repository *repo,
const git_remote_head * const *refs,
size_t count)
{
transport_local *t = (transport_local*)transport;
git_remote_head *rhead;
unsigned int i;
GIT_UNUSED(refs);
GIT_UNUSED(count);
/* Fill in the loids */
git_vector_foreach(&t->refs, i, rhead) {
git_object *obj;
int error = git_revparse_single(&obj, repo, rhead->name);
if (!error)
git_oid_cpy(&rhead->loid, git_object_id(obj));
else if (error != GIT_ENOTFOUND)
return error;
else
git_error_clear();
git_object_free(obj);
}
return 0;
}
static int local_push_update_remote_ref(
git_repository *remote_repo,
const char *lref,
const char *rref,
git_oid *loid,
git_oid *roid)
{
int error;
git_reference *remote_ref = NULL;
/* check for lhs, if it's empty it means to delete */
if (lref[0] != '\0') {
/* Create or update a ref */
error = git_reference_create(NULL, remote_repo, rref, loid,
!git_oid_is_zero(roid), NULL);
} else {
/* Delete a ref */
if ((error = git_reference_lookup(&remote_ref, remote_repo, rref)) < 0) {
if (error == GIT_ENOTFOUND)
error = 0;
return error;
}
error = git_reference_delete(remote_ref);
git_reference_free(remote_ref);
}
return error;
}
static int transfer_to_push_transfer(const git_indexer_progress *stats, void *payload)
{
const git_remote_callbacks *cbs = payload;
if (!cbs || !cbs->push_transfer_progress)
return 0;
return cbs->push_transfer_progress(stats->received_objects, stats->total_objects, stats->received_bytes,
cbs->payload);
}
static int local_push(
git_transport *transport,
git_push *push)
{
transport_local *t = (transport_local *)transport;
git_remote_callbacks *cbs = &t->connect_opts.callbacks;
git_repository *remote_repo = NULL;
push_spec *spec;
char *url = NULL;
const char *path;
git_str buf = GIT_STR_INIT, odb_path = GIT_STR_INIT;
int error;
size_t j;
/* 'push->remote->url' may be a url or path; convert to a path */
if ((error = git_fs_path_from_url_or_path(&buf, push->remote->url)) < 0) {
git_str_dispose(&buf);
return error;
}
path = git_str_cstr(&buf);
error = git_repository_open(&remote_repo, path);
git_str_dispose(&buf);
if (error < 0)
return error;
/* We don't currently support pushing locally to non-bare repos. Proper
non-bare repo push support would require checking configs to see if
we should override the default 'don't let this happen' behavior.
Note that this is only an issue when pushing to the current branch,
but we forbid all pushes just in case */
if (!remote_repo->is_bare) {
error = GIT_EBAREREPO;
git_error_set(GIT_ERROR_INVALID, "local push doesn't (yet) support pushing to non-bare repos.");
goto on_error;
}
if ((error = git_repository__item_path(&odb_path, remote_repo, GIT_REPOSITORY_ITEM_OBJECTS)) < 0
|| (error = git_str_joinpath(&odb_path, odb_path.ptr, "pack")) < 0)
goto on_error;
error = git_packbuilder_write(push->pb, odb_path.ptr, 0, transfer_to_push_transfer, (void *) cbs);
git_str_dispose(&odb_path);
if (error < 0)
goto on_error;
push->unpack_ok = 1;
git_vector_foreach(&push->specs, j, spec) {
push_status *status;
const git_error *last;
char *ref = spec->refspec.dst;
status = git__calloc(1, sizeof(push_status));
if (!status)
goto on_error;
status->ref = git__strdup(ref);
if (!status->ref) {
git_push_status_free(status);
goto on_error;
}
error = local_push_update_remote_ref(remote_repo, spec->refspec.src, spec->refspec.dst,
&spec->loid, &spec->roid);
switch (error) {
case GIT_OK:
break;
case GIT_EINVALIDSPEC:
status->msg = git__strdup("funny refname");
break;
case GIT_ENOTFOUND:
status->msg = git__strdup("Remote branch not found to delete");
break;
default:
last = git_error_last();
if (last && last->message)
status->msg = git__strdup(last->message);
else
status->msg = git__strdup("Unspecified error encountered");
break;
}
/* failed to allocate memory for a status message */
if (error < 0 && !status->msg) {
git_push_status_free(status);
goto on_error;
}
/* failed to insert the ref update status */
if ((error = git_vector_insert(&push->status, status)) < 0) {
git_push_status_free(status);
goto on_error;
}
}
if (push->specs.length) {
url = git__strdup(t->url);
if (!url || t->parent.close(&t->parent) < 0 ||
t->parent.connect(&t->parent, url,
GIT_DIRECTION_PUSH, NULL))
goto on_error;
}
error = 0;
on_error:
git_repository_free(remote_repo);
git__free(url);
return error;
}
typedef struct foreach_data {
git_indexer_progress *stats;
git_indexer_progress_cb progress_cb;
void *progress_payload;
git_odb_writepack *writepack;
} foreach_data;
static int foreach_cb(void *buf, size_t len, void *payload)
{
foreach_data *data = (foreach_data*)payload;
data->stats->received_bytes += len;
return data->writepack->append(data->writepack, buf, len, data->stats);
}
static const char *counting_objects_fmt = "Counting objects %d\r";
static const char *compressing_objects_fmt = "Compressing objects: %.0f%% (%d/%d)";
static int local_counting(int stage, unsigned int current, unsigned int total, void *payload)
{
git_str progress_info = GIT_STR_INIT;
transport_local *t = payload;
int error;
if (!t->connect_opts.callbacks.sideband_progress)
return 0;
if (stage == GIT_PACKBUILDER_ADDING_OBJECTS) {
git_str_printf(&progress_info, counting_objects_fmt, current);
} else if (stage == GIT_PACKBUILDER_DELTAFICATION) {
float perc = (((float) current) / total) * 100;
git_str_printf(&progress_info, compressing_objects_fmt, perc, current, total);
if (current == total)
git_str_printf(&progress_info, ", done\n");
else
git_str_putc(&progress_info, '\r');
}
if (git_str_oom(&progress_info))
return -1;
if (progress_info.size > INT_MAX) {
git_error_set(GIT_ERROR_NET, "remote sent overly large progress data");
git_str_dispose(&progress_info);
return -1;
}
error = t->connect_opts.callbacks.sideband_progress(
progress_info.ptr,
(int)progress_info.size,
t->connect_opts.callbacks.payload);
git_str_dispose(&progress_info);
return error;
}
static int foreach_reference_cb(git_reference *reference, void *payload)
{
git_revwalk *walk = (git_revwalk *)payload;
int error;
if (git_reference_type(reference) != GIT_REFERENCE_DIRECT) {
git_reference_free(reference);
return 0;
}
error = git_revwalk_hide(walk, git_reference_target(reference));
/* The reference is in the local repository, so the target may not
* exist on the remote. It also may not be a commit. */
if (error == GIT_ENOTFOUND || error == GIT_ERROR_INVALID) {
git_error_clear();
error = 0;
}
git_reference_free(reference);
return error;
}
static int local_download_pack(
git_transport *transport,
git_repository *repo,
git_indexer_progress *stats)
{
transport_local *t = (transport_local*)transport;
git_revwalk *walk = NULL;
git_remote_head *rhead;
unsigned int i;
int error = -1;
git_packbuilder *pack = NULL;
git_odb_writepack *writepack = NULL;
git_odb *odb = NULL;
git_str progress_info = GIT_STR_INIT;
foreach_data data = {0};
if ((error = git_revwalk_new(&walk, t->repo)) < 0)
goto cleanup;
git_revwalk_sorting(walk, GIT_SORT_TIME);
if ((error = git_packbuilder_new(&pack, t->repo)) < 0)
goto cleanup;
git_packbuilder_set_callbacks(pack, local_counting, t);
stats->total_objects = 0;
stats->indexed_objects = 0;
stats->received_objects = 0;
stats->received_bytes = 0;
git_vector_foreach(&t->refs, i, rhead) {
git_object *obj;
if ((error = git_object_lookup(&obj, t->repo, &rhead->oid, GIT_OBJECT_ANY)) < 0)
goto cleanup;
if (git_object_type(obj) == GIT_OBJECT_COMMIT) {
/* Revwalker includes only wanted commits */
error = git_revwalk_push(walk, &rhead->oid);
} else {
/* Tag or some other wanted object. Add it on its own */
error = git_packbuilder_insert_recur(pack, &rhead->oid, rhead->name);
}
git_object_free(obj);
if (error < 0)
goto cleanup;
}
if ((error = git_reference_foreach(repo, foreach_reference_cb, walk)))
goto cleanup;
if ((error = git_packbuilder_insert_walk(pack, walk)))
goto cleanup;
if (t->connect_opts.callbacks.sideband_progress) {
if ((error = git_str_printf(
&progress_info,
counting_objects_fmt,
git_packbuilder_object_count(pack))) < 0 ||
(error = t->connect_opts.callbacks.sideband_progress(
progress_info.ptr,
(int)progress_info.size,
t->connect_opts.callbacks.payload)) < 0)
goto cleanup;
}
/* Walk the objects, building a packfile */
if ((error = git_repository_odb__weakptr(&odb, repo)) < 0)
goto cleanup;
/* One last one with the newline */
if (t->connect_opts.callbacks.sideband_progress) {
git_str_clear(&progress_info);
if ((error = git_str_printf(
&progress_info,
counting_objects_fmt,
git_packbuilder_object_count(pack))) < 0 ||
(error = git_str_putc(&progress_info, '\n')) < 0 ||
(error = t->connect_opts.callbacks.sideband_progress(
progress_info.ptr,
(int)progress_info.size,
t->connect_opts.callbacks.payload)) < 0)
goto cleanup;
}
if ((error = git_odb_write_pack(
&writepack,
odb,
t->connect_opts.callbacks.transfer_progress,
t->connect_opts.callbacks.payload)) < 0)
goto cleanup;
/* Write the data to the ODB */
data.stats = stats;
data.progress_cb = t->connect_opts.callbacks.transfer_progress;
data.progress_payload = t->connect_opts.callbacks.payload;
data.writepack = writepack;
/* autodetect */
git_packbuilder_set_threads(pack, 0);
if ((error = git_packbuilder_foreach(pack, foreach_cb, &data)) != 0)
goto cleanup;
error = writepack->commit(writepack, stats);
cleanup:
if (writepack) writepack->free(writepack);
git_str_dispose(&progress_info);
git_packbuilder_free(pack);
git_revwalk_free(walk);
return error;
}
static int local_is_connected(git_transport *transport)
{
transport_local *t = (transport_local *)transport;
return t->connected;
}
static void local_cancel(git_transport *transport)
{
transport_local *t = (transport_local *)transport;
git_atomic32_set(&t->cancelled, 1);
}
static int local_close(git_transport *transport)
{
transport_local *t = (transport_local *)transport;
t->connected = 0;
if (t->repo) {
git_repository_free(t->repo);
t->repo = NULL;
}
if (t->url) {
git__free(t->url);
t->url = NULL;
}
return 0;
}
static void local_free(git_transport *transport)
{
transport_local *t = (transport_local *)transport;
free_heads(&t->refs);
/* Close the transport, if it's still open. */
local_close(transport);
/* Free the transport */
git__free(t);
}
/**************
* Public API *
**************/
int git_transport_local(git_transport **out, git_remote *owner, void *param)
{
int error;
transport_local *t;
GIT_UNUSED(param);
t = git__calloc(1, sizeof(transport_local));
GIT_ERROR_CHECK_ALLOC(t);
t->parent.version = GIT_TRANSPORT_VERSION;
t->parent.connect = local_connect;
t->parent.set_connect_opts = local_set_connect_opts;
t->parent.capabilities = local_capabilities;
t->parent.negotiate_fetch = local_negotiate_fetch;
t->parent.download_pack = local_download_pack;
t->parent.push = local_push;
t->parent.close = local_close;
t->parent.free = local_free;
t->parent.ls = local_ls;
t->parent.is_connected = local_is_connected;
t->parent.cancel = local_cancel;
if ((error = git_vector_init(&t->refs, 0, NULL)) < 0) {
git__free(t);
return error;
}
t->owner = owner;
*out = (git_transport *) t;
return 0;
}
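/*
 * Example (sketch): this factory matches the git_transport_cb
 * signature, so the same shape can be registered for a custom scheme
 * through the sys API; the "foo://" prefix here is purely
 * illustrative:
 *
 *   #include <git2/sys/transport.h>
 *
 *   git_transport_register("foo://", git_transport_local, NULL);
 */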
| libgit2-main | src/libgit2/transports/local.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "git2/credential_helpers.h"
int git_credential_userpass(
git_credential **cred,
const char *url,
const char *user_from_url,
unsigned int allowed_types,
void *payload)
{
git_credential_userpass_payload *userpass = (git_credential_userpass_payload*)payload;
const char *effective_username = NULL;
GIT_UNUSED(url);
if (!userpass || !userpass->password) return -1;
/* Username resolution: a username can be passed with the URL, the
* credentials payload, or both. Here's what we do. Note that if we get
* this far, we know that any password the url may contain has already
* failed at least once, so we ignore it.
*
* | Payload | URL | Used |
* +-------------+----------+-----------+
* | yes | no | payload |
* | yes | yes | payload |
* | no | yes | url |
* | no | no | FAIL |
*/
if (userpass->username)
effective_username = userpass->username;
else if (user_from_url)
effective_username = user_from_url;
else
return -1;
if (GIT_CREDENTIAL_USERNAME & allowed_types)
return git_credential_username_new(cred, effective_username);
if ((GIT_CREDENTIAL_USERPASS_PLAINTEXT & allowed_types) == 0 ||
git_credential_userpass_plaintext_new(cred, effective_username, userpass->password) < 0)
return -1;
return 0;
}
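/*
 * Example (sketch, error handling omitted): this helper is meant to
 * be installed directly as the credential-acquisition callback, with
 * the username and password carried in the payload:
 *
 *   git_credential_userpass_payload user_pass = { "user", "pass" };
 *   git_fetch_options opts = GIT_FETCH_OPTIONS_INIT;
 *
 *   opts.callbacks.credentials = git_credential_userpass;
 *   opts.callbacks.payload = &user_pass;
 */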
/* Deprecated credential functions */
#ifndef GIT_DEPRECATE_HARD
int git_cred_userpass(
git_credential **out,
const char *url,
const char *user_from_url,
unsigned int allowed_types,
void *payload)
{
return git_credential_userpass(out, url, user_from_url,
allowed_types, payload);
}
#endif
| libgit2-main | src/libgit2/transports/credential_helpers.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "auth_ntlm.h"
#include "common.h"
#include "str.h"
#include "auth.h"
#include "git2/sys/credential.h"
#ifdef GIT_NTLM
#include "ntlmclient.h"
typedef struct {
git_http_auth_context parent;
ntlm_client *ntlm;
char *challenge;
bool complete;
} http_auth_ntlm_context;
static int ntlm_set_challenge(
git_http_auth_context *c,
const char *challenge)
{
http_auth_ntlm_context *ctx = (http_auth_ntlm_context *)c;
GIT_ASSERT_ARG(ctx);
GIT_ASSERT_ARG(challenge);
git__free(ctx->challenge);
ctx->challenge = git__strdup(challenge);
GIT_ERROR_CHECK_ALLOC(ctx->challenge);
return 0;
}
static int ntlm_set_credentials(http_auth_ntlm_context *ctx, git_credential *_cred)
{
git_credential_userpass_plaintext *cred;
const char *sep, *username;
char *domain = NULL, *domainuser = NULL;
int error = 0;
GIT_ASSERT(_cred->credtype == GIT_CREDENTIAL_USERPASS_PLAINTEXT);
cred = (git_credential_userpass_plaintext *)_cred;
if ((sep = strchr(cred->username, '\\')) != NULL) {
domain = git__strndup(cred->username, (sep - cred->username));
GIT_ERROR_CHECK_ALLOC(domain);
domainuser = git__strdup(sep + 1);
GIT_ERROR_CHECK_ALLOC(domainuser);
username = domainuser;
} else {
username = cred->username;
}
if (ntlm_client_set_credentials(ctx->ntlm,
username, domain, cred->password) < 0) {
git_error_set(GIT_ERROR_NET, "could not set credentials: %s",
ntlm_client_errmsg(ctx->ntlm));
error = -1;
goto done;
}
done:
git__free(domain);
git__free(domainuser);
return error;
}
static int ntlm_next_token(
git_str *buf,
git_http_auth_context *c,
git_credential *cred)
{
http_auth_ntlm_context *ctx = (http_auth_ntlm_context *)c;
git_str input_buf = GIT_STR_INIT;
const unsigned char *msg;
size_t challenge_len, msg_len;
int error = GIT_EAUTH;
GIT_ASSERT_ARG(buf);
GIT_ASSERT_ARG(ctx);
GIT_ASSERT(ctx->ntlm);
challenge_len = ctx->challenge ? strlen(ctx->challenge) : 0;
if (ctx->complete)
ntlm_client_reset(ctx->ntlm);
/*
* Set us complete now since it's the default case; the one
* incomplete case (successfully created a client request)
* will explicitly set that it requires a second step.
*/
ctx->complete = true;
if (cred && ntlm_set_credentials(ctx, cred) != 0)
goto done;
if (challenge_len < 4) {
git_error_set(GIT_ERROR_NET, "no ntlm challenge sent from server");
goto done;
} else if (challenge_len == 4) {
if (memcmp(ctx->challenge, "NTLM", 4) != 0) {
git_error_set(GIT_ERROR_NET, "server did not request NTLM");
goto done;
}
if (ntlm_client_negotiate(&msg, &msg_len, ctx->ntlm) != 0) {
git_error_set(GIT_ERROR_NET, "ntlm authentication failed: %s",
ntlm_client_errmsg(ctx->ntlm));
goto done;
}
ctx->complete = false;
} else {
if (memcmp(ctx->challenge, "NTLM ", 5) != 0) {
git_error_set(GIT_ERROR_NET, "challenge from server was not NTLM");
goto done;
}
if (git_str_decode_base64(&input_buf,
ctx->challenge + 5, challenge_len - 5) < 0) {
git_error_set(GIT_ERROR_NET, "invalid NTLM challenge from server");
goto done;
}
if (ntlm_client_set_challenge(ctx->ntlm,
(const unsigned char *)input_buf.ptr, input_buf.size) != 0) {
git_error_set(GIT_ERROR_NET, "ntlm challenge failed: %s",
ntlm_client_errmsg(ctx->ntlm));
goto done;
}
if (ntlm_client_response(&msg, &msg_len, ctx->ntlm) != 0) {
git_error_set(GIT_ERROR_NET, "ntlm authentication failed: %s",
ntlm_client_errmsg(ctx->ntlm));
goto done;
}
}
git_str_puts(buf, "NTLM ");
git_str_encode_base64(buf, (const char *)msg, msg_len);
if (git_str_oom(buf))
goto done;
error = 0;
done:
git_str_dispose(&input_buf);
return error;
}
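/*
 * Example (sketch): the branches above implement the usual NTLM
 * three-leg handshake over HTTP:
 *
 *   S: WWW-Authenticate: NTLM                       (bare challenge)
 *   C: Authorization: NTLM <base64 Type 1 negotiate message>
 *   S: WWW-Authenticate: NTLM <base64 Type 2 challenge message>
 *   C: Authorization: NTLM <base64 Type 3 response message>
 *   S: HTTP/1.1 200 OK
 *
 * A bare "NTLM" challenge (challenge_len == 4) produces the Type 1
 * message; a challenge with a payload is decoded and answered with
 * the Type 3 response.
 */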
static int ntlm_is_complete(git_http_auth_context *c)
{
http_auth_ntlm_context *ctx = (http_auth_ntlm_context *)c;
GIT_ASSERT_ARG(ctx);
return (ctx->complete == true);
}
static void ntlm_context_free(git_http_auth_context *c)
{
http_auth_ntlm_context *ctx = (http_auth_ntlm_context *)c;
ntlm_client_free(ctx->ntlm);
git__free(ctx->challenge);
git__free(ctx);
}
static int ntlm_init_context(
http_auth_ntlm_context *ctx,
const git_net_url *url)
{
GIT_UNUSED(url);
if ((ctx->ntlm = ntlm_client_init(NTLM_CLIENT_DEFAULTS)) == NULL) {
git_error_set_oom();
return -1;
}
return 0;
}
int git_http_auth_ntlm(
git_http_auth_context **out,
const git_net_url *url)
{
http_auth_ntlm_context *ctx;
GIT_UNUSED(url);
*out = NULL;
ctx = git__calloc(1, sizeof(http_auth_ntlm_context));
GIT_ERROR_CHECK_ALLOC(ctx);
if (ntlm_init_context(ctx, url) < 0) {
git__free(ctx);
return -1;
}
ctx->parent.type = GIT_HTTP_AUTH_NTLM;
ctx->parent.credtypes = GIT_CREDENTIAL_USERPASS_PLAINTEXT;
ctx->parent.connection_affinity = 1;
ctx->parent.set_challenge = ntlm_set_challenge;
ctx->parent.next_token = ntlm_next_token;
ctx->parent.is_complete = ntlm_is_complete;
ctx->parent.free = ntlm_context_free;
*out = (git_http_auth_context *)ctx;
return 0;
}
#endif /* GIT_NTLM */
| libgit2-main | src/libgit2/transports/auth_ntlm.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "smart.h"
#include "util.h"
#include "netops.h"
#include "posix.h"
#include "str.h"
#include "oid.h"
#include "git2/types.h"
#include "git2/errors.h"
#include "git2/refs.h"
#include "git2/revwalk.h"
#include <ctype.h>
#define PKT_LEN_SIZE 4
static const char pkt_done_str[] = "0009done\n";
static const char pkt_flush_str[] = "0000";
static const char pkt_have_prefix[] = "0032have ";
static const char pkt_want_prefix[] = "0032want ";
static int flush_pkt(git_pkt **out)
{
git_pkt *pkt;
pkt = git__malloc(sizeof(git_pkt));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_FLUSH;
*out = pkt;
return 0;
}
/* the rest of the line will be useful for multi_ack and multi_ack_detailed */
static int ack_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_ack *pkt;
pkt = git__calloc(1, sizeof(git_pkt_ack));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_ACK;
if (git__prefixncmp(line, len, "ACK "))
goto out_err;
line += 4;
len -= 4;
if (len < GIT_OID_SHA1_HEXSIZE ||
git_oid__fromstr(&pkt->oid, line, GIT_OID_SHA1) < 0)
goto out_err;
line += GIT_OID_SHA1_HEXSIZE;
len -= GIT_OID_SHA1_HEXSIZE;
if (len && line[0] == ' ') {
line++;
len--;
if (!git__prefixncmp(line, len, "continue"))
pkt->status = GIT_ACK_CONTINUE;
else if (!git__prefixncmp(line, len, "common"))
pkt->status = GIT_ACK_COMMON;
else if (!git__prefixncmp(line, len, "ready"))
pkt->status = GIT_ACK_READY;
else
goto out_err;
}
*out = (git_pkt *) pkt;
return 0;
out_err:
git_error_set(GIT_ERROR_NET, "error parsing ACK pkt-line");
git__free(pkt);
return -1;
}
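/*
 * Example: a multi_ack_detailed ACK as parsed above, shown with its
 * pkt-line length prefix (4 + 4 + 40 + 1 + 8 + 1 = 58 = 0x3a):
 *
 *   003aACK 0123456789abcdef0123456789abcdef01234567 continue\n
 */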
static int nak_pkt(git_pkt **out)
{
git_pkt *pkt;
pkt = git__malloc(sizeof(git_pkt));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_NAK;
*out = pkt;
return 0;
}
static int comment_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_comment *pkt;
size_t alloclen;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_comment), len);
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1);
pkt = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_COMMENT;
memcpy(pkt->comment, line, len);
pkt->comment[len] = '\0';
*out = (git_pkt *) pkt;
return 0;
}
static int err_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_err *pkt = NULL;
size_t alloclen;
/* Remove "ERR " from the line */
if (git__prefixncmp(line, len, "ERR "))
goto out_err;
line += 4;
len -= 4;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len);
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, alloclen, 1);
pkt = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_ERR;
pkt->len = len;
memcpy(pkt->error, line, len);
pkt->error[len] = '\0';
*out = (git_pkt *) pkt;
return 0;
out_err:
git_error_set(GIT_ERROR_NET, "error parsing ERR pkt-line");
git__free(pkt);
return -1;
}
static int data_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_data *pkt;
size_t alloclen;
line++;
len--;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len);
pkt = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_DATA;
pkt->len = len;
memcpy(pkt->data, line, len);
*out = (git_pkt *) pkt;
return 0;
}
static int sideband_progress_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_progress *pkt;
size_t alloclen;
line++;
len--;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, sizeof(git_pkt_progress), len);
pkt = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_PROGRESS;
pkt->len = len;
memcpy(pkt->data, line, len);
*out = (git_pkt *) pkt;
return 0;
}
static int sideband_error_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_err *pkt;
size_t alloc_len;
line++;
len--;
GIT_ERROR_CHECK_ALLOC_ADD(&alloc_len, sizeof(git_pkt_err), len);
GIT_ERROR_CHECK_ALLOC_ADD(&alloc_len, alloc_len, 1);
pkt = git__malloc(alloc_len);
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_ERR;
pkt->len = (int)len;
memcpy(pkt->error, line, len);
pkt->error[len] = '\0';
*out = (git_pkt *)pkt;
return 0;
}
/*
* Parse an other-ref line.
*/
static int ref_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_ref *pkt;
size_t alloclen;
pkt = git__calloc(1, sizeof(git_pkt_ref));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_REF;
if (len < GIT_OID_SHA1_HEXSIZE ||
git_oid__fromstr(&pkt->head.oid, line, GIT_OID_SHA1) < 0)
goto out_err;
line += GIT_OID_SHA1_HEXSIZE;
len -= GIT_OID_SHA1_HEXSIZE;
if (git__prefixncmp(line, len, " "))
goto out_err;
line++;
len--;
if (!len)
goto out_err;
if (line[len - 1] == '\n')
--len;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, len, 1);
pkt->head.name = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt->head.name);
memcpy(pkt->head.name, line, len);
pkt->head.name[len] = '\0';
if (strlen(pkt->head.name) < len)
pkt->capabilities = strchr(pkt->head.name, '\0') + 1;
*out = (git_pkt *)pkt;
return 0;
out_err:
git_error_set(GIT_ERROR_NET, "error parsing REF pkt-line");
if (pkt)
git__free(pkt->head.name);
git__free(pkt);
return -1;
}
static int ok_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_ok *pkt;
size_t alloc_len;
pkt = git__malloc(sizeof(*pkt));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_OK;
if (git__prefixncmp(line, len, "ok "))
goto out_err;
line += 3;
len -= 3;
if (len && line[len - 1] == '\n')
--len;
GIT_ERROR_CHECK_ALLOC_ADD(&alloc_len, len, 1);
pkt->ref = git__malloc(alloc_len);
GIT_ERROR_CHECK_ALLOC(pkt->ref);
memcpy(pkt->ref, line, len);
pkt->ref[len] = '\0';
*out = (git_pkt *)pkt;
return 0;
out_err:
git_error_set(GIT_ERROR_NET, "error parsing OK pkt-line");
git__free(pkt);
return -1;
}
static int ng_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_ng *pkt;
const char *ptr, *eol;
size_t alloclen;
pkt = git__malloc(sizeof(*pkt));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->ref = NULL;
pkt->type = GIT_PKT_NG;
eol = line + len;
if (git__prefixncmp(line, len, "ng "))
goto out_err;
line += 3;
if (!(ptr = memchr(line, ' ', eol - line)))
goto out_err;
len = ptr - line;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, len, 1);
pkt->ref = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt->ref);
memcpy(pkt->ref, line, len);
pkt->ref[len] = '\0';
line = ptr + 1;
if (line >= eol)
goto out_err;
if (!(ptr = memchr(line, '\n', eol - line)))
goto out_err;
len = ptr - line;
GIT_ERROR_CHECK_ALLOC_ADD(&alloclen, len, 1);
pkt->msg = git__malloc(alloclen);
GIT_ERROR_CHECK_ALLOC(pkt->msg);
memcpy(pkt->msg, line, len);
pkt->msg[len] = '\0';
*out = (git_pkt *)pkt;
return 0;
out_err:
git_error_set(GIT_ERROR_NET, "invalid packet line");
git__free(pkt->ref);
git__free(pkt);
return -1;
}
static int unpack_pkt(git_pkt **out, const char *line, size_t len)
{
git_pkt_unpack *pkt;
pkt = git__malloc(sizeof(*pkt));
GIT_ERROR_CHECK_ALLOC(pkt);
pkt->type = GIT_PKT_UNPACK;
if (!git__prefixncmp(line, len, "unpack ok"))
pkt->unpack_ok = 1;
else
pkt->unpack_ok = 0;
*out = (git_pkt *)pkt;
return 0;
}
static int parse_len(size_t *out, const char *line, size_t linelen)
{
char num[PKT_LEN_SIZE + 1];
int i, k, error;
int32_t len;
const char *num_end;
/* Not even enough for the length */
if (linelen < PKT_LEN_SIZE)
return GIT_EBUFS;
memcpy(num, line, PKT_LEN_SIZE);
num[PKT_LEN_SIZE] = '\0';
for (i = 0; i < PKT_LEN_SIZE; ++i) {
if (!isxdigit(num[i])) {
/* Sanitize non-printable characters before including them in the error message */
for (k = 0; k < PKT_LEN_SIZE; ++k) {
if(!isprint(num[k])) {
num[k] = '.';
}
}
git_error_set(GIT_ERROR_NET, "invalid hex digit in length: '%s'", num);
return -1;
}
}
if ((error = git__strntol32(&len, num, PKT_LEN_SIZE, &num_end, 16)) < 0)
return error;
if (len < 0)
return -1;
*out = (size_t) len;
return 0;
}
/*
* As per the documentation, the syntax is:
*
* pkt-line = data-pkt / flush-pkt
* data-pkt = pkt-len pkt-payload
* pkt-len = 4*(HEXDIG)
* pkt-payload = (pkt-len -4)*(OCTET)
* flush-pkt = "0000"
*
* Which means that the first four bytes are the length of the line,
* in ASCII hexadecimal (including the four length bytes themselves).
*/
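/*
 * A minimal usage sketch (illustrative only; `buf`/`buf_len` are a
 * caller's receive buffer and `consume_pkt` is a hypothetical helper):
 * callers typically loop over the buffer, advancing by `endptr`, until
 * GIT_EBUFS signals that more data must be read:
 *
 *     git_pkt *pkt;
 *     const char *ptr = buf, *end = buf + buf_len;
 *     while (ptr < end) {
 *         int error = git_pkt_parse_line(&pkt, &ptr, ptr, (size_t)(end - ptr));
 *         if (error == GIT_EBUFS)
 *             break;              (read more data, then retry)
 *         if (error < 0)
 *             return error;
 *         consume_pkt(pkt);
 *         git_pkt_free(pkt);
 *     }
 */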
int git_pkt_parse_line(
git_pkt **pkt, const char **endptr, const char *line, size_t linelen)
{
int error;
size_t len;
if ((error = parse_len(&len, line, linelen)) < 0) {
/*
* If we fail to parse the length, it might be
* because the server is trying to send us the
* packfile already or because we do not yet have
* enough data.
*/
if (error != GIT_EBUFS) {
if (!git__prefixncmp(line, linelen, "PACK"))
git_error_set(GIT_ERROR_NET, "unexpected pack file");
else
git_error_set(GIT_ERROR_NET, "bad packet length");
}
return error;
}
/*
* Make sure there is enough in the buffer to satisfy
* this line.
*/
if (linelen < len)
return GIT_EBUFS;
/*
* The length has to be exactly 0 for a flush packet, or at least
* PKT_LEN_SIZE here, since the decoded length includes its own
* four-byte encoding.  (A line of exactly PKT_LEN_SIZE bytes is
* rejected as an empty packet just below.)
*/
if (len != 0 && len < PKT_LEN_SIZE)
return GIT_ERROR;
line += PKT_LEN_SIZE;
/*
* The Git protocol does not specify empty lines as part
* of the protocol. Not knowing what to do with an empty
* line, we should return an error upon hitting one.
*/
if (len == PKT_LEN_SIZE) {
git_error_set_str(GIT_ERROR_NET, "invalid empty packet");
return GIT_ERROR;
}
if (len == 0) { /* Flush pkt */
*endptr = line;
return flush_pkt(pkt);
}
len -= PKT_LEN_SIZE; /* the encoded length includes its own size */
if (*line == GIT_SIDE_BAND_DATA)
error = data_pkt(pkt, line, len);
else if (*line == GIT_SIDE_BAND_PROGRESS)
error = sideband_progress_pkt(pkt, line, len);
else if (*line == GIT_SIDE_BAND_ERROR)
error = sideband_error_pkt(pkt, line, len);
else if (!git__prefixncmp(line, len, "ACK"))
error = ack_pkt(pkt, line, len);
else if (!git__prefixncmp(line, len, "NAK"))
error = nak_pkt(pkt);
else if (!git__prefixncmp(line, len, "ERR"))
error = err_pkt(pkt, line, len);
else if (*line == '#')
error = comment_pkt(pkt, line, len);
else if (!git__prefixncmp(line, len, "ok"))
error = ok_pkt(pkt, line, len);
else if (!git__prefixncmp(line, len, "ng"))
error = ng_pkt(pkt, line, len);
else if (!git__prefixncmp(line, len, "unpack"))
error = unpack_pkt(pkt, line, len);
else
error = ref_pkt(pkt, line, len);
*endptr = line + len;
return error;
}
void git_pkt_free(git_pkt *pkt)
{
if (pkt == NULL) {
return;
}
if (pkt->type == GIT_PKT_REF) {
git_pkt_ref *p = (git_pkt_ref *) pkt;
git__free(p->head.name);
git__free(p->head.symref_target);
}
if (pkt->type == GIT_PKT_OK) {
git_pkt_ok *p = (git_pkt_ok *) pkt;
git__free(p->ref);
}
if (pkt->type == GIT_PKT_NG) {
git_pkt_ng *p = (git_pkt_ng *) pkt;
git__free(p->ref);
git__free(p->msg);
}
git__free(pkt);
}
int git_pkt_buffer_flush(git_str *buf)
{
return git_str_put(buf, pkt_flush_str, strlen(pkt_flush_str));
}
static int buffer_want_with_caps(const git_remote_head *head, transport_smart_caps *caps, git_str *buf)
{
git_str str = GIT_STR_INIT;
char oid[GIT_OID_SHA1_HEXSIZE + 1] = {0};
size_t len;
/* Prefer multi_ack_detailed */
if (caps->multi_ack_detailed)
git_str_puts(&str, GIT_CAP_MULTI_ACK_DETAILED " ");
else if (caps->multi_ack)
git_str_puts(&str, GIT_CAP_MULTI_ACK " ");
/* Prefer side-band-64k if the server supports both */
if (caps->side_band_64k)
git_str_printf(&str, "%s ", GIT_CAP_SIDE_BAND_64K);
else if (caps->side_band)
git_str_printf(&str, "%s ", GIT_CAP_SIDE_BAND);
if (caps->include_tag)
git_str_puts(&str, GIT_CAP_INCLUDE_TAG " ");
if (caps->thin_pack)
git_str_puts(&str, GIT_CAP_THIN_PACK " ");
if (caps->ofs_delta)
git_str_puts(&str, GIT_CAP_OFS_DELTA " ");
if (git_str_oom(&str))
return -1;
len = strlen("XXXXwant ") + GIT_OID_SHA1_HEXSIZE + 1 /* NUL */ +
git_str_len(&str) + 1 /* LF */;
if (len > 0xffff) {
git_error_set(GIT_ERROR_NET,
"tried to produce packet with invalid length %" PRIuZ, len);
return -1;
}
git_str_grow_by(buf, len);
git_oid_fmt(oid, &head->oid);
git_str_printf(buf,
"%04xwant %s %s\n", (unsigned int)len, oid, git_str_cstr(&str));
git_str_dispose(&str);
GIT_ERROR_CHECK_ALLOC_STR(buf);
return 0;
}
/*
* The first "want" line (for the first non-local ref) also advertises
* our capabilities; every subsequent want line carries only the OID.
*/
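/*
 * On the wire, the capability-bearing first line looks roughly like
 * (illustrative OID and capability set; XXXX is the computed hex
 * length):
 *
 *     XXXXwant <40-hex-oid> multi_ack_detailed side-band-64k ofs_delta \n
 *
 * while every following line is simply "want <oid>\n".
 */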
int git_pkt_buffer_wants(
const git_remote_head * const *refs,
size_t count,
transport_smart_caps *caps,
git_str *buf)
{
size_t i = 0;
const git_remote_head *head;
if (caps->common) {
for (; i < count; ++i) {
head = refs[i];
if (!head->local)
break;
}
if (buffer_want_with_caps(refs[i], caps, buf) < 0)
return -1;
i++;
}
for (; i < count; ++i) {
char oid[GIT_OID_SHA1_HEXSIZE];
head = refs[i];
if (head->local)
continue;
git_oid_fmt(oid, &head->oid);
git_str_put(buf, pkt_want_prefix, strlen(pkt_want_prefix));
git_str_put(buf, oid, GIT_OID_SHA1_HEXSIZE);
git_str_putc(buf, '\n');
if (git_str_oom(buf))
return -1;
}
return git_pkt_buffer_flush(buf);
}
int git_pkt_buffer_have(git_oid *oid, git_str *buf)
{
char oidhex[GIT_OID_SHA1_HEXSIZE + 1];
memset(oidhex, 0x0, sizeof(oidhex));
git_oid_fmt(oidhex, oid);
return git_str_printf(buf, "%s%s\n", pkt_have_prefix, oidhex);
}
int git_pkt_buffer_done(git_str *buf)
{
return git_str_puts(buf, pkt_done_str);
}
| libgit2-main | src/libgit2/transports/smart_pkt.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#ifdef GIT_WINHTTP
#include "git2.h"
#include "git2/transport.h"
#include "posix.h"
#include "str.h"
#include "netops.h"
#include "smart.h"
#include "remote.h"
#include "repository.h"
#include "http.h"
#include "git2/sys/credential.h"
#include <wincrypt.h>
#include <winhttp.h>
/* For IInternetSecurityManager zone check */
#include <objbase.h>
#include <urlmon.h>
#define WIDEN2(s) L ## s
#define WIDEN(s) WIDEN2(s)
#define MAX_CONTENT_TYPE_LEN 100
#define WINHTTP_OPTION_PEERDIST_EXTENSION_STATE 109
#define CACHED_POST_BODY_BUF_SIZE 4096
#define UUID_LENGTH_CCH 32
#define TIMEOUT_INFINITE -1
#define DEFAULT_CONNECT_TIMEOUT 60000
#ifndef WINHTTP_IGNORE_REQUEST_TOTAL_LENGTH
#define WINHTTP_IGNORE_REQUEST_TOTAL_LENGTH 0
#endif
#ifndef WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_1
# define WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_1 0x00000200
#endif
#ifndef WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_2
# define WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_2 0x00000800
#endif
#ifndef WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_3
# define WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_3 0x00002000
#endif
#ifndef WINHTTP_NO_CLIENT_CERT_CONTEXT
# define WINHTTP_NO_CLIENT_CERT_CONTEXT NULL
#endif
#ifndef HTTP_STATUS_PERMANENT_REDIRECT
# define HTTP_STATUS_PERMANENT_REDIRECT 308
#endif
#ifndef DWORD_MAX
# define DWORD_MAX 0xffffffff
#endif
bool git_http__expect_continue = false;
static const char *prefix_https = "https://";
static const char *upload_pack_service = "upload-pack";
static const char *upload_pack_ls_service_url = "/info/refs?service=git-upload-pack";
static const char *upload_pack_service_url = "/git-upload-pack";
static const char *receive_pack_service = "receive-pack";
static const char *receive_pack_ls_service_url = "/info/refs?service=git-receive-pack";
static const char *receive_pack_service_url = "/git-receive-pack";
static const wchar_t *get_verb = L"GET";
static const wchar_t *post_verb = L"POST";
static const wchar_t *pragma_nocache = L"Pragma: no-cache";
static const wchar_t *transfer_encoding = L"Transfer-Encoding: chunked";
static const int no_check_cert_flags = SECURITY_FLAG_IGNORE_CERT_CN_INVALID |
SECURITY_FLAG_IGNORE_CERT_DATE_INVALID |
SECURITY_FLAG_IGNORE_UNKNOWN_CA;
#if defined(__MINGW32__)
static const CLSID CLSID_InternetSecurityManager_mingw =
{ 0x7B8A2D94, 0x0AC9, 0x11D1,
{ 0x89, 0x6C, 0x00, 0xC0, 0x4F, 0xB6, 0xBF, 0xC4 } };
static const IID IID_IInternetSecurityManager_mingw =
{ 0x79EAC9EE, 0xBAF9, 0x11CE,
{ 0x8C, 0x82, 0x00, 0xAA, 0x00, 0x4B, 0xA9, 0x0B } };
# define CLSID_InternetSecurityManager CLSID_InternetSecurityManager_mingw
# define IID_IInternetSecurityManager IID_IInternetSecurityManager_mingw
#endif
#define OWNING_SUBTRANSPORT(s) ((winhttp_subtransport *)(s)->parent.subtransport)
typedef enum {
GIT_WINHTTP_AUTH_BASIC = 1,
GIT_WINHTTP_AUTH_NTLM = 2,
GIT_WINHTTP_AUTH_NEGOTIATE = 4,
GIT_WINHTTP_AUTH_DIGEST = 8
} winhttp_authmechanism_t;
typedef struct {
git_smart_subtransport_stream parent;
const char *service;
const char *service_url;
const wchar_t *verb;
HINTERNET request;
wchar_t *request_uri;
char *chunk_buffer;
unsigned chunk_buffer_len;
HANDLE post_body;
DWORD post_body_len;
unsigned sent_request : 1,
received_response : 1,
chunked : 1,
status_sending_request_reached: 1;
} winhttp_stream;
typedef struct {
git_net_url url;
git_credential *cred;
int auth_mechanisms;
bool url_cred_presented;
} winhttp_server;
typedef struct {
git_smart_subtransport parent;
transport_smart *owner;
winhttp_server server;
winhttp_server proxy;
HINTERNET session;
HINTERNET connection;
} winhttp_subtransport;
static int apply_userpass_credentials(HINTERNET request, DWORD target, int mechanisms, git_credential *cred)
{
git_credential_userpass_plaintext *c = (git_credential_userpass_plaintext *)cred;
wchar_t *user = NULL, *pass = NULL;
int user_len = 0, pass_len = 0, error = 0;
DWORD native_scheme;
if (mechanisms & GIT_WINHTTP_AUTH_NEGOTIATE) {
native_scheme = WINHTTP_AUTH_SCHEME_NEGOTIATE;
} else if (mechanisms & GIT_WINHTTP_AUTH_NTLM) {
native_scheme = WINHTTP_AUTH_SCHEME_NTLM;
} else if (mechanisms & GIT_WINHTTP_AUTH_DIGEST) {
native_scheme = WINHTTP_AUTH_SCHEME_DIGEST;
} else if (mechanisms & GIT_WINHTTP_AUTH_BASIC) {
native_scheme = WINHTTP_AUTH_SCHEME_BASIC;
} else {
git_error_set(GIT_ERROR_HTTP, "invalid authentication scheme");
error = GIT_EAUTH;
goto done;
}
if ((error = user_len = git__utf8_to_16_alloc(&user, c->username)) < 0)
goto done;
if ((error = pass_len = git__utf8_to_16_alloc(&pass, c->password)) < 0)
goto done;
if (!WinHttpSetCredentials(request, target, native_scheme, user, pass, NULL)) {
git_error_set(GIT_ERROR_OS, "failed to set credentials");
error = -1;
}
done:
if (user_len > 0)
git__memzero(user, user_len * sizeof(wchar_t));
if (pass_len > 0)
git__memzero(pass, pass_len * sizeof(wchar_t));
git__free(user);
git__free(pass);
return error;
}
static int apply_default_credentials(HINTERNET request, DWORD target, int mechanisms)
{
DWORD autologon_level = WINHTTP_AUTOLOGON_SECURITY_LEVEL_LOW;
DWORD native_scheme = 0;
if ((mechanisms & GIT_WINHTTP_AUTH_NEGOTIATE) != 0) {
native_scheme = WINHTTP_AUTH_SCHEME_NEGOTIATE;
} else if ((mechanisms & GIT_WINHTTP_AUTH_NTLM) != 0) {
native_scheme = WINHTTP_AUTH_SCHEME_NTLM;
} else {
git_error_set(GIT_ERROR_HTTP, "invalid authentication scheme");
return GIT_EAUTH;
}
/*
* Autologon policy must be "low" to use default creds.
* This is safe as the user has explicitly requested it.
*/
if (!WinHttpSetOption(request, WINHTTP_OPTION_AUTOLOGON_POLICY, &autologon_level, sizeof(DWORD))) {
git_error_set(GIT_ERROR_OS, "could not configure logon policy");
return -1;
}
if (!WinHttpSetCredentials(request, target, native_scheme, NULL, NULL, NULL)) {
git_error_set(GIT_ERROR_OS, "could not configure credentials");
return -1;
}
return 0;
}
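/*
 * Attempt to create a credential from the username/password embedded
 * in the URL.  Returns 0 on success, a positive value if no matching
 * credential type is allowed (pass-through), or a negative value on
 * error.
 */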
static int acquire_url_cred(
git_credential **cred,
unsigned int allowed_types,
const char *username,
const char *password)
{
if (allowed_types & GIT_CREDENTIAL_USERPASS_PLAINTEXT)
return git_credential_userpass_plaintext_new(cred, username, password);
if ((allowed_types & GIT_CREDENTIAL_DEFAULT) && *username == '\0' && *password == '\0')
return git_credential_default_new(cred);
return 1;
}
static int acquire_fallback_cred(
git_credential **cred,
const char *url,
unsigned int allowed_types)
{
int error = 1;
/* If the target URI supports integrated Windows authentication
* as an authentication mechanism */
if (GIT_CREDENTIAL_DEFAULT & allowed_types) {
wchar_t *wide_url;
HRESULT hCoInitResult;
/* Convert URL to wide characters */
if (git__utf8_to_16_alloc(&wide_url, url) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert string to wide form");
return -1;
}
hCoInitResult = CoInitializeEx(NULL, COINIT_MULTITHREADED);
if (SUCCEEDED(hCoInitResult) || hCoInitResult == RPC_E_CHANGED_MODE) {
IInternetSecurityManager *pISM;
/* And if the target URI is in the My Computer, Intranet, or Trusted zones */
if (SUCCEEDED(CoCreateInstance(&CLSID_InternetSecurityManager, NULL,
CLSCTX_ALL, &IID_IInternetSecurityManager, (void **)&pISM))) {
DWORD dwZone;
if (SUCCEEDED(pISM->lpVtbl->MapUrlToZone(pISM, wide_url, &dwZone, 0)) &&
(URLZONE_LOCAL_MACHINE == dwZone ||
URLZONE_INTRANET == dwZone ||
URLZONE_TRUSTED == dwZone)) {
git_credential *existing = *cred;
if (existing)
existing->free(existing);
/* Then use default Windows credentials to authenticate this request */
error = git_credential_default_new(cred);
}
pISM->lpVtbl->Release(pISM);
}
/* Only uninitialize if the call to CoInitializeEx was successful. */
if (SUCCEEDED(hCoInitResult))
CoUninitialize();
}
git__free(wide_url);
}
return error;
}
static int certificate_check(winhttp_stream *s, int valid)
{
int error;
winhttp_subtransport *t = OWNING_SUBTRANSPORT(s);
PCERT_CONTEXT cert_ctx;
DWORD cert_ctx_size = sizeof(cert_ctx);
git_cert_x509 cert;
/* If there is no override, we should fail if WinHTTP doesn't think it's fine */
if (t->owner->connect_opts.callbacks.certificate_check == NULL && !valid) {
if (!git_error_last())
git_error_set(GIT_ERROR_HTTP, "unknown certificate check failure");
return GIT_ECERTIFICATE;
}
if (t->owner->connect_opts.callbacks.certificate_check == NULL || git__strcmp(t->server.url.scheme, "https") != 0)
return 0;
if (!WinHttpQueryOption(s->request, WINHTTP_OPTION_SERVER_CERT_CONTEXT, &cert_ctx, &cert_ctx_size)) {
git_error_set(GIT_ERROR_OS, "failed to get server certificate");
return -1;
}
git_error_clear();
cert.parent.cert_type = GIT_CERT_X509;
cert.data = cert_ctx->pbCertEncoded;
cert.len = cert_ctx->cbCertEncoded;
error = t->owner->connect_opts.callbacks.certificate_check((git_cert *) &cert, valid, t->server.url.host, t->owner->connect_opts.callbacks.payload);
CertFreeCertificateContext(cert_ctx);
if (error == GIT_PASSTHROUGH)
error = valid ? 0 : GIT_ECERTIFICATE;
if (error < 0 && !git_error_last())
git_error_set(GIT_ERROR_HTTP, "user cancelled certificate check");
return error;
}
static void winhttp_stream_close(winhttp_stream *s)
{
if (s->chunk_buffer) {
git__free(s->chunk_buffer);
s->chunk_buffer = NULL;
}
if (s->post_body) {
CloseHandle(s->post_body);
s->post_body = NULL;
}
if (s->request_uri) {
git__free(s->request_uri);
s->request_uri = NULL;
}
if (s->request) {
WinHttpCloseHandle(s->request);
s->request = NULL;
}
s->sent_request = 0;
}
static int apply_credentials(
HINTERNET request,
git_net_url *url,
int target,
git_credential *creds,
int mechanisms)
{
int error = 0;
GIT_UNUSED(url);
/* If we have creds, just apply them */
if (creds && creds->credtype == GIT_CREDENTIAL_USERPASS_PLAINTEXT)
error = apply_userpass_credentials(request, target, mechanisms, creds);
else if (creds && creds->credtype == GIT_CREDENTIAL_DEFAULT)
error = apply_default_credentials(request, target, mechanisms);
return error;
}
static int winhttp_stream_connect(winhttp_stream *s)
{
winhttp_subtransport *t = OWNING_SUBTRANSPORT(s);
git_str buf = GIT_STR_INIT;
char *proxy_url = NULL;
wchar_t ct[MAX_CONTENT_TYPE_LEN];
LPCWSTR types[] = { L"*/*", NULL };
BOOL peerdist = FALSE;
int error = -1;
unsigned long disable_redirects = WINHTTP_DISABLE_REDIRECTS;
int default_timeout = TIMEOUT_INFINITE;
int default_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
DWORD autologon_policy = WINHTTP_AUTOLOGON_SECURITY_LEVEL_HIGH;
const char *service_url = s->service_url;
size_t i;
const git_proxy_options *proxy_opts;
/* If path already ends in /, remove the leading slash from service_url */
if ((git__suffixcmp(t->server.url.path, "/") == 0) && (git__prefixcmp(service_url, "/") == 0))
service_url++;
/* Prepare URL */
git_str_printf(&buf, "%s%s", t->server.url.path, service_url);
if (git_str_oom(&buf))
return -1;
/* Convert URL to wide characters */
if (git__utf8_to_16_alloc(&s->request_uri, git_str_cstr(&buf)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert string to wide form");
goto on_error;
}
/* Establish request */
s->request = WinHttpOpenRequest(
t->connection,
s->verb,
s->request_uri,
NULL,
WINHTTP_NO_REFERER,
types,
git__strcmp(t->server.url.scheme, "https") == 0 ? WINHTTP_FLAG_SECURE : 0);
if (!s->request) {
git_error_set(GIT_ERROR_OS, "failed to open request");
goto on_error;
}
/* Never attempt default credentials; we'll provide them explicitly. */
if (!WinHttpSetOption(s->request, WINHTTP_OPTION_AUTOLOGON_POLICY, &autologon_policy, sizeof(DWORD))) {
git_error_set(GIT_ERROR_OS, "failed to set autologon policy");
goto on_error;
}
if (!WinHttpSetTimeouts(s->request, default_timeout, default_connect_timeout, default_timeout, default_timeout)) {
git_error_set(GIT_ERROR_OS, "failed to set timeouts for WinHTTP");
goto on_error;
}
proxy_opts = &t->owner->connect_opts.proxy_opts;
if (proxy_opts->type == GIT_PROXY_AUTO) {
/* Set proxy if necessary */
if (git_remote__http_proxy(&proxy_url, t->owner->owner, &t->server.url) < 0)
goto on_error;
}
else if (proxy_opts->type == GIT_PROXY_SPECIFIED) {
proxy_url = git__strdup(proxy_opts->url);
GIT_ERROR_CHECK_ALLOC(proxy_url);
}
if (proxy_url) {
git_str processed_url = GIT_STR_INIT;
WINHTTP_PROXY_INFO proxy_info;
wchar_t *proxy_wide;
git_net_url_dispose(&t->proxy.url);
if ((error = git_net_url_parse(&t->proxy.url, proxy_url)) < 0)
goto on_error;
if (strcmp(t->proxy.url.scheme, "http") != 0 && strcmp(t->proxy.url.scheme, "https") != 0) {
git_error_set(GIT_ERROR_HTTP, "invalid URL: '%s'", proxy_url);
error = -1;
goto on_error;
}
git_str_puts(&processed_url, t->proxy.url.scheme);
git_str_PUTS(&processed_url, "://");
if (git_net_url_is_ipv6(&t->proxy.url))
git_str_putc(&processed_url, '[');
git_str_puts(&processed_url, t->proxy.url.host);
if (git_net_url_is_ipv6(&t->proxy.url))
git_str_putc(&processed_url, ']');
if (!git_net_url_is_default_port(&t->proxy.url))
git_str_printf(&processed_url, ":%s", t->proxy.url.port);
if (git_str_oom(&processed_url)) {
error = -1;
goto on_error;
}
/* Convert URL to wide characters */
error = git__utf8_to_16_alloc(&proxy_wide, processed_url.ptr);
git_str_dispose(&processed_url);
if (error < 0)
goto on_error;
proxy_info.dwAccessType = WINHTTP_ACCESS_TYPE_NAMED_PROXY;
proxy_info.lpszProxy = proxy_wide;
proxy_info.lpszProxyBypass = NULL;
if (!WinHttpSetOption(s->request,
WINHTTP_OPTION_PROXY,
&proxy_info,
sizeof(WINHTTP_PROXY_INFO))) {
git_error_set(GIT_ERROR_OS, "failed to set proxy");
git__free(proxy_wide);
goto on_error;
}
git__free(proxy_wide);
if ((error = apply_credentials(s->request, &t->proxy.url, WINHTTP_AUTH_TARGET_PROXY, t->proxy.cred, t->proxy.auth_mechanisms)) < 0)
goto on_error;
}
/* Disable WinHTTP redirects so we can handle them manually. Why, you ask?
* http://social.msdn.microsoft.com/Forums/windowsdesktop/en-US/b2ff8879-ab9f-4218-8f09-16d25dff87ae
*/
if (!WinHttpSetOption(s->request,
WINHTTP_OPTION_DISABLE_FEATURE,
&disable_redirects,
sizeof(disable_redirects))) {
git_error_set(GIT_ERROR_OS, "failed to disable redirects");
error = -1;
goto on_error;
}
/* Strip unwanted headers (X-P2P-PeerDist, X-P2P-PeerDistEx) that WinHTTP
* adds itself. This option may not be supported by the underlying
* platform, so we do not error-check it */
WinHttpSetOption(s->request,
WINHTTP_OPTION_PEERDIST_EXTENSION_STATE,
&peerdist,
sizeof(peerdist));
/* Send Pragma: no-cache header */
if (!WinHttpAddRequestHeaders(s->request, pragma_nocache, (ULONG) -1L, WINHTTP_ADDREQ_FLAG_ADD)) {
git_error_set(GIT_ERROR_OS, "failed to add a header to the request");
goto on_error;
}
if (post_verb == s->verb) {
/* Send Content-Type and Accept headers -- only necessary on a POST */
git_str_clear(&buf);
if (git_str_printf(&buf,
"Content-Type: application/x-git-%s-request",
s->service) < 0)
goto on_error;
if (git__utf8_to_16(ct, MAX_CONTENT_TYPE_LEN, git_str_cstr(&buf)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert content-type to wide characters");
goto on_error;
}
if (!WinHttpAddRequestHeaders(s->request, ct, (ULONG)-1L,
WINHTTP_ADDREQ_FLAG_ADD | WINHTTP_ADDREQ_FLAG_REPLACE)) {
git_error_set(GIT_ERROR_OS, "failed to add a header to the request");
goto on_error;
}
git_str_clear(&buf);
if (git_str_printf(&buf,
"Accept: application/x-git-%s-result",
s->service) < 0)
goto on_error;
if (git__utf8_to_16(ct, MAX_CONTENT_TYPE_LEN, git_str_cstr(&buf)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert accept header to wide characters");
goto on_error;
}
if (!WinHttpAddRequestHeaders(s->request, ct, (ULONG)-1L,
WINHTTP_ADDREQ_FLAG_ADD | WINHTTP_ADDREQ_FLAG_REPLACE)) {
git_error_set(GIT_ERROR_OS, "failed to add a header to the request");
goto on_error;
}
}
for (i = 0; i < t->owner->connect_opts.custom_headers.count; i++) {
if (t->owner->connect_opts.custom_headers.strings[i]) {
git_str_clear(&buf);
git_str_puts(&buf, t->owner->connect_opts.custom_headers.strings[i]);
if (git__utf8_to_16(ct, MAX_CONTENT_TYPE_LEN, git_str_cstr(&buf)) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert custom header to wide characters");
goto on_error;
}
if (!WinHttpAddRequestHeaders(s->request, ct, (ULONG)-1L,
WINHTTP_ADDREQ_FLAG_ADD | WINHTTP_ADDREQ_FLAG_REPLACE)) {
git_error_set(GIT_ERROR_OS, "failed to add a header to the request");
goto on_error;
}
}
}
if ((error = apply_credentials(s->request, &t->server.url, WINHTTP_AUTH_TARGET_SERVER, t->server.cred, t->server.auth_mechanisms)) < 0)
goto on_error;
/* We've done everything up to calling WinHttpSendRequest. */
error = 0;
on_error:
if (error < 0)
winhttp_stream_close(s);
git__free(proxy_url);
git_str_dispose(&buf);
return error;
}
static int parse_unauthorized_response(
int *allowed_types,
int *allowed_mechanisms,
HINTERNET request)
{
DWORD supported, first, target;
*allowed_types = 0;
*allowed_mechanisms = 0;
/* WinHttpQueryHeaders() must be called before WinHttpQueryAuthSchemes().
* We can assume this was already done, since we know we are unauthorized.
*/
if (!WinHttpQueryAuthSchemes(request, &supported, &first, &target)) {
git_error_set(GIT_ERROR_OS, "failed to parse supported auth schemes");
return GIT_EAUTH;
}
if (WINHTTP_AUTH_SCHEME_NTLM & supported) {
*allowed_types |= GIT_CREDENTIAL_USERPASS_PLAINTEXT;
*allowed_types |= GIT_CREDENTIAL_DEFAULT;
*allowed_mechanisms |= GIT_WINHTTP_AUTH_NTLM;
}
if (WINHTTP_AUTH_SCHEME_NEGOTIATE & supported) {
*allowed_types |= GIT_CREDENTIAL_DEFAULT;
*allowed_mechanisms |= GIT_WINHTTP_AUTH_NEGOTIATE;
}
if (WINHTTP_AUTH_SCHEME_BASIC & supported) {
*allowed_types |= GIT_CREDENTIAL_USERPASS_PLAINTEXT;
*allowed_mechanisms |= GIT_WINHTTP_AUTH_BASIC;
}
if (WINHTTP_AUTH_SCHEME_DIGEST & supported) {
*allowed_types |= GIT_CREDENTIAL_USERPASS_PLAINTEXT;
*allowed_mechanisms |= GIT_WINHTTP_AUTH_DIGEST;
}
return 0;
}
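/*
 * Write a single HTTP/1.1 chunk: a hex length header, the payload, and
 * a trailing CRLF.  For example, a 4-byte payload "0000" goes on the
 * wire as "4\r\n0000\r\n".
 */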
static int write_chunk(HINTERNET request, const char *buffer, size_t len)
{
DWORD bytes_written;
git_str buf = GIT_STR_INIT;
/* Chunk header */
git_str_printf(&buf, "%"PRIXZ"\r\n", len);
if (git_str_oom(&buf))
return -1;
if (!WinHttpWriteData(request,
git_str_cstr(&buf), (DWORD)git_str_len(&buf),
&bytes_written)) {
git_str_dispose(&buf);
git_error_set(GIT_ERROR_OS, "failed to write chunk header");
return -1;
}
git_str_dispose(&buf);
/* Chunk body */
if (!WinHttpWriteData(request,
buffer, (DWORD)len,
&bytes_written)) {
git_error_set(GIT_ERROR_OS, "failed to write chunk");
return -1;
}
/* Chunk footer */
if (!WinHttpWriteData(request,
"\r\n", 2,
&bytes_written)) {
git_error_set(GIT_ERROR_OS, "failed to write chunk footer");
return -1;
}
return 0;
}
static int winhttp_close_connection(winhttp_subtransport *t)
{
int ret = 0;
if (t->connection) {
if (!WinHttpCloseHandle(t->connection)) {
git_error_set(GIT_ERROR_OS, "unable to close connection");
ret = -1;
}
t->connection = NULL;
}
if (t->session) {
if (!WinHttpCloseHandle(t->session)) {
git_error_set(GIT_ERROR_OS, "unable to close session");
ret = -1;
}
t->session = NULL;
}
return ret;
}
static void CALLBACK winhttp_status(
HINTERNET connection,
DWORD_PTR ctx,
DWORD code,
LPVOID info,
DWORD info_len)
{
DWORD status;
GIT_UNUSED(connection);
GIT_UNUSED(info_len);
switch (code) {
case WINHTTP_CALLBACK_STATUS_SECURE_FAILURE:
status = *((DWORD *)info);
if ((status & WINHTTP_CALLBACK_STATUS_FLAG_CERT_CN_INVALID))
git_error_set(GIT_ERROR_HTTP, "SSL certificate issued for different common name");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_CERT_DATE_INVALID))
git_error_set(GIT_ERROR_HTTP, "SSL certificate has expired");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_INVALID_CA))
git_error_set(GIT_ERROR_HTTP, "SSL certificate signed by unknown CA");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_INVALID_CERT))
git_error_set(GIT_ERROR_HTTP, "SSL certificate is invalid");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_CERT_REV_FAILED))
git_error_set(GIT_ERROR_HTTP, "certificate revocation check failed");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_CERT_REVOKED))
git_error_set(GIT_ERROR_HTTP, "SSL certificate was revoked");
else if ((status & WINHTTP_CALLBACK_STATUS_FLAG_SECURITY_CHANNEL_ERROR))
git_error_set(GIT_ERROR_HTTP, "security libraries could not be loaded");
else
git_error_set(GIT_ERROR_HTTP, "unknown security error %lu", status);
break;
case WINHTTP_CALLBACK_STATUS_SENDING_REQUEST:
((winhttp_stream *) ctx)->status_sending_request_reached = 1;
break;
}
}
static int winhttp_connect(
winhttp_subtransport *t)
{
wchar_t *wide_host = NULL;
int32_t port;
wchar_t *wide_ua = NULL;
git_str ipv6 = GIT_STR_INIT, ua = GIT_STR_INIT;
const char *host;
int error = -1;
int default_timeout = TIMEOUT_INFINITE;
int default_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
DWORD protocols =
WINHTTP_FLAG_SECURE_PROTOCOL_TLS1 |
WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_1 |
WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_2 |
WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_3;
t->session = NULL;
t->connection = NULL;
/* Prepare port */
if (git__strntol32(&port, t->server.url.port,
strlen(t->server.url.port), NULL, 10) < 0)
goto on_error;
/* IPv6? Add braces around the host. */
if (git_net_url_is_ipv6(&t->server.url)) {
if (git_str_printf(&ipv6, "[%s]", t->server.url.host) < 0)
goto on_error;
host = ipv6.ptr;
} else {
host = t->server.url.host;
}
/* Prepare host */
if (git__utf8_to_16_alloc(&wide_host, host) < 0) {
git_error_set(GIT_ERROR_OS, "unable to convert host to wide characters");
goto on_error;
}
if (git_http__user_agent(&ua) < 0)
goto on_error;
if (git__utf8_to_16_alloc(&wide_ua, git_str_cstr(&ua)) < 0) {
git_error_set(GIT_ERROR_OS, "unable to convert host to wide characters");
goto on_error;
}
/* Establish session */
t->session = WinHttpOpen(
wide_ua,
WINHTTP_ACCESS_TYPE_DEFAULT_PROXY,
WINHTTP_NO_PROXY_NAME,
WINHTTP_NO_PROXY_BYPASS,
0);
if (!t->session) {
git_error_set(GIT_ERROR_OS, "failed to init WinHTTP");
goto on_error;
}
/*
* Do a best-effort attempt to enable TLS 1.3 and 1.2 but allow this to
* fail; if TLS 1.2 or 1.3 support is not available for some reason,
* ignore the failure (it will keep the default protocols).
*/
if (WinHttpSetOption(t->session,
WINHTTP_OPTION_SECURE_PROTOCOLS,
&protocols,
sizeof(protocols)) == FALSE) {
protocols &= ~WINHTTP_FLAG_SECURE_PROTOCOL_TLS1_3;
WinHttpSetOption(t->session,
WINHTTP_OPTION_SECURE_PROTOCOLS,
&protocols,
sizeof(protocols));
}
if (!WinHttpSetTimeouts(t->session, default_timeout, default_connect_timeout, default_timeout, default_timeout)) {
git_error_set(GIT_ERROR_OS, "failed to set timeouts for WinHTTP");
goto on_error;
}
/* Establish connection */
t->connection = WinHttpConnect(
t->session,
wide_host,
(INTERNET_PORT) port,
0);
if (!t->connection) {
git_error_set(GIT_ERROR_OS, "failed to connect to host");
goto on_error;
}
if (WinHttpSetStatusCallback(
t->connection,
winhttp_status,
WINHTTP_CALLBACK_FLAG_SECURE_FAILURE | WINHTTP_CALLBACK_FLAG_SEND_REQUEST,
0
) == WINHTTP_INVALID_STATUS_CALLBACK) {
git_error_set(GIT_ERROR_OS, "failed to set status callback");
goto on_error;
}
error = 0;
on_error:
if (error < 0)
winhttp_close_connection(t);
git_str_dispose(&ua);
git_str_dispose(&ipv6);
git__free(wide_host);
git__free(wide_ua);
return error;
}
static int do_send_request(winhttp_stream *s, size_t len, bool chunked)
{
int attempts;
bool success;
if (len > DWORD_MAX) {
SetLastError(ERROR_NOT_ENOUGH_MEMORY);
return -1;
}
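/*
 * Retry a handful of times if WinHttpSendRequest fails with
 * SEC_E_BUFFER_TOO_SMALL, which has been observed as a transient
 * failure during TLS negotiation.
 */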
for (attempts = 0; attempts < 5; attempts++) {
if (chunked) {
success = WinHttpSendRequest(s->request,
WINHTTP_NO_ADDITIONAL_HEADERS, 0,
WINHTTP_NO_REQUEST_DATA, 0,
WINHTTP_IGNORE_REQUEST_TOTAL_LENGTH, (DWORD_PTR)s);
} else {
success = WinHttpSendRequest(s->request,
WINHTTP_NO_ADDITIONAL_HEADERS, 0,
WINHTTP_NO_REQUEST_DATA, 0,
(DWORD)len, (DWORD_PTR)s);
}
if (success || GetLastError() != (DWORD)SEC_E_BUFFER_TOO_SMALL)
break;
}
return success ? 0 : -1;
}
static int send_request(winhttp_stream *s, size_t len, bool chunked)
{
int request_failed = 1, error, attempts = 0;
DWORD ignore_flags, send_request_error;
git_error_clear();
while (request_failed && attempts++ < 3) {
int cert_valid = 1;
int client_cert_requested = 0;
request_failed = 0;
if ((error = do_send_request(s, len, chunked)) < 0) {
send_request_error = GetLastError();
request_failed = 1;
switch (send_request_error) {
case ERROR_WINHTTP_SECURE_FAILURE:
cert_valid = 0;
break;
case ERROR_WINHTTP_CLIENT_AUTH_CERT_NEEDED:
client_cert_requested = 1;
break;
default:
git_error_set(GIT_ERROR_OS, "failed to send request");
return -1;
}
}
/*
* Only check the certificate if we were able to reach the sending request phase, or
* received a secure failure error. Otherwise, the server certificate won't be available
* since the request wasn't able to complete (e.g. proxy auth required)
*/
if (!cert_valid ||
(!request_failed && s->status_sending_request_reached)) {
git_error_clear();
if ((error = certificate_check(s, cert_valid)) < 0) {
if (!git_error_last())
git_error_set(GIT_ERROR_OS, "user cancelled certificate check");
return error;
}
}
/* if neither the request nor the certificate check returned errors, we're done */
if (!request_failed)
return 0;
if (!cert_valid) {
ignore_flags = no_check_cert_flags;
if (!WinHttpSetOption(s->request, WINHTTP_OPTION_SECURITY_FLAGS, &ignore_flags, sizeof(ignore_flags))) {
git_error_set(GIT_ERROR_OS, "failed to set security options");
return -1;
}
}
if (client_cert_requested) {
/*
* Client certificates are not supported, explicitly tell the server that
* (it's possible a client certificate was requested but is not required)
*/
if (!WinHttpSetOption(s->request, WINHTTP_OPTION_CLIENT_CERT_CONTEXT, WINHTTP_NO_CLIENT_CERT_CONTEXT, 0)) {
git_error_set(GIT_ERROR_OS, "failed to set client cert context");
return -1;
}
}
}
return error;
}
static int acquire_credentials(
HINTERNET request,
winhttp_server *server,
const char *url_str,
git_credential_acquire_cb cred_cb,
void *cred_cb_payload)
{
int allowed_types;
int error = 1;
if (parse_unauthorized_response(&allowed_types, &server->auth_mechanisms, request) < 0)
return -1;
if (allowed_types) {
git_credential_free(server->cred);
server->cred = NULL;
/* Start with URL-specified credentials, if there were any. */
if (!server->url_cred_presented && server->url.username && server->url.password) {
error = acquire_url_cred(&server->cred, allowed_types, server->url.username, server->url.password);
server->url_cred_presented = 1;
if (error < 0)
return error;
}
/* Next use the user-defined callback, if there is one. */
if (error > 0 && cred_cb) {
error = cred_cb(&server->cred, url_str, server->url.username, allowed_types, cred_cb_payload);
/* Treat GIT_PASSTHROUGH as though git_credential_acquire_cb isn't set */
if (error == GIT_PASSTHROUGH)
error = 1;
else if (error < 0)
return error;
}
/* Finally, invoke the fallback default credential lookup. */
if (error > 0) {
error = acquire_fallback_cred(&server->cred, url_str, allowed_types);
if (error < 0)
return error;
}
}
/*
* No error occurred but we could not find appropriate credentials.
* This behaves like a pass-through.
*/
return error;
}
static int winhttp_stream_read(
git_smart_subtransport_stream *stream,
char *buffer,
size_t buf_size,
size_t *bytes_read)
{
winhttp_stream *s = (winhttp_stream *)stream;
winhttp_subtransport *t = OWNING_SUBTRANSPORT(s);
DWORD dw_bytes_read;
int replay_count = 0;
int error;
replay:
/* Enforce a reasonable cap on the number of replays */
if (replay_count++ >= GIT_HTTP_REPLAY_MAX) {
git_error_set(GIT_ERROR_HTTP, "too many redirects or authentication replays");
return GIT_ERROR; /* not GIT_EAUTH because the exact cause is not clear */
}
/* Connect if necessary */
if (!s->request && winhttp_stream_connect(s) < 0)
return -1;
if (!s->received_response) {
DWORD status_code, status_code_length, content_type_length, bytes_written;
char expected_content_type_8[MAX_CONTENT_TYPE_LEN];
wchar_t expected_content_type[MAX_CONTENT_TYPE_LEN], content_type[MAX_CONTENT_TYPE_LEN];
if (!s->sent_request) {
if ((error = send_request(s, s->post_body_len, false)) < 0)
return error;
s->sent_request = 1;
}
if (s->chunked) {
GIT_ASSERT(s->verb == post_verb);
/* Flush, if necessary */
if (s->chunk_buffer_len > 0 &&
write_chunk(s->request, s->chunk_buffer, s->chunk_buffer_len) < 0)
return -1;
s->chunk_buffer_len = 0;
/* Write the final chunk. */
if (!WinHttpWriteData(s->request,
"0\r\n\r\n", 5,
&bytes_written)) {
git_error_set(GIT_ERROR_OS, "failed to write final chunk");
return -1;
}
}
else if (s->post_body) {
char *buffer;
DWORD len = s->post_body_len, bytes_read;
if (INVALID_SET_FILE_POINTER == SetFilePointer(s->post_body,
0, 0, FILE_BEGIN) &&
NO_ERROR != GetLastError()) {
git_error_set(GIT_ERROR_OS, "failed to reset file pointer");
return -1;
}
buffer = git__malloc(CACHED_POST_BODY_BUF_SIZE);
GIT_ERROR_CHECK_ALLOC(buffer);
while (len > 0) {
DWORD bytes_written;
if (!ReadFile(s->post_body, buffer,
min(CACHED_POST_BODY_BUF_SIZE, len),
&bytes_read, NULL) ||
!bytes_read) {
git__free(buffer);
git_error_set(GIT_ERROR_OS, "failed to read from temp file");
return -1;
}
if (!WinHttpWriteData(s->request, buffer,
bytes_read, &bytes_written)) {
git__free(buffer);
git_error_set(GIT_ERROR_OS, "failed to write data");
return -1;
}
len -= bytes_read;
GIT_ASSERT(bytes_read == bytes_written);
}
git__free(buffer);
/* Eagerly close the temp file */
CloseHandle(s->post_body);
s->post_body = NULL;
}
if (!WinHttpReceiveResponse(s->request, 0)) {
git_error_set(GIT_ERROR_OS, "failed to receive response");
return -1;
}
/* Verify that we got a 200 back */
status_code_length = sizeof(status_code);
if (!WinHttpQueryHeaders(s->request,
WINHTTP_QUERY_STATUS_CODE | WINHTTP_QUERY_FLAG_NUMBER,
WINHTTP_HEADER_NAME_BY_INDEX,
&status_code, &status_code_length,
WINHTTP_NO_HEADER_INDEX)) {
git_error_set(GIT_ERROR_OS, "failed to retrieve status code");
return -1;
}
/* The implementation of WinHTTP prior to Windows 7 will not
* redirect to an identical URI. Some Git hosters use self-redirects
* as part of their DoS mitigation strategy. Check first to see if we
* have a redirect status code, and that we haven't already streamed
* a post body. (We can't replay a streamed POST.) */
if (!s->chunked &&
(HTTP_STATUS_MOVED == status_code ||
HTTP_STATUS_REDIRECT == status_code ||
(HTTP_STATUS_REDIRECT_METHOD == status_code &&
get_verb == s->verb) ||
HTTP_STATUS_REDIRECT_KEEP_VERB == status_code ||
HTTP_STATUS_PERMANENT_REDIRECT == status_code)) {
/* Automatic redirects were disabled above, so read the Location
* header and replay the request manually. */
wchar_t *location;
DWORD location_length;
char *location8;
/* OK, fetch the Location header from the redirect. */
if (WinHttpQueryHeaders(s->request,
WINHTTP_QUERY_LOCATION,
WINHTTP_HEADER_NAME_BY_INDEX,
WINHTTP_NO_OUTPUT_BUFFER,
&location_length,
WINHTTP_NO_HEADER_INDEX) ||
GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
git_error_set(GIT_ERROR_OS, "failed to read Location header");
return -1;
}
location = git__malloc(location_length);
GIT_ERROR_CHECK_ALLOC(location);
if (!WinHttpQueryHeaders(s->request,
WINHTTP_QUERY_LOCATION,
WINHTTP_HEADER_NAME_BY_INDEX,
location,
&location_length,
WINHTTP_NO_HEADER_INDEX)) {
git_error_set(GIT_ERROR_OS, "failed to read Location header");
git__free(location);
return -1;
}
/* Convert the Location header to UTF-8 */
if (git__utf16_to_8_alloc(&location8, location) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert Location header to UTF-8");
git__free(location);
return -1;
}
git__free(location);
/* Replay the request */
winhttp_stream_close(s);
if (!git__prefixcmp_icase(location8, prefix_https)) {
bool follow = (t->owner->connect_opts.follow_redirects != GIT_REMOTE_REDIRECT_NONE);
/* Upgrade to secure connection; disconnect and start over */
if (git_net_url_apply_redirect(&t->server.url, location8, follow, s->service_url) < 0) {
git__free(location8);
return -1;
}
winhttp_close_connection(t);
if (winhttp_connect(t) < 0)
return -1;
}
git__free(location8);
goto replay;
}
/* Handle authentication failures */
if (status_code == HTTP_STATUS_DENIED) {
int error = acquire_credentials(s->request,
&t->server,
t->owner->url,
t->owner->connect_opts.callbacks.credentials,
t->owner->connect_opts.callbacks.payload);
if (error < 0) {
return error;
} else if (!error) {
GIT_ASSERT(t->server.cred);
winhttp_stream_close(s);
goto replay;
}
} else if (status_code == HTTP_STATUS_PROXY_AUTH_REQ) {
int error = acquire_credentials(s->request,
&t->proxy,
t->owner->connect_opts.proxy_opts.url,
t->owner->connect_opts.proxy_opts.credentials,
t->owner->connect_opts.proxy_opts.payload);
if (error < 0) {
return error;
} else if (!error) {
GIT_ASSERT(t->proxy.cred);
winhttp_stream_close(s);
goto replay;
}
}
if (HTTP_STATUS_OK != status_code) {
git_error_set(GIT_ERROR_HTTP, "request failed with status code: %lu", status_code);
return -1;
}
/* Verify that we got the correct content-type back */
if (post_verb == s->verb)
p_snprintf(expected_content_type_8, MAX_CONTENT_TYPE_LEN, "application/x-git-%s-result", s->service);
else
p_snprintf(expected_content_type_8, MAX_CONTENT_TYPE_LEN, "application/x-git-%s-advertisement", s->service);
if (git__utf8_to_16(expected_content_type, MAX_CONTENT_TYPE_LEN, expected_content_type_8) < 0) {
git_error_set(GIT_ERROR_OS, "failed to convert expected content-type to wide characters");
return -1;
}
content_type_length = sizeof(content_type);
if (!WinHttpQueryHeaders(s->request,
WINHTTP_QUERY_CONTENT_TYPE,
WINHTTP_HEADER_NAME_BY_INDEX,
&content_type, &content_type_length,
WINHTTP_NO_HEADER_INDEX)) {
git_error_set(GIT_ERROR_OS, "failed to retrieve response content-type");
return -1;
}
if (wcscmp(expected_content_type, content_type)) {
git_error_set(GIT_ERROR_HTTP, "received unexpected content-type");
return -1;
}
s->received_response = 1;
}
if (!WinHttpReadData(s->request,
(LPVOID)buffer,
(DWORD)buf_size,
&dw_bytes_read))
{
git_error_set(GIT_ERROR_OS, "failed to read data");
return -1;
}
*bytes_read = dw_bytes_read;
return 0;
}
static int winhttp_stream_write_single(
git_smart_subtransport_stream *stream,
const char *buffer,
size_t len)
{
winhttp_stream *s = (winhttp_stream *)stream;
DWORD bytes_written;
int error;
if (!s->request && winhttp_stream_connect(s) < 0)
return -1;
/* This implementation of write permits only a single call. */
if (s->sent_request) {
git_error_set(GIT_ERROR_HTTP, "subtransport configured for only one write");
return -1;
}
if ((error = send_request(s, len, false)) < 0)
return error;
s->sent_request = 1;
if (!WinHttpWriteData(s->request,
(LPCVOID)buffer,
(DWORD)len,
&bytes_written)) {
git_error_set(GIT_ERROR_OS, "failed to write data");
return -1;
}
GIT_ASSERT((DWORD)len == bytes_written);
return 0;
}
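/*
 * Format a freshly generated UUID as 32 hex characters (no braces or
 * dashes) for use as a unique temp file name component.
 */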
static int put_uuid_string(LPWSTR buffer, size_t buffer_len_cch)
{
UUID uuid;
RPC_STATUS status = UuidCreate(&uuid);
int result;
if (RPC_S_OK != status &&
RPC_S_UUID_LOCAL_ONLY != status &&
RPC_S_UUID_NO_ADDRESS != status) {
git_error_set(GIT_ERROR_HTTP, "unable to generate name for temp file");
return -1;
}
if (buffer_len_cch < UUID_LENGTH_CCH + 1) {
git_error_set(GIT_ERROR_HTTP, "buffer too small for name of temp file");
return -1;
}
#if !defined(__MINGW32__) || defined(MINGW_HAS_SECURE_API)
result = swprintf_s(buffer, buffer_len_cch,
#else
result = wsprintfW(buffer,
#endif
L"%08x%04x%04x%02x%02x%02x%02x%02x%02x%02x%02x",
uuid.Data1, uuid.Data2, uuid.Data3,
uuid.Data4[0], uuid.Data4[1], uuid.Data4[2], uuid.Data4[3],
uuid.Data4[4], uuid.Data4[5], uuid.Data4[6], uuid.Data4[7]);
if (result < UUID_LENGTH_CCH) {
git_error_set(GIT_ERROR_OS, "unable to generate name for temp file");
return -1;
}
return 0;
}
static int get_temp_file(LPWSTR buffer, DWORD buffer_len_cch)
{
size_t len;
if (!GetTempPathW(buffer_len_cch, buffer)) {
git_error_set(GIT_ERROR_OS, "failed to get temp path");
return -1;
}
len = wcslen(buffer);
if (buffer[len - 1] != '\\' && len < buffer_len_cch)
buffer[len++] = '\\';
if (put_uuid_string(&buffer[len], (size_t)buffer_len_cch - len) < 0)
return -1;
return 0;
}
static int winhttp_stream_write_buffered(
git_smart_subtransport_stream *stream,
const char *buffer,
size_t len)
{
winhttp_stream *s = (winhttp_stream *)stream;
DWORD bytes_written;
if (!s->request && winhttp_stream_connect(s) < 0)
return -1;
/* Buffer the payload, using a temporary file so we delegate
* memory management of the data to the operating system. */
if (!s->post_body) {
wchar_t temp_path[MAX_PATH + 1];
if (get_temp_file(temp_path, MAX_PATH + 1) < 0)
return -1;
s->post_body = CreateFileW(temp_path,
GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_DELETE, NULL,
CREATE_NEW,
FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE | FILE_FLAG_SEQUENTIAL_SCAN,
NULL);
if (INVALID_HANDLE_VALUE == s->post_body) {
s->post_body = NULL;
git_error_set(GIT_ERROR_OS, "failed to create temporary file");
return -1;
}
}
if (!WriteFile(s->post_body, buffer, (DWORD)len, &bytes_written, NULL)) {
git_error_set(GIT_ERROR_OS, "failed to write to temporary file");
return -1;
}
GIT_ASSERT((DWORD)len == bytes_written);
s->post_body_len += bytes_written;
return 0;
}
static int winhttp_stream_write_chunked(
git_smart_subtransport_stream *stream,
const char *buffer,
size_t len)
{
winhttp_stream *s = (winhttp_stream *)stream;
int error;
if (!s->request && winhttp_stream_connect(s) < 0)
return -1;
if (!s->sent_request) {
/* Send Transfer-Encoding: chunked header */
if (!WinHttpAddRequestHeaders(s->request,
transfer_encoding, (ULONG) -1L,
WINHTTP_ADDREQ_FLAG_ADD)) {
git_error_set(GIT_ERROR_OS, "failed to add a header to the request");
return -1;
}
if ((error = send_request(s, 0, true)) < 0)
return error;
s->sent_request = 1;
}
if (len > CACHED_POST_BODY_BUF_SIZE) {
/* Flush, if necessary */
if (s->chunk_buffer_len > 0) {
if (write_chunk(s->request, s->chunk_buffer, s->chunk_buffer_len) < 0)
return -1;
s->chunk_buffer_len = 0;
}
/* Write chunk directly */
if (write_chunk(s->request, buffer, len) < 0)
return -1;
}
else {
/* Append as much to the buffer as we can */
int count = (int)min(CACHED_POST_BODY_BUF_SIZE - s->chunk_buffer_len, len);
if (!s->chunk_buffer) {
s->chunk_buffer = git__malloc(CACHED_POST_BODY_BUF_SIZE);
GIT_ERROR_CHECK_ALLOC(s->chunk_buffer);
}
memcpy(s->chunk_buffer + s->chunk_buffer_len, buffer, count);
s->chunk_buffer_len += count;
buffer += count;
len -= count;
/* Is the buffer full? If so, then flush */
if (CACHED_POST_BODY_BUF_SIZE == s->chunk_buffer_len) {
if (write_chunk(s->request, s->chunk_buffer, s->chunk_buffer_len) < 0)
return -1;
s->chunk_buffer_len = 0;
/* Is there any remaining data from the source? */
if (len > 0) {
memcpy(s->chunk_buffer, buffer, len);
s->chunk_buffer_len = (unsigned int)len;
}
}
}
return 0;
}
static void winhttp_stream_free(git_smart_subtransport_stream *stream)
{
winhttp_stream *s = (winhttp_stream *)stream;
winhttp_stream_close(s);
git__free(s);
}
static int winhttp_stream_alloc(winhttp_subtransport *t, winhttp_stream **stream)
{
winhttp_stream *s;
if (!stream)
return -1;
s = git__calloc(1, sizeof(winhttp_stream));
GIT_ERROR_CHECK_ALLOC(s);
s->parent.subtransport = &t->parent;
s->parent.read = winhttp_stream_read;
s->parent.write = winhttp_stream_write_single;
s->parent.free = winhttp_stream_free;
*stream = s;
return 0;
}
static int winhttp_uploadpack_ls(
winhttp_subtransport *t,
winhttp_stream *s)
{
GIT_UNUSED(t);
s->service = upload_pack_service;
s->service_url = upload_pack_ls_service_url;
s->verb = get_verb;
return 0;
}
static int winhttp_uploadpack(
winhttp_subtransport *t,
winhttp_stream *s)
{
GIT_UNUSED(t);
s->service = upload_pack_service;
s->service_url = upload_pack_service_url;
s->verb = post_verb;
return 0;
}
static int winhttp_receivepack_ls(
winhttp_subtransport *t,
winhttp_stream *s)
{
GIT_UNUSED(t);
s->service = receive_pack_service;
s->service_url = receive_pack_ls_service_url;
s->verb = get_verb;
return 0;
}
static int winhttp_receivepack(
winhttp_subtransport *t,
winhttp_stream *s)
{
GIT_UNUSED(t);
/* WinHTTP only supports Transfer-Encoding: chunked
* on Windows Vista (NT 6.0) and higher. */
s->chunked = git_has_win32_version(6, 0, 0);
if (s->chunked)
s->parent.write = winhttp_stream_write_chunked;
else
s->parent.write = winhttp_stream_write_buffered;
s->service = receive_pack_service;
s->service_url = receive_pack_service_url;
s->verb = post_verb;
return 0;
}
static int winhttp_action(
git_smart_subtransport_stream **stream,
git_smart_subtransport *subtransport,
const char *url,
git_smart_service_t action)
{
winhttp_subtransport *t = (winhttp_subtransport *)subtransport;
winhttp_stream *s;
int ret = -1;
if (!t->connection)
if ((ret = git_net_url_parse(&t->server.url, url)) < 0 ||
(ret = winhttp_connect(t)) < 0)
return ret;
if (winhttp_stream_alloc(t, &s) < 0)
return -1;
if (!stream)
return -1;
switch (action)
{
case GIT_SERVICE_UPLOADPACK_LS:
ret = winhttp_uploadpack_ls(t, s);
break;
case GIT_SERVICE_UPLOADPACK:
ret = winhttp_uploadpack(t, s);
break;
case GIT_SERVICE_RECEIVEPACK_LS:
ret = winhttp_receivepack_ls(t, s);
break;
case GIT_SERVICE_RECEIVEPACK:
ret = winhttp_receivepack(t, s);
break;
default:
GIT_ASSERT(0);
}
if (!ret)
*stream = &s->parent;
return ret;
}
static int winhttp_close(git_smart_subtransport *subtransport)
{
winhttp_subtransport *t = (winhttp_subtransport *)subtransport;
git_net_url_dispose(&t->server.url);
git_net_url_dispose(&t->proxy.url);
if (t->server.cred) {
t->server.cred->free(t->server.cred);
t->server.cred = NULL;
}
if (t->proxy.cred) {
t->proxy.cred->free(t->proxy.cred);
t->proxy.cred = NULL;
}
return winhttp_close_connection(t);
}
static void winhttp_free(git_smart_subtransport *subtransport)
{
winhttp_subtransport *t = (winhttp_subtransport *)subtransport;
winhttp_close(subtransport);
git__free(t);
}
int git_smart_subtransport_http(git_smart_subtransport **out, git_transport *owner, void *param)
{
winhttp_subtransport *t;
GIT_UNUSED(param);
if (!out)
return -1;
t = git__calloc(1, sizeof(winhttp_subtransport));
GIT_ERROR_CHECK_ALLOC(t);
t->owner = (transport_smart *)owner;
t->parent.action = winhttp_action;
t->parent.close = winhttp_close;
t->parent.free = winhttp_free;
*out = (git_smart_subtransport *) t;
return 0;
}
#endif /* GIT_WINHTTP */
| libgit2-main | src/libgit2/transports/winhttp.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#ifndef GIT_WINHTTP
#include "http_parser.h"
#include "net.h"
#include "netops.h"
#include "remote.h"
#include "smart.h"
#include "auth.h"
#include "http.h"
#include "auth_negotiate.h"
#include "auth_ntlm.h"
#include "trace.h"
#include "streams/tls.h"
#include "streams/socket.h"
#include "httpclient.h"
#include "git2/sys/credential.h"
bool git_http__expect_continue = false;
typedef enum {
HTTP_STATE_NONE = 0,
HTTP_STATE_SENDING_REQUEST,
HTTP_STATE_RECEIVING_RESPONSE,
HTTP_STATE_DONE
} http_state;
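/*
 * Description of a smart-protocol endpoint: the HTTP method and URL
 * suffix, the request/response content types, whether this is the
 * initial (info/refs) request, and whether the request body is sent
 * with chunked transfer encoding.
 */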
typedef struct {
git_http_method method;
const char *url;
const char *request_type;
const char *response_type;
unsigned int initial : 1,
chunked : 1;
} http_service;
typedef struct {
git_smart_subtransport_stream parent;
const http_service *service;
http_state state;
unsigned replay_count;
} http_stream;
typedef struct {
git_net_url url;
git_credential *cred;
unsigned auth_schemetypes;
unsigned url_cred_presented : 1;
} http_server;
typedef struct {
git_smart_subtransport parent;
transport_smart *owner;
http_server server;
http_server proxy;
git_http_client *http_client;
} http_subtransport;
static const http_service upload_pack_ls_service = {
GIT_HTTP_METHOD_GET, "/info/refs?service=git-upload-pack",
NULL,
"application/x-git-upload-pack-advertisement",
1,
0
};
static const http_service upload_pack_service = {
GIT_HTTP_METHOD_POST, "/git-upload-pack",
"application/x-git-upload-pack-request",
"application/x-git-upload-pack-result",
0,
0
};
static const http_service receive_pack_ls_service = {
GIT_HTTP_METHOD_GET, "/info/refs?service=git-receive-pack",
NULL,
"application/x-git-receive-pack-advertisement",
1,
0
};
static const http_service receive_pack_service = {
GIT_HTTP_METHOD_POST, "/git-receive-pack",
"application/x-git-receive-pack-request",
"application/x-git-receive-pack-result",
0,
1
};
#define SERVER_TYPE_REMOTE "remote"
#define SERVER_TYPE_PROXY "proxy"
#define OWNING_SUBTRANSPORT(s) ((http_subtransport *)(s)->parent.subtransport)
static int apply_url_credentials(
git_credential **cred,
unsigned int allowed_types,
const char *username,
const char *password)
{
GIT_ASSERT_ARG(username);
if (!password)
password = "";
if (allowed_types & GIT_CREDENTIAL_USERPASS_PLAINTEXT)
return git_credential_userpass_plaintext_new(cred, username, password);
if ((allowed_types & GIT_CREDENTIAL_DEFAULT) && *username == '\0' && *password == '\0')
return git_credential_default_new(cred);
return GIT_PASSTHROUGH;
}
GIT_INLINE(void) free_cred(git_credential **cred)
{
if (*cred) {
git_credential_free(*cred);
(*cred) = NULL;
}
}
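/*
 * Obtain credentials for a server or proxy, trying URL-embedded
 * credentials first and the user's credential callback second.
 * Returns 0 when a credential was produced, GIT_EAUTH when
 * authentication is required but no credential could be obtained, or
 * another negative value on error.
 */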
static int handle_auth(
http_server *server,
const char *server_type,
const char *url,
unsigned int allowed_schemetypes,
unsigned int allowed_credtypes,
git_credential_acquire_cb callback,
void *callback_payload)
{
int error = 1;
if (server->cred)
free_cred(&server->cred);
/* Start with URL-specified credentials, if there were any. */
if ((allowed_credtypes & GIT_CREDENTIAL_USERPASS_PLAINTEXT) &&
!server->url_cred_presented &&
server->url.username) {
error = apply_url_credentials(&server->cred, allowed_credtypes, server->url.username, server->url.password);
server->url_cred_presented = 1;
/* treat GIT_PASSTHROUGH as though no usable credential was found */
if (error == GIT_PASSTHROUGH)
error = 1;
}
if (error > 0 && callback) {
error = callback(&server->cred, url, server->url.username, allowed_credtypes, callback_payload);
/* treat GIT_PASSTHROUGH as if callback isn't set */
if (error == GIT_PASSTHROUGH)
error = 1;
}
if (error > 0) {
git_error_set(GIT_ERROR_HTTP, "%s authentication required but no callback set", server_type);
error = GIT_EAUTH;
}
if (!error)
server->auth_schemetypes = allowed_schemetypes;
return error;
}
GIT_INLINE(int) handle_remote_auth(
http_stream *stream,
git_http_response *response)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_remote_connect_options *connect_opts = &transport->owner->connect_opts;
if (response->server_auth_credtypes == 0) {
git_error_set(GIT_ERROR_HTTP, "server requires authentication that we do not support");
return GIT_EAUTH;
}
/* Otherwise, prompt for credentials. */
return handle_auth(
&transport->server,
SERVER_TYPE_REMOTE,
transport->owner->url,
response->server_auth_schemetypes,
response->server_auth_credtypes,
connect_opts->callbacks.credentials,
connect_opts->callbacks.payload);
}
GIT_INLINE(int) handle_proxy_auth(
http_stream *stream,
git_http_response *response)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_remote_connect_options *connect_opts = &transport->owner->connect_opts;
if (response->proxy_auth_credtypes == 0) {
git_error_set(GIT_ERROR_HTTP, "proxy requires authentication that we do not support");
return GIT_EAUTH;
}
/* Otherwise, prompt for credentials. */
return handle_auth(
&transport->proxy,
SERVER_TYPE_PROXY,
connect_opts->proxy_opts.url,
response->server_auth_schemetypes,
response->proxy_auth_credtypes,
connect_opts->proxy_opts.credentials,
connect_opts->proxy_opts.payload);
}
static bool allow_redirect(http_stream *stream)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
switch (transport->owner->connect_opts.follow_redirects) {
case GIT_REMOTE_REDIRECT_INITIAL:
return (stream->service->initial == 1);
case GIT_REMOTE_REDIRECT_ALL:
return true;
default:
return false;
}
}
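/*
 * Examine a response: while `allow_replay` is set, follow redirects
 * and restart challenge/response authentication as needed; otherwise
 * require a 200 with the expected content type.  `*complete` is set
 * once the response body is ready to be streamed.
 */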
static int handle_response(
bool *complete,
http_stream *stream,
git_http_response *response,
bool allow_replay)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
int error;
*complete = false;
if (allow_replay && git_http_response_is_redirect(response)) {
if (!response->location) {
git_error_set(GIT_ERROR_HTTP, "redirect without location");
return -1;
}
if (git_net_url_apply_redirect(&transport->server.url, response->location, allow_redirect(stream), stream->service->url) < 0) {
return -1;
}
return 0;
} else if (git_http_response_is_redirect(response)) {
git_error_set(GIT_ERROR_HTTP, "unexpected redirect");
return -1;
}
/* If we're in the middle of challenge/response auth, continue. */
if (allow_replay && response->resend_credentials) {
return 0;
} else if (allow_replay && response->status == GIT_HTTP_STATUS_UNAUTHORIZED) {
if ((error = handle_remote_auth(stream, response)) < 0)
return error;
return git_http_client_skip_body(transport->http_client);
} else if (allow_replay && response->status == GIT_HTTP_STATUS_PROXY_AUTHENTICATION_REQUIRED) {
if ((error = handle_proxy_auth(stream, response)) < 0)
return error;
return git_http_client_skip_body(transport->http_client);
} else if (response->status == GIT_HTTP_STATUS_UNAUTHORIZED ||
response->status == GIT_HTTP_STATUS_PROXY_AUTHENTICATION_REQUIRED) {
git_error_set(GIT_ERROR_HTTP, "unexpected authentication failure");
return GIT_EAUTH;
}
if (response->status != GIT_HTTP_STATUS_OK) {
git_error_set(GIT_ERROR_HTTP, "unexpected http status code: %d", response->status);
return -1;
}
/* The response must contain a Content-Type header. */
if (!response->content_type) {
git_error_set(GIT_ERROR_HTTP, "no content-type header in response");
return -1;
}
/* The Content-Type header must match our expectation. */
if (strcmp(response->content_type, stream->service->response_type) != 0) {
git_error_set(GIT_ERROR_HTTP, "invalid content-type: '%s'", response->content_type);
return -1;
}
*complete = true;
stream->state = HTTP_STATE_RECEIVING_RESPONSE;
return 0;
}
static int lookup_proxy(
bool *out_use,
http_subtransport *transport)
{
git_remote_connect_options *connect_opts = &transport->owner->connect_opts;
const char *proxy;
git_remote *remote;
char *config = NULL;
int error = 0;
*out_use = false;
git_net_url_dispose(&transport->proxy.url);
switch (connect_opts->proxy_opts.type) {
case GIT_PROXY_SPECIFIED:
proxy = connect_opts->proxy_opts.url;
break;
case GIT_PROXY_AUTO:
remote = transport->owner->owner;
error = git_remote__http_proxy(&config, remote, &transport->server.url);
if (error || !config)
goto done;
proxy = config;
break;
default:
return 0;
}
if (!proxy ||
(error = git_net_url_parse(&transport->proxy.url, proxy)) < 0)
goto done;
*out_use = true;
done:
git__free(config);
return error;
}
static int generate_request(
git_net_url *url,
git_http_request *request,
http_stream *stream,
size_t len)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
bool use_proxy = false;
int error;
if ((error = git_net_url_joinpath(url,
&transport->server.url, stream->service->url)) < 0 ||
(error = lookup_proxy(&use_proxy, transport)) < 0)
return error;
request->method = stream->service->method;
request->url = url;
request->credentials = transport->server.cred;
request->proxy = use_proxy ? &transport->proxy.url : NULL;
request->proxy_credentials = transport->proxy.cred;
request->custom_headers = &transport->owner->connect_opts.custom_headers;
if (stream->service->method == GIT_HTTP_METHOD_POST) {
request->chunked = stream->service->chunked;
request->content_length = stream->service->chunked ? 0 : len;
request->content_type = stream->service->request_type;
request->accept = stream->service->response_type;
request->expect_continue = git_http__expect_continue;
}
return 0;
}
/*
* Read from an HTTP transport - for the first invocation of this function
* (ie, when stream->state == HTTP_STATE_NONE), we'll send a GET request
* to the remote host. We will stream that data back on all subsequent
* calls.
*/
static int http_stream_read(
git_smart_subtransport_stream *s,
char *buffer,
size_t buffer_size,
size_t *out_len)
{
http_stream *stream = (http_stream *)s;
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_net_url url = GIT_NET_URL_INIT;
git_net_url proxy_url = GIT_NET_URL_INIT;
git_http_request request = {0};
git_http_response response = {0};
bool complete;
int error;
*out_len = 0;
if (stream->state == HTTP_STATE_NONE) {
stream->state = HTTP_STATE_SENDING_REQUEST;
stream->replay_count = 0;
}
/*
* Formulate the URL, send the request and read the response
* headers. Some of the request body may also be read.
*/
while (stream->state == HTTP_STATE_SENDING_REQUEST &&
stream->replay_count < GIT_HTTP_REPLAY_MAX) {
git_net_url_dispose(&url);
git_net_url_dispose(&proxy_url);
git_http_response_dispose(&response);
if ((error = generate_request(&url, &request, stream, 0)) < 0 ||
(error = git_http_client_send_request(
transport->http_client, &request)) < 0 ||
(error = git_http_client_read_response(
&response, transport->http_client)) < 0 ||
(error = handle_response(&complete, stream, &response, true)) < 0)
goto done;
if (complete)
break;
stream->replay_count++;
}
if (stream->state == HTTP_STATE_SENDING_REQUEST) {
git_error_set(GIT_ERROR_HTTP, "too many redirects or authentication replays");
error = GIT_ERROR; /* not GIT_EAUTH, because the exact cause is unclear */
goto done;
}
GIT_ASSERT(stream->state == HTTP_STATE_RECEIVING_RESPONSE);
error = git_http_client_read_body(transport->http_client, buffer, buffer_size);
if (error > 0) {
*out_len = error;
error = 0;
}
done:
git_net_url_dispose(&url);
git_net_url_dispose(&proxy_url);
git_http_response_dispose(&response);
return error;
}
static bool needs_probe(http_stream *stream)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
return (transport->server.auth_schemetypes == GIT_HTTP_AUTH_NTLM ||
transport->server.auth_schemetypes == GIT_HTTP_AUTH_NEGOTIATE);
}
static int send_probe(http_stream *stream)
{
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_http_client *client = transport->http_client;
const char *probe = "0000";
size_t len = 4;
git_net_url url = GIT_NET_URL_INIT;
git_http_request request = {0};
git_http_response response = {0};
bool complete = false;
size_t step, steps = 1;
int error;
/* NTLM requires a full challenge/response */
if (transport->server.auth_schemetypes == GIT_HTTP_AUTH_NTLM)
steps = GIT_AUTH_STEPS_NTLM;
/*
* Send at most two requests: one without any authentication to see
* if we get prompted to authenticate. If we do, send a second one
* with the first authentication message. The final authentication
* message with the response will occur with the *actual* POST data.
*/
for (step = 0; step < steps && !complete; step++) {
git_net_url_dispose(&url);
git_http_response_dispose(&response);
if ((error = generate_request(&url, &request, stream, len)) < 0 ||
(error = git_http_client_send_request(client, &request)) < 0 ||
(error = git_http_client_send_body(client, probe, len)) < 0 ||
(error = git_http_client_read_response(&response, client)) < 0 ||
(error = git_http_client_skip_body(client)) < 0 ||
(error = handle_response(&complete, stream, &response, true)) < 0)
goto done;
}
done:
git_http_response_dispose(&response);
git_net_url_dispose(&url);
return error;
}
/*
* Write to an HTTP transport - for the first invocation of this function
* (ie, when stream->state == HTTP_STATE_NONE), we'll send a POST request
* to the remote host. If we're sending chunked data, then subsequent calls
* will write the additional data given in the buffer. If we're not chunking,
* then the caller should have given us all the data in the original call.
* The caller should call http_stream_read_response to get the result.
*/
static int http_stream_write(
git_smart_subtransport_stream *s,
const char *buffer,
size_t len)
{
http_stream *stream = GIT_CONTAINER_OF(s, http_stream, parent);
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_net_url url = GIT_NET_URL_INIT;
git_http_request request = {0};
git_http_response response = {0};
int error;
while (stream->state == HTTP_STATE_NONE &&
stream->replay_count < GIT_HTTP_REPLAY_MAX) {
git_net_url_dispose(&url);
git_http_response_dispose(&response);
/*
* If we're authenticating with a connection-based mechanism
* (NTLM, Kerberos), send a "probe" packet. Servers SHOULD
* authenticate an entire keep-alive connection, so ideally
* we should not need to authenticate but some servers do
* not support this. By sending a probe packet, we'll be
* able to follow up with a second POST using the actual
* data (and, in the degenerate case, the authentication
* header as well).
*/
if (needs_probe(stream) && (error = send_probe(stream)) < 0)
goto done;
/* Send the regular POST request. */
if ((error = generate_request(&url, &request, stream, len)) < 0 ||
(error = git_http_client_send_request(
transport->http_client, &request)) < 0)
goto done;
if (request.expect_continue &&
git_http_client_has_response(transport->http_client)) {
bool complete;
/*
* If we got a response to an expect/continue, then
* it's something other than a 100 and we should
* deal with the response somehow.
*/
if ((error = git_http_client_read_response(&response, transport->http_client)) < 0 ||
(error = handle_response(&complete, stream, &response, true)) < 0)
goto done;
} else {
stream->state = HTTP_STATE_SENDING_REQUEST;
}
stream->replay_count++;
}
if (stream->state == HTTP_STATE_NONE) {
git_error_set(GIT_ERROR_HTTP,
"too many redirects or authentication replays");
error = GIT_ERROR; /* not GIT_EAUTH because the exact cause is unclear */
goto done;
}
GIT_ASSERT(stream->state == HTTP_STATE_SENDING_REQUEST);
error = git_http_client_send_body(transport->http_client, buffer, len);
done:
git_http_response_dispose(&response);
git_net_url_dispose(&url);
return error;
}
/*
* Read from an HTTP transport after it has been written to. This is the
* response from a POST request made by http_stream_write.
*/
static int http_stream_read_response(
git_smart_subtransport_stream *s,
char *buffer,
size_t buffer_size,
size_t *out_len)
{
http_stream *stream = (http_stream *)s;
http_subtransport *transport = OWNING_SUBTRANSPORT(stream);
git_http_client *client = transport->http_client;
git_http_response response = {0};
bool complete;
int error;
*out_len = 0;
if (stream->state == HTTP_STATE_SENDING_REQUEST) {
if ((error = git_http_client_read_response(&response, client)) < 0 ||
(error = handle_response(&complete, stream, &response, false)) < 0)
goto done;
GIT_ASSERT(complete);
stream->state = HTTP_STATE_RECEIVING_RESPONSE;
}
error = git_http_client_read_body(client, buffer, buffer_size);
if (error > 0) {
*out_len = error;
error = 0;
}
done:
git_http_response_dispose(&response);
return error;
}
static void http_stream_free(git_smart_subtransport_stream *stream)
{
http_stream *s = GIT_CONTAINER_OF(stream, http_stream, parent);
git__free(s);
}
static const http_service *select_service(git_smart_service_t action)
{
switch (action) {
case GIT_SERVICE_UPLOADPACK_LS:
return &upload_pack_ls_service;
case GIT_SERVICE_UPLOADPACK:
return &upload_pack_service;
case GIT_SERVICE_RECEIVEPACK_LS:
return &receive_pack_ls_service;
case GIT_SERVICE_RECEIVEPACK:
return &receive_pack_service;
}
return NULL;
}
static int http_action(
git_smart_subtransport_stream **out,
git_smart_subtransport *t,
const char *url,
git_smart_service_t action)
{
http_subtransport *transport = GIT_CONTAINER_OF(t, http_subtransport, parent);
git_remote_connect_options *connect_opts = &transport->owner->connect_opts;
http_stream *stream;
const http_service *service;
int error;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(t);
*out = NULL;
/*
* If we've seen a redirect then preserve the location that we've
* been given. This is important to continue authorization against
* the redirect target, not the user-given source; the endpoint may
* have redirected us from HTTP->HTTPS and is using an auth mechanism
* that would be insecure in plaintext (eg, HTTP Basic).
*/
if (!git_net_url_valid(&transport->server.url) &&
(error = git_net_url_parse(&transport->server.url, url)) < 0)
return error;
if ((service = select_service(action)) == NULL) {
git_error_set(GIT_ERROR_HTTP, "invalid action");
return -1;
}
stream = git__calloc(sizeof(http_stream), 1);
GIT_ERROR_CHECK_ALLOC(stream);
if (!transport->http_client) {
git_http_client_options opts = {0};
opts.server_certificate_check_cb = connect_opts->callbacks.certificate_check;
opts.server_certificate_check_payload = connect_opts->callbacks.payload;
opts.proxy_certificate_check_cb = connect_opts->proxy_opts.certificate_check;
opts.proxy_certificate_check_payload = connect_opts->proxy_opts.payload;
		if (git_http_client_new(&transport->http_client, &opts) < 0) {
			git__free(stream);
			return -1;
		}
}
stream->service = service;
stream->parent.subtransport = &transport->parent;
if (service->method == GIT_HTTP_METHOD_GET) {
stream->parent.read = http_stream_read;
} else {
stream->parent.write = http_stream_write;
stream->parent.read = http_stream_read_response;
}
stream->parent.free = http_stream_free;
*out = (git_smart_subtransport_stream *)stream;
return 0;
}
static int http_close(git_smart_subtransport *t)
{
http_subtransport *transport = GIT_CONTAINER_OF(t, http_subtransport, parent);
free_cred(&transport->server.cred);
free_cred(&transport->proxy.cred);
transport->server.url_cred_presented = false;
transport->proxy.url_cred_presented = false;
git_net_url_dispose(&transport->server.url);
git_net_url_dispose(&transport->proxy.url);
return 0;
}
static void http_free(git_smart_subtransport *t)
{
http_subtransport *transport = GIT_CONTAINER_OF(t, http_subtransport, parent);
git_http_client_free(transport->http_client);
http_close(t);
git__free(transport);
}
int git_smart_subtransport_http(git_smart_subtransport **out, git_transport *owner, void *param)
{
http_subtransport *transport;
GIT_UNUSED(param);
GIT_ASSERT_ARG(out);
transport = git__calloc(sizeof(http_subtransport), 1);
GIT_ERROR_CHECK_ALLOC(transport);
transport->owner = (transport_smart *)owner;
transport->parent.action = http_action;
transport->parent.close = http_close;
transport->parent.free = http_free;
*out = (git_smart_subtransport *) transport;
return 0;
}
#endif /* !GIT_WINHTTP */
| libgit2-main | src/libgit2/transports/http.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "git2.h"
#include "git2/odb_backend.h"
#include "smart.h"
#include "refs.h"
#include "repository.h"
#include "push.h"
#include "pack-objects.h"
#include "remote.h"
#include "util.h"
#include "revwalk.h"
#define NETWORK_XFER_THRESHOLD (100*1024)
/* The minimal interval between progress updates (in seconds). */
#define MIN_PROGRESS_UPDATE_INTERVAL 0.5
bool git_smart__ofs_delta_enabled = true;
int git_smart__store_refs(transport_smart *t, int flushes)
{
gitno_buffer *buf = &t->buffer;
git_vector *refs = &t->refs;
int error, flush = 0, recvd;
const char *line_end = NULL;
git_pkt *pkt = NULL;
size_t i;
/* Clear existing refs in case git_remote_connect() is called again
* after git_remote_disconnect().
*/
git_vector_foreach(refs, i, pkt) {
git_pkt_free(pkt);
}
git_vector_clear(refs);
pkt = NULL;
do {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, &line_end, buf->data, buf->offset);
else
error = GIT_EBUFS;
if (error < 0 && error != GIT_EBUFS)
return error;
if (error == GIT_EBUFS) {
if ((recvd = gitno_recv(buf)) < 0)
return recvd;
if (recvd == 0) {
git_error_set(GIT_ERROR_NET, "early EOF");
return GIT_EEOF;
}
continue;
}
if (gitno_consume(buf, line_end) < 0)
return -1;
if (pkt->type == GIT_PKT_ERR) {
git_error_set(GIT_ERROR_NET, "remote error: %s", ((git_pkt_err *)pkt)->error);
git__free(pkt);
return -1;
}
if (pkt->type != GIT_PKT_FLUSH && git_vector_insert(refs, pkt) < 0)
return -1;
if (pkt->type == GIT_PKT_FLUSH) {
flush++;
git_pkt_free(pkt);
}
} while (flush < flushes);
return flush;
}
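/*
 * An abbreviated, hypothetical advertisement consumed above:
 *
 *   00e7<oid> HEAD\0multi_ack side-band-64k ofs-delta symref=HEAD:refs/heads/main
 *   003f<oid> refs/heads/main
 *   0000
 *
 * Each pkt-line becomes a git_pkt in `refs`; each terminating flush
 * pkt ("0000") increments `flush` until `flushes` have been seen.
 */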
static int append_symref(const char **out, git_vector *symrefs, const char *ptr)
{
int error;
const char *end;
git_str buf = GIT_STR_INIT;
git_refspec *mapping = NULL;
ptr += strlen(GIT_CAP_SYMREF);
if (*ptr != '=')
goto on_invalid;
ptr++;
if (!(end = strchr(ptr, ' ')) &&
!(end = strchr(ptr, '\0')))
goto on_invalid;
if ((error = git_str_put(&buf, ptr, end - ptr)) < 0)
return error;
/* symref mapping has refspec format */
mapping = git__calloc(1, sizeof(git_refspec));
GIT_ERROR_CHECK_ALLOC(mapping);
error = git_refspec__parse(mapping, git_str_cstr(&buf), true);
git_str_dispose(&buf);
/* if the error isn't OOM, then it's a parse error; let's use a nicer message */
if (error < 0) {
if (git_error_last()->klass != GIT_ERROR_NOMEMORY)
goto on_invalid;
git__free(mapping);
return error;
}
if ((error = git_vector_insert(symrefs, mapping)) < 0)
return error;
*out = end;
return 0;
on_invalid:
git_error_set(GIT_ERROR_NET, "remote sent invalid symref");
git_refspec__dispose(mapping);
git__free(mapping);
return -1;
}
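/*
 * For example, the capability "symref=HEAD:refs/heads/main" is parsed
 * above into a refspec mapping HEAD onto refs/heads/main.
 */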
int git_smart__detect_caps(git_pkt_ref *pkt, transport_smart_caps *caps, git_vector *symrefs)
{
const char *ptr;
/* No refs or capabilities, odd but not a problem */
if (pkt == NULL || pkt->capabilities == NULL)
return GIT_ENOTFOUND;
ptr = pkt->capabilities;
while (ptr != NULL && *ptr != '\0') {
if (*ptr == ' ')
ptr++;
if (git_smart__ofs_delta_enabled && !git__prefixcmp(ptr, GIT_CAP_OFS_DELTA)) {
caps->common = caps->ofs_delta = 1;
ptr += strlen(GIT_CAP_OFS_DELTA);
continue;
}
/* Keep multi_ack_detailed before multi_ack */
if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK_DETAILED)) {
caps->common = caps->multi_ack_detailed = 1;
ptr += strlen(GIT_CAP_MULTI_ACK_DETAILED);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_MULTI_ACK)) {
caps->common = caps->multi_ack = 1;
ptr += strlen(GIT_CAP_MULTI_ACK);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_INCLUDE_TAG)) {
caps->common = caps->include_tag = 1;
ptr += strlen(GIT_CAP_INCLUDE_TAG);
continue;
}
/* Keep side-band check after side-band-64k */
if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND_64K)) {
caps->common = caps->side_band_64k = 1;
ptr += strlen(GIT_CAP_SIDE_BAND_64K);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_SIDE_BAND)) {
caps->common = caps->side_band = 1;
ptr += strlen(GIT_CAP_SIDE_BAND);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_DELETE_REFS)) {
caps->common = caps->delete_refs = 1;
ptr += strlen(GIT_CAP_DELETE_REFS);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_THIN_PACK)) {
caps->common = caps->thin_pack = 1;
ptr += strlen(GIT_CAP_THIN_PACK);
continue;
}
if (!git__prefixcmp(ptr, GIT_CAP_SYMREF)) {
int error;
if ((error = append_symref(&ptr, symrefs, ptr)) < 0)
return error;
continue;
}
	if (!git__prefixcmp(ptr, GIT_CAP_WANT_TIP_SHA1)) {
		caps->common = caps->want_tip_sha1 = 1;
		ptr += strlen(GIT_CAP_WANT_TIP_SHA1);
		continue;
	}
	if (!git__prefixcmp(ptr, GIT_CAP_WANT_REACHABLE_SHA1)) {
		caps->common = caps->want_reachable_sha1 = 1;
		ptr += strlen(GIT_CAP_WANT_REACHABLE_SHA1);
		continue;
	}
/* We don't know this capability, so skip it */
ptr = strchr(ptr, ' ');
}
return 0;
}
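/*
 * A typical capability list handled above (illustrative):
 *
 *   "multi_ack_detailed side-band-64k ofs-delta thin-pack agent=git/2.x"
 *
 * Unknown tokens such as "agent=..." are skipped by advancing to the
 * next space.
 */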
static int recv_pkt(git_pkt **out_pkt, git_pkt_type *out_type, gitno_buffer *buf)
{
const char *ptr = buf->data, *line_end = ptr;
git_pkt *pkt = NULL;
int error = 0, ret;
do {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, &line_end, ptr, buf->offset);
else
error = GIT_EBUFS;
if (error == 0)
break; /* return the pkt */
if (error < 0 && error != GIT_EBUFS)
return error;
if ((ret = gitno_recv(buf)) < 0) {
return ret;
} else if (ret == 0) {
git_error_set(GIT_ERROR_NET, "early EOF");
return GIT_EEOF;
}
} while (error);
if (gitno_consume(buf, line_end) < 0)
return -1;
if (out_type != NULL)
*out_type = pkt->type;
if (out_pkt != NULL)
*out_pkt = pkt;
else
git__free(pkt);
return error;
}
static int store_common(transport_smart *t)
{
git_pkt *pkt = NULL;
gitno_buffer *buf = &t->buffer;
int error;
do {
if ((error = recv_pkt(&pkt, NULL, buf)) < 0)
return error;
if (pkt->type != GIT_PKT_ACK) {
git__free(pkt);
return 0;
}
if (git_vector_insert(&t->common, pkt) < 0) {
git__free(pkt);
return -1;
}
} while (1);
return 0;
}
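/*
 * Drain ACK packets until we see a NAK or a terminal ACK, ie one
 * whose status is not "continue", "common" or "ready".
 */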
static int wait_while_ack(gitno_buffer *buf)
{
int error;
git_pkt *pkt = NULL;
git_pkt_ack *ack = NULL;
while (1) {
git_pkt_free(pkt);
if ((error = recv_pkt(&pkt, NULL, buf)) < 0)
return error;
if (pkt->type == GIT_PKT_NAK)
break;
if (pkt->type != GIT_PKT_ACK)
continue;
ack = (git_pkt_ack*)pkt;
if (ack->status != GIT_ACK_CONTINUE &&
ack->status != GIT_ACK_COMMON &&
ack->status != GIT_ACK_READY) {
break;
}
}
git_pkt_free(pkt);
return 0;
}
int git_smart__negotiate_fetch(git_transport *transport, git_repository *repo, const git_remote_head * const *wants, size_t count)
{
transport_smart *t = (transport_smart *)transport;
git_revwalk__push_options opts = GIT_REVWALK__PUSH_OPTIONS_INIT;
gitno_buffer *buf = &t->buffer;
git_str data = GIT_STR_INIT;
git_revwalk *walk = NULL;
int error = -1;
git_pkt_type pkt_type;
unsigned int i;
git_oid oid;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
return error;
if ((error = git_revwalk_new(&walk, repo)) < 0)
goto on_error;
opts.insert_by_date = 1;
if ((error = git_revwalk__push_glob(walk, "refs/*", &opts)) < 0)
goto on_error;
/*
* Our support for ACK extensions is simply to parse them. On
* the first ACK we will accept that as enough common
* objects. We give up if we haven't found an answer in the
* first 256 we send.
*/
i = 0;
while (i < 256) {
error = git_revwalk_next(&oid, walk);
if (error < 0) {
if (GIT_ITEROVER == error)
break;
goto on_error;
}
git_pkt_buffer_have(&oid, &data);
i++;
if (i % 20 == 0) {
if (t->cancelled.val) {
git_error_set(GIT_ERROR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
git_pkt_buffer_flush(&data);
if (git_str_oom(&data)) {
error = -1;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_str_clear(&data);
if (t->caps.multi_ack || t->caps.multi_ack_detailed) {
if ((error = store_common(t)) < 0)
goto on_error;
} else {
if ((error = recv_pkt(NULL, &pkt_type, buf)) < 0)
goto on_error;
if (pkt_type == GIT_PKT_ACK) {
break;
} else if (pkt_type == GIT_PKT_NAK) {
continue;
} else {
git_error_set(GIT_ERROR_NET, "unexpected pkt type");
error = -1;
goto on_error;
}
}
}
if (t->common.length > 0)
break;
if (i % 20 == 0 && t->rpc) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_str_oom(&data)) {
error = -1;
goto on_error;
}
}
}
/* Tell the other end that we're done negotiating */
if (t->rpc && t->common.length > 0) {
git_pkt_ack *pkt;
unsigned int j;
if ((error = git_pkt_buffer_wants(wants, count, &t->caps, &data)) < 0)
goto on_error;
git_vector_foreach(&t->common, j, pkt) {
if ((error = git_pkt_buffer_have(&pkt->oid, &data)) < 0)
goto on_error;
}
if (git_str_oom(&data)) {
error = -1;
goto on_error;
}
}
if ((error = git_pkt_buffer_done(&data)) < 0)
goto on_error;
if (t->cancelled.val) {
git_error_set(GIT_ERROR_NET, "The fetch was cancelled by the user");
error = GIT_EUSER;
goto on_error;
}
if ((error = git_smart__negotiation_step(&t->parent, data.ptr, data.size)) < 0)
goto on_error;
git_str_dispose(&data);
git_revwalk_free(walk);
/* Now let's eat up whatever the server gives us */
if (!t->caps.multi_ack && !t->caps.multi_ack_detailed) {
if ((error = recv_pkt(NULL, &pkt_type, buf)) < 0)
return error;
if (pkt_type != GIT_PKT_ACK && pkt_type != GIT_PKT_NAK) {
git_error_set(GIT_ERROR_NET, "unexpected pkt type");
return -1;
}
} else {
error = wait_while_ack(buf);
}
return error;
on_error:
git_revwalk_free(walk);
git_str_dispose(&data);
return error;
}
static int no_sideband(transport_smart *t, struct git_odb_writepack *writepack, gitno_buffer *buf, git_indexer_progress *stats)
{
int recvd;
do {
if (t->cancelled.val) {
git_error_set(GIT_ERROR_NET, "the fetch was cancelled by the user");
return GIT_EUSER;
}
if (writepack->append(writepack, buf->data, buf->offset, stats) < 0)
return -1;
gitno_consume_n(buf, buf->offset);
if ((recvd = gitno_recv(buf)) < 0)
return recvd;
} while(recvd > 0);
if (writepack->commit(writepack, stats) < 0)
return -1;
return 0;
}
struct network_packetsize_payload
{
git_indexer_progress_cb callback;
void *payload;
git_indexer_progress *stats;
size_t last_fired_bytes;
};
static int network_packetsize(size_t received, void *payload)
{
struct network_packetsize_payload *npp = (struct network_packetsize_payload*)payload;
/* Accumulate bytes */
npp->stats->received_bytes += received;
/* Fire notification if the threshold is reached */
if ((npp->stats->received_bytes - npp->last_fired_bytes) > NETWORK_XFER_THRESHOLD) {
npp->last_fired_bytes = npp->stats->received_bytes;
if (npp->callback(npp->stats, npp->payload))
return GIT_EUSER;
}
return 0;
}
int git_smart__download_pack(
git_transport *transport,
git_repository *repo,
git_indexer_progress *stats)
{
transport_smart *t = (transport_smart *)transport;
gitno_buffer *buf = &t->buffer;
git_odb *odb;
struct git_odb_writepack *writepack = NULL;
int error = 0;
struct network_packetsize_payload npp = {0};
git_indexer_progress_cb progress_cb = t->connect_opts.callbacks.transfer_progress;
void *progress_payload = t->connect_opts.callbacks.payload;
memset(stats, 0, sizeof(git_indexer_progress));
if (progress_cb) {
npp.callback = progress_cb;
npp.payload = progress_payload;
npp.stats = stats;
t->packetsize_cb = &network_packetsize;
t->packetsize_payload = &npp;
/* We might have something in the buffer already from negotiate_fetch */
if (t->buffer.offset > 0 && !t->cancelled.val)
if (t->packetsize_cb(t->buffer.offset, t->packetsize_payload))
git_atomic32_set(&t->cancelled, 1);
}
if ((error = git_repository_odb__weakptr(&odb, repo)) < 0 ||
((error = git_odb_write_pack(&writepack, odb, progress_cb, progress_payload)) != 0))
goto done;
	/*
	 * If the remote doesn't support the side-band, we can feed
	 * the data directly to the pack writer. Otherwise, we need to
	 * demultiplex the side-band channels and route pack data,
	 * progress messages and errors to their respective handlers.
	 */
if (!t->caps.side_band && !t->caps.side_band_64k) {
error = no_sideband(t, writepack, buf, stats);
goto done;
}
do {
git_pkt *pkt = NULL;
/* Check cancellation before network call */
if (t->cancelled.val) {
git_error_clear();
error = GIT_EUSER;
goto done;
}
if ((error = recv_pkt(&pkt, NULL, buf)) >= 0) {
/* Check cancellation after network call */
if (t->cancelled.val) {
git_error_clear();
error = GIT_EUSER;
} else if (pkt->type == GIT_PKT_PROGRESS) {
if (t->connect_opts.callbacks.sideband_progress) {
git_pkt_progress *p = (git_pkt_progress *) pkt;
if (p->len > INT_MAX) {
git_error_set(GIT_ERROR_NET, "oversized progress message");
error = GIT_ERROR;
goto done;
}
error = t->connect_opts.callbacks.sideband_progress(p->data, (int)p->len, t->connect_opts.callbacks.payload);
}
} else if (pkt->type == GIT_PKT_DATA) {
git_pkt_data *p = (git_pkt_data *) pkt;
if (p->len)
error = writepack->append(writepack, p->data, p->len, stats);
} else if (pkt->type == GIT_PKT_FLUSH) {
/* A flush indicates the end of the packfile */
git__free(pkt);
break;
}
}
git_pkt_free(pkt);
if (error < 0)
goto done;
} while (1);
/*
* Trailing execution of progress_cb, if necessary...
* Only the callback through the npp datastructure currently
* updates the last_fired_bytes value. It is possible that
* progress has already been reported with the correct
* "received_bytes" value, but until (if?) this is unified
* then we will report progress again to be sure that the
* correct last received_bytes value is reported.
*/
if (npp.callback && npp.stats->received_bytes > npp.last_fired_bytes) {
error = npp.callback(npp.stats, npp.payload);
if (error != 0)
goto done;
}
error = writepack->commit(writepack, stats);
done:
if (writepack)
writepack->free(writepack);
if (progress_cb) {
t->packetsize_cb = NULL;
t->packetsize_payload = NULL;
}
return error;
}
static int gen_pktline(git_str *buf, git_push *push)
{
push_spec *spec;
size_t i, len;
char old_id[GIT_OID_SHA1_HEXSIZE+1], new_id[GIT_OID_SHA1_HEXSIZE+1];
old_id[GIT_OID_SHA1_HEXSIZE] = '\0'; new_id[GIT_OID_SHA1_HEXSIZE] = '\0';
git_vector_foreach(&push->specs, i, spec) {
len = 2*GIT_OID_SHA1_HEXSIZE + 7 + strlen(spec->refspec.dst);
if (i == 0) {
++len; /* '\0' */
if (push->report_status)
len += strlen(GIT_CAP_REPORT_STATUS) + 1;
len += strlen(GIT_CAP_SIDE_BAND_64K) + 1;
}
git_oid_fmt(old_id, &spec->roid);
git_oid_fmt(new_id, &spec->loid);
git_str_printf(buf, "%04"PRIxZ"%s %s %s", len, old_id, new_id, spec->refspec.dst);
if (i == 0) {
git_str_putc(buf, '\0');
/* Core git always starts their capabilities string with a space */
if (push->report_status) {
git_str_putc(buf, ' ');
git_str_printf(buf, GIT_CAP_REPORT_STATUS);
}
git_str_putc(buf, ' ');
git_str_printf(buf, GIT_CAP_SIDE_BAND_64K);
}
git_str_putc(buf, '\n');
}
git_str_puts(buf, "0000");
return git_str_oom(buf) ? -1 : 0;
}
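/*
 * Illustrative pkt-line output of gen_pktline() for a single push
 * spec (OIDs elided); the first pkt carries the capability list after
 * a NUL byte:
 *
 *   "009b<old-oid> <new-oid> refs/heads/main\0 report-status side-band-64k\n"
 *   "0000"
 *
 * where the leading four hex digits are the length computed above.
 */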
static int add_push_report_pkt(git_push *push, git_pkt *pkt)
{
push_status *status;
switch (pkt->type) {
case GIT_PKT_OK:
status = git__calloc(1, sizeof(push_status));
GIT_ERROR_CHECK_ALLOC(status);
status->msg = NULL;
status->ref = git__strdup(((git_pkt_ok *)pkt)->ref);
if (!status->ref ||
git_vector_insert(&push->status, status) < 0) {
git_push_status_free(status);
return -1;
}
break;
case GIT_PKT_NG:
status = git__calloc(1, sizeof(push_status));
GIT_ERROR_CHECK_ALLOC(status);
status->ref = git__strdup(((git_pkt_ng *)pkt)->ref);
status->msg = git__strdup(((git_pkt_ng *)pkt)->msg);
if (!status->ref || !status->msg ||
git_vector_insert(&push->status, status) < 0) {
git_push_status_free(status);
return -1;
}
break;
case GIT_PKT_UNPACK:
push->unpack_ok = ((git_pkt_unpack *)pkt)->unpack_ok;
break;
case GIT_PKT_FLUSH:
return GIT_ITEROVER;
default:
git_error_set(GIT_ERROR_NET, "report-status: protocol error");
return -1;
}
return 0;
}
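/*
 * Example report-status packets handled above, one per pkt-line
 * (illustrative):
 *
 *   unpack ok
 *   ok refs/heads/main
 *   ng refs/heads/topic non-fast-forward
 *   <flush>
 *
 * The flush pkt yields GIT_ITEROVER, signalling the caller that the
 * report is complete.
 */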
static int add_push_report_sideband_pkt(git_push *push, git_pkt_data *data_pkt, git_str *data_pkt_buf)
{
git_pkt *pkt;
const char *line, *line_end = NULL;
size_t line_len;
int error;
int reading_from_buf = data_pkt_buf->size > 0;
if (reading_from_buf) {
/* We had an existing partial packet, so add the new
* packet to the buffer and parse the whole thing */
git_str_put(data_pkt_buf, data_pkt->data, data_pkt->len);
line = data_pkt_buf->ptr;
line_len = data_pkt_buf->size;
}
else {
line = data_pkt->data;
line_len = data_pkt->len;
}
while (line_len > 0) {
error = git_pkt_parse_line(&pkt, &line_end, line, line_len);
if (error == GIT_EBUFS) {
/* Buffer the data when the inner packet is split
* across multiple sideband packets */
if (!reading_from_buf)
git_str_put(data_pkt_buf, line, line_len);
error = 0;
goto done;
}
else if (error < 0)
goto done;
/* Advance in the buffer */
line_len -= (line_end - line);
line = line_end;
error = add_push_report_pkt(push, pkt);
git_pkt_free(pkt);
if (error < 0 && error != GIT_ITEROVER)
goto done;
}
error = 0;
done:
if (reading_from_buf)
git_str_consume(data_pkt_buf, line_end);
return error;
}
static int parse_report(transport_smart *transport, git_push *push)
{
git_pkt *pkt = NULL;
const char *line_end = NULL;
gitno_buffer *buf = &transport->buffer;
int error, recvd;
git_str data_pkt_buf = GIT_STR_INIT;
for (;;) {
if (buf->offset > 0)
error = git_pkt_parse_line(&pkt, &line_end,
buf->data, buf->offset);
else
error = GIT_EBUFS;
if (error < 0 && error != GIT_EBUFS) {
error = -1;
goto done;
}
if (error == GIT_EBUFS) {
if ((recvd = gitno_recv(buf)) < 0) {
error = recvd;
goto done;
}
if (recvd == 0) {
git_error_set(GIT_ERROR_NET, "early EOF");
error = GIT_EEOF;
goto done;
}
continue;
}
		if (gitno_consume(buf, line_end) < 0) {
			error = -1;
			goto done;
		}
error = 0;
switch (pkt->type) {
case GIT_PKT_DATA:
/* This is a sideband packet which contains other packets */
error = add_push_report_sideband_pkt(push, (git_pkt_data *)pkt, &data_pkt_buf);
break;
case GIT_PKT_ERR:
git_error_set(GIT_ERROR_NET, "report-status: Error reported: %s",
((git_pkt_err *)pkt)->error);
error = -1;
break;
case GIT_PKT_PROGRESS:
if (transport->connect_opts.callbacks.sideband_progress) {
git_pkt_progress *p = (git_pkt_progress *) pkt;
if (p->len > INT_MAX) {
git_error_set(GIT_ERROR_NET, "oversized progress message");
error = GIT_ERROR;
goto done;
}
error = transport->connect_opts.callbacks.sideband_progress(p->data, (int)p->len, transport->connect_opts.callbacks.payload);
}
break;
default:
error = add_push_report_pkt(push, pkt);
break;
}
git_pkt_free(pkt);
/* add_push_report_pkt returns GIT_ITEROVER when it receives a flush */
if (error == GIT_ITEROVER) {
error = 0;
if (data_pkt_buf.size > 0) {
/* If there was data remaining in the pack data buffer,
* then the server sent a partial pkt-line */
git_error_set(GIT_ERROR_NET, "incomplete pack data pkt-line");
error = GIT_ERROR;
}
goto done;
}
if (error < 0) {
goto done;
}
}
done:
git_str_dispose(&data_pkt_buf);
return error;
}
static int add_ref_from_push_spec(git_vector *refs, push_spec *push_spec)
{
git_pkt_ref *added = git__calloc(1, sizeof(git_pkt_ref));
GIT_ERROR_CHECK_ALLOC(added);
added->type = GIT_PKT_REF;
git_oid_cpy(&added->head.oid, &push_spec->loid);
added->head.name = git__strdup(push_spec->refspec.dst);
if (!added->head.name ||
git_vector_insert(refs, added) < 0) {
git_pkt_free((git_pkt *)added);
return -1;
}
return 0;
}
static int update_refs_from_report(
git_vector *refs,
git_vector *push_specs,
git_vector *push_report)
{
git_pkt_ref *ref;
push_spec *push_spec;
push_status *push_status;
size_t i, j, refs_len;
int cmp;
/* For each push spec we sent to the server, we should have
* gotten back a status packet in the push report */
if (push_specs->length != push_report->length) {
git_error_set(GIT_ERROR_NET, "report-status: protocol error");
return -1;
}
/* We require that push_specs be sorted with push_spec_rref_cmp,
* and that push_report be sorted with push_status_ref_cmp */
git_vector_sort(push_specs);
git_vector_sort(push_report);
git_vector_foreach(push_specs, i, push_spec) {
push_status = git_vector_get(push_report, i);
/* For each push spec we sent to the server, we should have
* gotten back a status packet in the push report which matches */
if (strcmp(push_spec->refspec.dst, push_status->ref)) {
git_error_set(GIT_ERROR_NET, "report-status: protocol error");
return -1;
}
}
/* We require that refs be sorted with ref_name_cmp */
git_vector_sort(refs);
i = j = 0;
refs_len = refs->length;
/* Merge join push_specs with refs */
while (i < push_specs->length && j < refs_len) {
push_spec = git_vector_get(push_specs, i);
push_status = git_vector_get(push_report, i);
ref = git_vector_get(refs, j);
cmp = strcmp(push_spec->refspec.dst, ref->head.name);
/* Iterate appropriately */
if (cmp <= 0) i++;
if (cmp >= 0) j++;
/* Add case */
if (cmp < 0 &&
!push_status->msg &&
add_ref_from_push_spec(refs, push_spec) < 0)
return -1;
/* Update case, delete case */
if (cmp == 0 &&
!push_status->msg)
git_oid_cpy(&ref->head.oid, &push_spec->loid);
}
for (; i < push_specs->length; i++) {
push_spec = git_vector_get(push_specs, i);
push_status = git_vector_get(push_report, i);
/* Add case */
if (!push_status->msg &&
add_ref_from_push_spec(refs, push_spec) < 0)
return -1;
}
/* Remove any refs which we updated to have a zero OID. */
git_vector_rforeach(refs, i, ref) {
if (git_oid_is_zero(&ref->head.oid)) {
git_vector_remove(refs, i);
git_pkt_free((git_pkt *)ref);
}
}
git_vector_sort(refs);
return 0;
}
struct push_packbuilder_payload
{
git_smart_subtransport_stream *stream;
git_packbuilder *pb;
git_push_transfer_progress_cb cb;
void *cb_payload;
size_t last_bytes;
double last_progress_report_time;
};
static int stream_thunk(void *buf, size_t size, void *data)
{
int error = 0;
struct push_packbuilder_payload *payload = data;
if ((error = payload->stream->write(payload->stream, (const char *)buf, size)) < 0)
return error;
if (payload->cb) {
double current_time = git__timer();
double elapsed = current_time - payload->last_progress_report_time;
payload->last_bytes += size;
if (elapsed < 0 || elapsed >= MIN_PROGRESS_UPDATE_INTERVAL) {
payload->last_progress_report_time = current_time;
error = payload->cb(payload->pb->nr_written, payload->pb->nr_objects, payload->last_bytes, payload->cb_payload);
}
}
return error;
}
int git_smart__push(git_transport *transport, git_push *push)
{
transport_smart *t = (transport_smart *)transport;
git_remote_callbacks *cbs = &t->connect_opts.callbacks;
struct push_packbuilder_payload packbuilder_payload = {0};
git_str pktline = GIT_STR_INIT;
int error = 0, need_pack = 0;
push_spec *spec;
unsigned int i;
packbuilder_payload.pb = push->pb;
if (cbs && cbs->push_transfer_progress) {
packbuilder_payload.cb = cbs->push_transfer_progress;
packbuilder_payload.cb_payload = cbs->payload;
}
#ifdef PUSH_DEBUG
{
git_remote_head *head;
char hex[GIT_OID_SHA1_HEXSIZE+1]; hex[GIT_OID_SHA1_HEXSIZE] = '\0';
git_vector_foreach(&push->remote->refs, i, head) {
git_oid_fmt(hex, &head->oid);
fprintf(stderr, "%s (%s)\n", hex, head->name);
}
git_vector_foreach(&push->specs, i, spec) {
git_oid_fmt(hex, &spec->roid);
fprintf(stderr, "%s (%s) -> ", hex, spec->lref);
git_oid_fmt(hex, &spec->loid);
fprintf(stderr, "%s (%s)\n", hex, spec->rref ?
spec->rref : spec->lref);
}
}
#endif
	/*
	 * Figure out whether we need to send a packfile; we do in all
	 * cases except when we only send delete commands.
	 */
git_vector_foreach(&push->specs, i, spec) {
if (spec->refspec.src && spec->refspec.src[0] != '\0') {
need_pack = 1;
break;
}
}
/* prepare pack before sending pack header to avoid timeouts */
if (need_pack && ((error = git_packbuilder__prepare(push->pb))) < 0)
goto done;
if ((error = git_smart__get_push_stream(t, &packbuilder_payload.stream)) < 0 ||
(error = gen_pktline(&pktline, push)) < 0 ||
(error = packbuilder_payload.stream->write(packbuilder_payload.stream, git_str_cstr(&pktline), git_str_len(&pktline))) < 0)
goto done;
if (need_pack &&
(error = git_packbuilder_foreach(push->pb, &stream_thunk, &packbuilder_payload)) < 0)
goto done;
/* If we sent nothing or the server doesn't support report-status, then
* we consider the pack to have been unpacked successfully */
if (!push->specs.length || !push->report_status)
push->unpack_ok = 1;
else if ((error = parse_report(t, push)) < 0)
goto done;
/* If progress is being reported write the final report */
if (cbs && cbs->push_transfer_progress) {
error = cbs->push_transfer_progress(
push->pb->nr_written,
push->pb->nr_objects,
packbuilder_payload.last_bytes,
cbs->payload);
if (error < 0)
goto done;
}
if (push->status.length) {
error = update_refs_from_report(&t->refs, &push->specs, &push->status);
if (error < 0)
goto done;
error = git_smart__update_heads(t, NULL);
}
done:
git_str_dispose(&pktline);
return error;
}
| libgit2-main | src/libgit2/transports/smart_protocol.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "common.h"
#include "netops.h"
#include "stream.h"
#include "streams/socket.h"
#include "git2/sys/transport.h"
#define OWNING_SUBTRANSPORT(s) ((git_subtransport *)(s)->parent.subtransport)
static const char prefix_git[] = "git://";
static const char cmd_uploadpack[] = "git-upload-pack";
static const char cmd_receivepack[] = "git-receive-pack";
typedef struct {
git_smart_subtransport_stream parent;
git_stream *io;
const char *cmd;
char *url;
unsigned sent_command : 1;
} git_proto_stream;
typedef struct {
git_smart_subtransport parent;
git_transport *owner;
git_proto_stream *current_stream;
} git_subtransport;
/*
* Create a git protocol request.
*
* For example: 0035git-upload-pack /libgit2/libgit2\0host=github.com\0
*/
static int gen_proto(git_str *request, const char *cmd, const char *url)
{
char *delim, *repo;
char host[] = "host=";
size_t len;
delim = strchr(url, '/');
if (delim == NULL) {
git_error_set(GIT_ERROR_NET, "malformed URL");
return -1;
}
	repo = delim;

	/* Skip the leading '/' on "/~user"-style paths */
	if (repo[1] == '~')
		++repo;
delim = strchr(url, ':');
if (delim == NULL)
delim = strchr(url, '/');
len = 4 + strlen(cmd) + 1 + strlen(repo) + 1 + strlen(host) + (delim - url) + 1;
git_str_grow(request, len);
git_str_printf(request, "%04x%s %s%c%s",
(unsigned int)(len & 0x0FFFF), cmd, repo, 0, host);
git_str_put(request, url, delim - url);
git_str_putc(request, '\0');
if (git_str_oom(request))
return -1;
return 0;
}
static int send_command(git_proto_stream *s)
{
git_str request = GIT_STR_INIT;
int error;
if ((error = gen_proto(&request, s->cmd, s->url)) < 0)
goto cleanup;
if ((error = git_stream__write_full(s->io, request.ptr, request.size, 0)) < 0)
goto cleanup;
s->sent_command = 1;
cleanup:
git_str_dispose(&request);
return error;
}
static int git_proto_stream_read(
git_smart_subtransport_stream *stream,
char *buffer,
size_t buf_size,
size_t *bytes_read)
{
int error;
git_proto_stream *s = (git_proto_stream *)stream;
gitno_buffer buf;
*bytes_read = 0;
if (!s->sent_command && (error = send_command(s)) < 0)
return error;
gitno_buffer_setup_fromstream(s->io, &buf, buffer, buf_size);
if ((error = gitno_recv(&buf)) < 0)
return error;
*bytes_read = buf.offset;
return 0;
}
static int git_proto_stream_write(
git_smart_subtransport_stream *stream,
const char *buffer,
size_t len)
{
git_proto_stream *s = (git_proto_stream *)stream;
int error;
if (!s->sent_command && (error = send_command(s)) < 0)
return error;
return git_stream__write_full(s->io, buffer, len, 0);
}
static void git_proto_stream_free(git_smart_subtransport_stream *stream)
{
git_proto_stream *s;
git_subtransport *t;
if (!stream)
return;
s = (git_proto_stream *)stream;
t = OWNING_SUBTRANSPORT(s);
t->current_stream = NULL;
git_stream_close(s->io);
git_stream_free(s->io);
git__free(s->url);
git__free(s);
}
static int git_proto_stream_alloc(
git_subtransport *t,
const char *url,
const char *cmd,
const char *host,
const char *port,
git_smart_subtransport_stream **stream)
{
git_proto_stream *s;
if (!stream)
return -1;
s = git__calloc(1, sizeof(git_proto_stream));
GIT_ERROR_CHECK_ALLOC(s);
s->parent.subtransport = &t->parent;
s->parent.read = git_proto_stream_read;
s->parent.write = git_proto_stream_write;
s->parent.free = git_proto_stream_free;
s->cmd = cmd;
s->url = git__strdup(url);
if (!s->url) {
git__free(s);
return -1;
}
	if (git_socket_stream_new(&s->io, host, port) < 0) {
		git__free(s->url);
		git__free(s);
		return -1;
	}
GIT_ERROR_CHECK_VERSION(s->io, GIT_STREAM_VERSION, "git_stream");
*stream = &s->parent;
return 0;
}
static int _git_uploadpack_ls(
git_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
git_net_url urldata = GIT_NET_URL_INIT;
const char *stream_url = url;
const char *host, *port;
git_proto_stream *s;
int error;
*stream = NULL;
if (!git__prefixcmp(url, prefix_git))
stream_url += strlen(prefix_git);
if ((error = git_net_url_parse(&urldata, url)) < 0)
return error;
host = urldata.host;
port = urldata.port ? urldata.port : GIT_DEFAULT_PORT;
error = git_proto_stream_alloc(t, stream_url, cmd_uploadpack, host, port, stream);
git_net_url_dispose(&urldata);
if (error < 0) {
git_proto_stream_free(*stream);
return error;
}
s = (git_proto_stream *) *stream;
if ((error = git_stream_connect(s->io)) < 0) {
git_proto_stream_free(*stream);
return error;
}
t->current_stream = s;
return 0;
}
static int _git_uploadpack(
git_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
GIT_UNUSED(url);
if (t->current_stream) {
*stream = &t->current_stream->parent;
return 0;
}
git_error_set(GIT_ERROR_NET, "must call UPLOADPACK_LS before UPLOADPACK");
return -1;
}
static int _git_receivepack_ls(
git_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
git_net_url urldata = GIT_NET_URL_INIT;
const char *stream_url = url;
git_proto_stream *s;
int error;
*stream = NULL;
if (!git__prefixcmp(url, prefix_git))
stream_url += strlen(prefix_git);
if ((error = git_net_url_parse(&urldata, url)) < 0)
return error;
error = git_proto_stream_alloc(t, stream_url, cmd_receivepack, urldata.host, urldata.port, stream);
git_net_url_dispose(&urldata);
if (error < 0) {
git_proto_stream_free(*stream);
return error;
}
s = (git_proto_stream *) *stream;
	if ((error = git_stream_connect(s->io)) < 0) {
		git_proto_stream_free(*stream);
		return error;
	}
t->current_stream = s;
return 0;
}
static int _git_receivepack(
git_subtransport *t,
const char *url,
git_smart_subtransport_stream **stream)
{
GIT_UNUSED(url);
if (t->current_stream) {
*stream = &t->current_stream->parent;
return 0;
}
git_error_set(GIT_ERROR_NET, "must call RECEIVEPACK_LS before RECEIVEPACK");
return -1;
}
static int _git_action(
git_smart_subtransport_stream **stream,
git_smart_subtransport *subtransport,
const char *url,
git_smart_service_t action)
{
git_subtransport *t = (git_subtransport *) subtransport;
switch (action) {
case GIT_SERVICE_UPLOADPACK_LS:
return _git_uploadpack_ls(t, url, stream);
case GIT_SERVICE_UPLOADPACK:
return _git_uploadpack(t, url, stream);
case GIT_SERVICE_RECEIVEPACK_LS:
return _git_receivepack_ls(t, url, stream);
case GIT_SERVICE_RECEIVEPACK:
return _git_receivepack(t, url, stream);
}
*stream = NULL;
return -1;
}
static int _git_close(git_smart_subtransport *subtransport)
{
git_subtransport *t = (git_subtransport *) subtransport;
GIT_ASSERT(!t->current_stream);
GIT_UNUSED(t);
return 0;
}
static void _git_free(git_smart_subtransport *subtransport)
{
git_subtransport *t = (git_subtransport *) subtransport;
git__free(t);
}
int git_smart_subtransport_git(git_smart_subtransport **out, git_transport *owner, void *param)
{
git_subtransport *t;
GIT_UNUSED(param);
if (!out)
return -1;
t = git__calloc(1, sizeof(git_subtransport));
GIT_ERROR_CHECK_ALLOC(t);
t->owner = owner;
t->parent.action = _git_action;
t->parent.close = _git_close;
t->parent.free = _git_free;
*out = (git_smart_subtransport *) t;
return 0;
}
| libgit2-main | src/libgit2/transports/git.c |
/*
* Copyright (C) the libgit2 contributors. All rights reserved.
*
* This file is part of libgit2, distributed under the GNU GPL v2 with
* a Linking Exception. For full terms see the included COPYING file.
*/
#include "smart.h"
#include "git2.h"
#include "git2/sys/remote.h"
#include "refs.h"
#include "refspec.h"
#include "proxy.h"
static int git_smart__recv_cb(gitno_buffer *buf)
{
transport_smart *t = (transport_smart *) buf->cb_data;
size_t old_len, bytes_read;
int error;
GIT_ASSERT(t->current_stream);
old_len = buf->offset;
if ((error = t->current_stream->read(t->current_stream, buf->data + buf->offset, buf->len - buf->offset, &bytes_read)) < 0)
return error;
buf->offset += bytes_read;
if (t->packetsize_cb && !t->cancelled.val) {
error = t->packetsize_cb(bytes_read, t->packetsize_payload);
if (error) {
git_atomic32_set(&t->cancelled, 1);
return GIT_EUSER;
}
}
return (int)(buf->offset - old_len);
}
GIT_INLINE(int) git_smart__reset_stream(transport_smart *t, bool close_subtransport)
{
if (t->current_stream) {
t->current_stream->free(t->current_stream);
t->current_stream = NULL;
}
if (close_subtransport) {
git__free(t->url);
t->url = NULL;
if (t->wrapped->close(t->wrapped) < 0)
return -1;
}
return 0;
}
int git_smart__update_heads(transport_smart *t, git_vector *symrefs)
{
size_t i;
git_pkt *pkt;
git_vector_clear(&t->heads);
git_vector_foreach(&t->refs, i, pkt) {
git_pkt_ref *ref = (git_pkt_ref *) pkt;
if (pkt->type != GIT_PKT_REF)
continue;
if (symrefs) {
git_refspec *spec;
git_str buf = GIT_STR_INIT;
size_t j;
int error = 0;
git_vector_foreach(symrefs, j, spec) {
git_str_clear(&buf);
if (git_refspec_src_matches(spec, ref->head.name) &&
!(error = git_refspec__transform(&buf, spec, ref->head.name))) {
git__free(ref->head.symref_target);
ref->head.symref_target = git_str_detach(&buf);
}
}
git_str_dispose(&buf);
if (error < 0)
return error;
}
if (git_vector_insert(&t->heads, &ref->head) < 0)
return -1;
}
return 0;
}
static void free_symrefs(git_vector *symrefs)
{
git_refspec *spec;
size_t i;
git_vector_foreach(symrefs, i, spec) {
git_refspec__dispose(spec);
git__free(spec);
}
git_vector_free(symrefs);
}
static int git_smart__connect(
git_transport *transport,
const char *url,
int direction,
const git_remote_connect_options *connect_opts)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_smart_subtransport_stream *stream;
int error;
git_pkt *pkt;
git_pkt_ref *first;
git_vector symrefs;
git_smart_service_t service;
if (git_smart__reset_stream(t, true) < 0)
return -1;
if (git_remote_connect_options_normalize(&t->connect_opts, t->owner->repo, connect_opts) < 0)
return -1;
t->url = git__strdup(url);
GIT_ERROR_CHECK_ALLOC(t->url);
t->direction = direction;
if (GIT_DIRECTION_FETCH == t->direction) {
service = GIT_SERVICE_UPLOADPACK_LS;
} else if (GIT_DIRECTION_PUSH == t->direction) {
service = GIT_SERVICE_RECEIVEPACK_LS;
} else {
git_error_set(GIT_ERROR_NET, "invalid direction");
return -1;
}
if ((error = t->wrapped->action(&stream, t->wrapped, t->url, service)) < 0)
return error;
/* Save off the current stream (i.e. socket) that we are working with */
t->current_stream = stream;
gitno_buffer_setup_callback(&t->buffer, t->buffer_data, sizeof(t->buffer_data), git_smart__recv_cb, t);
/* 2 flushes for RPC; 1 for stateful */
if ((error = git_smart__store_refs(t, t->rpc ? 2 : 1)) < 0)
return error;
/* Strip the comment packet for RPC */
if (t->rpc) {
pkt = (git_pkt *)git_vector_get(&t->refs, 0);
if (!pkt || GIT_PKT_COMMENT != pkt->type) {
git_error_set(GIT_ERROR_NET, "invalid response");
return -1;
} else {
/* Remove the comment pkt from the list */
git_vector_remove(&t->refs, 0);
git__free(pkt);
}
}
/* We now have loaded the refs. */
t->have_refs = 1;
pkt = (git_pkt *)git_vector_get(&t->refs, 0);
if (pkt && GIT_PKT_REF != pkt->type) {
git_error_set(GIT_ERROR_NET, "invalid response");
return -1;
}
first = (git_pkt_ref *)pkt;
if ((error = git_vector_init(&symrefs, 1, NULL)) < 0)
return error;
/* Detect capabilities */
if ((error = git_smart__detect_caps(first, &t->caps, &symrefs)) == 0) {
/* If the only ref in the list is capabilities^{} with OID_ZERO, remove it */
if (1 == t->refs.length && !strcmp(first->head.name, "capabilities^{}") &&
git_oid_is_zero(&first->head.oid)) {
git_vector_clear(&t->refs);
git_pkt_free((git_pkt *)first);
}
/* Keep a list of heads for _ls */
git_smart__update_heads(t, &symrefs);
} else if (error == GIT_ENOTFOUND) {
/* There was no ref packet received, or the cap list was empty */
error = 0;
} else {
git_error_set(GIT_ERROR_NET, "invalid response");
goto cleanup;
}
if (t->rpc && (error = git_smart__reset_stream(t, false)) < 0)
goto cleanup;
/* We're now logically connected. */
t->connected = 1;
cleanup:
free_symrefs(&symrefs);
return error;
}
static int git_smart__set_connect_opts(
git_transport *transport,
const git_remote_connect_options *opts)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
if (!t->connected) {
git_error_set(GIT_ERROR_NET, "cannot reconfigure a transport that is not connected");
return -1;
}
return git_remote_connect_options_normalize(&t->connect_opts, t->owner->repo, opts);
}
static int git_smart__capabilities(unsigned int *capabilities, git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
*capabilities = 0;
if (t->caps.want_tip_sha1)
*capabilities |= GIT_REMOTE_CAPABILITY_TIP_OID;
if (t->caps.want_reachable_sha1)
*capabilities |= GIT_REMOTE_CAPABILITY_REACHABLE_OID;
return 0;
}
static int git_smart__ls(const git_remote_head ***out, size_t *size, git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
if (!t->have_refs) {
git_error_set(GIT_ERROR_NET, "the transport has not yet loaded the refs");
return -1;
}
*out = (const git_remote_head **) t->heads.contents;
*size = t->heads.length;
return 0;
}
int git_smart__negotiation_step(git_transport *transport, void *data, size_t len)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_smart_subtransport_stream *stream;
int error;
if (t->rpc && git_smart__reset_stream(t, false) < 0)
return -1;
if (GIT_DIRECTION_FETCH != t->direction) {
git_error_set(GIT_ERROR_NET, "this operation is only valid for fetch");
return -1;
}
if ((error = t->wrapped->action(&stream, t->wrapped, t->url, GIT_SERVICE_UPLOADPACK)) < 0)
return error;
/* If this is a stateful implementation, the stream we get back should be the same */
GIT_ASSERT(t->rpc || t->current_stream == stream);
/* Save off the current stream (i.e. socket) that we are working with */
t->current_stream = stream;
if ((error = stream->write(stream, (const char *)data, len)) < 0)
return error;
gitno_buffer_setup_callback(&t->buffer, t->buffer_data, sizeof(t->buffer_data), git_smart__recv_cb, t);
return 0;
}
int git_smart__get_push_stream(transport_smart *t, git_smart_subtransport_stream **stream)
{
int error;
if (t->rpc && git_smart__reset_stream(t, false) < 0)
return -1;
if (GIT_DIRECTION_PUSH != t->direction) {
git_error_set(GIT_ERROR_NET, "this operation is only valid for push");
return -1;
}
if ((error = t->wrapped->action(stream, t->wrapped, t->url, GIT_SERVICE_RECEIVEPACK)) < 0)
return error;
/* If this is a stateful implementation, the stream we get back should be the same */
GIT_ASSERT(t->rpc || t->current_stream == *stream);
/* Save off the current stream (i.e. socket) that we are working with */
t->current_stream = *stream;
gitno_buffer_setup_callback(&t->buffer, t->buffer_data, sizeof(t->buffer_data), git_smart__recv_cb, t);
return 0;
}
static void git_smart__cancel(git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_atomic32_set(&t->cancelled, 1);
}
static int git_smart__is_connected(git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
return t->connected;
}
static int git_smart__close(git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_vector *common = &t->common;
unsigned int i;
git_pkt *p;
int ret;
git_smart_subtransport_stream *stream;
const char flush[] = "0000";
/*
* If we're still connected at this point and not using RPC,
* we should say goodbye by sending a flush, or git-daemon
* will complain that we disconnected unexpectedly.
*/
if (t->connected && !t->rpc &&
!t->wrapped->action(&stream, t->wrapped, t->url, GIT_SERVICE_UPLOADPACK)) {
t->current_stream->write(t->current_stream, flush, 4);
}
ret = git_smart__reset_stream(t, true);
git_vector_foreach(common, i, p)
git_pkt_free(p);
git_vector_free(common);
if (t->url) {
git__free(t->url);
t->url = NULL;
}
t->connected = 0;
return ret;
}
static void git_smart__free(git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_vector *refs = &t->refs;
unsigned int i;
git_pkt *p;
/* Make sure that the current stream is closed, if we have one. */
git_smart__close(transport);
/* Free the subtransport */
t->wrapped->free(t->wrapped);
git_vector_free(&t->heads);
git_vector_foreach(refs, i, p)
git_pkt_free(p);
git_vector_free(refs);
git_remote_connect_options_dispose(&t->connect_opts);
git__free(t);
}
static int ref_name_cmp(const void *a, const void *b)
{
const git_pkt_ref *ref_a = a, *ref_b = b;
return strcmp(ref_a->head.name, ref_b->head.name);
}
int git_transport_smart_certificate_check(git_transport *transport, git_cert *cert, int valid, const char *hostname)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_remote_connect_options *connect_opts = &t->connect_opts;
GIT_ASSERT_ARG(transport);
GIT_ASSERT_ARG(cert);
GIT_ASSERT_ARG(hostname);
if (!connect_opts->callbacks.certificate_check)
return GIT_PASSTHROUGH;
return connect_opts->callbacks.certificate_check(cert, valid, hostname, connect_opts->callbacks.payload);
}
int git_transport_smart_credentials(git_credential **out, git_transport *transport, const char *user, int methods)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
git_remote_connect_options *connect_opts = &t->connect_opts;
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(transport);
if (!connect_opts->callbacks.credentials)
return GIT_PASSTHROUGH;
return connect_opts->callbacks.credentials(out, t->url, user, methods, connect_opts->callbacks.payload);
}
int git_transport_remote_connect_options(
git_remote_connect_options *out,
git_transport *transport)
{
transport_smart *t = GIT_CONTAINER_OF(transport, transport_smart, parent);
GIT_ASSERT_ARG(out);
GIT_ASSERT_ARG(transport);
return git_remote_connect_options_dup(out, &t->connect_opts);
}
int git_transport_smart(git_transport **out, git_remote *owner, void *param)
{
transport_smart *t;
git_smart_subtransport_definition *definition = (git_smart_subtransport_definition *)param;
if (!param)
return -1;
t = git__calloc(1, sizeof(transport_smart));
GIT_ERROR_CHECK_ALLOC(t);
t->parent.version = GIT_TRANSPORT_VERSION;
t->parent.connect = git_smart__connect;
t->parent.set_connect_opts = git_smart__set_connect_opts;
t->parent.capabilities = git_smart__capabilities;
t->parent.close = git_smart__close;
t->parent.free = git_smart__free;
t->parent.negotiate_fetch = git_smart__negotiate_fetch;
t->parent.download_pack = git_smart__download_pack;
t->parent.push = git_smart__push;
t->parent.ls = git_smart__ls;
t->parent.is_connected = git_smart__is_connected;
t->parent.cancel = git_smart__cancel;
t->owner = owner;
t->rpc = definition->rpc;
if (git_vector_init(&t->refs, 16, ref_name_cmp) < 0) {
git__free(t);
return -1;
}
if (git_vector_init(&t->heads, 16, ref_name_cmp) < 0) {
git__free(t);
return -1;
}
if (definition->callback(&t->wrapped, &t->parent, definition->param) < 0) {
git__free(t);
return -1;
}
*out = (git_transport *) t;
return 0;
}
| libgit2-main | src/libgit2/transports/smart.c |
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>
#include <signal.h>
#include <math.h>
#include <time.h>
#define PAGE_SIZE 4096
#define FREQ 3.9	/* CPU clock in GHz, used only to print an approximate cycle count */
static int test_hugepage = 0;
static int random_list = 0;
static void die(const char *fmt, ...)
{
va_list argp;
va_start(argp, fmt);
vfprintf(stderr, fmt, argp);
va_end(argp);
fputc('\n', stderr);
exit(1);
}
static volatile int stop = 0;
void alarm_handler(int sig)
{
stop = 1;
}
unsigned long usec_diff(struct timeval *a, struct timeval *b)
{
unsigned long usec;
usec = (b->tv_sec - a->tv_sec)*1000000;
usec += b->tv_usec - a->tv_usec;
return usec;
}
/*
 * Warmup run.
 *
 * This is mainly to make sure that we can go around the
 * map without timing any cache writeback activity left
 * over from creating the map.
 */
static unsigned long warmup(void *map)
{
unsigned int offset = 0;
struct timeval start, end;
gettimeofday(&start, NULL);
do {
offset = *(volatile unsigned int *)(map + offset);
} while (offset);
gettimeofday(&end, NULL);
return usec_diff(&start, &end);
}
static double do_test(void *map)
{
unsigned long count = 0, offset = 0, usec;
struct timeval start, end;
struct itimerval itval = {
.it_interval = { 0, 0 },
.it_value = { 0, 0 },
};
/*
* Do one run without counting, and make sure we can do
* at least five runs, and have at least about 0.2s of
* timing granularity (0.2s selected randomly to make the
* run-of-five take 1s in the fast case).
*/
usec = warmup(map) * 5;
if (usec < 200000)
usec = 200000;
itval.it_value.tv_sec = usec / 1000000;
itval.it_value.tv_usec = usec % 1000000;
stop = 0;
signal(SIGALRM, alarm_handler);
setitimer(ITIMER_REAL, &itval, NULL);
gettimeofday(&start, NULL);
do {
count++;
offset = *(unsigned int *)(map + offset);
} while (!stop);
gettimeofday(&end, NULL);
usec = usec_diff(&start, &end);
// Make sure the compiler doesn't compile away offset
*(volatile unsigned int *)(map + offset);
// return cycle time in ns
return 1000 * (double) usec / count;
}
static unsigned long get_num(const char *str)
{
char *end, c;
unsigned long val;
if (!str)
return 0;
val = strtoul(str, &end, 0);
if (!val || val == ULONG_MAX)
return 0;
while ((c = *end++) != 0) {
switch (c) {
case 'k':
val <<= 10;
break;
case 'M':
val <<= 20;
break;
case 'G':
val <<= 30;
break;
default:
return 0;
}
}
return val;
}
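/*
 * For example, get_num("64k") == 65536 and get_num("2M") == 2097152;
 * 0 is returned on any parse error, so callers treat 0 as invalid.
 */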
static void randomize_map(void *map, unsigned long size, unsigned long stride)
{
unsigned long off;
unsigned int *lastpos, *rnd;
int n;
rnd = calloc(size / stride + 1, sizeof(unsigned int));
if (!rnd)
die("out of memory");
/* Create sorted list of offsets */
for (n = 0, off = 0; off < size; n++, off += stride)
rnd[n] = off;
/* Randomize the offsets */
for (n = 0, off = 0; off < size; n++, off += stride) {
unsigned int m = (unsigned long)random() % (size / stride);
unsigned int tmp = rnd[n];
rnd[n] = rnd[m];
rnd[m] = tmp;
}
/* Create a circular list from the random offsets */
lastpos = map;
for (n = 0, off = 0; off < size; n++, off += stride) {
lastpos = map + rnd[n];
*lastpos = rnd[n+1];
}
*lastpos = rnd[0];
free(rnd);
}
// Hugepage size
#define HUGEPAGE (2*1024*1024)
static void *create_map(void *map, unsigned long size, unsigned long stride)
{
unsigned int flags = MAP_PRIVATE | MAP_ANONYMOUS;
unsigned long off, mapsize;
unsigned int *lastpos;
/*
* If we're using hugepages, we will just re-use any existing
* hugepage map - the issues with different physical page
 * allocations for cache associativity testing just aren't worth
* it with large pages.
*
* With regular pages, just mmap over the old allocation to
* force new page allocations. Hopefully this will then make
* the virtual mapping different enough to matter for timings.
*/
if (map) {
if (test_hugepage)
return map;
flags |= MAP_FIXED;
}
mapsize = size;
if (test_hugepage)
mapsize += 2*HUGEPAGE;
map = mmap(map, mapsize, PROT_READ | PROT_WRITE, flags, -1, 0);
if (map == MAP_FAILED)
die("mmap failed");
if (test_hugepage) {
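		/*
		 * Round the map start up to the next 2MB boundary so the
		 * madvise() below sees a hugepage-aligned range, e.g. a map
		 * at 0x00201000 becomes 0x00400000; the 2*HUGEPAGE of slack
		 * added to mapsize above guarantees this stays in bounds.
		 */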
unsigned long mapstart = (unsigned long) map;
mapstart += HUGEPAGE-1;
mapstart &= ~(HUGEPAGE-1);
map = (void *)mapstart;
mapsize = size + HUGEPAGE-1;
mapsize &= ~(HUGEPAGE-1);
madvise(map, mapsize, MADV_HUGEPAGE);
} else {
/*
* Christian Borntraeger tested on an s390, and had
* transparent hugepages set to "always", which meant
* that the small-page case never triggered at all
* unless you explicitly ask for it.
*/
madvise(map, mapsize, MADV_NOHUGEPAGE);
}
lastpos = map;
for (off = 0; off < size; off += stride) {
lastpos = map + off;
*lastpos = off + stride;
}
*lastpos = 0;
return map;
}
int main(int argc, char **argv)
{
unsigned long stride, size;
const char *arg;
void *map;
double cycles;
srandom(time(NULL));
while ((arg = argv[1]) != NULL) {
if (*arg != '-')
break;
for (;;) {
switch (*++arg) {
case 0:
break;
case 'H':
test_hugepage = 1;
continue;
case 'r':
random_list = 1;
continue;
default:
die("Unknown flag '%s'", arg);
}
break;
}
argv++;
}
size = get_num(argv[1]);
stride = get_num(argv[2]);
if (stride < 4 || size < stride)
die("bad arguments: test-tlb [-H] <size> <stride>");
map = NULL;
cycles = 1e10;
for (int i = 0; i < 5; i++) {
double d;
map = create_map(map, size, stride);
if (random_list)
randomize_map(map, size, stride);
d = do_test(map);
if (d < cycles)
cycles = d;
}
printf("%6.2fns (~%.1f cycles)\n",
cycles, cycles*FREQ);
return 0;
}
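/*
 * Illustrative usage (numbers are hypothetical, not measured):
 *
 *	./test-tlb 4k 4k	# chain fits in L1, ~1ns per load
 *	./test-tlb 64M 4k	# page-sized strides, TLB misses dominate
 *	./test-tlb -H -r 64M 4k	# hugepages, randomized chain order
 *
 * Sizes take k/M/G suffixes (see get_num() above); -H maps the test
 * area with transparent hugepages and -r randomizes the pointer chain
 * to defeat the hardware prefetcher.
 */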
| test-tlb-master | test-tlb.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
#include <linux/ext2_fs.h>
#include <linux/romfs_fs.h>
#include <uapi/linux/cramfs_fs.h>
#include <linux/initrd.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "do_mounts.h"
#include "../fs/squashfs/squashfs_fs.h"
#include <linux/decompress/generic.h>
static struct file *in_file, *out_file;
static loff_t in_pos, out_pos;
static int __init prompt_ramdisk(char *str)
{
pr_warn("ignoring the deprecated prompt_ramdisk= option\n");
return 1;
}
__setup("prompt_ramdisk=", prompt_ramdisk);
int __initdata rd_image_start; /* starting block # of image */
static int __init ramdisk_start_setup(char *str)
{
rd_image_start = simple_strtol(str,NULL,0);
return 1;
}
__setup("ramdisk_start=", ramdisk_start_setup);
static int __init crd_load(decompress_fn deco);
/*
* This routine tries to find a RAM disk image to load, and returns the
* number of blocks to read for a non-compressed image, 0 if the image
* is a compressed image, and -1 if an image with the right magic
* numbers could not be found.
*
* We currently check for the following magic numbers:
* minix
* ext2
* romfs
* cramfs
* squashfs
* gzip
* bzip2
* lzma
* xz
* lzo
* lz4
*/
static int __init
identify_ramdisk_image(struct file *file, loff_t pos,
decompress_fn *decompressor)
{
const int size = 512;
struct minix_super_block *minixsb;
struct romfs_super_block *romfsb;
struct cramfs_super *cramfsb;
struct squashfs_super_block *squashfsb;
int nblocks = -1;
unsigned char *buf;
const char *compress_name;
unsigned long n;
int start_block = rd_image_start;
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
minixsb = (struct minix_super_block *) buf;
romfsb = (struct romfs_super_block *) buf;
cramfsb = (struct cramfs_super *) buf;
squashfsb = (struct squashfs_super_block *) buf;
memset(buf, 0xe5, size);
/*
* Read block 0 to test for compressed kernel
*/
pos = start_block * BLOCK_SIZE;
kernel_read(file, buf, size, &pos);
*decompressor = decompress_method(buf, size, &compress_name);
if (compress_name) {
printk(KERN_NOTICE "RAMDISK: %s image found at block %d\n",
compress_name, start_block);
if (!*decompressor)
printk(KERN_EMERG
"RAMDISK: %s decompressor not configured!\n",
compress_name);
nblocks = 0;
goto done;
}
/* romfs is at block zero too */
if (romfsb->word0 == ROMSB_WORD0 &&
romfsb->word1 == ROMSB_WORD1) {
printk(KERN_NOTICE
"RAMDISK: romfs filesystem found at block %d\n",
start_block);
nblocks = (ntohl(romfsb->size)+BLOCK_SIZE-1)>>BLOCK_SIZE_BITS;
goto done;
}
if (cramfsb->magic == CRAMFS_MAGIC) {
printk(KERN_NOTICE
"RAMDISK: cramfs filesystem found at block %d\n",
start_block);
nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
goto done;
}
/* squashfs is at block zero too */
if (le32_to_cpu(squashfsb->s_magic) == SQUASHFS_MAGIC) {
printk(KERN_NOTICE
"RAMDISK: squashfs filesystem found at block %d\n",
start_block);
nblocks = (le64_to_cpu(squashfsb->bytes_used) + BLOCK_SIZE - 1)
>> BLOCK_SIZE_BITS;
goto done;
}
/*
* Read 512 bytes further to check if cramfs is padded
*/
pos = start_block * BLOCK_SIZE + 0x200;
kernel_read(file, buf, size, &pos);
if (cramfsb->magic == CRAMFS_MAGIC) {
printk(KERN_NOTICE
"RAMDISK: cramfs filesystem found at block %d\n",
start_block);
nblocks = (cramfsb->size + BLOCK_SIZE - 1) >> BLOCK_SIZE_BITS;
goto done;
}
/*
* Read block 1 to test for minix and ext2 superblock
*/
pos = (start_block + 1) * BLOCK_SIZE;
kernel_read(file, buf, size, &pos);
/* Try minix */
if (minixsb->s_magic == MINIX_SUPER_MAGIC ||
minixsb->s_magic == MINIX_SUPER_MAGIC2) {
printk(KERN_NOTICE
"RAMDISK: Minix filesystem found at block %d\n",
start_block);
nblocks = minixsb->s_nzones << minixsb->s_log_zone_size;
goto done;
}
/* Try ext2 */
n = ext2_image_size(buf);
if (n) {
printk(KERN_NOTICE
"RAMDISK: ext2 filesystem found at block %d\n",
start_block);
nblocks = n;
goto done;
}
printk(KERN_NOTICE
"RAMDISK: Couldn't find valid RAM disk image starting at %d.\n",
start_block);
done:
kfree(buf);
return nblocks;
}
static unsigned long nr_blocks(struct file *file)
{
struct inode *inode = file->f_mapping->host;
if (!S_ISBLK(inode->i_mode))
return 0;
return i_size_read(inode) >> 10;
}
int __init rd_load_image(char *from)
{
int res = 0;
unsigned long rd_blocks, devblocks;
int nblocks, i;
char *buf = NULL;
unsigned short rotate = 0;
decompress_fn decompressor = NULL;
#if !defined(CONFIG_S390)
char rotator[4] = { '|' , '/' , '-' , '\\' };
#endif
out_file = filp_open("/dev/ram", O_RDWR, 0);
if (IS_ERR(out_file))
goto out;
in_file = filp_open(from, O_RDONLY, 0);
if (IS_ERR(in_file))
goto noclose_input;
in_pos = rd_image_start * BLOCK_SIZE;
nblocks = identify_ramdisk_image(in_file, in_pos, &decompressor);
if (nblocks < 0)
goto done;
if (nblocks == 0) {
if (crd_load(decompressor) == 0)
goto successful_load;
goto done;
}
/*
* NOTE NOTE: nblocks is not actually blocks but
* the number of kibibytes of data to load into a ramdisk.
*/
rd_blocks = nr_blocks(out_file);
if (nblocks > rd_blocks) {
printk("RAMDISK: image too big! (%dKiB/%ldKiB)\n",
nblocks, rd_blocks);
goto done;
}
/*
* OK, time to copy in the data
*/
if (strcmp(from, "/initrd.image") == 0)
devblocks = nblocks;
else
devblocks = nr_blocks(in_file);
if (devblocks == 0) {
printk(KERN_ERR "RAMDISK: could not determine device size\n");
goto done;
}
buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
if (!buf) {
printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
goto done;
}
printk(KERN_NOTICE "RAMDISK: Loading %dKiB [%ld disk%s] into ram disk... ",
nblocks, ((nblocks-1)/devblocks)+1, nblocks>devblocks ? "s" : "");
for (i = 0; i < nblocks; i++) {
if (i && (i % devblocks == 0)) {
pr_cont("done disk #1.\n");
rotate = 0;
fput(in_file);
break;
}
kernel_read(in_file, buf, BLOCK_SIZE, &in_pos);
kernel_write(out_file, buf, BLOCK_SIZE, &out_pos);
#if !defined(CONFIG_S390)
if (!(i % 16)) {
pr_cont("%c\b", rotator[rotate & 0x3]);
rotate++;
}
#endif
}
pr_cont("done.\n");
successful_load:
res = 1;
done:
fput(in_file);
noclose_input:
fput(out_file);
out:
kfree(buf);
init_unlink("/dev/ram");
return res;
}
int __init rd_load_disk(int n)
{
create_dev("/dev/root", ROOT_DEV);
create_dev("/dev/ram", MKDEV(RAMDISK_MAJOR, n));
return rd_load_image("/dev/root");
}
static int exit_code;
static int decompress_error;
static long __init compr_fill(void *buf, unsigned long len)
{
long r = kernel_read(in_file, buf, len, &in_pos);
if (r < 0)
printk(KERN_ERR "RAMDISK: error while reading compressed data");
else if (r == 0)
printk(KERN_ERR "RAMDISK: EOF while reading compressed data");
return r;
}
static long __init compr_flush(void *window, unsigned long outcnt)
{
long written = kernel_write(out_file, window, outcnt, &out_pos);
if (written != outcnt) {
if (decompress_error == 0)
printk(KERN_ERR
"RAMDISK: incomplete write (%ld != %ld)\n",
written, outcnt);
decompress_error = 1;
return -1;
}
return outcnt;
}
static void __init error(char *x)
{
printk(KERN_ERR "%s\n", x);
exit_code = 1;
decompress_error = 1;
}
static int __init crd_load(decompress_fn deco)
{
int result;
if (!deco) {
pr_emerg("Invalid ramdisk decompression routine. "
"Select appropriate config option.\n");
panic("Could not decompress initial ramdisk image.");
}
result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error);
if (decompress_error)
result = 1;
return result;
}
| linux-master | init/do_mounts_rd.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/fd.h>
#include <linux/tty.h>
#include <linux/suspend.h>
#include <linux/root_dev.h>
#include <linux/security.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/initrd.h>
#include <linux/async.h>
#include <linux/fs_struct.h>
#include <linux/slab.h>
#include <linux/ramfs.h>
#include <linux/shmem_fs.h>
#include <linux/ktime.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_fs_sb.h>
#include <linux/nfs_mount.h>
#include <linux/raid/detect.h>
#include <uapi/linux/mount.h>
#include "do_mounts.h"
int root_mountflags = MS_RDONLY | MS_SILENT;
static char __initdata saved_root_name[64];
static int root_wait;
dev_t ROOT_DEV;
static int __init load_ramdisk(char *str)
{
pr_warn("ignoring the deprecated load_ramdisk= option\n");
return 1;
}
__setup("load_ramdisk=", load_ramdisk);
static int __init readonly(char *str)
{
if (*str)
return 0;
root_mountflags |= MS_RDONLY;
return 1;
}
static int __init readwrite(char *str)
{
if (*str)
return 0;
root_mountflags &= ~MS_RDONLY;
return 1;
}
__setup("ro", readonly);
__setup("rw", readwrite);
static int __init root_dev_setup(char *line)
{
strscpy(saved_root_name, line, sizeof(saved_root_name));
return 1;
}
__setup("root=", root_dev_setup);
static int __init rootwait_setup(char *str)
{
if (*str)
return 0;
root_wait = -1;
return 1;
}
__setup("rootwait", rootwait_setup);
static int __init rootwait_timeout_setup(char *str)
{
int sec;
if (kstrtoint(str, 0, &sec) || sec < 0) {
pr_warn("ignoring invalid rootwait value\n");
goto ignore;
}
if (check_mul_overflow(sec, MSEC_PER_SEC, &root_wait)) {
pr_warn("ignoring excessive rootwait value\n");
goto ignore;
}
return 1;
ignore:
/* Fallback to indefinite wait */
root_wait = -1;
return 1;
}
__setup("rootwait=", rootwait_timeout_setup);
static char * __initdata root_mount_data;
static int __init root_data_setup(char *str)
{
root_mount_data = str;
return 1;
}
static char * __initdata root_fs_names;
static int __init fs_names_setup(char *str)
{
root_fs_names = str;
return 1;
}
static unsigned int __initdata root_delay;
static int __init root_delay_setup(char *str)
{
root_delay = simple_strtoul(str, NULL, 0);
return 1;
}
__setup("rootflags=", root_data_setup);
__setup("rootfstype=", fs_names_setup);
__setup("rootdelay=", root_delay_setup);
/* This can return zero length strings. Caller should check */
static int __init split_fs_names(char *page, size_t size)
{
int count = 1;
char *p = page;
strscpy(p, root_fs_names, size);
while (*p++) {
if (p[-1] == ',') {
p[-1] = '\0';
count++;
}
}
return count;
}
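/*
 * Illustrative example: with root_fs_names = "ext4,xfs,", the page ends
 * up holding "ext4\0xfs\0\0" and the function returns 3 -- the trailing
 * comma produces one of the zero-length entries callers must skip.
 */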
static int __init do_mount_root(const char *name, const char *fs,
const int flags, const void *data)
{
struct super_block *s;
struct page *p = NULL;
char *data_page = NULL;
int ret;
if (data) {
/* init_mount() requires a full page as fifth argument */
p = alloc_page(GFP_KERNEL);
if (!p)
return -ENOMEM;
data_page = page_address(p);
/* zero-pad. init_mount() will make sure it's terminated */
strncpy(data_page, data, PAGE_SIZE);
}
ret = init_mount(name, "/root", fs, flags, data_page);
if (ret)
goto out;
init_chdir("/root");
s = current->fs->pwd.dentry->d_sb;
ROOT_DEV = s->s_dev;
printk(KERN_INFO
"VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
s->s_type->name,
sb_rdonly(s) ? " readonly" : "",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
out:
if (p)
put_page(p);
return ret;
}
void __init mount_root_generic(char *name, char *pretty_name, int flags)
{
struct page *page = alloc_page(GFP_KERNEL);
char *fs_names = page_address(page);
char *p;
char b[BDEVNAME_SIZE];
int num_fs, i;
scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
if (root_fs_names)
num_fs = split_fs_names(fs_names, PAGE_SIZE);
else
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
retry:
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
int err;
if (!*p)
continue;
err = do_mount_root(name, p, flags, root_mount_data);
switch (err) {
case 0:
goto out;
case -EACCES:
case -EINVAL:
continue;
}
/*
		 * Allow the user to distinguish between a failed sys_open
		 * and a bad superblock on the root device, and give them
		 * a list of the available devices.
*/
printk("VFS: Cannot open root device \"%s\" or %s: error %d\n",
pretty_name, b, err);
printk("Please append a correct \"root=\" boot option; here are the available partitions:\n");
printk_all_partitions();
if (root_fs_names)
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
if (!num_fs)
pr_err("Can't find any bdev filesystem to be used for mount!\n");
else {
pr_err("List of all bdev filesystems:\n");
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1)
pr_err(" %s", p);
pr_err("\n");
}
panic("VFS: Unable to mount root fs on %s", b);
}
if (!(flags & SB_RDONLY)) {
flags |= SB_RDONLY;
goto retry;
}
printk("List of all partitions:\n");
printk_all_partitions();
printk("No filesystem could mount root, tried: ");
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1)
printk(" %s", p);
printk("\n");
panic("VFS: Unable to mount root fs on %s", b);
out:
put_page(page);
}
#ifdef CONFIG_ROOT_NFS
#define NFSROOT_TIMEOUT_MIN 5
#define NFSROOT_TIMEOUT_MAX 30
#define NFSROOT_RETRY_MAX 5
static void __init mount_nfs_root(void)
{
char *root_dev, *root_data;
unsigned int timeout;
int try;
if (nfs_root_data(&root_dev, &root_data))
goto fail;
/*
* The server or network may not be ready, so try several
* times. Stop after a few tries in case the client wants
* to fall back to other boot methods.
*/
timeout = NFSROOT_TIMEOUT_MIN;
for (try = 1; ; try++) {
if (!do_mount_root(root_dev, "nfs", root_mountflags, root_data))
return;
if (try > NFSROOT_RETRY_MAX)
break;
/* Wait, in case the server refused us immediately */
ssleep(timeout);
timeout <<= 1;
if (timeout > NFSROOT_TIMEOUT_MAX)
timeout = NFSROOT_TIMEOUT_MAX;
}
fail:
pr_err("VFS: Unable to mount root fs via NFS.\n");
}
#else
static inline void mount_nfs_root(void)
{
}
#endif /* CONFIG_ROOT_NFS */
#ifdef CONFIG_CIFS_ROOT
#define CIFSROOT_TIMEOUT_MIN 5
#define CIFSROOT_TIMEOUT_MAX 30
#define CIFSROOT_RETRY_MAX 5
static void __init mount_cifs_root(void)
{
char *root_dev, *root_data;
unsigned int timeout;
int try;
if (cifs_root_data(&root_dev, &root_data))
goto fail;
timeout = CIFSROOT_TIMEOUT_MIN;
for (try = 1; ; try++) {
if (!do_mount_root(root_dev, "cifs", root_mountflags,
root_data))
return;
if (try > CIFSROOT_RETRY_MAX)
break;
ssleep(timeout);
timeout <<= 1;
if (timeout > CIFSROOT_TIMEOUT_MAX)
timeout = CIFSROOT_TIMEOUT_MAX;
}
fail:
pr_err("VFS: Unable to mount root fs via SMB.\n");
}
#else
static inline void mount_cifs_root(void)
{
}
#endif /* CONFIG_CIFS_ROOT */
static bool __init fs_is_nodev(char *fstype)
{
struct file_system_type *fs = get_fs_type(fstype);
bool ret = false;
if (fs) {
ret = !(fs->fs_flags & FS_REQUIRES_DEV);
put_filesystem(fs);
}
return ret;
}
static int __init mount_nodev_root(char *root_device_name)
{
char *fs_names, *fstype;
int err = -EINVAL;
int num_fs, i;
fs_names = (void *)__get_free_page(GFP_KERNEL);
if (!fs_names)
return -EINVAL;
num_fs = split_fs_names(fs_names, PAGE_SIZE);
for (i = 0, fstype = fs_names; i < num_fs;
i++, fstype += strlen(fstype) + 1) {
if (!*fstype)
continue;
if (!fs_is_nodev(fstype))
continue;
err = do_mount_root(root_device_name, fstype, root_mountflags,
root_mount_data);
if (!err)
break;
}
free_page((unsigned long)fs_names);
return err;
}
#ifdef CONFIG_BLOCK
static void __init mount_block_root(char *root_device_name)
{
int err = create_dev("/dev/root", ROOT_DEV);
if (err < 0)
pr_emerg("Failed to create /dev/root: %d\n", err);
mount_root_generic("/dev/root", root_device_name, root_mountflags);
}
#else
static inline void mount_block_root(char *root_device_name)
{
}
#endif /* CONFIG_BLOCK */
void __init mount_root(char *root_device_name)
{
switch (ROOT_DEV) {
case Root_NFS:
mount_nfs_root();
break;
case Root_CIFS:
mount_cifs_root();
break;
case Root_Generic:
mount_root_generic(root_device_name, root_device_name,
root_mountflags);
break;
case 0:
if (root_device_name && root_fs_names &&
mount_nodev_root(root_device_name) == 0)
break;
fallthrough;
default:
mount_block_root(root_device_name);
break;
}
}
/* wait for any asynchronous scanning to complete */
static void __init wait_for_root(char *root_device_name)
{
ktime_t end;
if (ROOT_DEV != 0)
return;
pr_info("Waiting for root device %s...\n", root_device_name);
end = ktime_add_ms(ktime_get_raw(), root_wait);
while (!driver_probe_done() ||
early_lookup_bdev(root_device_name, &ROOT_DEV) < 0) {
msleep(5);
if (root_wait > 0 && ktime_after(ktime_get_raw(), end))
break;
}
async_synchronize_full();
}
static dev_t __init parse_root_device(char *root_device_name)
{
int error;
dev_t dev;
if (!strncmp(root_device_name, "mtd", 3) ||
!strncmp(root_device_name, "ubi", 3))
return Root_Generic;
if (strcmp(root_device_name, "/dev/nfs") == 0)
return Root_NFS;
if (strcmp(root_device_name, "/dev/cifs") == 0)
return Root_CIFS;
if (strcmp(root_device_name, "/dev/ram") == 0)
return Root_RAM0;
error = early_lookup_bdev(root_device_name, &dev);
if (error) {
if (error == -EINVAL && root_wait) {
pr_err("Disabling rootwait; root= is invalid.\n");
root_wait = 0;
}
return 0;
}
return dev;
}
/*
* Prepare the namespace - decide what/where to mount, load ramdisks, etc.
*/
void __init prepare_namespace(void)
{
if (root_delay) {
printk(KERN_INFO "Waiting %d sec before mounting root device...\n",
root_delay);
ssleep(root_delay);
}
/*
* wait for the known devices to complete their probing
*
* Note: this is a potential source of long boot delays.
* For example, it is not atypical to wait 5 seconds here
* for the touchpad of a laptop to initialize.
*/
wait_for_device_probe();
md_run_setup();
if (saved_root_name[0])
ROOT_DEV = parse_root_device(saved_root_name);
if (initrd_load(saved_root_name))
goto out;
if (root_wait)
wait_for_root(saved_root_name);
mount_root(saved_root_name);
out:
devtmpfs_mount();
init_mount(".", "/", NULL, MS_MOVE, NULL);
init_chroot(".");
}
static bool is_tmpfs;
static int rootfs_init_fs_context(struct fs_context *fc)
{
if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs)
return shmem_init_fs_context(fc);
return ramfs_init_fs_context(fc);
}
struct file_system_type rootfs_fs_type = {
.name = "rootfs",
.init_fs_context = rootfs_init_fs_context,
.kill_sb = kill_litter_super,
};
void __init init_rootfs(void)
{
if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] &&
(!root_fs_names || strstr(root_fs_names, "tmpfs")))
is_tmpfs = true;
}
| linux-master | init/do_mounts.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/unistd.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/minix_fs.h>
#include <linux/romfs_fs.h>
#include <linux/initrd.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kmod.h>
#include <uapi/linux/mount.h>
#include "do_mounts.h"
unsigned long initrd_start, initrd_end;
int initrd_below_start_ok;
static unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */
static int __initdata mount_initrd = 1;
phys_addr_t phys_initrd_start __initdata;
unsigned long phys_initrd_size __initdata;
#ifdef CONFIG_SYSCTL
static struct ctl_table kern_do_mounts_initrd_table[] = {
{
.procname = "real-root-dev",
.data = &real_root_dev,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static __init int kernel_do_mounts_initrd_sysctls_init(void)
{
register_sysctl_init("kernel", kern_do_mounts_initrd_table);
return 0;
}
late_initcall(kernel_do_mounts_initrd_sysctls_init);
#endif /* CONFIG_SYSCTL */
static int __init no_initrd(char *str)
{
mount_initrd = 0;
return 1;
}
__setup("noinitrd", no_initrd);
static int __init early_initrdmem(char *p)
{
phys_addr_t start;
unsigned long size;
char *endp;
start = memparse(p, &endp);
if (*endp == ',') {
size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
return 0;
}
early_param("initrdmem", early_initrdmem);
static int __init early_initrd(char *p)
{
return early_initrdmem(p);
}
early_param("initrd", early_initrd);
static int __init init_linuxrc(struct subprocess_info *info, struct cred *new)
{
ksys_unshare(CLONE_FS | CLONE_FILES);
console_on_rootfs();
/* move initrd over / and chdir/chroot in initrd root */
init_chdir("/root");
init_mount(".", "/", NULL, MS_MOVE, NULL);
init_chroot(".");
ksys_setsid();
return 0;
}
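/*
 * Rough sequence of handle_initrd() below: mount the initrd as /root,
 * run /linuxrc in a usermode helper (which pivots onto the initrd),
 * then either stay on the ram disk (real-root-dev == Root_RAM0) or
 * mount the real root and try to tuck the old initrd away under
 * /initrd, unmounting it if that fails.
 */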
static void __init handle_initrd(char *root_device_name)
{
struct subprocess_info *info;
static char *argv[] = { "linuxrc", NULL, };
extern char *envp_init[];
int error;
pr_warn("using deprecated initrd support, will be removed in 2021.\n");
real_root_dev = new_encode_dev(ROOT_DEV);
create_dev("/dev/root.old", Root_RAM0);
/* mount initrd on rootfs' /root */
mount_root_generic("/dev/root.old", root_device_name,
root_mountflags & ~MS_RDONLY);
init_mkdir("/old", 0700);
init_chdir("/old");
info = call_usermodehelper_setup("/linuxrc", argv, envp_init,
GFP_KERNEL, init_linuxrc, NULL, NULL);
if (!info)
return;
call_usermodehelper_exec(info, UMH_WAIT_PROC|UMH_FREEZABLE);
/* move initrd to rootfs' /old */
init_mount("..", ".", NULL, MS_MOVE, NULL);
/* switch root and cwd back to / of rootfs */
init_chroot("..");
if (new_decode_dev(real_root_dev) == Root_RAM0) {
init_chdir("/old");
return;
}
init_chdir("/");
ROOT_DEV = new_decode_dev(real_root_dev);
mount_root(root_device_name);
printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
error = init_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
if (!error)
printk("okay\n");
else {
if (error == -ENOENT)
printk("/initrd does not exist. Ignored.\n");
else
printk("failed\n");
printk(KERN_NOTICE "Unmounting old root\n");
init_umount("/old", MNT_DETACH);
}
}
bool __init initrd_load(char *root_device_name)
{
if (mount_initrd) {
create_dev("/dev/ram", Root_RAM0);
/*
* Load the initrd data into /dev/ram0. Execute it as initrd
* unless /dev/ram0 is supposed to be our actual root device,
		 * in which case the ram disk is just set up here, and gets
* mounted in the normal path.
*/
if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
init_unlink("/initrd.image");
handle_initrd(root_device_name);
return true;
}
}
init_unlink("/initrd.image");
return false;
}
| linux-master | init/do_mounts_initrd.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/async.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/dirent.h>
#include <linux/syscalls.h>
#include <linux/utime.h>
#include <linux/file.h>
#include <linux/kstrtox.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/namei.h>
#include <linux/init_syscalls.h>
#include <linux/task_work.h>
#include <linux/umh.h>
static __initdata bool csum_present;
static __initdata u32 io_csum;
static ssize_t __init xwrite(struct file *file, const unsigned char *p,
size_t count, loff_t *pos)
{
ssize_t out = 0;
/* sys_write only can write MAX_RW_COUNT aka 2G-4K bytes at most */
while (count) {
ssize_t rv = kernel_write(file, p, count, pos);
if (rv < 0) {
if (rv == -EINTR || rv == -EAGAIN)
continue;
return out ? out : rv;
} else if (rv == 0)
break;
if (csum_present) {
ssize_t i;
for (i = 0; i < rv; i++)
io_csum += p[i];
}
p += rv;
out += rv;
count -= rv;
}
return out;
}
static __initdata char *message;
static void __init error(char *x)
{
if (!message)
message = x;
}
#define panic_show_mem(fmt, ...) \
({ show_mem(); panic(fmt, ##__VA_ARGS__); })
/* link hash */
#define N_ALIGN(len) ((((len) + 1) & ~3) + 2)
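/*
 * The N_ALIGN() macro above computes the smallest value >= len that is
 * congruent to 2 mod 4: cpio "newc" names are NUL-padded so that the
 * 110-byte header (110 % 4 == 2) plus name ends on a 4-byte boundary.
 * E.g. N_ALIGN(5) == 6 and N_ALIGN(7) == 10.
 */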
static __initdata struct hash {
int ino, minor, major;
umode_t mode;
struct hash *next;
char name[N_ALIGN(PATH_MAX)];
} *head[32];
static inline int hash(int major, int minor, int ino)
{
unsigned long tmp = ino + minor + (major << 3);
tmp += tmp >> 5;
return tmp & 31;
}
static char __init *find_link(int major, int minor, int ino,
umode_t mode, char *name)
{
struct hash **p, *q;
for (p = head + hash(major, minor, ino); *p; p = &(*p)->next) {
if ((*p)->ino != ino)
continue;
if ((*p)->minor != minor)
continue;
if ((*p)->major != major)
continue;
if (((*p)->mode ^ mode) & S_IFMT)
continue;
return (*p)->name;
}
q = kmalloc(sizeof(struct hash), GFP_KERNEL);
if (!q)
panic_show_mem("can't allocate link hash entry");
q->major = major;
q->minor = minor;
q->ino = ino;
q->mode = mode;
strcpy(q->name, name);
q->next = NULL;
*p = q;
return NULL;
}
static void __init free_hash(void)
{
struct hash **p, *q;
for (p = head; p < head + 32; p++) {
while (*p) {
q = *p;
*p = q->next;
kfree(q);
}
}
}
#ifdef CONFIG_INITRAMFS_PRESERVE_MTIME
static void __init do_utime(char *filename, time64_t mtime)
{
struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
init_utimes(filename, t);
}
static void __init do_utime_path(const struct path *path, time64_t mtime)
{
struct timespec64 t[2] = { { .tv_sec = mtime }, { .tv_sec = mtime } };
vfs_utimes(path, t);
}
static __initdata LIST_HEAD(dir_list);
struct dir_entry {
struct list_head list;
time64_t mtime;
char name[];
};
static void __init dir_add(const char *name, time64_t mtime)
{
size_t nlen = strlen(name) + 1;
struct dir_entry *de;
de = kmalloc(sizeof(struct dir_entry) + nlen, GFP_KERNEL);
if (!de)
panic_show_mem("can't allocate dir_entry buffer");
INIT_LIST_HEAD(&de->list);
strscpy(de->name, name, nlen);
de->mtime = mtime;
list_add(&de->list, &dir_list);
}
static void __init dir_utime(void)
{
struct dir_entry *de, *tmp;
list_for_each_entry_safe(de, tmp, &dir_list, list) {
list_del(&de->list);
do_utime(de->name, de->mtime);
kfree(de);
}
}
#else
static void __init do_utime(char *filename, time64_t mtime) {}
static void __init do_utime_path(const struct path *path, time64_t mtime) {}
static void __init dir_add(const char *name, time64_t mtime) {}
static void __init dir_utime(void) {}
#endif
static __initdata time64_t mtime;
/* cpio header parsing */
static __initdata unsigned long ino, major, minor, nlink;
static __initdata umode_t mode;
static __initdata unsigned long body_len, name_len;
static __initdata uid_t uid;
static __initdata gid_t gid;
static __initdata unsigned rdev;
static __initdata u32 hdr_csum;
static void __init parse_header(char *s)
{
unsigned long parsed[13];
char buf[9];
int i;
buf[8] = '\0';
for (i = 0, s += 6; i < 13; i++, s += 8) {
memcpy(buf, s, 8);
parsed[i] = simple_strtoul(buf, NULL, 16);
}
ino = parsed[0];
mode = parsed[1];
uid = parsed[2];
gid = parsed[3];
nlink = parsed[4];
mtime = parsed[5]; /* breaks in y2106 */
body_len = parsed[6];
major = parsed[7];
minor = parsed[8];
rdev = new_encode_dev(MKDEV(parsed[9], parsed[10]));
name_len = parsed[11];
hdr_csum = parsed[12];
}
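/*
 * For reference, a "newc" header is 110 ASCII bytes: the 6-byte magic
 * ("070701", or "070702" when a data checksum is present) followed by
 * thirteen 8-digit hex fields -- c_ino, c_mode, c_uid, c_gid, c_nlink,
 * c_mtime, c_filesize, c_devmajor, c_devminor, c_rdevmajor,
 * c_rdevminor, c_namesize, c_check -- exactly the parsed[0..12] slots
 * decoded above.
 */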
/* FSM */
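/*
 * The unpacker below is a small state machine: Start reads a header,
 * Collect buffers bytes that straddle input chunks, GotHeader parses
 * the header, GotName/GotSymlink/CopyFile create the object, SkipIt
 * eats padding or entries we ignore, and Reset swallows the zero
 * padding between concatenated cpio archives.
 */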
static __initdata enum state {
Start,
Collect,
GotHeader,
SkipIt,
GotName,
CopyFile,
GotSymlink,
Reset
} state, next_state;
static __initdata char *victim;
static unsigned long byte_count __initdata;
static __initdata loff_t this_header, next_header;
static inline void __init eat(unsigned n)
{
victim += n;
this_header += n;
byte_count -= n;
}
static __initdata char *collected;
static long remains __initdata;
static __initdata char *collect;
static void __init read_into(char *buf, unsigned size, enum state next)
{
if (byte_count >= size) {
collected = victim;
eat(size);
state = next;
} else {
collect = collected = buf;
remains = size;
next_state = next;
state = Collect;
}
}
static __initdata char *header_buf, *symlink_buf, *name_buf;
static int __init do_start(void)
{
read_into(header_buf, 110, GotHeader);
return 0;
}
static int __init do_collect(void)
{
unsigned long n = remains;
if (byte_count < n)
n = byte_count;
memcpy(collect, victim, n);
eat(n);
collect += n;
if ((remains -= n) != 0)
return 1;
state = next_state;
return 0;
}
static int __init do_header(void)
{
if (!memcmp(collected, "070701", 6)) {
csum_present = false;
} else if (!memcmp(collected, "070702", 6)) {
csum_present = true;
} else {
if (memcmp(collected, "070707", 6) == 0)
error("incorrect cpio method used: use -H newc option");
else
error("no cpio magic");
return 1;
}
parse_header(collected);
next_header = this_header + N_ALIGN(name_len) + body_len;
next_header = (next_header + 3) & ~3;
state = SkipIt;
if (name_len <= 0 || name_len > PATH_MAX)
return 0;
if (S_ISLNK(mode)) {
if (body_len > PATH_MAX)
return 0;
collect = collected = symlink_buf;
remains = N_ALIGN(name_len) + body_len;
next_state = GotSymlink;
state = Collect;
return 0;
}
if (S_ISREG(mode) || !body_len)
read_into(name_buf, N_ALIGN(name_len), GotName);
return 0;
}
static int __init do_skip(void)
{
if (this_header + byte_count < next_header) {
eat(byte_count);
return 1;
} else {
eat(next_header - this_header);
state = next_state;
return 0;
}
}
static int __init do_reset(void)
{
while (byte_count && *victim == '\0')
eat(1);
if (byte_count && (this_header & 3))
error("broken padding");
return 1;
}
static void __init clean_path(char *path, umode_t fmode)
{
struct kstat st;
if (!init_stat(path, &st, AT_SYMLINK_NOFOLLOW) &&
(st.mode ^ fmode) & S_IFMT) {
if (S_ISDIR(st.mode))
init_rmdir(path);
else
init_unlink(path);
}
}
static int __init maybe_link(void)
{
if (nlink >= 2) {
char *old = find_link(major, minor, ino, mode, collected);
if (old) {
clean_path(collected, 0);
return (init_link(old, collected) < 0) ? -1 : 1;
}
}
return 0;
}
static __initdata struct file *wfile;
static __initdata loff_t wfile_pos;
static int __init do_name(void)
{
state = SkipIt;
next_state = Reset;
if (strcmp(collected, "TRAILER!!!") == 0) {
free_hash();
return 0;
}
clean_path(collected, mode);
if (S_ISREG(mode)) {
int ml = maybe_link();
if (ml >= 0) {
int openflags = O_WRONLY|O_CREAT;
if (ml != 1)
openflags |= O_TRUNC;
wfile = filp_open(collected, openflags, mode);
if (IS_ERR(wfile))
return 0;
wfile_pos = 0;
io_csum = 0;
vfs_fchown(wfile, uid, gid);
vfs_fchmod(wfile, mode);
if (body_len)
vfs_truncate(&wfile->f_path, body_len);
state = CopyFile;
}
} else if (S_ISDIR(mode)) {
init_mkdir(collected, mode);
init_chown(collected, uid, gid, 0);
init_chmod(collected, mode);
dir_add(collected, mtime);
} else if (S_ISBLK(mode) || S_ISCHR(mode) ||
S_ISFIFO(mode) || S_ISSOCK(mode)) {
if (maybe_link() == 0) {
init_mknod(collected, mode, rdev);
init_chown(collected, uid, gid, 0);
init_chmod(collected, mode);
do_utime(collected, mtime);
}
}
return 0;
}
static int __init do_copy(void)
{
if (byte_count >= body_len) {
if (xwrite(wfile, victim, body_len, &wfile_pos) != body_len)
error("write error");
do_utime_path(&wfile->f_path, mtime);
fput(wfile);
if (csum_present && io_csum != hdr_csum)
error("bad data checksum");
eat(body_len);
state = SkipIt;
return 0;
} else {
if (xwrite(wfile, victim, byte_count, &wfile_pos) != byte_count)
error("write error");
body_len -= byte_count;
eat(byte_count);
return 1;
}
}
static int __init do_symlink(void)
{
collected[N_ALIGN(name_len) + body_len] = '\0';
clean_path(collected, 0);
init_symlink(collected + N_ALIGN(name_len), collected);
init_chown(collected, uid, gid, AT_SYMLINK_NOFOLLOW);
do_utime(collected, mtime);
state = SkipIt;
next_state = Reset;
return 0;
}
static __initdata int (*actions[])(void) = {
[Start] = do_start,
[Collect] = do_collect,
[GotHeader] = do_header,
[SkipIt] = do_skip,
[GotName] = do_name,
[CopyFile] = do_copy,
[GotSymlink] = do_symlink,
[Reset] = do_reset,
};
static long __init write_buffer(char *buf, unsigned long len)
{
byte_count = len;
victim = buf;
while (!actions[state]())
;
return len - byte_count;
}
static long __init flush_buffer(void *bufv, unsigned long len)
{
char *buf = bufv;
long written;
long origLen = len;
if (message)
return -1;
while ((written = write_buffer(buf, len)) < len && !message) {
char c = buf[written];
if (c == '0') {
buf += written;
len -= written;
state = Start;
} else if (c == 0) {
buf += written;
len -= written;
state = Reset;
} else
error("junk within compressed archive");
}
return origLen;
}
static unsigned long my_inptr __initdata; /* index of next byte to be processed in inbuf */
#include <linux/decompress/generic.h>
static char * __init unpack_to_rootfs(char *buf, unsigned long len)
{
long written;
decompress_fn decompress;
const char *compress_name;
static __initdata char msg_buf[64];
header_buf = kmalloc(110, GFP_KERNEL);
symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
if (!header_buf || !symlink_buf || !name_buf)
panic_show_mem("can't allocate buffers");
state = Start;
this_header = 0;
message = NULL;
while (!message && len) {
loff_t saved_offset = this_header;
if (*buf == '0' && !(this_header & 3)) {
state = Start;
written = write_buffer(buf, len);
buf += written;
len -= written;
continue;
}
if (!*buf) {
buf++;
len--;
this_header++;
continue;
}
this_header = 0;
decompress = decompress_method(buf, len, &compress_name);
pr_debug("Detected %s compressed data\n", compress_name);
if (decompress) {
int res = decompress(buf, len, NULL, flush_buffer, NULL,
&my_inptr, error);
if (res)
error("decompressor failed");
} else if (compress_name) {
if (!message) {
snprintf(msg_buf, sizeof msg_buf,
"compression method %s not configured",
compress_name);
message = msg_buf;
}
} else
error("invalid magic at start of compressed archive");
if (state != Reset)
error("junk at the end of compressed archive");
this_header = saved_offset + my_inptr;
buf += my_inptr;
len -= my_inptr;
}
dir_utime();
kfree(name_buf);
kfree(symlink_buf);
kfree(header_buf);
return message;
}
static int __initdata do_retain_initrd;
static int __init retain_initrd_param(char *str)
{
if (*str)
return 0;
do_retain_initrd = 1;
return 1;
}
__setup("retain_initrd", retain_initrd_param);
#ifdef CONFIG_ARCH_HAS_KEEPINITRD
static int __init keepinitrd_setup(char *__unused)
{
do_retain_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif
static bool __initdata initramfs_async = true;
static int __init initramfs_async_setup(char *str)
{
return kstrtobool(str, &initramfs_async) == 0;
}
__setup("initramfs_async=", initramfs_async_setup);
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
#include <linux/initrd.h>
#include <linux/kexec.h>
void __init reserve_initrd_mem(void)
{
phys_addr_t start;
unsigned long size;
	/* Ignore the virtual address computed during device tree parsing */
initrd_start = initrd_end = 0;
if (!phys_initrd_size)
return;
/*
* Round the memory region to page boundaries as per free_initrd_mem()
* This allows us to detect whether the pages overlapping the initrd
* are in use, but more importantly, reserves the entire set of pages
* as we don't want these pages allocated for other purposes.
*/
start = round_down(phys_initrd_start, PAGE_SIZE);
size = phys_initrd_size + (phys_initrd_start - start);
size = round_up(size, PAGE_SIZE);
if (!memblock_is_region_memory(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
(u64)start, size);
goto disable;
}
if (memblock_is_region_reserved(start, size)) {
pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
(u64)start, size);
goto disable;
}
memblock_reserve(start, size);
/* Now convert initrd to virtual addresses */
initrd_start = (unsigned long)__va(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
initrd_below_start_ok = 1;
return;
disable:
pr_cont(" - disabling initrd\n");
initrd_start = 0;
initrd_end = 0;
}
void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
unsigned long aligned_start = ALIGN_DOWN(start, PAGE_SIZE);
unsigned long aligned_end = ALIGN(end, PAGE_SIZE);
memblock_free((void *)aligned_start, aligned_end - aligned_start);
#endif
free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
"initrd");
}
#ifdef CONFIG_KEXEC_CORE
static bool __init kexec_free_initrd(void)
{
unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
/*
* If the initrd region is overlapped with crashkernel reserved region,
* free only memory that is not part of crashkernel region.
*/
if (initrd_start >= crashk_end || initrd_end <= crashk_start)
return false;
/*
	 * Initialize the initrd memory region, since a kexec boot does not
	 * do it for us.
*/
memset((void *)initrd_start, 0, initrd_end - initrd_start);
if (initrd_start < crashk_start)
free_initrd_mem(initrd_start, crashk_start);
if (initrd_end > crashk_end)
free_initrd_mem(crashk_end, initrd_end);
return true;
}
#else
static inline bool kexec_free_initrd(void)
{
return false;
}
#endif /* CONFIG_KEXEC_CORE */
#ifdef CONFIG_BLK_DEV_RAM
static void __init populate_initrd_image(char *err)
{
ssize_t written;
struct file *file;
loff_t pos = 0;
unpack_to_rootfs(__initramfs_start, __initramfs_size);
printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
err);
file = filp_open("/initrd.image", O_WRONLY | O_CREAT, 0700);
if (IS_ERR(file))
return;
written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
&pos);
if (written != initrd_end - initrd_start)
pr_err("/initrd.image: incomplete write (%zd != %ld)\n",
written, initrd_end - initrd_start);
fput(file);
}
#endif /* CONFIG_BLK_DEV_RAM */
static void __init do_populate_rootfs(void *unused, async_cookie_t cookie)
{
/* Load the built in initramfs */
char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
if (err)
panic_show_mem("%s", err); /* Failed to decompress INTERNAL initramfs */
if (!initrd_start || IS_ENABLED(CONFIG_INITRAMFS_FORCE))
goto done;
if (IS_ENABLED(CONFIG_BLK_DEV_RAM))
printk(KERN_INFO "Trying to unpack rootfs image as initramfs...\n");
else
printk(KERN_INFO "Unpacking initramfs...\n");
err = unpack_to_rootfs((char *)initrd_start, initrd_end - initrd_start);
if (err) {
#ifdef CONFIG_BLK_DEV_RAM
populate_initrd_image(err);
#else
printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err);
#endif
}
done:
/*
* If the initrd region is overlapped with crashkernel reserved region,
* free only memory that is not part of crashkernel region.
*/
if (!do_retain_initrd && initrd_start && !kexec_free_initrd())
free_initrd_mem(initrd_start, initrd_end);
initrd_start = 0;
initrd_end = 0;
flush_delayed_fput();
task_work_run();
}
static ASYNC_DOMAIN_EXCLUSIVE(initramfs_domain);
static async_cookie_t initramfs_cookie;
void wait_for_initramfs(void)
{
if (!initramfs_cookie) {
/*
* Something before rootfs_initcall wants to access
* the filesystem/initramfs. Probably a bug. Make a
* note, avoid deadlocking the machine, and let the
* caller's access fail as it used to.
*/
pr_warn_once("wait_for_initramfs() called before rootfs_initcalls\n");
return;
}
async_synchronize_cookie_domain(initramfs_cookie + 1, &initramfs_domain);
}
EXPORT_SYMBOL_GPL(wait_for_initramfs);
static int __init populate_rootfs(void)
{
initramfs_cookie = async_schedule_domain(do_populate_rootfs, NULL,
&initramfs_domain);
usermodehelper_enable();
if (!initramfs_async)
wait_for_initramfs();
return 0;
}
rootfs_initcall(populate_rootfs);
| linux-master | init/initramfs.c |
// SPDX-License-Identifier: GPL-2.0-only
#include <generated/compile.h>
#include <generated/utsrelease.h>
#include <linux/proc_ns.h>
#include <linux/refcount.h>
#include <linux/uts.h>
#include <linux/utsname.h>
struct uts_namespace init_uts_ns = {
.ns.count = REFCOUNT_INIT(2),
.name = {
.sysname = UTS_SYSNAME,
.nodename = UTS_NODENAME,
.release = UTS_RELEASE,
.version = UTS_VERSION,
.machine = UTS_MACHINE,
.domainname = UTS_DOMAINNAME,
},
.user_ns = &init_user_ns,
.ns.inum = PROC_UTS_INIT_INO,
#ifdef CONFIG_UTS_NS
.ns.ops = &utsns_operations,
#endif
};
/* FIXED STRINGS! Don't touch! */
const char linux_banner[] =
"Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
| linux-master | init/version-timestamp.c |
// SPDX-License-Identifier: GPL-2.0
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/mqueue.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/audit.h>
#include <linux/numa.h>
#include <linux/scs.h>
#include <linux/uaccess.h>
static struct signal_struct init_signals = {
.nr_threads = 1,
.thread_head = LIST_HEAD_INIT(init_task.thread_node),
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(init_signals.wait_chldexit),
.shared_pending = {
.list = LIST_HEAD_INIT(init_signals.shared_pending.list),
.signal = {{0}}
},
.multiprocess = HLIST_HEAD_INIT,
.rlim = INIT_RLIMITS,
.cred_guard_mutex = __MUTEX_INITIALIZER(init_signals.cred_guard_mutex),
.exec_update_lock = __RWSEM_INITIALIZER(init_signals.exec_update_lock),
#ifdef CONFIG_POSIX_TIMERS
.posix_timers = LIST_HEAD_INIT(init_signals.posix_timers),
.cputimer = {
.cputime_atomic = INIT_CPUTIME_ATOMIC,
},
#endif
INIT_CPU_TIMERS(init_signals)
.pids = {
[PIDTYPE_PID] = &init_struct_pid,
[PIDTYPE_TGID] = &init_struct_pid,
[PIDTYPE_PGID] = &init_struct_pid,
[PIDTYPE_SID] = &init_struct_pid,
},
INIT_PREV_CPUTIME(init_signals)
};
static struct sighand_struct init_sighand = {
.count = REFCOUNT_INIT(1),
.action = { { { .sa_handler = SIG_DFL, } }, },
.siglock = __SPIN_LOCK_UNLOCKED(init_sighand.siglock),
.signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh),
};
#ifdef CONFIG_SHADOW_CALL_STACK
unsigned long init_shadow_call_stack[SCS_SIZE / sizeof(long)]
__init_task_data = {
[(SCS_SIZE / sizeof(long)) - 1] = SCS_END_MAGIC
};
#endif
/*
 * Set up the first task table, touch at your own risk! Base=0,
* limit=0x1fffff (=2MB)
*/
struct task_struct init_task
#ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK
__init_task_data
#endif
__aligned(L1_CACHE_BYTES)
= {
#ifdef CONFIG_THREAD_INFO_IN_TASK
.thread_info = INIT_THREAD_INFO(init_task),
.stack_refcount = REFCOUNT_INIT(1),
#endif
.__state = 0,
.stack = init_stack,
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.user_cpus_ptr = NULL,
.cpus_mask = CPU_MASK_ALL,
.nr_cpus_allowed= NR_CPUS,
.mm = NULL,
.active_mm = &init_mm,
.restart_block = {
.fn = do_no_restart_syscall,
},
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
.rt = {
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
.tasks = LIST_HEAD_INIT(init_task.tasks),
#ifdef CONFIG_SMP
.pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
#endif
#ifdef CONFIG_CGROUP_SCHED
.sched_task_group = &root_task_group,
#endif
.ptraced = LIST_HEAD_INIT(init_task.ptraced),
.ptrace_entry = LIST_HEAD_INIT(init_task.ptrace_entry),
.real_parent = &init_task,
.parent = &init_task,
.children = LIST_HEAD_INIT(init_task.children),
.sibling = LIST_HEAD_INIT(init_task.sibling),
.group_leader = &init_task,
RCU_POINTER_INITIALIZER(real_cred, &init_cred),
RCU_POINTER_INITIALIZER(cred, &init_cred),
.comm = INIT_TASK_COMM,
.thread = INIT_THREAD,
.fs = &init_fs,
.files = &init_files,
#ifdef CONFIG_IO_URING
.io_uring = NULL,
#endif
.signal = &init_signals,
.sighand = &init_sighand,
.nsproxy = &init_nsproxy,
.pending = {
.list = LIST_HEAD_INIT(init_task.pending.list),
.signal = {{0}}
},
.blocked = {{0}},
.alloc_lock = __SPIN_LOCK_UNLOCKED(init_task.alloc_lock),
.journal_info = NULL,
INIT_CPU_TIMERS(init_task)
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock),
.timer_slack_ns = 50000, /* 50 usec default slack */
.thread_pid = &init_struct_pid,
.thread_group = LIST_HEAD_INIT(init_task.thread_group),
.thread_node = LIST_HEAD_INIT(init_signals.thread_head),
#ifdef CONFIG_AUDIT
.loginuid = INVALID_UID,
.sessionid = AUDIT_SID_UNSET,
#endif
#ifdef CONFIG_PERF_EVENTS
.perf_event_mutex = __MUTEX_INITIALIZER(init_task.perf_event_mutex),
.perf_event_list = LIST_HEAD_INIT(init_task.perf_event_list),
#endif
#ifdef CONFIG_PREEMPT_RCU
.rcu_read_lock_nesting = 0,
.rcu_read_unlock_special.s = 0,
.rcu_node_entry = LIST_HEAD_INIT(init_task.rcu_node_entry),
.rcu_blocked_node = NULL,
#endif
#ifdef CONFIG_TASKS_RCU
.rcu_tasks_holdout = false,
.rcu_tasks_holdout_list = LIST_HEAD_INIT(init_task.rcu_tasks_holdout_list),
.rcu_tasks_idle_cpu = -1,
#endif
#ifdef CONFIG_TASKS_TRACE_RCU
.trc_reader_nesting = 0,
.trc_reader_special.s = 0,
.trc_holdout_list = LIST_HEAD_INIT(init_task.trc_holdout_list),
.trc_blkd_node = LIST_HEAD_INIT(init_task.trc_blkd_node),
#endif
#ifdef CONFIG_CPUSETS
.mems_allowed_seq = SEQCNT_SPINLOCK_ZERO(init_task.mems_allowed_seq,
&init_task.alloc_lock),
#endif
#ifdef CONFIG_RT_MUTEXES
.pi_waiters = RB_ROOT_CACHED,
.pi_top_task = NULL,
#endif
INIT_PREV_CPUTIME(init_task)
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
.vtime.seqcount = SEQCNT_ZERO(init_task.vtime_seqcount),
.vtime.starttime = 0,
.vtime.state = VTIME_SYS,
#endif
#ifdef CONFIG_NUMA_BALANCING
.numa_preferred_nid = NUMA_NO_NODE,
.numa_group = NULL,
.numa_faults = NULL,
#endif
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
.kasan_depth = 1,
#endif
#ifdef CONFIG_KCSAN
.kcsan_ctx = {
.scoped_accesses = {LIST_POISON1, NULL},
},
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
.softirqs_enabled = 1,
#endif
#ifdef CONFIG_LOCKDEP
.lockdep_depth = 0, /* no locks held yet */
.curr_chain_key = INITIAL_CHAIN_KEY,
.lockdep_recursion = 0,
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.ret_stack = NULL,
.tracing_graph_pause = ATOMIC_INIT(0),
#endif
#if defined(CONFIG_TRACING) && defined(CONFIG_PREEMPTION)
.trace_recursion = 0,
#endif
#ifdef CONFIG_LIVEPATCH
.patch_state = KLP_UNDEFINED,
#endif
#ifdef CONFIG_SECURITY
.security = NULL,
#endif
#ifdef CONFIG_SECCOMP_FILTER
.seccomp = { .filter_count = ATOMIC_INIT(0) },
#endif
};
EXPORT_SYMBOL(init_task);
/*
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info init_thread_info __init_thread_info = INIT_THREAD_INFO(init_task);
#endif
| linux-master | init/init_task.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/init/main.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* GK 2/5/95 - Changed to support mounting root fs via NFS
* Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
* Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
* Simplified starting of init: Michael A. Griffith <[email protected]>
*/
#define DEBUG /* Enable initcall_debug */
#include <linux/types.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/binfmts.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/bootconfig.h>
#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/kprobes.h>
#include <linux/kmsan.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/kfence.h>
#include <linux/rcupdate.h>
#include <linux/srcu.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/buildid.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/sched/isolation.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/padata.h>
#include <linux/pid_namespace.h>
#include <linux/device/driver.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/init.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/pti.h>
#include <linux/blkdev.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/integrity.h>
#include <linux/proc_ns.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/kcsan.h>
#include <linux/init_syscalls.h>
#include <linux/stackdepot.h>
#include <linux/randomize_kstack.h>
#include <net/net_namespace.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#define CREATE_TRACE_POINTS
#include <trace/events/initcall.h>
#include <kunit/test.h>
static int kernel_init(void *);
/*
* Debug helper: via this flag we know that we are in 'early bootup code'
* where only the boot processor is running with IRQ disabled. This means
* two things - IRQ must not be enabled before the flag is cleared and some
* operations which are not allowed with IRQ disabled are allowed while the
* flag is set.
*/
bool early_boot_irqs_disabled __read_mostly;
enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);
/*
* Boot command-line arguments
*/
#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT
/* Default late time init is NULL. archs can override this later. */
void (*__initdata late_time_init)(void);
/* Untouched command line saved by arch-specific code. */
char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (eg. for /proc) */
char *saved_command_line __ro_after_init;
unsigned int saved_command_line_len __ro_after_init;
/* Command line for parameter parsing */
static char *static_command_line;
/* Untouched extra command line */
static char *extra_command_line;
/* Extra init arguments */
static char *extra_init_args;
#ifdef CONFIG_BOOT_CONFIG
/* Is bootconfig on command line? */
static bool bootconfig_found;
static size_t initargs_offs;
#else
# define bootconfig_found false
# define initargs_offs 0
#endif
static char *execute_command;
static char *ramdisk_execute_command = "/init";
/*
* Used to generate warnings if static_key manipulation functions are used
* before jump_label_init is called.
*/
bool static_key_initialized __read_mostly;
EXPORT_SYMBOL_GPL(static_key_initialized);
/*
* If set, this is an indication to the drivers that reset the underlying
* device before going ahead with the initialization otherwise driver might
* rely on the BIOS and skip the reset operation.
*
* This is useful if kernel is booting in an unreliable environment.
* For ex. kdump situation where previous kernel has crashed, BIOS has been
* skipped and devices will be in unknown state.
*/
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);
static int __init set_reset_devices(char *str)
{
reset_devices = 1;
return 1;
}
__setup("reset_devices", set_reset_devices);
static const char *argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
const char *envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
static const char *panic_later, *panic_param;
static bool __init obsolete_checksetup(char *line)
{
const struct obs_kernel_param *p;
bool had_early_param = false;
p = __setup_start;
do {
int n = strlen(p->str);
if (parameqn(line, p->str, n)) {
if (p->early) {
/* Already done in parse_early_param?
* (Needs exact match on param part).
* Keep iterating, as we can have early
* params and __setups of same names 8( */
if (line[n] == '\0' || line[n] == '=')
had_early_param = true;
} else if (!p->setup_func) {
pr_warn("Parameter %s is obsolete, ignored\n",
p->str);
return true;
} else if (p->setup_func(line + n))
return true;
}
p++;
} while (p < __setup_end);
return had_early_param;
}
/*
* This should be approx 2 Bo*oMips to start (note initial shift), and will
 * still work even if initially too large; it will just take slightly longer
*/
unsigned long loops_per_jiffy = (1<<12);
EXPORT_SYMBOL(loops_per_jiffy);
static int __init debug_kernel(char *str)
{
console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
return 0;
}
static int __init quiet_kernel(char *str)
{
console_loglevel = CONSOLE_LOGLEVEL_QUIET;
return 0;
}
early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);
static int __init loglevel(char *str)
{
int newlevel;
/*
* Only update loglevel value when a correct setting was passed,
* to prevent blind crashes (when loglevel being set to 0) that
* are quite hard to debug
*/
if (get_option(&str, &newlevel)) {
console_loglevel = newlevel;
return 0;
}
return -EINVAL;
}
early_param("loglevel", loglevel);
#ifdef CONFIG_BLK_DEV_INITRD
static void * __init get_boot_config_from_initrd(size_t *_size)
{
u32 size, csum;
char *data;
u32 *hdr;
int i;
if (!initrd_end)
return NULL;
data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
/*
* Since Grub may align the size of initrd to 4, we must
* check the preceding 3 bytes as well.
*/
for (i = 0; i < 4; i++) {
if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
goto found;
data--;
}
return NULL;
found:
hdr = (u32 *)(data - 8);
size = le32_to_cpu(hdr[0]);
csum = le32_to_cpu(hdr[1]);
data = ((void *)hdr) - size;
if ((unsigned long)data < initrd_start) {
pr_err("bootconfig size %d is greater than initrd size %ld\n",
size, initrd_end - initrd_start);
return NULL;
}
if (xbc_calc_checksum(data, size) != csum) {
pr_err("bootconfig checksum failed\n");
return NULL;
}
/* Remove bootconfig from initramfs/initrd */
initrd_end = (unsigned long)data;
if (_size)
*_size = size;
return data;
}
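/*
 * Layout assumed above: the bootconfig blob is appended to the initrd
 * as [data][size (le32)][checksum (le32)][BOOTCONFIG_MAGIC], so we walk
 * back from initrd_end past the (possibly GRUB-padded) magic and the
 * 8-byte size/checksum footer to find the data itself.
 */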
#else
static void * __init get_boot_config_from_initrd(size_t *_size)
{
return NULL;
}
#endif
#ifdef CONFIG_BOOT_CONFIG
static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)
static int __init xbc_snprint_cmdline(char *buf, size_t size,
struct xbc_node *root)
{
struct xbc_node *knode, *vnode;
char *end = buf + size;
const char *val;
int ret;
xbc_node_for_each_key_value(root, knode, val) {
ret = xbc_node_compose_key_after(root, knode,
xbc_namebuf, XBC_KEYLEN_MAX);
if (ret < 0)
return ret;
vnode = xbc_node_get_child(knode);
if (!vnode) {
ret = snprintf(buf, rest(buf, end), "%s ", xbc_namebuf);
if (ret < 0)
return ret;
buf += ret;
continue;
}
xbc_array_for_each_value(vnode, val) {
ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ",
xbc_namebuf, val);
if (ret < 0)
return ret;
buf += ret;
}
}
return buf - (end - size);
}
#undef rest
/* Make an extra command line under given key word */
static char * __init xbc_make_cmdline(const char *key)
{
struct xbc_node *root;
char *new_cmdline;
int ret, len = 0;
root = xbc_find_node(key);
if (!root)
return NULL;
/* Count required buffer size */
len = xbc_snprint_cmdline(NULL, 0, root);
if (len <= 0)
return NULL;
new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
if (!new_cmdline) {
pr_err("Failed to allocate memory for extra kernel cmdline.\n");
return NULL;
}
ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
if (ret < 0 || ret > len) {
pr_err("Failed to print extra kernel cmdline.\n");
memblock_free(new_cmdline, len + 1);
return NULL;
}
return new_cmdline;
}
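/*
 * Worked example (illustrative): given a bootconfig fragment
 *
 *	kernel.trace_options = sym-addr
 *	kernel.ftrace.dump_on_oops
 *
 * xbc_make_cmdline("kernel") returns
 *
 *	trace_options="sym-addr" ftrace.dump_on_oops
 *
 * (with a trailing space): the "kernel." prefix is stripped, values are
 * re-quoted, and valueless keys are emitted bare.
 */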
static int __init bootconfig_params(char *param, char *val,
const char *unused, void *arg)
{
if (strcmp(param, "bootconfig") == 0) {
bootconfig_found = true;
}
return 0;
}
static int __init warn_bootconfig(char *str)
{
/* The 'bootconfig' has been handled by bootconfig_params(). */
return 0;
}
static void __init setup_boot_config(void)
{
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
const char *msg, *data;
int pos, ret;
size_t size;
char *err;
/* Cut out the bootconfig data even if we have no bootconfig option */
data = get_boot_config_from_initrd(&size);
/* If there is no bootconfig in initrd, try embedded one. */
if (!data)
data = xbc_get_embedded_bootconfig(&size);
strscpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
bootconfig_params);
if (IS_ERR(err) || !(bootconfig_found || IS_ENABLED(CONFIG_BOOT_CONFIG_FORCE)))
return;
	/* parse_args() stops at '--' and returns the address of the next parameter */
if (err)
initargs_offs = err - tmp_cmdline;
if (!data) {
/* If user intended to use bootconfig, show an error level message */
if (bootconfig_found)
pr_err("'bootconfig' found on command line, but no bootconfig found\n");
else
pr_info("No bootconfig data provided, so skipping bootconfig");
return;
}
if (size >= XBC_DATA_MAX) {
pr_err("bootconfig size %ld greater than max size %d\n",
(long)size, XBC_DATA_MAX);
return;
}
ret = xbc_init(data, size, &msg, &pos);
if (ret < 0) {
if (pos < 0)
pr_err("Failed to init bootconfig: %s.\n", msg);
else
pr_err("Failed to parse bootconfig: %s at %d.\n",
msg, pos);
} else {
xbc_get_info(&ret, NULL);
pr_info("Load bootconfig: %ld bytes %d nodes\n", (long)size, ret);
/* keys starting with "kernel." are passed via cmdline */
extra_command_line = xbc_make_cmdline("kernel");
/* Also, "init." keys are init arguments */
extra_init_args = xbc_make_cmdline("init");
}
return;
}
static void __init exit_boot_config(void)
{
xbc_exit();
}
#else /* !CONFIG_BOOT_CONFIG */
static void __init setup_boot_config(void)
{
/* Remove bootconfig data from initrd */
get_boot_config_from_initrd(NULL);
}
static int __init warn_bootconfig(char *str)
{
pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
return 0;
}
#define exit_boot_config() do {} while (0)
#endif /* CONFIG_BOOT_CONFIG */
early_param("bootconfig", warn_bootconfig);
/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)
{
if (val) {
/* param=val or param="val"? */
if (val == param+strlen(param)+1)
val[-1] = '=';
else if (val == param+strlen(param)+2) {
val[-2] = '=';
memmove(val-1, val, strlen(val)+1);
} else
BUG();
}
}
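/*
 * Illustrative sketch (userspace, not kernel code) of the first case
 * repair_env_string() handles: parse_args() split "root=/dev/sda1" in
 * place by overwriting '=' with a NUL, and writing the '=' back makes
 * "param" the whole original string again.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[] = "root\0/dev/sda1";	/* was "root=/dev/sda1" */
	char *param = buf;
	char *val = buf + 5;		/* points at "/dev/sda1" */

	if (val == param + strlen(param) + 1)
		val[-1] = '=';		/* undo the split */
	printf("%s\n", param);		/* prints "root=/dev/sda1" */
	return 0;
}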
/* Anything after -- gets handed straight to init. */
static int __init set_init_arg(char *param, char *val,
const char *unused, void *arg)
{
unsigned int i;
if (panic_later)
return 0;
repair_env_string(param, val);
for (i = 0; argv_init[i]; i++) {
if (i == MAX_INIT_ARGS) {
panic_later = "init";
panic_param = param;
return 0;
}
}
argv_init[i] = param;
return 0;
}
/*
* Unknown boot options get handed to init, unless they look like
* unused parameters (modprobe will find them in /proc/cmdline).
*/
static int __init unknown_bootoption(char *param, char *val,
const char *unused, void *arg)
{
size_t len = strlen(param);
repair_env_string(param, val);
/* Handle obsolete-style parameters */
if (obsolete_checksetup(param))
return 0;
/* Unused module parameter. */
if (strnchr(param, len, '.'))
return 0;
if (panic_later)
return 0;
if (val) {
/* Environment option */
unsigned int i;
for (i = 0; envp_init[i]; i++) {
if (i == MAX_INIT_ENVS) {
panic_later = "env";
panic_param = param;
}
if (!strncmp(param, envp_init[i], len+1))
break;
}
envp_init[i] = param;
} else {
/* Command line option */
unsigned int i;
for (i = 0; argv_init[i]; i++) {
if (i == MAX_INIT_ARGS) {
panic_later = "init";
panic_param = param;
}
}
argv_init[i] = param;
}
return 0;
}
static int __init init_setup(char *str)
{
unsigned int i;
execute_command = str;
/*
	 * In case LILO is going to boot us with the default command line,
	 * it prepends "auto" before the whole cmdline, which makes
	 * the shell think it should execute a script of that name.
* So we ignore all arguments entered _before_ init=... [MJ]
*/
for (i = 1; i < MAX_INIT_ARGS; i++)
argv_init[i] = NULL;
return 1;
}
__setup("init=", init_setup);
static int __init rdinit_setup(char *str)
{
unsigned int i;
ramdisk_execute_command = str;
/* See "auto" comment in init_setup */
for (i = 1; i < MAX_INIT_ARGS; i++)
argv_init[i] = NULL;
return 1;
}
__setup("rdinit=", rdinit_setup);
#ifndef CONFIG_SMP
static const unsigned int setup_max_cpus = NR_CPUS;
static inline void setup_nr_cpu_ids(void) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }
#endif
/*
* We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * keep a reference to the parsed name/value strings for later use.
*/
static void __init setup_command_line(char *command_line)
{
size_t len, xlen = 0, ilen = 0;
if (extra_command_line)
xlen = strlen(extra_command_line);
if (extra_init_args)
ilen = strlen(extra_init_args) + 4; /* for " -- " */
len = xlen + strlen(boot_command_line) + 1;
saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
if (!saved_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
if (!static_command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);
if (xlen) {
/*
* We have to put extra_command_line before boot command
* lines because there could be dashes (separator of init
* command line) in the command lines.
*/
strcpy(saved_command_line, extra_command_line);
strcpy(static_command_line, extra_command_line);
}
strcpy(saved_command_line + xlen, boot_command_line);
strcpy(static_command_line + xlen, command_line);
if (ilen) {
/*
* Append supplemental init boot args to saved_command_line
* so that user can check what command line options passed
* to init.
* The order should always be
* " -- "[bootconfig init-param][cmdline init-param]
*/
if (initargs_offs) {
len = xlen + initargs_offs;
strcpy(saved_command_line + len, extra_init_args);
len += ilen - 4; /* strlen(extra_init_args) */
strcpy(saved_command_line + len,
boot_command_line + initargs_offs - 1);
} else {
len = strlen(saved_command_line);
strcpy(saved_command_line + len, " -- ");
len += 4;
strcpy(saved_command_line + len, extra_init_args);
}
}
saved_command_line_len = strlen(saved_command_line);
}
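/*
 * Worked example (illustrative) of the splicing above, with
 *
 *	boot_command_line  = "console=ttyS0 -- rdloop"
 *	extra_command_line = "kernel.opt=1 "	(bootconfig "kernel." keys)
 *	extra_init_args    = "init.opt=2"	(bootconfig "init." keys)
 *
 * initargs_offs is the offset of "rdloop", and the result is
 *
 *	saved_command_line = "kernel.opt=1 console=ttyS0 -- init.opt=2 rdloop"
 *
 * i.e. bootconfig-supplied init args land directly after "--", ahead of
 * the init args given on the command line.
 */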
/*
* We need to finalize in a non-__init function or else race conditions
* between the root thread and the init thread may cause start_kernel to
* be reaped by free_initmem before the root thread has proceeded to
* cpu_idle.
*
* gcc-3.4 accidentally inlines this function, so use noinline.
*/
static __initdata DECLARE_COMPLETION(kthreadd_done);
noinline void __ref __noreturn rest_init(void)
{
struct task_struct *tsk;
int pid;
rcu_scheduler_starting();
/*
* We need to spawn init first so that it obtains pid 1, however
* the init task will end up wanting to create kthreads, which, if
* we schedule it before we create kthreadd, will OOPS.
*/
pid = user_mode_thread(kernel_init, NULL, CLONE_FS);
/*
* Pin init on the boot CPU. Task migration is not properly working
* until sched_init_smp() has been run. It will set the allowed
* CPUs for init to the non isolated CPUs.
*/
rcu_read_lock();
tsk = find_task_by_pid_ns(pid, &init_pid_ns);
tsk->flags |= PF_NO_SETAFFINITY;
set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
rcu_read_unlock();
numa_default_policy();
pid = kernel_thread(kthreadd, NULL, NULL, CLONE_FS | CLONE_FILES);
rcu_read_lock();
kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
rcu_read_unlock();
/*
* Enable might_sleep() and smp_processor_id() checks.
* They cannot be enabled earlier because with CONFIG_PREEMPTION=y
* kernel_thread() would trigger might_sleep() splats. With
* CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
* already, but it's stuck on the kthreadd_done completion.
*/
system_state = SYSTEM_SCHEDULING;
complete(&kthreadd_done);
/*
* The boot idle thread must execute schedule()
* at least once to get things moving:
*/
schedule_preempt_disabled();
/* Call into cpu_idle with preempt disabled */
cpu_startup_entry(CPUHP_ONLINE);
}
/* Check for early params. */
static int __init do_early_param(char *param, char *val,
const char *unused, void *arg)
{
const struct obs_kernel_param *p;
for (p = __setup_start; p < __setup_end; p++) {
if ((p->early && parameq(param, p->str)) ||
(strcmp(param, "console") == 0 &&
strcmp(p->str, "earlycon") == 0)
) {
if (p->setup_func(val) != 0)
pr_warn("Malformed early option '%s'\n", param);
}
}
/* We accept everything at this stage. */
return 0;
}
void __init parse_early_options(char *cmdline)
{
parse_args("early options", cmdline, NULL, 0, 0, 0, NULL,
do_early_param);
}
/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
static int done __initdata;
static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
if (done)
return;
/* All fall through to do_early_param. */
strscpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
parse_early_options(tmp_cmdline);
done = 1;
}
void __init __weak arch_post_acpi_subsys_init(void) { }
void __init __weak smp_setup_processor_id(void)
{
}
# if THREAD_SIZE >= PAGE_SIZE
void __init __weak thread_stack_cache_init(void)
{
}
#endif
void __init __weak poking_init(void) { }
void __init __weak pgtable_cache_init(void) { }
void __init __weak trap_init(void) { }
bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);
#ifdef TRACEPOINTS_ENABLED
static void __init initcall_debug_enable(void);
#else
static inline void initcall_debug_enable(void)
{
}
#endif
#ifdef CONFIG_RANDOMIZE_KSTACK_OFFSET
DEFINE_STATIC_KEY_MAYBE_RO(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT,
randomize_kstack_offset);
DEFINE_PER_CPU(u32, kstack_offset);
static int __init early_randomize_kstack_offset(char *buf)
{
int ret;
bool bool_result;
ret = kstrtobool(buf, &bool_result);
if (ret)
return ret;
if (bool_result)
static_branch_enable(&randomize_kstack_offset);
else
static_branch_disable(&randomize_kstack_offset);
return 0;
}
early_param("randomize_kstack_offset", early_randomize_kstack_offset);
#endif
void __init __weak __noreturn arch_call_rest_init(void)
{
rest_init();
}
static void __init print_unknown_bootoptions(void)
{
char *unknown_options;
char *end;
const char *const *p;
size_t len;
if (panic_later || (!argv_init[1] && !envp_init[2]))
return;
/*
* Determine how many options we have to print out, plus a space
* before each
*/
len = 1; /* null terminator */
for (p = &argv_init[1]; *p; p++) {
len++;
len += strlen(*p);
}
for (p = &envp_init[2]; *p; p++) {
len++;
len += strlen(*p);
}
unknown_options = memblock_alloc(len, SMP_CACHE_BYTES);
if (!unknown_options) {
pr_err("%s: Failed to allocate %zu bytes\n",
__func__, len);
return;
}
end = unknown_options;
for (p = &argv_init[1]; *p; p++)
end += sprintf(end, " %s", *p);
for (p = &envp_init[2]; *p; p++)
end += sprintf(end, " %s", *p);
/* Start at unknown_options[1] to skip the initial space */
pr_notice("Unknown kernel command line parameters \"%s\", will be passed to user space.\n",
&unknown_options[1]);
memblock_free(unknown_options, len);
}
asmlinkage __visible __init __no_sanitize_address __noreturn __no_stack_protector
void start_kernel(void)
{
char *command_line;
char *after_dashes;
set_task_stack_end_magic(&init_task);
smp_setup_processor_id();
debug_objects_early_init();
init_vmlinux_build_id();
cgroup_init_early();
local_irq_disable();
early_boot_irqs_disabled = true;
/*
* Interrupts are still disabled. Do necessary setups, then
* enable them.
*/
boot_cpu_init();
page_address_init();
pr_notice("%s", linux_banner);
early_security_init();
setup_arch(&command_line);
setup_boot_config();
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
boot_cpu_hotplug_init();
pr_notice("Kernel command line: %s\n", saved_command_line);
/* parameters may set static keys */
jump_label_init();
parse_early_param();
after_dashes = parse_args("Booting kernel",
static_command_line, __start___param,
__stop___param - __start___param,
-1, -1, NULL, &unknown_bootoption);
print_unknown_bootoptions();
if (!IS_ERR_OR_NULL(after_dashes))
parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
NULL, set_init_arg);
if (extra_init_args)
parse_args("Setting extra init args", extra_init_args,
NULL, 0, -1, -1, NULL, set_init_arg);
/* Architectural and non-timekeeping rng init, before allocator init */
random_init_early(command_line);
/*
* These use large bootmem allocations and must precede
	 * initialization of the page allocator
*/
setup_log_buf(0);
vfs_caches_init_early();
sort_main_extable();
trap_init();
mm_core_init();
poking_init();
ftrace_init();
/* trace_printk can be enabled here */
early_trace_init();
/*
	 * Set up the scheduler prior to starting any interrupts (such as the
* timer interrupt). Full topology setup happens at smp_init()
* time - but meanwhile we still have a functioning scheduler.
*/
sched_init();
if (WARN(!irqs_disabled(),
"Interrupts were enabled *very* early, fixing it\n"))
local_irq_disable();
radix_tree_init();
maple_tree_init();
/*
* Set up housekeeping before setting up workqueues to allow the unbound
* workqueue to take non-housekeeping into account.
*/
housekeeping_init();
/*
* Allow workqueue creation and work item queueing/cancelling
* early. Work item execution depends on kthreads and starts after
* workqueue_init().
*/
workqueue_init_early();
rcu_init();
/* Trace events are available after this */
trace_init();
if (initcall_debug)
initcall_debug_enable();
context_tracking_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
init_IRQ();
tick_init();
rcu_init_nohz();
init_timers();
srcu_init();
hrtimers_init();
softirq_init();
timekeeping_init();
time_init();
/* This must be after timekeeping is initialized */
random_init();
/* These make use of the fully initialized rng */
kfence_init();
boot_init_stack_canary();
perf_event_init();
profile_init();
call_function_init();
WARN(!irqs_disabled(), "Interrupts were enabled early\n");
early_boot_irqs_disabled = false;
local_irq_enable();
kmem_cache_init_late();
/*
* HACK ALERT! This is early. We're enabling the console before
* we've done PCI setups etc, and console_init() must be aware of
* this. But we do want output early, in case something goes wrong.
*/
console_init();
if (panic_later)
panic("Too many boot %s vars at `%s'", panic_later,
panic_param);
lockdep_init();
/*
* Need to run this when irqs are enabled, because it wants
* to self-test [hard/soft]-irqs on/off lock inversion bugs
* too:
*/
locking_selftest();
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start && !initrd_below_start_ok &&
page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
page_to_pfn(virt_to_page((void *)initrd_start)),
min_low_pfn);
initrd_start = 0;
}
#endif
setup_per_cpu_pageset();
numa_policy_init();
acpi_early_init();
if (late_time_init)
late_time_init();
sched_clock_init();
calibrate_delay();
arch_cpu_finalize_init();
pid_idr_init();
anon_vma_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
#endif
thread_stack_cache_init();
cred_init();
fork_init();
proc_caches_init();
uts_ns_init();
key_init();
security_init();
dbg_late_init();
net_ns_init();
vfs_caches_init();
pagecache_init();
signals_init();
seq_file_init();
proc_root_init();
nsfs_init();
cpuset_init();
cgroup_init();
taskstats_init_early();
delayacct_init();
acpi_subsystem_init();
arch_post_acpi_subsys_init();
kcsan_init();
/* Do the rest non-__init'ed, we're now alive */
arch_call_rest_init();
/*
* Avoid stack canaries in callers of boot_init_stack_canary for gcc-10
* and older.
*/
#if !__has_attribute(__no_stack_protector__)
prevent_tail_call_optimization();
#endif
}
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
/*
* For UML, the constructors have already been called by the
* normal setup code as it's just a normal ELF binary, so we
* cannot do it again - but we do need CONFIG_CONSTRUCTORS
* even on UML for modules.
*/
#if defined(CONFIG_CONSTRUCTORS) && !defined(CONFIG_UML)
ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
for (; fn < (ctor_fn_t *) __ctors_end; fn++)
(*fn)();
#endif
}
#ifdef CONFIG_KALLSYMS
struct blacklist_entry {
struct list_head next;
char *buf;
};
static __initdata_or_module LIST_HEAD(blacklisted_initcalls);
static int __init initcall_blacklist(char *str)
{
char *str_entry;
struct blacklist_entry *entry;
/* str argument is a comma-separated list of functions */
do {
str_entry = strsep(&str, ",");
if (str_entry) {
pr_debug("blacklisting initcall %s\n", str_entry);
entry = memblock_alloc(sizeof(*entry),
SMP_CACHE_BYTES);
if (!entry)
panic("%s: Failed to allocate %zu bytes\n",
__func__, sizeof(*entry));
entry->buf = memblock_alloc(strlen(str_entry) + 1,
SMP_CACHE_BYTES);
if (!entry->buf)
panic("%s: Failed to allocate %zu bytes\n",
__func__, strlen(str_entry) + 1);
strcpy(entry->buf, str_entry);
list_add(&entry->next, &blacklisted_initcalls);
}
} while (str_entry);
return 1;
}
static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
struct blacklist_entry *entry;
char fn_name[KSYM_SYMBOL_LEN];
unsigned long addr;
if (list_empty(&blacklisted_initcalls))
return false;
addr = (unsigned long) dereference_function_descriptor(fn);
sprint_symbol_no_offset(fn_name, addr);
/*
	 * fn_name will be "function_name [module_name]" where [module_name] is not
* displayed for built-in init functions. Strip off the [module_name].
*/
strreplace(fn_name, ' ', '\0');
list_for_each_entry(entry, &blacklisted_initcalls, next) {
if (!strcmp(fn_name, entry->buf)) {
pr_debug("initcall %s blacklisted\n", fn_name);
return true;
}
}
return false;
}
#else
static int __init initcall_blacklist(char *str)
{
pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
return 0;
}
static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
return false;
}
#endif
__setup("initcall_blacklist=", initcall_blacklist);
static __init_or_module void
trace_initcall_start_cb(void *data, initcall_t fn)
{
ktime_t *calltime = data;
printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}
static __init_or_module void
trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
{
ktime_t rettime, *calltime = data;
rettime = ktime_get();
printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
fn, ret, (unsigned long long)ktime_us_delta(rettime, *calltime));
}
static ktime_t initcall_calltime;
#ifdef TRACEPOINTS_ENABLED
static void __init initcall_debug_enable(void)
{
int ret;
ret = register_trace_initcall_start(trace_initcall_start_cb,
&initcall_calltime);
ret |= register_trace_initcall_finish(trace_initcall_finish_cb,
&initcall_calltime);
WARN(ret, "Failed to register initcall tracepoints\n");
}
# define do_trace_initcall_start trace_initcall_start
# define do_trace_initcall_finish trace_initcall_finish
#else
static inline void do_trace_initcall_start(initcall_t fn)
{
if (!initcall_debug)
return;
trace_initcall_start_cb(&initcall_calltime, fn);
}
static inline void do_trace_initcall_finish(initcall_t fn, int ret)
{
if (!initcall_debug)
return;
trace_initcall_finish_cb(&initcall_calltime, fn, ret);
}
#endif /* !TRACEPOINTS_ENABLED */
int __init_or_module do_one_initcall(initcall_t fn)
{
int count = preempt_count();
char msgbuf[64];
int ret;
if (initcall_blacklisted(fn))
return -EPERM;
do_trace_initcall_start(fn);
ret = fn();
do_trace_initcall_finish(fn, ret);
msgbuf[0] = 0;
if (preempt_count() != count) {
sprintf(msgbuf, "preemption imbalance ");
preempt_count_set(count);
}
if (irqs_disabled()) {
strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
local_irq_enable();
}
WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);
add_latent_entropy();
return ret;
}
static initcall_entry_t *initcall_levels[] __initdata = {
__initcall0_start,
__initcall1_start,
__initcall2_start,
__initcall3_start,
__initcall4_start,
__initcall5_start,
__initcall6_start,
__initcall7_start,
__initcall_end,
};
/* Keep these in sync with initcalls in include/linux/init.h */
static const char *initcall_level_names[] __initdata = {
"pure",
"core",
"postcore",
"arch",
"subsys",
"fs",
"device",
"late",
};
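/*
 * Illustrative sketch: how an initcall enters one of the level tables
 * above.  device_initcall() places the function in the "device"
 * (level 6) section that do_initcall_level() walks; the function name
 * is hypothetical, the macro is the real <linux/init.h> one.
 */
static int __init example_driver_init(void)
{
	pr_info("example driver up\n");
	return 0;	/* a non-zero return is reported by do_one_initcall() */
}
device_initcall(example_driver_init);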
static int __init ignore_unknown_bootoption(char *param, char *val,
const char *unused, void *arg)
{
return 0;
}
static void __init do_initcall_level(int level, char *command_line)
{
initcall_entry_t *fn;
parse_args(initcall_level_names[level],
command_line, __start___param,
__stop___param - __start___param,
level, level,
NULL, ignore_unknown_bootoption);
trace_initcall_level(initcall_level_names[level]);
for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
do_one_initcall(initcall_from_entry(fn));
}
static void __init do_initcalls(void)
{
int level;
size_t len = saved_command_line_len + 1;
char *command_line;
command_line = kzalloc(len, GFP_KERNEL);
if (!command_line)
panic("%s: Failed to allocate %zu bytes\n", __func__, len);
for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
/* Parser modifies command_line, restore it each time */
strcpy(command_line, saved_command_line);
do_initcall_level(level, command_line);
}
kfree(command_line);
}
/*
* Ok, the machine is now initialized. None of the devices
* have been touched yet, but the CPU subsystem is up and
* running, and memory and process management works.
*
* Now we can finally start doing some real work..
*/
static void __init do_basic_setup(void)
{
cpuset_init_smp();
driver_init();
init_irq_proc();
do_ctors();
do_initcalls();
}
static void __init do_pre_smp_initcalls(void)
{
initcall_entry_t *fn;
trace_initcall_level("early");
for (fn = __initcall_start; fn < __initcall0_start; fn++)
do_one_initcall(initcall_from_entry(fn));
}
static int run_init_process(const char *init_filename)
{
const char *const *p;
argv_init[0] = init_filename;
pr_info("Run %s as init process\n", init_filename);
pr_debug(" with arguments:\n");
for (p = argv_init; *p; p++)
pr_debug(" %s\n", *p);
pr_debug(" with environment:\n");
for (p = envp_init; *p; p++)
pr_debug(" %s\n", *p);
return kernel_execve(init_filename, argv_init, envp_init);
}
static int try_to_run_init_process(const char *init_filename)
{
int ret;
ret = run_init_process(init_filename);
if (ret && ret != -ENOENT) {
pr_err("Starting init: %s exists but couldn't execute it (error %d)\n",
init_filename, ret);
}
return ret;
}
static noinline void __init kernel_init_freeable(void);
#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
bool rodata_enabled __ro_after_init = true;
#ifndef arch_parse_debug_rodata
static inline bool arch_parse_debug_rodata(char *str) { return false; }
#endif
static int __init set_debug_rodata(char *str)
{
if (arch_parse_debug_rodata(str))
return 0;
if (str && !strcmp(str, "on"))
rodata_enabled = true;
else if (str && !strcmp(str, "off"))
rodata_enabled = false;
else
pr_warn("Invalid option string for rodata: '%s'\n", str);
return 0;
}
early_param("rodata", set_debug_rodata);
#endif
#ifdef CONFIG_STRICT_KERNEL_RWX
static void mark_readonly(void)
{
if (rodata_enabled) {
/*
* load_module() results in W+X mappings, which are cleaned
* up with call_rcu(). Let's make sure that queued work is
* flushed so that we don't hit false positives looking for
* insecure pages which are W+X.
*/
rcu_barrier();
mark_rodata_ro();
rodata_test();
} else
pr_info("Kernel memory protection disabled.\n");
}
#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
static inline void mark_readonly(void)
{
pr_warn("Kernel memory protection not selected by kernel config.\n");
}
#else
static inline void mark_readonly(void)
{
pr_warn("This architecture does not have kernel memory protection.\n");
}
#endif
void __weak free_initmem(void)
{
free_initmem_default(POISON_FREE_INITMEM);
}
static int __ref kernel_init(void *unused)
{
int ret;
/*
* Wait until kthreadd is all set-up.
*/
wait_for_completion(&kthreadd_done);
kernel_init_freeable();
/* need to finish all async __init code before freeing the memory */
async_synchronize_full();
system_state = SYSTEM_FREEING_INITMEM;
kprobe_free_init_mem();
ftrace_free_init_mem();
kgdb_free_init_mem();
exit_boot_config();
free_initmem();
mark_readonly();
/*
* Kernel mappings are now finalized - update the userspace page-table
* to finalize PTI.
*/
pti_finalize();
system_state = SYSTEM_RUNNING;
numa_default_policy();
rcu_end_inkernel_boot();
do_sysctl_args();
if (ramdisk_execute_command) {
ret = run_init_process(ramdisk_execute_command);
if (!ret)
return 0;
pr_err("Failed to execute %s (error %d)\n",
ramdisk_execute_command, ret);
}
/*
* We try each of these until one succeeds.
*
* The Bourne shell can be used instead of init if we are
* trying to recover a really broken machine.
*/
if (execute_command) {
ret = run_init_process(execute_command);
if (!ret)
return 0;
panic("Requested init %s failed (error %d).",
execute_command, ret);
}
if (CONFIG_DEFAULT_INIT[0] != '\0') {
ret = run_init_process(CONFIG_DEFAULT_INIT);
if (ret)
pr_err("Default init %s failed (error %d)\n",
CONFIG_DEFAULT_INIT, ret);
else
return 0;
}
if (!try_to_run_init_process("/sbin/init") ||
!try_to_run_init_process("/etc/init") ||
!try_to_run_init_process("/bin/init") ||
!try_to_run_init_process("/bin/sh"))
return 0;
panic("No working init found. Try passing init= option to kernel. "
"See Linux Documentation/admin-guide/init.rst for guidance.");
}
/* Open /dev/console as stdin/stdout/stderr; this should never fail. */
void __init console_on_rootfs(void)
{
struct file *file = filp_open("/dev/console", O_RDWR, 0);
if (IS_ERR(file)) {
pr_err("Warning: unable to open an initial console.\n");
return;
}
init_dup(file);
init_dup(file);
init_dup(file);
fput(file);
}
static noinline void __init kernel_init_freeable(void)
{
/* Now the scheduler is fully set up and can do blocking allocations */
gfp_allowed_mask = __GFP_BITS_MASK;
/*
* init can allocate pages on any node
*/
set_mems_allowed(node_states[N_MEMORY]);
cad_pid = get_pid(task_pid(current));
smp_prepare_cpus(setup_max_cpus);
workqueue_init();
init_mm_internals();
rcu_init_tasks_generic();
do_pre_smp_initcalls();
lockup_detector_init();
smp_init();
sched_init_smp();
workqueue_init_topology();
padata_init();
page_alloc_init_late();
do_basic_setup();
kunit_run_all_tests();
wait_for_initramfs();
console_on_rootfs();
/*
	 * Check if there is an early userspace init. If yes, let it do all
* the work
*/
if (init_eaccess(ramdisk_execute_command) != 0) {
ramdisk_execute_command = NULL;
prepare_namespace();
}
/*
* Ok, we have completed the initial bootup, and
* we're essentially up and running. Get rid of the
* initmem segments and start the user-mode stuff..
*
* rootfs is available now, try loading the public keys
* and default modules
*/
integrity_load_keys();
}
| linux-master | init/main.c |
// SPDX-License-Identifier: GPL-2.0
/* calibrate.c: default delay calibration
*
* Excised from init/main.c
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/timex.h>
#include <linux/smp.h>
#include <linux/percpu.h>
unsigned long lpj_fine;
unsigned long preset_lpj;
static int __init lpj_setup(char *str)
{
preset_lpj = simple_strtoul(str,NULL,0);
return 1;
}
__setup("lpj=", lpj_setup);
#ifdef ARCH_HAS_READ_CURRENT_TIMER
/* This routine uses read_current_timer() and gets the
 * loops per jiffy directly, instead of guessing it using delay().
 * Also, this code tries to handle non-maskable asynchronous events
 * (like SMIs).
*/
#define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100))
#define MAX_DIRECT_CALIBRATION_RETRIES 5
static unsigned long calibrate_delay_direct(void)
{
unsigned long pre_start, start, post_start;
unsigned long pre_end, end, post_end;
unsigned long start_jiffies;
unsigned long timer_rate_min, timer_rate_max;
unsigned long good_timer_sum = 0;
unsigned long good_timer_count = 0;
unsigned long measured_times[MAX_DIRECT_CALIBRATION_RETRIES];
int max = -1; /* index of measured_times with max/min values or not set */
int min = -1;
int i;
if (read_current_timer(&pre_start) < 0 )
return 0;
/*
* A simple loop like
* while ( jiffies < start_jiffies+1)
* start = read_current_timer();
	 * will not do, as we don't really know whether the jiffy switch
	 * happened first or the timer value was read first, and some asynchronous
	 * event can happen between these two events, introducing errors in lpj.
*
* So, we do
* 1. pre_start <- When we are sure that jiffy switch hasn't happened
* 2. check jiffy switch
* 3. start <- timer value before or after jiffy switch
* 4. post_start <- When we are sure that jiffy switch has happened
*
* Note, we don't know anything about order of 2 and 3.
* Now, by looking at post_start and pre_start difference, we can
* check whether any asynchronous event happened or not
*/
for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
pre_start = 0;
read_current_timer(&start);
start_jiffies = jiffies;
while (time_before_eq(jiffies, start_jiffies + 1)) {
pre_start = start;
read_current_timer(&start);
}
read_current_timer(&post_start);
pre_end = 0;
end = post_start;
while (time_before_eq(jiffies, start_jiffies + 1 +
DELAY_CALIBRATION_TICKS)) {
pre_end = end;
read_current_timer(&end);
}
read_current_timer(&post_end);
timer_rate_max = (post_end - pre_start) /
DELAY_CALIBRATION_TICKS;
timer_rate_min = (pre_end - post_start) /
DELAY_CALIBRATION_TICKS;
/*
		 * If the upper and lower limits of the timer_rate are
		 * >= 12.5% apart, redo the calibration.
*/
if (start >= post_end)
printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
"timer_rate as we had a TSC wrap around"
" start=%lu >=post_end=%lu\n",
start, post_end);
if (start < post_end && pre_start != 0 && pre_end != 0 &&
(timer_rate_max - timer_rate_min) < (timer_rate_max >> 3)) {
good_timer_count++;
good_timer_sum += timer_rate_max;
measured_times[i] = timer_rate_max;
if (max < 0 || timer_rate_max > measured_times[max])
max = i;
if (min < 0 || timer_rate_max < measured_times[min])
min = i;
} else
measured_times[i] = 0;
}
/*
* Find the maximum & minimum - if they differ too much throw out the
* one with the largest difference from the mean and try again...
*/
while (good_timer_count > 1) {
unsigned long estimate;
unsigned long maxdiff;
/* compute the estimate */
estimate = (good_timer_sum/good_timer_count);
maxdiff = estimate >> 3;
		/* if the range is within 12.5% (estimate >> 3), take it */
if ((measured_times[max] - measured_times[min]) < maxdiff)
return estimate;
/* ok - drop the worse value and try again... */
good_timer_sum = 0;
good_timer_count = 0;
if ((measured_times[max] - estimate) <
(estimate - measured_times[min])) {
printk(KERN_NOTICE "calibrate_delay_direct() dropping "
"min bogoMips estimate %d = %lu\n",
min, measured_times[min]);
measured_times[min] = 0;
min = max;
} else {
printk(KERN_NOTICE "calibrate_delay_direct() dropping "
"max bogoMips estimate %d = %lu\n",
max, measured_times[max]);
measured_times[max] = 0;
max = min;
}
for (i = 0; i < MAX_DIRECT_CALIBRATION_RETRIES; i++) {
if (measured_times[i] == 0)
continue;
good_timer_count++;
good_timer_sum += measured_times[i];
if (measured_times[i] < measured_times[min])
min = i;
if (measured_times[i] > measured_times[max])
max = i;
}
}
printk(KERN_NOTICE "calibrate_delay_direct() failed to get a good "
"estimate for loops_per_jiffy.\nProbably due to long platform "
"interrupts. Consider using \"lpj=\" boot option.\n");
return 0;
}
#else
static unsigned long calibrate_delay_direct(void)
{
return 0;
}
#endif
/*
* This is the number of bits of precision for the loops_per_jiffy. Each
* time we refine our estimate after the first takes 1.5/HZ seconds, so try
* to start with a good estimate.
* For the boot cpu we can skip the delay calibration and assign it a value
* calculated based on the timer frequency.
 * For the rest of the CPUs we cannot assume that the timer frequency is the
 * same as the cpu frequency, hence do the calibration for those.
*/
#define LPS_PREC 8
static unsigned long calibrate_delay_converge(void)
{
/* First stage - slowly accelerate to find initial bounds */
unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit;
int trials = 0, band = 0, trial_in_band = 0;
lpj = (1<<12);
/* wait for "start of" clock tick */
ticks = jiffies;
while (ticks == jiffies)
; /* nothing */
/* Go .. */
ticks = jiffies;
do {
if (++trial_in_band == (1<<band)) {
++band;
trial_in_band = 0;
}
__delay(lpj * band);
trials += band;
} while (ticks == jiffies);
/*
* We overshot, so retreat to a clear underestimate. Then estimate
* the largest likely undershoot. This defines our chop bounds.
*/
trials -= band;
loopadd_base = lpj * band;
lpj_base = lpj * trials;
recalibrate:
lpj = lpj_base;
loopadd = loopadd_base;
/*
* Do a binary approximation to get lpj set to
* equal one clock (up to LPS_PREC bits)
*/
chop_limit = lpj >> LPS_PREC;
while (loopadd > chop_limit) {
lpj += loopadd;
ticks = jiffies;
while (ticks == jiffies)
; /* nothing */
ticks = jiffies;
__delay(lpj);
if (jiffies != ticks) /* longer than 1 tick */
lpj -= loopadd;
loopadd >>= 1;
}
/*
* If we incremented every single time possible, presume we've
* massively underestimated initially, and retry with a higher
* start, and larger range. (Only seen on x86_64, due to SMIs)
*/
if (lpj + loopadd * 2 == lpj_base + loopadd_base * 2) {
lpj_base = lpj;
loopadd_base <<= 2;
goto recalibrate;
}
return lpj;
}
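/*
 * Illustrative sketch (userspace, not kernel code) of the same
 * successive-approximation idea as the binary chop above: find the
 * largest x with cost(x) <= budget by halving the step.  cost() is a
 * stand-in for timing __delay() against a jiffy, not a reimplementation.
 */
#include <stdio.h>

static unsigned long cost(unsigned long x) { return x; }

int main(void)
{
	unsigned long budget = 100000, x = 0, step = 1UL << 16;

	while (step) {
		if (cost(x + step) <= budget)
			x += step;
		step >>= 1;
	}
	printf("largest x within budget: %lu\n", x);	/* prints 100000 */
	return 0;
}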
static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 };
/*
* Check if cpu calibration delay is already known. For example,
* some processors with multi-core sockets may have all cores
* with the same calibration delay.
*
* Architectures should override this function if a faster calibration
* method is available.
*/
unsigned long __attribute__((weak)) calibrate_delay_is_known(void)
{
return 0;
}
/*
* Indicate the cpu delay calibration is done. This can be used by
* architectures to stop accepting delay timer registrations after this point.
*/
void __attribute__((weak)) calibration_delay_done(void)
{
}
void calibrate_delay(void)
{
unsigned long lpj;
static bool printed;
int this_cpu = smp_processor_id();
if (per_cpu(cpu_loops_per_jiffy, this_cpu)) {
lpj = per_cpu(cpu_loops_per_jiffy, this_cpu);
if (!printed)
pr_info("Calibrating delay loop (skipped) "
"already calibrated this CPU");
} else if (preset_lpj) {
lpj = preset_lpj;
if (!printed)
pr_info("Calibrating delay loop (skipped) "
"preset value.. ");
} else if ((!printed) && lpj_fine) {
lpj = lpj_fine;
pr_info("Calibrating delay loop (skipped), "
"value calculated using timer frequency.. ");
} else if ((lpj = calibrate_delay_is_known())) {
;
} else if ((lpj = calibrate_delay_direct()) != 0) {
if (!printed)
pr_info("Calibrating delay using timer "
"specific routine.. ");
} else {
if (!printed)
pr_info("Calibrating delay loop... ");
lpj = calibrate_delay_converge();
}
per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj;
if (!printed)
pr_cont("%lu.%02lu BogoMIPS (lpj=%lu)\n",
lpj/(500000/HZ),
(lpj/(5000/HZ)) % 100, lpj);
loops_per_jiffy = lpj;
printed = true;
calibration_delay_done();
}
| linux-master | init/calibrate.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* linux/init/version.c
*
* Copyright (C) 1992 Theodore Ts'o
*
* May be freely distributed as part of Linux.
*/
#include <generated/compile.h>
#include <linux/build-salt.h>
#include <linux/elfnote-lto.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/proc_ns.h>
static int __init early_hostname(char *arg)
{
size_t bufsize = sizeof(init_uts_ns.name.nodename);
size_t maxlen = bufsize - 1;
size_t arglen;
arglen = strlcpy(init_uts_ns.name.nodename, arg, bufsize);
if (arglen > maxlen) {
pr_warn("hostname parameter exceeds %zd characters and will be truncated",
maxlen);
}
return 0;
}
early_param("hostname", early_hostname);
const char linux_proc_banner[] =
"%s version %s"
" (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ")"
" (" LINUX_COMPILER ") %s\n";
BUILD_SALT;
BUILD_LTO_INFO;
/*
* init_uts_ns and linux_banner contain the build version and timestamp,
* which are really fixed at the very last step of build process.
* They are compiled with __weak first, and without __weak later.
*/
struct uts_namespace init_uts_ns __weak;
const char linux_banner[] __weak;
#include "version-timestamp.c"
EXPORT_SYMBOL_GPL(init_uts_ns);
| linux-master | init/version.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* init/noinitramfs.c
*
* Copyright (C) 2006, NXP Semiconductors, All Rights Reserved
* Author: Jean-Paul Saman <[email protected]>
*/
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/kdev_t.h>
#include <linux/syscalls.h>
#include <linux/init_syscalls.h>
#include <linux/umh.h>
/*
* Create a simple rootfs that is similar to the default initramfs
*/
static int __init default_rootfs(void)
{
int err;
usermodehelper_enable();
err = init_mkdir("/dev", 0755);
if (err < 0)
goto out;
err = init_mknod("/dev/console", S_IFCHR | S_IRUSR | S_IWUSR,
new_encode_dev(MKDEV(5, 1)));
if (err < 0)
goto out;
err = init_mkdir("/root", 0700);
if (err < 0)
goto out;
return 0;
out:
printk(KERN_WARNING "Failed to create a rootfs\n");
return err;
}
rootfs_initcall(default_rootfs);
| linux-master | init/noinitramfs.c |
/*
* Cryptographic API.
*
* T10 Data Integrity Field CRC16 Crypto Transform
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <[email protected]>
* Copyright (C) 2013 Intel Corporation
* Author: Tim Chen <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/crc-t10dif.h>
#include <linux/module.h>
#include <linux/kernel.h>
/* Table generated using the following polynomial:
* x^16 + x^15 + x^11 + x^9 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1
* gt: 0x8bb7
*/
static const __u16 t10_dif_crc_table[256] = {
0x0000, 0x8BB7, 0x9CD9, 0x176E, 0xB205, 0x39B2, 0x2EDC, 0xA56B,
0xEFBD, 0x640A, 0x7364, 0xF8D3, 0x5DB8, 0xD60F, 0xC161, 0x4AD6,
0x54CD, 0xDF7A, 0xC814, 0x43A3, 0xE6C8, 0x6D7F, 0x7A11, 0xF1A6,
0xBB70, 0x30C7, 0x27A9, 0xAC1E, 0x0975, 0x82C2, 0x95AC, 0x1E1B,
0xA99A, 0x222D, 0x3543, 0xBEF4, 0x1B9F, 0x9028, 0x8746, 0x0CF1,
0x4627, 0xCD90, 0xDAFE, 0x5149, 0xF422, 0x7F95, 0x68FB, 0xE34C,
0xFD57, 0x76E0, 0x618E, 0xEA39, 0x4F52, 0xC4E5, 0xD38B, 0x583C,
0x12EA, 0x995D, 0x8E33, 0x0584, 0xA0EF, 0x2B58, 0x3C36, 0xB781,
0xD883, 0x5334, 0x445A, 0xCFED, 0x6A86, 0xE131, 0xF65F, 0x7DE8,
0x373E, 0xBC89, 0xABE7, 0x2050, 0x853B, 0x0E8C, 0x19E2, 0x9255,
0x8C4E, 0x07F9, 0x1097, 0x9B20, 0x3E4B, 0xB5FC, 0xA292, 0x2925,
0x63F3, 0xE844, 0xFF2A, 0x749D, 0xD1F6, 0x5A41, 0x4D2F, 0xC698,
0x7119, 0xFAAE, 0xEDC0, 0x6677, 0xC31C, 0x48AB, 0x5FC5, 0xD472,
0x9EA4, 0x1513, 0x027D, 0x89CA, 0x2CA1, 0xA716, 0xB078, 0x3BCF,
0x25D4, 0xAE63, 0xB90D, 0x32BA, 0x97D1, 0x1C66, 0x0B08, 0x80BF,
0xCA69, 0x41DE, 0x56B0, 0xDD07, 0x786C, 0xF3DB, 0xE4B5, 0x6F02,
0x3AB1, 0xB106, 0xA668, 0x2DDF, 0x88B4, 0x0303, 0x146D, 0x9FDA,
0xD50C, 0x5EBB, 0x49D5, 0xC262, 0x6709, 0xECBE, 0xFBD0, 0x7067,
0x6E7C, 0xE5CB, 0xF2A5, 0x7912, 0xDC79, 0x57CE, 0x40A0, 0xCB17,
0x81C1, 0x0A76, 0x1D18, 0x96AF, 0x33C4, 0xB873, 0xAF1D, 0x24AA,
0x932B, 0x189C, 0x0FF2, 0x8445, 0x212E, 0xAA99, 0xBDF7, 0x3640,
0x7C96, 0xF721, 0xE04F, 0x6BF8, 0xCE93, 0x4524, 0x524A, 0xD9FD,
0xC7E6, 0x4C51, 0x5B3F, 0xD088, 0x75E3, 0xFE54, 0xE93A, 0x628D,
0x285B, 0xA3EC, 0xB482, 0x3F35, 0x9A5E, 0x11E9, 0x0687, 0x8D30,
0xE232, 0x6985, 0x7EEB, 0xF55C, 0x5037, 0xDB80, 0xCCEE, 0x4759,
0x0D8F, 0x8638, 0x9156, 0x1AE1, 0xBF8A, 0x343D, 0x2353, 0xA8E4,
0xB6FF, 0x3D48, 0x2A26, 0xA191, 0x04FA, 0x8F4D, 0x9823, 0x1394,
0x5942, 0xD2F5, 0xC59B, 0x4E2C, 0xEB47, 0x60F0, 0x779E, 0xFC29,
0x4BA8, 0xC01F, 0xD771, 0x5CC6, 0xF9AD, 0x721A, 0x6574, 0xEEC3,
0xA415, 0x2FA2, 0x38CC, 0xB37B, 0x1610, 0x9DA7, 0x8AC9, 0x017E,
0x1F65, 0x94D2, 0x83BC, 0x080B, 0xAD60, 0x26D7, 0x31B9, 0xBA0E,
0xF0D8, 0x7B6F, 0x6C01, 0xE7B6, 0x42DD, 0xC96A, 0xDE04, 0x55B3
};
__u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer, size_t len)
{
unsigned int i;
for (i = 0 ; i < len ; i++)
crc = (crc << 8) ^ t10_dif_crc_table[((crc >> 8) ^ buffer[i]) & 0xff];
return crc;
}
EXPORT_SYMBOL(crc_t10dif_generic);
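/*
 * Illustrative sketch: deriving one entry of t10_dif_crc_table[] above
 * bit-by-bit from the polynomial 0x8bb7.  A single table lookup in
 * crc_t10dif_generic() replaces these eight shift/xor steps;
 * example_crc_byte(0, 0x01) yields 0x8bb7, matching table entry 1.
 */
static __u16 example_crc_byte(__u16 crc, unsigned char b)
{
	int i;

	crc ^= (__u16)b << 8;
	for (i = 0; i < 8; i++)
		crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
	return crc;
}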
MODULE_DESCRIPTION("T10 DIF CRC calculation common code");
MODULE_LICENSE("GPL");
| linux-master | crypto/crct10dif_common.c |
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
* Test interface for Jitter RNG.
*
* Copyright (C) 2023, Stephan Mueller <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "jitterentropy.h"
#define JENT_TEST_RINGBUFFER_SIZE (1<<10)
#define JENT_TEST_RINGBUFFER_MASK (JENT_TEST_RINGBUFFER_SIZE - 1)
struct jent_testing {
u32 jent_testing_rb[JENT_TEST_RINGBUFFER_SIZE];
u32 rb_reader;
atomic_t rb_writer;
atomic_t jent_testing_enabled;
spinlock_t lock;
wait_queue_head_t read_wait;
};
static struct dentry *jent_raw_debugfs_root = NULL;
/*************************** Generic Data Handling ****************************/
/*
* boot variable:
* 0 ==> No boot test, gathering of runtime data allowed
* 1 ==> Boot test enabled and ready for collecting data, gathering runtime
* data is disabled
* 2 ==> Boot test completed and disabled, gathering of runtime data is
* disabled
*/
static void jent_testing_reset(struct jent_testing *data)
{
unsigned long flags;
spin_lock_irqsave(&data->lock, flags);
data->rb_reader = 0;
atomic_set(&data->rb_writer, 0);
spin_unlock_irqrestore(&data->lock, flags);
}
static void jent_testing_data_init(struct jent_testing *data, u32 boot)
{
/*
* The boot time testing implies we have a running test. If the
* caller wants to clear it, he has to unset the boot_test flag
* at runtime via sysfs to enable regular runtime testing
*/
if (boot)
return;
jent_testing_reset(data);
atomic_set(&data->jent_testing_enabled, 1);
pr_warn("Enabling data collection\n");
}
static void jent_testing_fini(struct jent_testing *data, u32 boot)
{
/* If we have boot data, we do not reset yet to allow data to be read */
if (boot)
return;
atomic_set(&data->jent_testing_enabled, 0);
jent_testing_reset(data);
pr_warn("Disabling data collection\n");
}
static bool jent_testing_store(struct jent_testing *data, u32 value,
u32 *boot)
{
unsigned long flags;
if (!atomic_read(&data->jent_testing_enabled) && (*boot != 1))
return false;
spin_lock_irqsave(&data->lock, flags);
/*
* Disable entropy testing for boot time testing after ring buffer
* is filled.
*/
if (*boot) {
if (((u32)atomic_read(&data->rb_writer)) >
JENT_TEST_RINGBUFFER_SIZE) {
*boot = 2;
pr_warn_once("One time data collection test disabled\n");
spin_unlock_irqrestore(&data->lock, flags);
return false;
}
if (atomic_read(&data->rb_writer) == 1)
pr_warn("One time data collection test enabled\n");
}
data->jent_testing_rb[((u32)atomic_read(&data->rb_writer)) &
JENT_TEST_RINGBUFFER_MASK] = value;
atomic_inc(&data->rb_writer);
spin_unlock_irqrestore(&data->lock, flags);
if (wq_has_sleeper(&data->read_wait))
wake_up_interruptible(&data->read_wait);
return true;
}
static bool jent_testing_have_data(struct jent_testing *data)
{
return ((((u32)atomic_read(&data->rb_writer)) &
JENT_TEST_RINGBUFFER_MASK) !=
(data->rb_reader & JENT_TEST_RINGBUFFER_MASK));
}
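/*
 * Illustrative sketch (userspace, not kernel code) of the power-of-two
 * wraparound arithmetic used above: writer and reader are free-running
 * counters, masking maps them into the ring, and data is pending
 * whenever the masked positions differ.
 */
#include <stdio.h>

#define EX_SIZE	(1 << 10)
#define EX_MASK	(EX_SIZE - 1)

int main(void)
{
	unsigned int writer = EX_SIZE + 5;	/* wrapped once, now at slot 5 */
	unsigned int reader = 3;

	printf("writer slot=%u, have_data=%d\n", writer & EX_MASK,
	       (writer & EX_MASK) != (reader & EX_MASK));	/* 5, 1 */
	return 0;
}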
static int jent_testing_reader(struct jent_testing *data, u32 *boot,
u8 *outbuf, u32 outbuflen)
{
unsigned long flags;
int collected_data = 0;
jent_testing_data_init(data, *boot);
while (outbuflen) {
u32 writer = (u32)atomic_read(&data->rb_writer);
spin_lock_irqsave(&data->lock, flags);
/* We have no data or reached the writer. */
if (!writer || (writer == data->rb_reader)) {
spin_unlock_irqrestore(&data->lock, flags);
/*
* Now we gathered all boot data, enable regular data
* collection.
*/
if (*boot) {
*boot = 0;
goto out;
}
wait_event_interruptible(data->read_wait,
jent_testing_have_data(data));
if (signal_pending(current)) {
collected_data = -ERESTARTSYS;
goto out;
}
continue;
}
/* We copy out word-wise */
if (outbuflen < sizeof(u32)) {
spin_unlock_irqrestore(&data->lock, flags);
goto out;
}
memcpy(outbuf, &data->jent_testing_rb[data->rb_reader],
sizeof(u32));
data->rb_reader++;
spin_unlock_irqrestore(&data->lock, flags);
outbuf += sizeof(u32);
outbuflen -= sizeof(u32);
collected_data += sizeof(u32);
}
out:
jent_testing_fini(data, *boot);
return collected_data;
}
static int jent_testing_extract_user(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos,
int (*reader)(u8 *outbuf, u32 outbuflen))
{
u8 *tmp, *tmp_aligned;
int ret = 0, large_request = (nbytes > 256);
if (!nbytes)
return 0;
/*
* The intention of this interface is for collecting at least
	 * 1000 samples due to the SP800-90B requirements. So we make no
	 * effort to avoid allocating more memory than the user actually
	 * needs. Hence, we allocate sufficient memory to always hold
* that amount of data.
*/
tmp = kmalloc(JENT_TEST_RINGBUFFER_SIZE + sizeof(u32), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
tmp_aligned = PTR_ALIGN(tmp, sizeof(u32));
while (nbytes) {
int i;
if (large_request && need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
schedule();
}
i = min_t(int, nbytes, JENT_TEST_RINGBUFFER_SIZE);
i = reader(tmp_aligned, i);
if (i <= 0) {
if (i < 0)
ret = i;
break;
}
if (copy_to_user(buf, tmp_aligned, i)) {
ret = -EFAULT;
break;
}
nbytes -= i;
buf += i;
ret += i;
}
kfree_sensitive(tmp);
if (ret > 0)
*ppos += ret;
return ret;
}
/************** Raw High-Resolution Timer Entropy Data Handling **************/
static u32 boot_raw_hires_test = 0;
module_param(boot_raw_hires_test, uint, 0644);
MODULE_PARM_DESC(boot_raw_hires_test,
"Enable gathering boot time high resolution timer entropy of the first Jitter RNG entropy events");
static struct jent_testing jent_raw_hires = {
.rb_reader = 0,
.rb_writer = ATOMIC_INIT(0),
.lock = __SPIN_LOCK_UNLOCKED(jent_raw_hires.lock),
.read_wait = __WAIT_QUEUE_HEAD_INITIALIZER(jent_raw_hires.read_wait)
};
int jent_raw_hires_entropy_store(__u32 value)
{
return jent_testing_store(&jent_raw_hires, value, &boot_raw_hires_test);
}
EXPORT_SYMBOL(jent_raw_hires_entropy_store);
static int jent_raw_hires_entropy_reader(u8 *outbuf, u32 outbuflen)
{
return jent_testing_reader(&jent_raw_hires, &boot_raw_hires_test,
outbuf, outbuflen);
}
static ssize_t jent_raw_hires_read(struct file *file, char __user *to,
size_t count, loff_t *ppos)
{
return jent_testing_extract_user(file, to, count, ppos,
jent_raw_hires_entropy_reader);
}
static const struct file_operations jent_raw_hires_fops = {
.owner = THIS_MODULE,
.read = jent_raw_hires_read,
};
/******************************* Initialization *******************************/
void jent_testing_init(void)
{
jent_raw_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
debugfs_create_file_unsafe("jent_raw_hires", 0400,
jent_raw_debugfs_root, NULL,
&jent_raw_hires_fops);
}
EXPORT_SYMBOL(jent_testing_init);
void jent_testing_exit(void)
{
debugfs_remove_recursive(jent_raw_debugfs_root);
}
EXPORT_SYMBOL(jent_testing_exit);
| linux-master | crypto/jitterentropy-testing.c |
/* FCrypt encryption algorithm
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Based on code:
*
* Copyright (c) 1995 - 2000 Kungliga Tekniska Högskolan
* (Royal Institute of Technology, Stockholm, Sweden).
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the name of the Institute nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <asm/byteorder.h>
#include <crypto/algapi.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/module.h>
#define ROUNDS 16
struct fcrypt_ctx {
__be32 sched[ROUNDS];
};
/* Rotate right two 32 bit numbers as a 56 bit number */
#define ror56(hi, lo, n) \
do { \
u32 t = lo & ((1 << n) - 1); \
lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
hi = (hi >> n) | (t << (24-n)); \
} while (0)
/* Rotate right one 64 bit number as a 56 bit number */
#define ror56_64(k, n) (k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)))
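/*
 * Worked example (illustrative): ror56_64(k, 8) on the 56-bit value
 * k = 0x00aabbccddeeff11 rotates the low byte 0x11 up to bit 48, giving
 * 0x0011aabbccddeeff.
 */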
/*
* Sboxes for Feistel network derived from
* /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
*/
#undef Z
#define Z(x) cpu_to_be32(x << 3)
static const __be32 sbox0[256] = {
Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),
Z(0x0e), Z(0x06), Z(0xd2), Z(0x65), Z(0x73), Z(0xc5), Z(0x28), Z(0x60),
Z(0xf2), Z(0x20), Z(0xb5), Z(0x38), Z(0x7e), Z(0xda), Z(0x9f), Z(0xe3),
Z(0xd2), Z(0xcf), Z(0xc4), Z(0x3c), Z(0x61), Z(0xff), Z(0x4a), Z(0x4a),
Z(0x35), Z(0xac), Z(0xaa), Z(0x5f), Z(0x2b), Z(0xbb), Z(0xbc), Z(0x53),
Z(0x4e), Z(0x9d), Z(0x78), Z(0xa3), Z(0xdc), Z(0x09), Z(0x32), Z(0x10),
Z(0xc6), Z(0x6f), Z(0x66), Z(0xd6), Z(0xab), Z(0xa9), Z(0xaf), Z(0xfd),
Z(0x3b), Z(0x95), Z(0xe8), Z(0x34), Z(0x9a), Z(0x81), Z(0x72), Z(0x80),
Z(0x9c), Z(0xf3), Z(0xec), Z(0xda), Z(0x9f), Z(0x26), Z(0x76), Z(0x15),
Z(0x3e), Z(0x55), Z(0x4d), Z(0xde), Z(0x84), Z(0xee), Z(0xad), Z(0xc7),
Z(0xf1), Z(0x6b), Z(0x3d), Z(0xd3), Z(0x04), Z(0x49), Z(0xaa), Z(0x24),
Z(0x0b), Z(0x8a), Z(0x83), Z(0xba), Z(0xfa), Z(0x85), Z(0xa0), Z(0xa8),
Z(0xb1), Z(0xd4), Z(0x01), Z(0xd8), Z(0x70), Z(0x64), Z(0xf0), Z(0x51),
Z(0xd2), Z(0xc3), Z(0xa7), Z(0x75), Z(0x8c), Z(0xa5), Z(0x64), Z(0xef),
Z(0x10), Z(0x4e), Z(0xb7), Z(0xc6), Z(0x61), Z(0x03), Z(0xeb), Z(0x44),
Z(0x3d), Z(0xe5), Z(0xb3), Z(0x5b), Z(0xae), Z(0xd5), Z(0xad), Z(0x1d),
Z(0xfa), Z(0x5a), Z(0x1e), Z(0x33), Z(0xab), Z(0x93), Z(0xa2), Z(0xb7),
Z(0xe7), Z(0xa8), Z(0x45), Z(0xa4), Z(0xcd), Z(0x29), Z(0x63), Z(0x44),
Z(0xb6), Z(0x69), Z(0x7e), Z(0x2e), Z(0x62), Z(0x03), Z(0xc8), Z(0xe0),
Z(0x17), Z(0xbb), Z(0xc7), Z(0xf3), Z(0x3f), Z(0x36), Z(0xba), Z(0x71),
Z(0x8e), Z(0x97), Z(0x65), Z(0x60), Z(0x69), Z(0xb6), Z(0xf6), Z(0xe6),
Z(0x6e), Z(0xe0), Z(0x81), Z(0x59), Z(0xe8), Z(0xaf), Z(0xdd), Z(0x95),
Z(0x22), Z(0x99), Z(0xfd), Z(0x63), Z(0x19), Z(0x74), Z(0x61), Z(0xb1),
Z(0xb6), Z(0x5b), Z(0xae), Z(0x54), Z(0xb3), Z(0x70), Z(0xff), Z(0xc6),
Z(0x3b), Z(0x3e), Z(0xc1), Z(0xd7), Z(0xe1), Z(0x0e), Z(0x76), Z(0xe5),
Z(0x36), Z(0x4f), Z(0x59), Z(0xc7), Z(0x08), Z(0x6e), Z(0x82), Z(0xa6),
Z(0x93), Z(0xc4), Z(0xaa), Z(0x26), Z(0x49), Z(0xe0), Z(0x21), Z(0x64),
Z(0x07), Z(0x9f), Z(0x64), Z(0x81), Z(0x9c), Z(0xbf), Z(0xf9), Z(0xd1),
Z(0x43), Z(0xf8), Z(0xb6), Z(0xb9), Z(0xf1), Z(0x24), Z(0x75), Z(0x03),
Z(0xe4), Z(0xb0), Z(0x99), Z(0x46), Z(0x3d), Z(0xf5), Z(0xd1), Z(0x39),
Z(0x72), Z(0x12), Z(0xf6), Z(0xba), Z(0x0c), Z(0x0d), Z(0x42), Z(0x2e)
};
#undef Z
#define Z(x) cpu_to_be32(((x & 0x1f) << 27) | (x >> 5))
static const __be32 sbox1[256] = {
Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
Z(0x6c), Z(0x7b), Z(0x67), Z(0xc6), Z(0x23), Z(0xe3), Z(0xf2), Z(0x89),
Z(0x50), Z(0x9c), Z(0x03), Z(0xb7), Z(0x73), Z(0xe6), Z(0xe1), Z(0x39),
Z(0x31), Z(0x2c), Z(0x27), Z(0x9f), Z(0xa5), Z(0x69), Z(0x44), Z(0xd6),
Z(0x23), Z(0x83), Z(0x98), Z(0x7d), Z(0x3c), Z(0xb4), Z(0x2d), Z(0x99),
Z(0x1c), Z(0x1f), Z(0x8c), Z(0x20), Z(0x03), Z(0x7c), Z(0x5f), Z(0xad),
Z(0xf4), Z(0xfa), Z(0x95), Z(0xca), Z(0x76), Z(0x44), Z(0xcd), Z(0xb6),
Z(0xb8), Z(0xa1), Z(0xa1), Z(0xbe), Z(0x9e), Z(0x54), Z(0x8f), Z(0x0b),
Z(0x16), Z(0x74), Z(0x31), Z(0x8a), Z(0x23), Z(0x17), Z(0x04), Z(0xfa),
Z(0x79), Z(0x84), Z(0xb1), Z(0xf5), Z(0x13), Z(0xab), Z(0xb5), Z(0x2e),
Z(0xaa), Z(0x0c), Z(0x60), Z(0x6b), Z(0x5b), Z(0xc4), Z(0x4b), Z(0xbc),
Z(0xe2), Z(0xaf), Z(0x45), Z(0x73), Z(0xfa), Z(0xc9), Z(0x49), Z(0xcd),
Z(0x00), Z(0x92), Z(0x7d), Z(0x97), Z(0x7a), Z(0x18), Z(0x60), Z(0x3d),
Z(0xcf), Z(0x5b), Z(0xde), Z(0xc6), Z(0xe2), Z(0xe6), Z(0xbb), Z(0x8b),
Z(0x06), Z(0xda), Z(0x08), Z(0x15), Z(0x1b), Z(0x88), Z(0x6a), Z(0x17),
Z(0x89), Z(0xd0), Z(0xa9), Z(0xc1), Z(0xc9), Z(0x70), Z(0x6b), Z(0xe5),
Z(0x43), Z(0xf4), Z(0x68), Z(0xc8), Z(0xd3), Z(0x84), Z(0x28), Z(0x0a),
Z(0x52), Z(0x66), Z(0xa3), Z(0xca), Z(0xf2), Z(0xe3), Z(0x7f), Z(0x7a),
Z(0x31), Z(0xf7), Z(0x88), Z(0x94), Z(0x5e), Z(0x9c), Z(0x63), Z(0xd5),
Z(0x24), Z(0x66), Z(0xfc), Z(0xb3), Z(0x57), Z(0x25), Z(0xbe), Z(0x89),
Z(0x44), Z(0xc4), Z(0xe0), Z(0x8f), Z(0x23), Z(0x3c), Z(0x12), Z(0x52),
Z(0xf5), Z(0x1e), Z(0xf4), Z(0xcb), Z(0x18), Z(0x33), Z(0x1f), Z(0xf8),
Z(0x69), Z(0x10), Z(0x9d), Z(0xd3), Z(0xf7), Z(0x28), Z(0xf8), Z(0x30),
Z(0x05), Z(0x5e), Z(0x32), Z(0xc0), Z(0xd5), Z(0x19), Z(0xbd), Z(0x45),
Z(0x8b), Z(0x5b), Z(0xfd), Z(0xbc), Z(0xe2), Z(0x5c), Z(0xa9), Z(0x96),
Z(0xef), Z(0x70), Z(0xcf), Z(0xc2), Z(0x2a), Z(0xb3), Z(0x61), Z(0xad),
Z(0x80), Z(0x48), Z(0x81), Z(0xb7), Z(0x1d), Z(0x43), Z(0xd9), Z(0xd7),
Z(0x45), Z(0xf0), Z(0xd8), Z(0x8a), Z(0x59), Z(0x7c), Z(0x57), Z(0xc1),
Z(0x79), Z(0xc7), Z(0x34), Z(0xd6), Z(0x43), Z(0xdf), Z(0xe4), Z(0x78),
Z(0x16), Z(0x06), Z(0xda), Z(0x92), Z(0x76), Z(0x51), Z(0xe1), Z(0xd4),
Z(0x70), Z(0x03), Z(0xe0), Z(0x2f), Z(0x96), Z(0x91), Z(0x82), Z(0x80)
};
#undef Z
#define Z(x) cpu_to_be32(x << 11)
static const __be32 sbox2[256] = {
Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),
Z(0xbf), Z(0x80), Z(0x87), Z(0x27), Z(0x95), Z(0xe2), Z(0xc5), Z(0x5d),
Z(0xf9), Z(0x6f), Z(0xdb), Z(0xb4), Z(0x65), Z(0x6e), Z(0xe7), Z(0x24),
Z(0xc8), Z(0x1a), Z(0xbb), Z(0x49), Z(0xb5), Z(0x0a), Z(0x7d), Z(0xb9),
Z(0xe8), Z(0xdc), Z(0xb7), Z(0xd9), Z(0x45), Z(0x20), Z(0x1b), Z(0xce),
Z(0x59), Z(0x9d), Z(0x6b), Z(0xbd), Z(0x0e), Z(0x8f), Z(0xa3), Z(0xa9),
Z(0xbc), Z(0x74), Z(0xa6), Z(0xf6), Z(0x7f), Z(0x5f), Z(0xb1), Z(0x68),
Z(0x84), Z(0xbc), Z(0xa9), Z(0xfd), Z(0x55), Z(0x50), Z(0xe9), Z(0xb6),
Z(0x13), Z(0x5e), Z(0x07), Z(0xb8), Z(0x95), Z(0x02), Z(0xc0), Z(0xd0),
Z(0x6a), Z(0x1a), Z(0x85), Z(0xbd), Z(0xb6), Z(0xfd), Z(0xfe), Z(0x17),
Z(0x3f), Z(0x09), Z(0xa3), Z(0x8d), Z(0xfb), Z(0xed), Z(0xda), Z(0x1d),
Z(0x6d), Z(0x1c), Z(0x6c), Z(0x01), Z(0x5a), Z(0xe5), Z(0x71), Z(0x3e),
Z(0x8b), Z(0x6b), Z(0xbe), Z(0x29), Z(0xeb), Z(0x12), Z(0x19), Z(0x34),
Z(0xcd), Z(0xb3), Z(0xbd), Z(0x35), Z(0xea), Z(0x4b), Z(0xd5), Z(0xae),
Z(0x2a), Z(0x79), Z(0x5a), Z(0xa5), Z(0x32), Z(0x12), Z(0x7b), Z(0xdc),
Z(0x2c), Z(0xd0), Z(0x22), Z(0x4b), Z(0xb1), Z(0x85), Z(0x59), Z(0x80),
Z(0xc0), Z(0x30), Z(0x9f), Z(0x73), Z(0xd3), Z(0x14), Z(0x48), Z(0x40),
Z(0x07), Z(0x2d), Z(0x8f), Z(0x80), Z(0x0f), Z(0xce), Z(0x0b), Z(0x5e),
Z(0xb7), Z(0x5e), Z(0xac), Z(0x24), Z(0x94), Z(0x4a), Z(0x18), Z(0x15),
Z(0x05), Z(0xe8), Z(0x02), Z(0x77), Z(0xa9), Z(0xc7), Z(0x40), Z(0x45),
Z(0x89), Z(0xd1), Z(0xea), Z(0xde), Z(0x0c), Z(0x79), Z(0x2a), Z(0x99),
Z(0x6c), Z(0x3e), Z(0x95), Z(0xdd), Z(0x8c), Z(0x7d), Z(0xad), Z(0x6f),
Z(0xdc), Z(0xff), Z(0xfd), Z(0x62), Z(0x47), Z(0xb3), Z(0x21), Z(0x8a),
Z(0xec), Z(0x8e), Z(0x19), Z(0x18), Z(0xb4), Z(0x6e), Z(0x3d), Z(0xfd),
Z(0x74), Z(0x54), Z(0x1e), Z(0x04), Z(0x85), Z(0xd8), Z(0xbc), Z(0x1f),
Z(0x56), Z(0xe7), Z(0x3a), Z(0x56), Z(0x67), Z(0xd6), Z(0xc8), Z(0xa5),
Z(0xf3), Z(0x8e), Z(0xde), Z(0xae), Z(0x37), Z(0x49), Z(0xb7), Z(0xfa),
Z(0xc8), Z(0xf4), Z(0x1f), Z(0xe0), Z(0x2a), Z(0x9b), Z(0x15), Z(0xd1),
Z(0x34), Z(0x0e), Z(0xb5), Z(0xe0), Z(0x44), Z(0x78), Z(0x84), Z(0x59),
Z(0x56), Z(0x68), Z(0x77), Z(0xa5), Z(0x14), Z(0x06), Z(0xf5), Z(0x2f),
Z(0x8c), Z(0x8a), Z(0x73), Z(0x80), Z(0x76), Z(0xb4), Z(0x10), Z(0x86)
};
#undef Z
#define Z(x) cpu_to_be32(x << 19)
static const __be32 sbox3[256] = {
Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
Z(0x44), Z(0x48), Z(0x6d), Z(0x28), Z(0xaa), Z(0x20), Z(0x6d), Z(0x57),
Z(0xd6), Z(0x6b), Z(0x5d), Z(0x72), Z(0xf0), Z(0x92), Z(0x5a), Z(0x1b),
Z(0x53), Z(0x80), Z(0x24), Z(0x70), Z(0x9a), Z(0xcc), Z(0xa7), Z(0x66),
Z(0xa1), Z(0x01), Z(0xa5), Z(0x41), Z(0x97), Z(0x41), Z(0x31), Z(0x82),
Z(0xf1), Z(0x14), Z(0xcf), Z(0x53), Z(0x0d), Z(0xa0), Z(0x10), Z(0xcc),
Z(0x2a), Z(0x7d), Z(0xd2), Z(0xbf), Z(0x4b), Z(0x1a), Z(0xdb), Z(0x16),
Z(0x47), Z(0xf6), Z(0x51), Z(0x36), Z(0xed), Z(0xf3), Z(0xb9), Z(0x1a),
Z(0xa7), Z(0xdf), Z(0x29), Z(0x43), Z(0x01), Z(0x54), Z(0x70), Z(0xa4),
Z(0xbf), Z(0xd4), Z(0x0b), Z(0x53), Z(0x44), Z(0x60), Z(0x9e), Z(0x23),
Z(0xa1), Z(0x18), Z(0x68), Z(0x4f), Z(0xf0), Z(0x2f), Z(0x82), Z(0xc2),
Z(0x2a), Z(0x41), Z(0xb2), Z(0x42), Z(0x0c), Z(0xed), Z(0x0c), Z(0x1d),
Z(0x13), Z(0x3a), Z(0x3c), Z(0x6e), Z(0x35), Z(0xdc), Z(0x60), Z(0x65),
Z(0x85), Z(0xe9), Z(0x64), Z(0x02), Z(0x9a), Z(0x3f), Z(0x9f), Z(0x87),
Z(0x96), Z(0xdf), Z(0xbe), Z(0xf2), Z(0xcb), Z(0xe5), Z(0x6c), Z(0xd4),
Z(0x5a), Z(0x83), Z(0xbf), Z(0x92), Z(0x1b), Z(0x94), Z(0x00), Z(0x42),
Z(0xcf), Z(0x4b), Z(0x00), Z(0x75), Z(0xba), Z(0x8f), Z(0x76), Z(0x5f),
Z(0x5d), Z(0x3a), Z(0x4d), Z(0x09), Z(0x12), Z(0x08), Z(0x38), Z(0x95),
Z(0x17), Z(0xe4), Z(0x01), Z(0x1d), Z(0x4c), Z(0xa9), Z(0xcc), Z(0x85),
Z(0x82), Z(0x4c), Z(0x9d), Z(0x2f), Z(0x3b), Z(0x66), Z(0xa1), Z(0x34),
Z(0x10), Z(0xcd), Z(0x59), Z(0x89), Z(0xa5), Z(0x31), Z(0xcf), Z(0x05),
Z(0xc8), Z(0x84), Z(0xfa), Z(0xc7), Z(0xba), Z(0x4e), Z(0x8b), Z(0x1a),
Z(0x19), Z(0xf1), Z(0xa1), Z(0x3b), Z(0x18), Z(0x12), Z(0x17), Z(0xb0),
Z(0x98), Z(0x8d), Z(0x0b), Z(0x23), Z(0xc3), Z(0x3a), Z(0x2d), Z(0x20),
Z(0xdf), Z(0x13), Z(0xa0), Z(0xa8), Z(0x4c), Z(0x0d), Z(0x6c), Z(0x2f),
Z(0x47), Z(0x13), Z(0x13), Z(0x52), Z(0x1f), Z(0x2d), Z(0xf5), Z(0x79),
Z(0x3d), Z(0xa2), Z(0x54), Z(0xbd), Z(0x69), Z(0xc8), Z(0x6b), Z(0xf3),
Z(0x05), Z(0x28), Z(0xf1), Z(0x16), Z(0x46), Z(0x40), Z(0xb0), Z(0x11),
Z(0xd3), Z(0xb7), Z(0x95), Z(0x49), Z(0xcf), Z(0xc3), Z(0x1d), Z(0x8f),
Z(0xd8), Z(0xe1), Z(0x73), Z(0xdb), Z(0xad), Z(0xc8), Z(0xc9), Z(0xa9),
Z(0xa1), Z(0xc2), Z(0xc5), Z(0xe3), Z(0xba), Z(0xfc), Z(0x0e), Z(0x25)
};
#undef Z
/*
* This is a 16 round Feistel network with permutation F_ENCRYPT
*/
#define F_ENCRYPT(R, L, sched) \
do { \
union lc4 { __be32 l; u8 c[4]; } u; \
u.l = sched ^ R; \
L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
} while (0)
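/*
* Each application of F_ENCRYPT xors the round subkey into one half,
* splits the 32-bit result into its four bytes through the union, and
* folds one S-box lookup per byte into the other half.  Working on
* __be32 values keeps the byte indexing independent of host endianness.
*/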
/*
* encryptor
*/
static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
__be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
memcpy(dst, &X, sizeof(X));
}
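/*
* Note that the alternating (X.r, X.l) / (X.l, X.r) argument order in
* the rounds above performs the Feistel half-swap implicitly, so no
* data ever needs to be moved between rounds.
*/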
/*
* decryptor
*/
static void fcrypt_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
struct {
__be32 l, r;
} X;
memcpy(&X, src, sizeof(X));
F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
memcpy(dst, &X, sizeof(X));
}
/*
* Generate a key schedule from key, the least significant bit in each key byte
* is parity and shall be ignored. This leaves 56 significant bits in the key
* to scatter over the 16 key schedules. For each schedule extract the low
* order 32 bits and use as schedule, then rotate right by 11 bits.
*/
static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
#if BITS_PER_LONG == 64 /* the 64-bit version can also be used for 32-bit
* kernels - it seems to be faster but the code is
* larger */
u64 k; /* k holds all 56 non-parity bits */
/* discard the parity bits */
k = (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key++) >> 1;
k <<= 7;
k |= (*key) >> 1;
/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11);
ctx->sched[0xf] = cpu_to_be32(k);
return 0;
#else
u32 hi, lo; /* hi is upper 24 bits and lo lower 32, total 56 */
/* discard the parity bits */
lo = (*key++) >> 1;
lo <<= 7;
lo |= (*key++) >> 1;
lo <<= 7;
lo |= (*key++) >> 1;
lo <<= 7;
lo |= (*key++) >> 1;
hi = lo >> 4;
lo &= 0xf;
lo <<= 7;
lo |= (*key++) >> 1;
lo <<= 7;
lo |= (*key++) >> 1;
lo <<= 7;
lo |= (*key++) >> 1;
lo <<= 7;
lo |= (*key) >> 1;
/* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11);
ctx->sched[0xf] = cpu_to_be32(lo);
return 0;
#endif
}
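/*
* Both branches above keep the same invariant: the 56 significant key
* bits are rotated right by 11 between consecutive subkeys (ror56_64 on
* a single u64, ror56 on a hi:24/lo:32 split), so every key bit lands
* in several of the sixteen 32-bit subkeys.
*/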
static struct crypto_alg fcrypt_alg = {
.cra_name = "fcrypt",
.cra_driver_name = "fcrypt-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = 8,
.cra_ctxsize = sizeof(struct fcrypt_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = 8,
.cia_max_keysize = 8,
.cia_setkey = fcrypt_setkey,
.cia_encrypt = fcrypt_encrypt,
.cia_decrypt = fcrypt_decrypt } }
};
static int __init fcrypt_mod_init(void)
{
return crypto_register_alg(&fcrypt_alg);
}
static void __exit fcrypt_mod_fini(void)
{
crypto_unregister_alg(&fcrypt_alg);
}
subsys_initcall(fcrypt_mod_init);
module_exit(fcrypt_mod_fini);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
MODULE_AUTHOR("David Howells <[email protected]>");
MODULE_ALIAS_CRYPTO("fcrypt");
| linux-master | crypto/fcrypt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Hash Info: Hash algorithms information
*
* Copyright (c) 2013 Dmitry Kasatkin <[email protected]>
*/
#include <linux/export.h>
#include <crypto/hash_info.h>
const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_MD4] = "md4",
[HASH_ALGO_MD5] = "md5",
[HASH_ALGO_SHA1] = "sha1",
[HASH_ALGO_RIPE_MD_160] = "rmd160",
[HASH_ALGO_SHA256] = "sha256",
[HASH_ALGO_SHA384] = "sha384",
[HASH_ALGO_SHA512] = "sha512",
[HASH_ALGO_SHA224] = "sha224",
[HASH_ALGO_RIPE_MD_128] = "rmd128",
[HASH_ALGO_RIPE_MD_256] = "rmd256",
[HASH_ALGO_RIPE_MD_320] = "rmd320",
[HASH_ALGO_WP_256] = "wp256",
[HASH_ALGO_WP_384] = "wp384",
[HASH_ALGO_WP_512] = "wp512",
[HASH_ALGO_TGR_128] = "tgr128",
[HASH_ALGO_TGR_160] = "tgr160",
[HASH_ALGO_TGR_192] = "tgr192",
[HASH_ALGO_SM3_256] = "sm3",
[HASH_ALGO_STREEBOG_256] = "streebog256",
[HASH_ALGO_STREEBOG_512] = "streebog512",
};
EXPORT_SYMBOL_GPL(hash_algo_name);
const int hash_digest_size[HASH_ALGO__LAST] = {
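/* MD4 and MD5 share a 16-byte digest, hence MD5_DIGEST_SIZE below. */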
[HASH_ALGO_MD4] = MD5_DIGEST_SIZE,
[HASH_ALGO_MD5] = MD5_DIGEST_SIZE,
[HASH_ALGO_SHA1] = SHA1_DIGEST_SIZE,
[HASH_ALGO_RIPE_MD_160] = RMD160_DIGEST_SIZE,
[HASH_ALGO_SHA256] = SHA256_DIGEST_SIZE,
[HASH_ALGO_SHA384] = SHA384_DIGEST_SIZE,
[HASH_ALGO_SHA512] = SHA512_DIGEST_SIZE,
[HASH_ALGO_SHA224] = SHA224_DIGEST_SIZE,
[HASH_ALGO_RIPE_MD_128] = RMD128_DIGEST_SIZE,
[HASH_ALGO_RIPE_MD_256] = RMD256_DIGEST_SIZE,
[HASH_ALGO_RIPE_MD_320] = RMD320_DIGEST_SIZE,
[HASH_ALGO_WP_256] = WP256_DIGEST_SIZE,
[HASH_ALGO_WP_384] = WP384_DIGEST_SIZE,
[HASH_ALGO_WP_512] = WP512_DIGEST_SIZE,
[HASH_ALGO_TGR_128] = TGR128_DIGEST_SIZE,
[HASH_ALGO_TGR_160] = TGR160_DIGEST_SIZE,
[HASH_ALGO_TGR_192] = TGR192_DIGEST_SIZE,
[HASH_ALGO_SM3_256] = SM3256_DIGEST_SIZE,
[HASH_ALGO_STREEBOG_256] = STREEBOG256_DIGEST_SIZE,
[HASH_ALGO_STREEBOG_512] = STREEBOG512_DIGEST_SIZE,
};
EXPORT_SYMBOL_GPL(hash_digest_size);
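/*
* Editor's sketch, not part of the original file: a reverse lookup from
* an algorithm name to its HASH_ALGO_* index using the table above.
* match_string() from <linux/string.h> is assumed to be reachable here.
*/
static inline int hash_algo_lookup(const char *name)
{
return match_string(hash_algo_name, HASH_ALGO__LAST, name);
}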
| linux-master | crypto/hash_info.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Public Key Signature Algorithm
*
* Copyright (c) 2023 Herbert Xu <[email protected]>
*/
#include <crypto/akcipher.h>
#include <crypto/internal/sig.h>
#include <linux/cryptouser.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
#define CRYPTO_ALG_TYPE_SIG_MASK 0x0000000e
static const struct crypto_type crypto_sig_type;
static int crypto_sig_init_tfm(struct crypto_tfm *tfm)
{
if (tfm->__crt_alg->cra_type != &crypto_sig_type)
return crypto_init_akcipher_ops_sig(tfm);
return 0;
}
static void __maybe_unused crypto_sig_show(struct seq_file *m,
struct crypto_alg *alg)
{
seq_puts(m, "type : sig\n");
}
static int __maybe_unused crypto_sig_report(struct sk_buff *skb,
struct crypto_alg *alg)
{
struct crypto_report_akcipher rsig = {};
strscpy(rsig.type, "sig", sizeof(rsig.type));
return nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER, sizeof(rsig), &rsig);
}
static int __maybe_unused crypto_sig_report_stat(struct sk_buff *skb,
struct crypto_alg *alg)
{
struct crypto_stat_akcipher rsig = {};
strscpy(rsig.type, "sig", sizeof(rsig.type));
return nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, sizeof(rsig), &rsig);
}
static const struct crypto_type crypto_sig_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_sig_init_tfm,
#ifdef CONFIG_PROC_FS
.show = crypto_sig_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_sig_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_sig_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_SIG_MASK,
.type = CRYPTO_ALG_TYPE_SIG,
.tfmsize = offsetof(struct crypto_sig, base),
};
struct crypto_sig *crypto_alloc_sig(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_sig_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_sig);
int crypto_sig_maxsize(struct crypto_sig *tfm)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
return crypto_akcipher_maxsize(*ctx);
}
EXPORT_SYMBOL_GPL(crypto_sig_maxsize);
int crypto_sig_sign(struct crypto_sig *tfm,
const void *src, unsigned int slen,
void *dst, unsigned int dlen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
struct crypto_akcipher_sync_data data = {
.tfm = *ctx,
.src = src,
.dst = dst,
.slen = slen,
.dlen = dlen,
};
return crypto_akcipher_sync_prep(&data) ?:
crypto_akcipher_sync_post(&data,
crypto_akcipher_sign(data.req));
}
EXPORT_SYMBOL_GPL(crypto_sig_sign);
int crypto_sig_verify(struct crypto_sig *tfm,
const void *src, unsigned int slen,
const void *digest, unsigned int dlen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
struct crypto_akcipher_sync_data data = {
.tfm = *ctx,
.src = src,
.slen = slen,
.dlen = dlen,
};
int err;
err = crypto_akcipher_sync_prep(&data);
if (err)
return err;
memcpy(data.buf + slen, digest, dlen);
return crypto_akcipher_sync_post(&data,
crypto_akcipher_verify(data.req));
}
EXPORT_SYMBOL_GPL(crypto_sig_verify);
int crypto_sig_set_pubkey(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
return crypto_akcipher_set_pub_key(*ctx, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_sig_set_pubkey);
int crypto_sig_set_privkey(struct crypto_sig *tfm,
const void *key, unsigned int keylen)
{
struct crypto_akcipher **ctx = crypto_sig_ctx(tfm);
return crypto_akcipher_set_priv_key(*ctx, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_sig_set_privkey);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Public Key Signature Algorithms");
| linux-master | crypto/sig.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* HMAC: Keyed-Hashing for Message Authentication (RFC2104).
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2006 Herbert Xu <[email protected]>
*
* The HMAC implementation is derived from USAGI.
* Copyright (c) 2002 Kazunori Miyazawa <[email protected]> / USAGI
*/
#include <crypto/hmac.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
struct hmac_ctx {
struct crypto_shash *hash;
};
static inline void *align_ptr(void *p, unsigned int align)
{
return (void *)ALIGN((unsigned long)p, align);
}
static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm)
{
return align_ptr(crypto_shash_ctx_aligned(tfm) +
crypto_shash_statesize(tfm) * 2,
crypto_tfm_ctx_alignment());
}
static int hmac_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
int bs = crypto_shash_blocksize(parent);
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
char *ipad = crypto_shash_ctx_aligned(parent);
char *opad = ipad + ss;
struct hmac_ctx *ctx = align_ptr(opad + ss,
crypto_tfm_ctx_alignment());
struct crypto_shash *hash = ctx->hash;
SHASH_DESC_ON_STACK(shash, hash);
unsigned int i;
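/* FIPS 140 disallows HMAC keys shorter than 112 bits. */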
if (fips_enabled && (keylen < 112 / 8))
return -EINVAL;
shash->tfm = hash;
if (keylen > bs) {
int err;
err = crypto_shash_digest(shash, inkey, keylen, ipad);
if (err)
return err;
keylen = ds;
} else
memcpy(ipad, inkey, keylen);
memset(ipad + keylen, 0, bs - keylen);
memcpy(opad, ipad, bs);
for (i = 0; i < bs; i++) {
ipad[i] ^= HMAC_IPAD_VALUE;
opad[i] ^= HMAC_OPAD_VALUE;
}
return crypto_shash_init(shash) ?:
crypto_shash_update(shash, ipad, bs) ?:
crypto_shash_export(shash, ipad) ?:
crypto_shash_init(shash) ?:
crypto_shash_update(shash, opad, bs) ?:
crypto_shash_export(shash, opad);
}
static int hmac_export(struct shash_desc *pdesc, void *out)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_export(desc, out);
}
static int hmac_import(struct shash_desc *pdesc, const void *in)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm);
desc->tfm = ctx->hash;
return crypto_shash_import(desc, in);
}
static int hmac_init(struct shash_desc *pdesc)
{
return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm));
}
static int hmac_update(struct shash_desc *pdesc,
const u8 *data, unsigned int nbytes)
{
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_update(desc, data, nbytes);
}
static int hmac_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_final(desc, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
}
static int hmac_finup(struct shash_desc *pdesc, const u8 *data,
unsigned int nbytes, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
int ds = crypto_shash_digestsize(parent);
int ss = crypto_shash_statesize(parent);
char *opad = crypto_shash_ctx_aligned(parent) + ss;
struct shash_desc *desc = shash_desc_ctx(pdesc);
return crypto_shash_finup(desc, data, nbytes, out) ?:
crypto_shash_import(desc, opad) ?:
crypto_shash_finup(desc, out, ds, out);
}
static int hmac_init_tfm(struct crypto_shash *parent)
{
struct crypto_shash *hash;
struct shash_instance *inst = shash_alg_instance(parent);
struct crypto_shash_spawn *spawn = shash_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(parent);
hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
parent->descsize = sizeof(struct shash_desc) +
crypto_shash_descsize(hash);
ctx->hash = hash;
return 0;
}
static int hmac_clone_tfm(struct crypto_shash *dst, struct crypto_shash *src)
{
struct hmac_ctx *sctx = hmac_ctx(src);
struct hmac_ctx *dctx = hmac_ctx(dst);
struct crypto_shash *hash;
hash = crypto_clone_shash(sctx->hash);
if (IS_ERR(hash))
return PTR_ERR(hash);
dctx->hash = hash;
return 0;
}
static void hmac_exit_tfm(struct crypto_shash *parent)
{
struct hmac_ctx *ctx = hmac_ctx(parent);
crypto_free_shash(ctx->hash);
}
static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_shash_spawn *spawn;
struct crypto_alg *alg;
struct shash_alg *salg;
u32 mask;
int err;
int ds;
int ss;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_shash(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
salg = crypto_spawn_shash_alg(spawn);
alg = &salg->base;
/* The underlying hash algorithm must not require a key */
err = -EINVAL;
if (crypto_shash_alg_needs_key(salg))
goto err_free_inst;
ds = salg->digestsize;
ss = salg->statesize;
if (ds > alg->cra_blocksize ||
ss < alg->cra_blocksize)
goto err_free_inst;
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = alg->cra_blocksize;
inst->alg.base.cra_alignmask = alg->cra_alignmask;
ss = ALIGN(ss, alg->cra_alignmask + 1);
inst->alg.digestsize = ds;
inst->alg.statesize = ss;
inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) +
ALIGN(ss * 2, crypto_tfm_ctx_alignment());
inst->alg.init = hmac_init;
inst->alg.update = hmac_update;
inst->alg.final = hmac_final;
inst->alg.finup = hmac_finup;
inst->alg.export = hmac_export;
inst->alg.import = hmac_import;
inst->alg.setkey = hmac_setkey;
inst->alg.init_tfm = hmac_init_tfm;
inst->alg.clone_tfm = hmac_clone_tfm;
inst->alg.exit_tfm = hmac_exit_tfm;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template hmac_tmpl = {
.name = "hmac",
.create = hmac_create,
.module = THIS_MODULE,
};
static int __init hmac_module_init(void)
{
return crypto_register_template(&hmac_tmpl);
}
static void __exit hmac_module_exit(void)
{
crypto_unregister_template(&hmac_tmpl);
}
subsys_initcall(hmac_module_init);
module_exit(hmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("HMAC hash algorithm");
MODULE_ALIAS_CRYPTO("hmac");
| linux-master | crypto/hmac.c |
/*
* Cryptographic API.
*
* MD4 Message Digest Algorithm (RFC1320).
*
* Implementation derived from Andrew Tridgell and Steve French's
* CIFS MD4 implementation, and the cryptoapi implementation
* originally based on the public domain implementation written
* by Colin Plumb in 1993.
*
* Copyright (c) Andrew Tridgell 1997-1998.
* Modified by Steve French ([email protected]) 2002
* Copyright (c) Cryptoapi developers.
* Copyright (c) 2002 David S. Miller ([email protected])
* Copyright (c) 2002 James Morris <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>
#define MD4_DIGEST_SIZE 16
#define MD4_HMAC_BLOCK_SIZE 64
#define MD4_BLOCK_WORDS 16
#define MD4_HASH_WORDS 4
struct md4_ctx {
u32 hash[MD4_HASH_WORDS];
u32 block[MD4_BLOCK_WORDS];
u64 byte_count;
};
static inline u32 lshift(u32 x, unsigned int s)
{
x &= 0xFFFFFFFF;
return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s));
}
static inline u32 F(u32 x, u32 y, u32 z)
{
return (x & y) | ((~x) & z);
}
static inline u32 G(u32 x, u32 y, u32 z)
{
return (x & y) | (x & z) | (y & z);
}
static inline u32 H(u32 x, u32 y, u32 z)
{
return x ^ y ^ z;
}
#define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s))
#define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (u32)0x5A827999,s))
#define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (u32)0x6ED9EBA1,s))
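/* Three passes over the sixteen-word block, one pass per mixing function F, G and H. */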
static void md4_transform(u32 *hash, u32 const *in)
{
u32 a, b, c, d;
a = hash[0];
b = hash[1];
c = hash[2];
d = hash[3];
ROUND1(a, b, c, d, in[0], 3);
ROUND1(d, a, b, c, in[1], 7);
ROUND1(c, d, a, b, in[2], 11);
ROUND1(b, c, d, a, in[3], 19);
ROUND1(a, b, c, d, in[4], 3);
ROUND1(d, a, b, c, in[5], 7);
ROUND1(c, d, a, b, in[6], 11);
ROUND1(b, c, d, a, in[7], 19);
ROUND1(a, b, c, d, in[8], 3);
ROUND1(d, a, b, c, in[9], 7);
ROUND1(c, d, a, b, in[10], 11);
ROUND1(b, c, d, a, in[11], 19);
ROUND1(a, b, c, d, in[12], 3);
ROUND1(d, a, b, c, in[13], 7);
ROUND1(c, d, a, b, in[14], 11);
ROUND1(b, c, d, a, in[15], 19);
ROUND2(a, b, c, d, in[0], 3);
ROUND2(d, a, b, c, in[4], 5);
ROUND2(c, d, a, b, in[8], 9);
ROUND2(b, c, d, a, in[12], 13);
ROUND2(a, b, c, d, in[1], 3);
ROUND2(d, a, b, c, in[5], 5);
ROUND2(c, d, a, b, in[9], 9);
ROUND2(b, c, d, a, in[13], 13);
ROUND2(a, b, c, d, in[2], 3);
ROUND2(d, a, b, c, in[6], 5);
ROUND2(c, d, a, b, in[10], 9);
ROUND2(b, c, d, a, in[14], 13);
ROUND2(a, b, c, d, in[3], 3);
ROUND2(d, a, b, c, in[7], 5);
ROUND2(c, d, a, b, in[11], 9);
ROUND2(b, c, d, a, in[15], 13);
ROUND3(a, b, c, d, in[0], 3);
ROUND3(d, a, b, c, in[8], 9);
ROUND3(c, d, a, b, in[4], 11);
ROUND3(b, c, d, a, in[12], 15);
ROUND3(a, b, c, d, in[2], 3);
ROUND3(d, a, b, c, in[10], 9);
ROUND3(c, d, a, b, in[6], 11);
ROUND3(b, c, d, a, in[14], 15);
ROUND3(a, b, c, d, in[1], 3);
ROUND3(d, a, b, c, in[9], 9);
ROUND3(c, d, a, b, in[5], 11);
ROUND3(b, c, d, a, in[13], 15);
ROUND3(a, b, c, d, in[3], 3);
ROUND3(d, a, b, c, in[11], 9);
ROUND3(c, d, a, b, in[7], 11);
ROUND3(b, c, d, a, in[15], 15);
hash[0] += a;
hash[1] += b;
hash[2] += c;
hash[3] += d;
}
static inline void md4_transform_helper(struct md4_ctx *ctx)
{
le32_to_cpu_array(ctx->block, ARRAY_SIZE(ctx->block));
md4_transform(ctx->hash, ctx->block);
}
static int md4_init(struct shash_desc *desc)
{
struct md4_ctx *mctx = shash_desc_ctx(desc);
mctx->hash[0] = 0x67452301;
mctx->hash[1] = 0xefcdab89;
mctx->hash[2] = 0x98badcfe;
mctx->hash[3] = 0x10325476;
mctx->byte_count = 0;
return 0;
}
static int md4_update(struct shash_desc *desc, const u8 *data, unsigned int len)
{
struct md4_ctx *mctx = shash_desc_ctx(desc);
const u32 avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f);
mctx->byte_count += len;
if (avail > len) {
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, len);
return 0;
}
memcpy((char *)mctx->block + (sizeof(mctx->block) - avail),
data, avail);
md4_transform_helper(mctx);
data += avail;
len -= avail;
while (len >= sizeof(mctx->block)) {
memcpy(mctx->block, data, sizeof(mctx->block));
md4_transform_helper(mctx);
data += sizeof(mctx->block);
len -= sizeof(mctx->block);
}
memcpy(mctx->block, data, len);
return 0;
}
static int md4_final(struct shash_desc *desc, u8 *out)
{
struct md4_ctx *mctx = shash_desc_ctx(desc);
const unsigned int offset = mctx->byte_count & 0x3f;
char *p = (char *)mctx->block + offset;
int padding = 56 - (offset + 1);
*p++ = 0x80;
if (padding < 0) {
memset(p, 0x00, padding + sizeof (u64));
md4_transform_helper(mctx);
p = (char *)mctx->block;
padding = 56;
}
memset(p, 0, padding);
mctx->block[14] = mctx->byte_count << 3;
mctx->block[15] = mctx->byte_count >> 29;
le32_to_cpu_array(mctx->block, (sizeof(mctx->block) -
sizeof(u64)) / sizeof(u32));
md4_transform(mctx->hash, mctx->block);
cpu_to_le32_array(mctx->hash, ARRAY_SIZE(mctx->hash));
memcpy(out, mctx->hash, sizeof(mctx->hash));
memset(mctx, 0, sizeof(*mctx));
return 0;
}
static struct shash_alg alg = {
.digestsize = MD4_DIGEST_SIZE,
.init = md4_init,
.update = md4_update,
.final = md4_final,
.descsize = sizeof(struct md4_ctx),
.base = {
.cra_name = "md4",
.cra_driver_name = "md4-generic",
.cra_blocksize = MD4_HMAC_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init md4_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit md4_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(md4_mod_init);
module_exit(md4_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
MODULE_ALIAS_CRYPTO("md4");
| linux-master | crypto/md4.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Twofish for CryptoAPI
*
* Originally Twofish for GPG
* By Matthew Skala <[email protected]>, July 26, 1998
* 256-bit key length added March 20, 1999
* Some modifications to reduce the text size by Werner Koch, April, 1998
* Ported to the kerneli patch by Marc Mutz <[email protected]>
* Ported to CryptoAPI by Colin Slater <[email protected]>
*
* The original author has disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* This code is a "clean room" implementation, written from the paper
* _Twofish: A 128-Bit Block Cipher_ by Bruce Schneier, John Kelsey,
* Doug Whiting, David Wagner, Chris Hall, and Niels Ferguson, available
* through http://www.counterpane.com/twofish.html
*
* For background information on multiplication in finite fields, used for
* the matrix operations in the key schedule, see the book _Contemporary
* Abstract Algebra_ by Joseph A. Gallian, especially chapter 22 in the
* Third Edition.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/twofish.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bitops.h>
/* Macros to compute the g() function in the encryption and decryption
* rounds. G1 is the straight g() function; G2 includes the 8-bit
* rotation for the high 32-bit word. */
#define G1(a) \
(ctx->s[0][(a) & 0xFF]) ^ (ctx->s[1][((a) >> 8) & 0xFF]) \
^ (ctx->s[2][((a) >> 16) & 0xFF]) ^ (ctx->s[3][(a) >> 24])
#define G2(b) \
(ctx->s[1][(b) & 0xFF]) ^ (ctx->s[2][((b) >> 8) & 0xFF]) \
^ (ctx->s[3][((b) >> 16) & 0xFF]) ^ (ctx->s[0][(b) >> 24])
/* Encryption and decryption Feistel rounds. Each one calls the two g()
* macros, does the PHT, and performs the XOR and the appropriate bit
* rotations. The parameters are the round number (used to select subkeys),
* and the four 32-bit chunks of the text. */
#define ENCROUND(n, a, b, c, d) \
x = G1 (a); y = G2 (b); \
x += y; y += x + ctx->k[2 * (n) + 1]; \
(c) ^= x + ctx->k[2 * (n)]; \
(c) = ror32((c), 1); \
(d) = rol32((d), 1) ^ y
#define DECROUND(n, a, b, c, d) \
x = G1 (a); y = G2 (b); \
x += y; y += x; \
(d) ^= y + ctx->k[2 * (n) + 1]; \
(d) = ror32((d), 1); \
(c) = rol32((c), 1); \
(c) ^= (x + ctx->k[2 * (n)])
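/*
* DECROUND is the exact inverse of ENCROUND: it recomputes the same
* x/y pair, then undoes the subkey additions and reverses the one-bit
* rotations.
*/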
/* Encryption and decryption cycles; each one is simply two Feistel rounds
* with the 32-bit chunks re-ordered to simulate the "swap" */
#define ENCCYCLE(n) \
ENCROUND (2 * (n), a, b, c, d); \
ENCROUND (2 * (n) + 1, c, d, a, b)
#define DECCYCLE(n) \
DECROUND (2 * (n) + 1, c, d, a, b); \
DECROUND (2 * (n), a, b, c, d)
/* Macros to convert the input and output bytes into 32-bit words,
* and simultaneously perform the whitening step. INPACK packs word
* number n into the variable named by x, using whitening subkey number m.
* OUTUNPACK unpacks word number n from the variable named by x, using
* whitening subkey number m. */
#define INPACK(n, x, m) \
x = get_unaligned_le32(in + (n) * 4) ^ ctx->w[m]
#define OUTUNPACK(n, x, m) \
x ^= ctx->w[m]; \
put_unaligned_le32(x, out + (n) * 4)
/* Encrypt one block. in and out may be the same. */
static void twofish_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
/* Temporaries used by the round function. */
u32 x, y;
/* Input whitening and packing. */
INPACK (0, a, 0);
INPACK (1, b, 1);
INPACK (2, c, 2);
INPACK (3, d, 3);
/* Encryption Feistel cycles. */
ENCCYCLE (0);
ENCCYCLE (1);
ENCCYCLE (2);
ENCCYCLE (3);
ENCCYCLE (4);
ENCCYCLE (5);
ENCCYCLE (6);
ENCCYCLE (7);
/* Output whitening and unpacking. */
OUTUNPACK (0, c, 4);
OUTUNPACK (1, d, 5);
OUTUNPACK (2, a, 6);
OUTUNPACK (3, b, 7);
}
/* Decrypt one block. in and out may be the same. */
static void twofish_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct twofish_ctx *ctx = crypto_tfm_ctx(tfm);
/* The four 32-bit chunks of the text. */
u32 a, b, c, d;
/* Temporaries used by the round function. */
u32 x, y;
/* Input whitening and packing. */
INPACK (0, c, 4);
INPACK (1, d, 5);
INPACK (2, a, 6);
INPACK (3, b, 7);
/* Decryption Feistel cycles. */
DECCYCLE (7);
DECCYCLE (6);
DECCYCLE (5);
DECCYCLE (4);
DECCYCLE (3);
DECCYCLE (2);
DECCYCLE (1);
DECCYCLE (0);
/* Output whitening and unpacking. */
OUTUNPACK (0, a, 0);
OUTUNPACK (1, b, 1);
OUTUNPACK (2, c, 2);
OUTUNPACK (3, d, 3);
}
static struct crypto_alg alg = {
.cra_name = "twofish",
.cra_driver_name = "twofish-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct twofish_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TF_MIN_KEY_SIZE,
.cia_max_keysize = TF_MAX_KEY_SIZE,
.cia_setkey = twofish_setkey,
.cia_encrypt = twofish_encrypt,
.cia_decrypt = twofish_decrypt } }
};
static int __init twofish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit twofish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
subsys_initcall(twofish_mod_init);
module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("twofish");
MODULE_ALIAS_CRYPTO("twofish-generic");
| linux-master | crypto/twofish_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <[email protected]>
* 2002 Adam J. Richter <[email protected]>
* 2004 Jean-Luc Cooke <[email protected]>
*/
#include <crypto/scatterwalk.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
static inline void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
void *src = out ? buf : sgdata;
void *dst = out ? sgdata : buf;
memcpy(dst, src, nbytes);
}
void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out)
{
for (;;) {
unsigned int len_this_page = scatterwalk_pagelen(walk);
u8 *vaddr;
if (len_this_page > nbytes)
len_this_page = nbytes;
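/* An out value of 2 means: advance the walk without copying. */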
if (out != 2) {
vaddr = scatterwalk_map(walk);
memcpy_dir(buf, vaddr, len_this_page, out);
scatterwalk_unmap(vaddr);
}
scatterwalk_advance(walk, len_this_page);
if (nbytes == len_this_page)
break;
buf += len_this_page;
nbytes -= len_this_page;
scatterwalk_pagedone(walk, out & 1, 1);
}
}
EXPORT_SYMBOL_GPL(scatterwalk_copychunks);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out)
{
struct scatter_walk walk;
struct scatterlist tmp[2];
if (!nbytes)
return;
sg = scatterwalk_ffwd(tmp, sg, start);
scatterwalk_start(&walk, sg);
scatterwalk_copychunks(buf, &walk, nbytes, out);
scatterwalk_done(&walk, out, 0);
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)
{
for (;;) {
if (!len)
return src;
if (src->length > len)
break;
len -= src->length;
src = sg_next(src);
}
sg_init_table(dst, 2);
sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
scatterwalk_crypto_chain(dst, sg_next(src), 2);
return dst;
}
EXPORT_SYMBOL_GPL(scatterwalk_ffwd);
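/*
* Editor's sketch, not part of the original file: a typical caller of
* scatterwalk_map_and_copy(), linearising the last 16 bytes of a
* scatterlist (e.g. an appended authentication tag) into a buffer.
*/
static void __maybe_unused sg_copy_tail_example(struct scatterlist *sg,
unsigned int total, u8 tag[16])
{
/* out = 0: copy from the scatterlist into tag[]. */
scatterwalk_map_and_copy(tag, sg, total - 16, 16, 0);
}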
| linux-master | crypto/scatterwalk.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* algif_hash: User-space interface for hash algorithms
*
* This file provides the user-space API for hash algorithms.
*
* Copyright (c) 2010 Herbert Xu <[email protected]>
*/
#include <crypto/hash.h>
#include <crypto/if_alg.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct hash_ctx {
struct af_alg_sgl sgl;
u8 *result;
struct crypto_wait wait;
unsigned int len;
bool more;
struct ahash_request req;
};
static int hash_alloc_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
if (ctx->result)
return 0;
ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL);
if (!ctx->result)
return -ENOMEM;
memset(ctx->result, 0, ds);
return 0;
}
static void hash_free_result(struct sock *sk, struct hash_ctx *ctx)
{
unsigned ds;
if (!ctx->result)
return;
ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
sock_kzfree_s(sk, ctx->result, ds);
ctx->result = NULL;
}
static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
size_t ignored)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
ssize_t copied = 0;
size_t len, max_pages, npages;
bool continuing, need_init = false;
int err;
max_pages = min_t(size_t, ALG_MAX_PAGES,
DIV_ROUND_UP(sk->sk_sndbuf, PAGE_SIZE));
lock_sock(sk);
continuing = ctx->more;
if (!continuing) {
/* Discard a previous request that wasn't marked MSG_MORE. */
hash_free_result(sk, ctx);
if (!msg_data_left(msg))
goto done; /* Zero-length; don't start new req */
need_init = true;
} else if (!msg_data_left(msg)) {
/*
* No data: if the preceding request was left open with MSG_MORE and
* this call does not set it, finalise it now so any error surfaces
* here.
*/
if (!(msg->msg_flags & MSG_MORE)) {
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock_free;
ahash_request_set_crypt(&ctx->req, NULL,
ctx->result, 0);
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
goto unlock_free;
}
goto done_more;
}
while (msg_data_left(msg)) {
ctx->sgl.sgt.sgl = ctx->sgl.sgl;
ctx->sgl.sgt.nents = 0;
ctx->sgl.sgt.orig_nents = 0;
err = -EIO;
npages = iov_iter_npages(&msg->msg_iter, max_pages);
if (npages == 0)
goto unlock_free;
sg_init_table(ctx->sgl.sgl, npages);
ctx->sgl.need_unpin = iov_iter_extract_will_pin(&msg->msg_iter);
err = extract_iter_to_sg(&msg->msg_iter, LONG_MAX,
&ctx->sgl.sgt, npages, 0);
if (err < 0)
goto unlock_free;
len = err;
sg_mark_end(ctx->sgl.sgt.sgl + ctx->sgl.sgt.nents - 1);
if (!msg_data_left(msg)) {
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock_free;
}
ahash_request_set_crypt(&ctx->req, ctx->sgl.sgt.sgl,
ctx->result, len);
if (!msg_data_left(msg) && !continuing &&
!(msg->msg_flags & MSG_MORE)) {
err = crypto_ahash_digest(&ctx->req);
} else {
if (need_init) {
err = crypto_wait_req(
crypto_ahash_init(&ctx->req),
&ctx->wait);
if (err)
goto unlock_free;
need_init = false;
}
if (msg_data_left(msg) || (msg->msg_flags & MSG_MORE))
err = crypto_ahash_update(&ctx->req);
else
err = crypto_ahash_finup(&ctx->req);
continuing = true;
}
err = crypto_wait_req(err, &ctx->wait);
if (err)
goto unlock_free;
copied += len;
af_alg_free_sg(&ctx->sgl);
}
done_more:
ctx->more = msg->msg_flags & MSG_MORE;
done:
err = 0;
unlock:
release_sock(sk);
return copied ?: err;
unlock_free:
af_alg_free_sg(&ctx->sgl);
hash_free_result(sk, ctx);
ctx->more = false;
goto unlock;
}
static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req));
bool result;
int err;
if (len > ds)
len = ds;
else if (len < ds)
msg->msg_flags |= MSG_TRUNC;
lock_sock(sk);
result = ctx->result;
err = hash_alloc_result(sk, ctx);
if (err)
goto unlock;
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
if (!result && !ctx->more) {
err = crypto_wait_req(crypto_ahash_init(&ctx->req),
&ctx->wait);
if (err)
goto unlock;
}
if (!result || ctx->more) {
ctx->more = false;
err = crypto_wait_req(crypto_ahash_final(&ctx->req),
&ctx->wait);
if (err)
goto unlock;
}
err = memcpy_to_msg(msg, ctx->result, len);
unlock:
hash_free_result(sk, ctx);
release_sock(sk);
return err ?: len;
}
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
bool kern)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
struct crypto_ahash *tfm;
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
char *state;
bool more;
int err;
tfm = crypto_ahash_reqtfm(req);
state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
err = -ENOMEM;
if (!state)
goto out;
lock_sock(sk);
more = ctx->more;
err = more ? crypto_ahash_export(req, state) : 0;
release_sock(sk);
if (err)
goto out_free_state;
err = af_alg_accept(ask->parent, newsock, kern);
if (err)
goto out_free_state;
sk2 = newsock->sk;
ask2 = alg_sk(sk2);
ctx2 = ask2->private;
ctx2->more = more;
if (!more)
goto out_free_state;
err = crypto_ahash_import(&ctx2->req, state);
if (err) {
sock_orphan(sk2);
sock_put(sk2);
}
out_free_state:
kfree_sensitive(state);
out:
return err;
}
static struct proto_ops algif_hash_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.release = af_alg_release,
.sendmsg = hash_sendmsg,
.recvmsg = hash_recvmsg,
.accept = hash_accept,
};
static int hash_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct crypto_ahash *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
atomic_set(&ask->nokey_refcnt, 0);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int hash_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_sendmsg(sock, msg, size);
}
static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_recvmsg(sock, msg, ignored, flags);
}
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
int flags, bool kern)
{
int err;
err = hash_check_key(sock);
if (err)
return err;
return hash_accept(sock, newsock, flags, kern);
}
static struct proto_ops algif_hash_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.release = af_alg_release,
.sendmsg = hash_sendmsg_nokey,
.recvmsg = hash_recvmsg_nokey,
.accept = hash_accept_nokey,
};
static void *hash_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ahash(name, type, mask);
}
static void hash_release(void *private)
{
crypto_free_ahash(private);
}
static int hash_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ahash_setkey(private, key, keylen);
}
static void hash_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
hash_free_result(sk, ctx);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int hash_accept_parent_nokey(void *private, struct sock *sk)
{
struct crypto_ahash *tfm = private;
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx;
unsigned int len = sizeof(*ctx) + crypto_ahash_reqsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->result = NULL;
ctx->len = len;
ctx->more = false;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
ahash_request_set_tfm(&ctx->req, tfm);
ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
sk->sk_destruct = hash_sock_destruct;
return 0;
}
static int hash_accept_parent(void *private, struct sock *sk)
{
struct crypto_ahash *tfm = private;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return hash_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_hash = {
.bind = hash_bind,
.release = hash_release,
.setkey = hash_setkey,
.accept = hash_accept_parent,
.accept_nokey = hash_accept_parent_nokey,
.ops = &algif_hash_ops,
.ops_nokey = &algif_hash_ops_nokey,
.name = "hash",
.owner = THIS_MODULE
};
static int __init algif_hash_init(void)
{
return af_alg_register_type(&algif_type_hash);
}
static void __exit algif_hash_exit(void)
{
int err = af_alg_unregister_type(&algif_type_hash);
BUG_ON(err);
}
module_init(algif_hash_init);
module_exit(algif_hash_exit);
MODULE_LICENSE("GPL");
| linux-master | crypto/algif_hash.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Scatterlist Cryptographic API.
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2002 David S. Miller ([email protected])
* Copyright (c) 2005 Herbert Xu <[email protected]>
*
* Portions derived from Cryptoapi, by Alexander Kjeldaas <[email protected]>
* and Nettle, by Niels Möller.
*/
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/param.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/completion.h>
#include "internal.h"
LIST_HEAD(crypto_alg_list);
EXPORT_SYMBOL_GPL(crypto_alg_list);
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
BLOCKING_NOTIFIER_HEAD(crypto_chain);
EXPORT_SYMBOL_GPL(crypto_chain);
#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
DEFINE_STATIC_KEY_FALSE(__crypto_boot_test_finished);
EXPORT_SYMBOL_GPL(__crypto_boot_test_finished);
#endif
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
{
return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
}
EXPORT_SYMBOL_GPL(crypto_mod_get);
void crypto_mod_put(struct crypto_alg *alg)
{
struct module *module = alg->cra_module;
crypto_alg_put(alg);
module_put(module);
}
EXPORT_SYMBOL_GPL(crypto_mod_put);
static struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type,
u32 mask)
{
struct crypto_alg *q, *alg = NULL;
int best = -2;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
int exact, fuzzy;
if (crypto_is_moribund(q))
continue;
if ((q->cra_flags ^ type) & mask)
continue;
if (crypto_is_larval(q) &&
!crypto_is_test_larval((struct crypto_larval *)q) &&
((struct crypto_larval *)q)->mask != mask)
continue;
exact = !strcmp(q->cra_driver_name, name);
fuzzy = !strcmp(q->cra_name, name);
if (!exact && !(fuzzy && q->cra_priority > best))
continue;
if (unlikely(!crypto_mod_get(q)))
continue;
best = q->cra_priority;
if (alg)
crypto_mod_put(alg);
alg = q;
if (exact)
break;
}
return alg;
}
static void crypto_larval_destroy(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
BUG_ON(!crypto_is_larval(alg));
if (!IS_ERR_OR_NULL(larval->adult))
crypto_mod_put(larval->adult);
kfree(larval);
}
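/*
* A larval is a temporary stand-in algorithm: it holds the requested
* name in crypto_alg_list while a module load or self-test is in
* flight, and lookups block on its completion until the real ("adult")
* algorithm arrives or the request fails.
*/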
struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
{
struct crypto_larval *larval;
larval = kzalloc(sizeof(*larval), GFP_KERNEL);
if (!larval)
return ERR_PTR(-ENOMEM);
larval->mask = mask;
larval->alg.cra_flags = CRYPTO_ALG_LARVAL | type;
larval->alg.cra_priority = -1;
larval->alg.cra_destroy = crypto_larval_destroy;
strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
init_completion(&larval->completion);
return larval;
}
EXPORT_SYMBOL_GPL(crypto_larval_alloc);
static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_larval *larval;
larval = crypto_larval_alloc(name, type, mask);
if (IS_ERR(larval))
return ERR_CAST(larval);
refcount_set(&larval->alg.cra_refcnt, 2);
down_write(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, type, mask);
if (!alg) {
alg = &larval->alg;
list_add(&alg->cra_list, &crypto_alg_list);
}
up_write(&crypto_alg_sem);
if (alg != &larval->alg) {
kfree(larval);
if (crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
}
return alg;
}
void crypto_larval_kill(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
down_write(&crypto_alg_sem);
list_del(&alg->cra_list);
up_write(&crypto_alg_sem);
complete_all(&larval->completion);
crypto_alg_put(alg);
}
EXPORT_SYMBOL_GPL(crypto_larval_kill);
void crypto_wait_for_test(struct crypto_larval *larval)
{
int err;
err = crypto_probing_notify(CRYPTO_MSG_ALG_REGISTER, larval->adult);
if (WARN_ON_ONCE(err != NOTIFY_STOP))
goto out;
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
out:
crypto_larval_kill(&larval->alg);
}
EXPORT_SYMBOL_GPL(crypto_wait_for_test);
static void crypto_start_test(struct crypto_larval *larval)
{
if (!crypto_is_test_larval(larval))
return;
if (larval->test_started)
return;
down_write(&crypto_alg_sem);
if (larval->test_started) {
up_write(&crypto_alg_sem);
return;
}
larval->test_started = true;
up_write(&crypto_alg_sem);
crypto_wait_for_test(larval);
}
static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg)
{
struct crypto_larval *larval = (void *)alg;
long timeout;
if (!crypto_boot_test_finished())
crypto_start_test(larval);
timeout = wait_for_completion_killable_timeout(
&larval->completion, 60 * HZ);
alg = larval->adult;
if (timeout < 0)
alg = ERR_PTR(-EINTR);
else if (!timeout)
alg = ERR_PTR(-ETIMEDOUT);
else if (!alg)
alg = ERR_PTR(-ENOENT);
else if (IS_ERR(alg))
;
else if (crypto_is_test_larval(larval) &&
!(alg->cra_flags & CRYPTO_ALG_TESTED))
alg = ERR_PTR(-EAGAIN);
else if (alg->cra_flags & CRYPTO_ALG_FIPS_INTERNAL)
alg = ERR_PTR(-EAGAIN);
else if (!crypto_mod_get(alg))
alg = ERR_PTR(-EAGAIN);
crypto_mod_put(&larval->alg);
return alg;
}
static struct crypto_alg *crypto_alg_lookup(const char *name, u32 type,
u32 mask)
{
const u32 fips = CRYPTO_ALG_FIPS_INTERNAL;
struct crypto_alg *alg;
u32 test = 0;
if (!((type | mask) & CRYPTO_ALG_TESTED))
test |= CRYPTO_ALG_TESTED;
down_read(&crypto_alg_sem);
alg = __crypto_alg_lookup(name, (type | test) & ~fips,
(mask | test) & ~fips);
if (alg) {
if (((type | mask) ^ fips) & fips)
mask |= fips;
mask &= fips;
if (!crypto_is_larval(alg) &&
((type ^ alg->cra_flags) & mask)) {
/* Algorithm is disallowed in FIPS mode. */
crypto_mod_put(alg);
alg = ERR_PTR(-ENOENT);
}
} else if (test) {
alg = __crypto_alg_lookup(name, type, mask);
if (alg && !crypto_is_larval(alg)) {
/* Test failed */
crypto_mod_put(alg);
alg = ERR_PTR(-ELIBBAD);
}
}
up_read(&crypto_alg_sem);
return alg;
}
static struct crypto_alg *crypto_larval_lookup(const char *name, u32 type,
u32 mask)
{
struct crypto_alg *alg;
if (!name)
return ERR_PTR(-ENOENT);
type &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
alg = crypto_alg_lookup(name, type, mask);
if (!alg && !(mask & CRYPTO_NOLOAD)) {
request_module("crypto-%s", name);
if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
CRYPTO_ALG_NEED_FALLBACK))
request_module("crypto-%s-all", name);
alg = crypto_alg_lookup(name, type, mask);
}
if (!IS_ERR_OR_NULL(alg) && crypto_is_larval(alg))
alg = crypto_larval_wait(alg);
else if (!alg)
alg = crypto_larval_add(name, type, mask);
return alg;
}
int crypto_probing_notify(unsigned long val, void *v)
{
int ok;
ok = blocking_notifier_call_chain(&crypto_chain, val, v);
if (ok == NOTIFY_DONE) {
request_module("cryptomgr");
ok = blocking_notifier_call_chain(&crypto_chain, val, v);
}
return ok;
}
EXPORT_SYMBOL_GPL(crypto_probing_notify);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *larval;
int ok;
/*
* If the internal flag is set for a cipher, require a caller to
* invoke the cipher with the internal flag to use that cipher.
* Also, if a caller wants to allocate a cipher that may or may
* not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
* !(mask & CRYPTO_ALG_INTERNAL).
*/
if (!((type | mask) & CRYPTO_ALG_INTERNAL))
mask |= CRYPTO_ALG_INTERNAL;
larval = crypto_larval_lookup(name, type, mask);
if (IS_ERR(larval) || !crypto_is_larval(larval))
return larval;
ok = crypto_probing_notify(CRYPTO_MSG_ALG_REQUEST, larval);
if (ok == NOTIFY_STOP)
alg = crypto_larval_wait(larval);
else {
crypto_mod_put(larval);
alg = ERR_PTR(-ENOENT);
}
crypto_larval_kill(larval);
return alg;
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
static void crypto_exit_ops(struct crypto_tfm *tfm)
{
const struct crypto_type *type = tfm->__crt_alg->cra_type;
if (type && tfm->exit)
tfm->exit(tfm);
}
static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
const struct crypto_type *type_obj = alg->cra_type;
unsigned int len;
len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
if (type_obj)
return len + type_obj->ctxsize(alg, type, mask);
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
BUG();
case CRYPTO_ALG_TYPE_CIPHER:
len += crypto_cipher_ctxsize(alg);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
len += crypto_compress_ctxsize(alg);
break;
}
return len;
}
void crypto_shoot_alg(struct crypto_alg *alg)
{
down_write(&crypto_alg_sem);
alg->cra_flags |= CRYPTO_ALG_DYING;
up_write(&crypto_alg_sem);
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
struct crypto_tfm *__crypto_alloc_tfmgfp(struct crypto_alg *alg, u32 type,
u32 mask, gfp_t gfp)
{
struct crypto_tfm *tfm = NULL;
unsigned int tfm_size;
int err = -ENOMEM;
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
tfm = kzalloc(tfm_size, gfp);
if (tfm == NULL)
goto out_err;
tfm->__crt_alg = alg;
refcount_set(&tfm->refcnt, 1);
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
goto cra_init_failed;
goto out;
cra_init_failed:
crypto_exit_ops(tfm);
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(tfm);
out_err:
tfm = ERR_PTR(err);
out:
return tfm;
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfmgfp);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask)
{
return __crypto_alloc_tfmgfp(alg, type, mask, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
/**
* crypto_alloc_base - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
* @type: Type of algorithm
* @mask: Mask for type comparison
*
* This function should not be used by new algorithm types.
* Please use crypto_alloc_tfm instead.
*
* crypto_alloc_base() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable
* modules, it will then attempt to load a module of the same name or
* alias. If that fails it will send a query to any loaded crypto manager
* to construct an algorithm on the fly. A refcount is grabbed on the
* algorithm which is then associated with the new transform.
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
* crypto_alloc_skcipher().
*
* In case of error the return value is an error pointer.
*/
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
{
struct crypto_tfm *tfm;
int err;
for (;;) {
struct crypto_alg *alg;
alg = crypto_alg_mod_lookup(alg_name, type, mask);
if (IS_ERR(alg)) {
err = PTR_ERR(alg);
goto err;
}
tfm = __crypto_alloc_tfm(alg, type, mask);
if (!IS_ERR(tfm))
return tfm;
crypto_mod_put(alg);
err = PTR_ERR(tfm);
err:
if (err != -EAGAIN)
break;
if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
}
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_base);
static void *crypto_alloc_tfmmem(struct crypto_alg *alg,
const struct crypto_type *frontend, int node,
gfp_t gfp)
{
struct crypto_tfm *tfm;
unsigned int tfmsize;
unsigned int total;
char *mem;
tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc_node(total, gfp, node);
if (mem == NULL)
return ERR_PTR(-ENOMEM);
tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg;
tfm->node = node;
refcount_set(&tfm->refcnt, 1);
return mem;
}
void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend,
int node)
{
struct crypto_tfm *tfm;
char *mem;
int err;
mem = crypto_alloc_tfmmem(alg, frontend, node, GFP_KERNEL);
if (IS_ERR(mem))
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
err = frontend->init_tfm(tfm);
if (err)
goto out_free_tfm;
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
goto cra_init_failed;
goto out;
cra_init_failed:
crypto_exit_ops(tfm);
out_free_tfm:
if (err == -EAGAIN)
crypto_shoot_alg(alg);
kfree(mem);
mem = ERR_PTR(err);
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
void *crypto_clone_tfm(const struct crypto_type *frontend,
struct crypto_tfm *otfm)
{
struct crypto_alg *alg = otfm->__crt_alg;
struct crypto_tfm *tfm;
char *mem;
mem = ERR_PTR(-ESTALE);
if (unlikely(!crypto_mod_get(alg)))
goto out;
mem = crypto_alloc_tfmmem(alg, frontend, otfm->node, GFP_ATOMIC);
if (IS_ERR(mem)) {
crypto_mod_put(alg);
goto out;
}
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
tfm->crt_flags = otfm->crt_flags;
tfm->exit = otfm->exit;
out:
return mem;
}
EXPORT_SYMBOL_GPL(crypto_clone_tfm);
struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend,
u32 type, u32 mask)
{
if (frontend) {
type &= frontend->maskclear;
mask &= frontend->maskclear;
type |= frontend->type;
mask |= frontend->maskset;
}
return crypto_alg_mod_lookup(alg_name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_find_alg);
/**
* crypto_alloc_tfm_node - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
* @frontend: Frontend algorithm type
* @type: Type of algorithm
* @mask: Mask for type comparison
* @node: NUMA node on which to allocate the transform; NUMA_NO_NODE
* means the caller has no placement preference.
*
* crypto_alloc_tfm_node() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable
* modules, it will then attempt to load a module of the same name or
* alias. If that fails it will send a query to any loaded crypto manager
* to construct an algorithm on the fly. A refcount is grabbed on the
* algorithm which is then associated with the new transform.
*
* The returned transform is of a non-determinate type. Most people
* should use one of the more specific allocation functions such as
* crypto_alloc_skcipher().
*
* In case of error the return value is an error pointer.
*/
void *crypto_alloc_tfm_node(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask,
int node)
{
void *tfm;
int err;
for (;;) {
struct crypto_alg *alg;
alg = crypto_find_alg(alg_name, frontend, type, mask);
if (IS_ERR(alg)) {
err = PTR_ERR(alg);
goto err;
}
tfm = crypto_create_tfm_node(alg, frontend, node);
if (!IS_ERR(tfm))
return tfm;
crypto_mod_put(alg);
err = PTR_ERR(tfm);
err:
if (err != -EAGAIN)
break;
if (fatal_signal_pending(current)) {
err = -EINTR;
break;
}
}
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
/*
* crypto_destroy_tfm - Free crypto transform
* @mem: Start of tfm slab
* @tfm: Transform to free
*
* This function frees up the transform and any associated resources,
* then drops the refcount on the associated algorithm.
*/
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
{
struct crypto_alg *alg;
if (IS_ERR_OR_NULL(mem))
return;
if (!refcount_dec_and_test(&tfm->refcnt))
return;
alg = tfm->__crt_alg;
if (!tfm->exit && alg->cra_exit)
alg->cra_exit(tfm);
crypto_exit_ops(tfm);
crypto_mod_put(alg);
kfree_sensitive(mem);
}
EXPORT_SYMBOL_GPL(crypto_destroy_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
{
int ret = 0;
struct crypto_alg *alg = crypto_alg_mod_lookup(name, type, mask);
if (!IS_ERR(alg)) {
crypto_mod_put(alg);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL_GPL(crypto_has_alg);
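/*
 * Illustrative sketch, not part of api.c: probing for an algorithm
 * before depending on it. crypto_has_alg() returns 1 when a matching
 * algorithm is available (possibly after module autoloading) and 0
 * otherwise; the name below is an example.
 */
static bool example_have_gcm_aes(void)
{
	return crypto_has_alg("gcm(aes)", 0, 0);
}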
void crypto_req_done(void *data, int err)
{
struct crypto_wait *wait = data;
if (err == -EINPROGRESS)
return;
wait->err = err;
complete(&wait->completion);
}
EXPORT_SYMBOL_GPL(crypto_req_done);
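/*
 * Illustrative sketch, not part of api.c: the usual pairing of
 * crypto_req_done() with crypto_wait_req() to drive an asynchronous
 * request synchronously. Assumes <crypto/skcipher.h>; the transform
 * and request setup are elided.
 */
static int example_sync_encrypt(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);

	/* The backend invokes crypto_req_done(&wait, err) on completion. */
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/* Sleeps on wait.completion if the backend returns -EINPROGRESS. */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}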
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
| linux-master | crypto/api.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* PCBC: Propagating Cipher Block Chaining mode
*
* Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
* Written by David Howells ([email protected])
*
* Derived from cbc.c
* - Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
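/*
 * PCBC chains both the plaintext and the ciphertext into the next
 * block's IV. With E_K the underlying block cipher and V_1 = IV:
 *
 *	encrypt:  C_i = E_K(P_i xor V_i),  V_{i+1} = P_i xor C_i
 *	decrypt:  P_i = D_K(C_i) xor V_i,  V_{i+1} = P_i xor C_i
 *
 * This is exactly the crypto_xor()/crypto_xor_cpy() sequence in the
 * segment and in-place helpers below.
 */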
static int crypto_pcbc_encrypt_segment(struct skcipher_request *req,
struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
do {
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, dst, iv);
crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_pcbc_encrypt_inplace(struct skcipher_request *req,
struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
memcpy(tmpbuf, src, bsize);
crypto_xor(iv, src, bsize);
crypto_cipher_encrypt_one(tfm, src, iv);
crypto_xor_cpy(iv, tmpbuf, src, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_pcbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_encrypt_inplace(req, &walk,
cipher);
else
nbytes = crypto_pcbc_encrypt_segment(req, &walk,
cipher);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int crypto_pcbc_decrypt_segment(struct skcipher_request *req,
struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
do {
crypto_cipher_decrypt_one(tfm, dst, src);
crypto_xor(dst, iv, bsize);
crypto_xor_cpy(iv, dst, src, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_pcbc_decrypt_inplace(struct skcipher_request *req,
struct skcipher_walk *walk,
struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
memcpy(tmpbuf, src, bsize);
crypto_cipher_decrypt_one(tfm, src, src);
crypto_xor(src, iv, bsize);
crypto_xor_cpy(iv, src, tmpbuf, bsize);
src += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_pcbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
struct skcipher_walk walk;
unsigned int nbytes;
int err;
err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
if (walk.src.virt.addr == walk.dst.virt.addr)
nbytes = crypto_pcbc_decrypt_inplace(req, &walk,
cipher);
else
nbytes = crypto_pcbc_decrypt_segment(req, &walk,
cipher);
err = skcipher_walk_done(&walk, nbytes);
}
return err;
}
static int crypto_pcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
inst->alg.encrypt = crypto_pcbc_encrypt;
inst->alg.decrypt = crypto_pcbc_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
return err;
}
static struct crypto_template crypto_pcbc_tmpl = {
.name = "pcbc",
.create = crypto_pcbc_create,
.module = THIS_MODULE,
};
static int __init crypto_pcbc_module_init(void)
{
return crypto_register_template(&crypto_pcbc_tmpl);
}
static void __exit crypto_pcbc_module_exit(void)
{
crypto_unregister_template(&crypto_pcbc_tmpl);
}
subsys_initcall(crypto_pcbc_module_init);
module_exit(crypto_pcbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("pcbc");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
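/*
 * Illustrative sketch, not part of pcbc.c: instantiating the template
 * around AES from kernel code. "pcbc(aes)" resolves through this
 * template; availability depends on the kernel configuration.
 */
static struct crypto_skcipher *example_alloc_pcbc_aes(void)
{
	return crypto_alloc_skcipher("pcbc(aes)", 0, 0); /* or an ERR_PTR */
}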
| linux-master | crypto/pcbc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Software async crypto daemon.
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*
* Added AEAD support to cryptd.
* Authors: Tadeusz Struk ([email protected])
* Adrian Hoban <[email protected]>
* Gabriele Paoloni <[email protected]>
* Aidan O'Mahony ([email protected])
* Copyright (c) 2010, Intel Corporation.
*/
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <linux/refcount.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
static unsigned int cryptd_max_cpu_qlen = 1000;
module_param(cryptd_max_cpu_qlen, uint, 0);
MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
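/*
 * Example of tuning the queue depth at load time (assuming cryptd is
 * built as a module):
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=2000
 *
 * When built in, the equivalent kernel command-line knob is
 * "cryptd.cryptd_max_cpu_qlen=2000".
 */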
static struct workqueue_struct *cryptd_wq;
struct cryptd_cpu_queue {
struct crypto_queue queue;
struct work_struct work;
};
struct cryptd_queue {
/*
* Protected by disabling BH to allow enqueueing from softirq context and
* dequeuing from the kworker (cryptd_queue_worker()).
*/
struct cryptd_cpu_queue __percpu *cpu_queue;
};
struct cryptd_instance_ctx {
struct crypto_spawn spawn;
struct cryptd_queue *queue;
};
struct skcipherd_instance_ctx {
struct crypto_skcipher_spawn spawn;
struct cryptd_queue *queue;
};
struct hashd_instance_ctx {
struct crypto_shash_spawn spawn;
struct cryptd_queue *queue;
};
struct aead_instance_ctx {
struct crypto_aead_spawn aead_spawn;
struct cryptd_queue *queue;
};
struct cryptd_skcipher_ctx {
refcount_t refcnt;
struct crypto_skcipher *child;
};
struct cryptd_skcipher_request_ctx {
struct skcipher_request req;
};
struct cryptd_hash_ctx {
refcount_t refcnt;
struct crypto_shash *child;
};
struct cryptd_hash_request_ctx {
crypto_completion_t complete;
void *data;
struct shash_desc desc;
};
struct cryptd_aead_ctx {
refcount_t refcnt;
struct crypto_aead *child;
};
struct cryptd_aead_request_ctx {
struct aead_request req;
};
static void cryptd_queue_worker(struct work_struct *work);
static int cryptd_init_queue(struct cryptd_queue *queue,
unsigned int max_cpu_qlen)
{
int cpu;
struct cryptd_cpu_queue *cpu_queue;
queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
if (!queue->cpu_queue)
return -ENOMEM;
for_each_possible_cpu(cpu) {
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
}
pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
return 0;
}
static void cryptd_fini_queue(struct cryptd_queue *queue)
{
int cpu;
struct cryptd_cpu_queue *cpu_queue;
for_each_possible_cpu(cpu) {
cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
BUG_ON(cpu_queue->queue.qlen);
}
free_percpu(queue->cpu_queue);
}
static int cryptd_enqueue_request(struct cryptd_queue *queue,
struct crypto_async_request *request)
{
int err;
struct cryptd_cpu_queue *cpu_queue;
refcount_t *refcnt;
local_bh_disable();
cpu_queue = this_cpu_ptr(queue->cpu_queue);
err = crypto_enqueue_request(&cpu_queue->queue, request);
refcnt = crypto_tfm_ctx(request->tfm);
if (err == -ENOSPC)
goto out;
queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
if (!refcount_read(refcnt))
goto out;
refcount_inc(refcnt);
out:
local_bh_enable();
return err;
}
/* Called in workqueue context; performs one real crypto operation (via
 * req->complete) and reschedules itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
struct cryptd_cpu_queue *cpu_queue;
struct crypto_async_request *req, *backlog;
cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
/*
* Only handle one request at a time to avoid hogging crypto workqueue.
*/
local_bh_disable();
backlog = crypto_get_backlog(&cpu_queue->queue);
req = crypto_dequeue_request(&cpu_queue->queue);
local_bh_enable();
if (!req)
return;
if (backlog)
crypto_request_complete(backlog, -EINPROGRESS);
crypto_request_complete(req, 0);
if (cpu_queue->queue.qlen)
queue_work(cryptd_wq, &cpu_queue->work);
}
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
return ictx->queue;
}
static void cryptd_type_and_mask(struct crypto_attr_type *algt,
u32 *type, u32 *mask)
{
/*
* cryptd is allowed to wrap internal algorithms, but in that case the
* resulting cryptd instance will be marked as internal as well.
*/
*type = algt->type & CRYPTO_ALG_INTERNAL;
*mask = algt->mask & CRYPTO_ALG_INTERNAL;
/* No point in cryptd wrapping an algorithm that's already async. */
*mask |= CRYPTO_ALG_ASYNC;
*mask |= crypto_algt_inherited_mask(algt);
}
static int cryptd_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)",
alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 50;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
return 0;
}
static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child = ctx->child;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child,
crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, keylen);
}
static struct skcipher_request *cryptd_skcipher_prepare(
struct skcipher_request *req, int err)
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->req;
struct cryptd_skcipher_ctx *ctx;
struct crypto_skcipher *child;
req->base.complete = subreq->base.complete;
req->base.data = subreq->base.data;
if (unlikely(err == -EINPROGRESS))
return NULL;
ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
child = ctx->child;
skcipher_request_set_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
return subreq;
}
static void cryptd_skcipher_complete(struct skcipher_request *req, int err,
crypto_completion_t complete)
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = &rctx->req;
int refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
skcipher_request_complete(req, err);
local_bh_enable();
if (unlikely(err == -EINPROGRESS)) {
subreq->base.complete = req->base.complete;
subreq->base.data = req->base.data;
req->base.complete = complete;
req->base.data = req;
} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_skcipher(tfm);
}
static void cryptd_skcipher_encrypt(void *data, int err)
{
struct skcipher_request *req = data;
struct skcipher_request *subreq;
subreq = cryptd_skcipher_prepare(req, err);
if (likely(subreq))
err = crypto_skcipher_encrypt(subreq);
cryptd_skcipher_complete(req, err, cryptd_skcipher_encrypt);
}
static void cryptd_skcipher_decrypt(void *data, int err)
{
struct skcipher_request *req = data;
struct skcipher_request *subreq;
subreq = cryptd_skcipher_prepare(req, err);
if (likely(subreq))
err = crypto_skcipher_decrypt(subreq);
cryptd_skcipher_complete(req, err, cryptd_skcipher_decrypt);
}
static int cryptd_skcipher_enqueue(struct skcipher_request *req,
crypto_completion_t compl)
{
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct skcipher_request *subreq = &rctx->req;
struct cryptd_queue *queue;
queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
subreq->base.complete = req->base.complete;
subreq->base.data = req->base.data;
req->base.complete = compl;
req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}
static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}
static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct crypto_skcipher_spawn *spawn = &ictx->spawn;
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *cipher;
cipher = crypto_spawn_skcipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
crypto_skcipher_set_reqsize(
tfm, sizeof(struct cryptd_skcipher_request_ctx) +
crypto_skcipher_reqsize(cipher));
return 0;
}
static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
}
static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ctx->spawn);
kfree(inst);
}
static int cryptd_create_skcipher(struct crypto_template *tmpl,
struct rtattr **tb,
struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct skcipherd_instance_ctx *ctx;
struct skcipher_instance *inst;
struct skcipher_alg *alg;
u32 type;
u32 mask;
int err;
cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = skcipher_instance_ctx(inst);
ctx->queue = queue;
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), type, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_skcipher_alg(&ctx->spawn);
err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);
inst->alg.init = cryptd_skcipher_init_tfm;
inst->alg.exit = cryptd_skcipher_exit_tfm;
inst->alg.setkey = cryptd_skcipher_setkey;
inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;
inst->free = cryptd_skcipher_free;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
cryptd_skcipher_free(inst);
}
return err;
}
static int cryptd_hash_init_tfm(struct crypto_ahash *tfm)
{
struct ahash_instance *inst = ahash_alg_instance(tfm);
struct hashd_instance_ctx *ictx = ahash_instance_ctx(inst);
struct crypto_shash_spawn *spawn = &ictx->spawn;
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_spawn_shash(spawn);
if (IS_ERR(hash))
return PTR_ERR(hash);
ctx->child = hash;
crypto_ahash_set_reqsize(tfm,
sizeof(struct cryptd_hash_request_ctx) +
crypto_shash_descsize(hash));
return 0;
}
static int cryptd_hash_clone_tfm(struct crypto_ahash *ntfm,
struct crypto_ahash *tfm)
{
struct cryptd_hash_ctx *nctx = crypto_ahash_ctx(ntfm);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *hash;
hash = crypto_clone_shash(ctx->child);
if (IS_ERR(hash))
return PTR_ERR(hash);
nctx->child = hash;
return 0;
}
static void cryptd_hash_exit_tfm(struct crypto_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
crypto_free_shash(ctx->child);
}
static int cryptd_hash_setkey(struct crypto_ahash *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
struct crypto_shash *child = ctx->child;
crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_shash_setkey(child, key, keylen);
}
static int cryptd_hash_enqueue(struct ahash_request *req,
crypto_completion_t compl)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_queue *queue =
cryptd_get_queue(crypto_ahash_tfm(tfm));
rctx->complete = req->base.complete;
rctx->data = req->base.data;
req->base.complete = compl;
req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
static struct shash_desc *cryptd_hash_prepare(struct ahash_request *req,
int err)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
req->base.complete = rctx->complete;
req->base.data = rctx->data;
if (unlikely(err == -EINPROGRESS))
return NULL;
return &rctx->desc;
}
static void cryptd_hash_complete(struct ahash_request *req, int err,
crypto_completion_t complete)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
int refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
ahash_request_complete(req, err);
local_bh_enable();
if (err == -EINPROGRESS) {
req->base.complete = complete;
req->base.data = req;
} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_ahash(tfm);
}
static void cryptd_hash_init(void *data, int err)
{
struct ahash_request *req = data;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
struct shash_desc *desc;
desc = cryptd_hash_prepare(req, err);
if (unlikely(!desc))
goto out;
desc->tfm = child;
err = crypto_shash_init(desc);
out:
cryptd_hash_complete(req, err, cryptd_hash_init);
}
static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_init);
}
static void cryptd_hash_update(void *data, int err)
{
struct ahash_request *req = data;
struct shash_desc *desc;
desc = cryptd_hash_prepare(req, err);
if (likely(desc))
err = shash_ahash_update(req, desc);
cryptd_hash_complete(req, err, cryptd_hash_update);
}
static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_update);
}
static void cryptd_hash_final(void *data, int err)
{
struct ahash_request *req = data;
struct shash_desc *desc;
desc = cryptd_hash_prepare(req, err);
if (likely(desc))
err = crypto_shash_final(desc, req->result);
cryptd_hash_complete(req, err, cryptd_hash_final);
}
static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_final);
}
static void cryptd_hash_finup(void *data, int err)
{
struct ahash_request *req = data;
struct shash_desc *desc;
desc = cryptd_hash_prepare(req, err);
if (likely(desc))
err = shash_ahash_finup(req, desc);
cryptd_hash_complete(req, err, cryptd_hash_finup);
}
static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_finup);
}
static void cryptd_hash_digest(void *data, int err)
{
struct ahash_request *req = data;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct crypto_shash *child = ctx->child;
struct shash_desc *desc;
desc = cryptd_hash_prepare(req, err);
if (unlikely(!desc))
goto out;
desc->tfm = child;
err = shash_ahash_digest(req, desc);
out:
cryptd_hash_complete(req, err, cryptd_hash_digest);
}
static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
return cryptd_hash_enqueue(req, cryptd_hash_digest);
}
static int cryptd_hash_export(struct ahash_request *req, void *out)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
return crypto_shash_export(&rctx->desc, out);
}
static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
struct shash_desc *desc = cryptd_shash_desc(req);
desc->tfm = ctx->child;
return crypto_shash_import(desc, in);
}
static void cryptd_hash_free(struct ahash_instance *inst)
{
struct hashd_instance_ctx *ctx = ahash_instance_ctx(inst);
crypto_drop_shash(&ctx->spawn);
kfree(inst);
}
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct hashd_instance_ctx *ctx;
struct ahash_instance *inst;
struct shash_alg *alg;
u32 type;
u32 mask;
int err;
cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = ahash_instance_ctx(inst);
ctx->queue = queue;
err = crypto_grab_shash(&ctx->spawn, ahash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), type, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_shash_alg(&ctx->spawn);
err = cryptd_init_instance(ahash_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.halg.base.cra_flags |= CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & (CRYPTO_ALG_INTERNAL|
CRYPTO_ALG_OPTIONAL_KEY));
inst->alg.halg.digestsize = alg->digestsize;
inst->alg.halg.statesize = alg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.init_tfm = cryptd_hash_init_tfm;
inst->alg.clone_tfm = cryptd_hash_clone_tfm;
inst->alg.exit_tfm = cryptd_hash_exit_tfm;
inst->alg.init = cryptd_hash_init_enqueue;
inst->alg.update = cryptd_hash_update_enqueue;
inst->alg.final = cryptd_hash_final_enqueue;
inst->alg.finup = cryptd_hash_finup_enqueue;
inst->alg.export = cryptd_hash_export;
inst->alg.import = cryptd_hash_import;
if (crypto_shash_alg_has_setkey(alg))
inst->alg.setkey = cryptd_hash_setkey;
inst->alg.digest = cryptd_hash_digest_enqueue;
inst->free = cryptd_hash_free;
err = ahash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
cryptd_hash_free(inst);
}
return err;
}
static int cryptd_aead_setkey(struct crypto_aead *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
return crypto_aead_setkey(child, key, keylen);
}
static int cryptd_aead_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
return crypto_aead_setauthsize(child, authsize);
}
static void cryptd_aead_crypt(struct aead_request *req,
struct crypto_aead *child, int err,
int (*crypt)(struct aead_request *req),
crypto_completion_t compl)
{
struct cryptd_aead_request_ctx *rctx;
struct aead_request *subreq;
struct cryptd_aead_ctx *ctx;
struct crypto_aead *tfm;
int refcnt;
rctx = aead_request_ctx(req);
subreq = &rctx->req;
req->base.complete = subreq->base.complete;
req->base.data = subreq->base.data;
tfm = crypto_aead_reqtfm(req);
if (unlikely(err == -EINPROGRESS))
goto out;
aead_request_set_tfm(subreq, child);
aead_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
aead_request_set_ad(subreq, req->assoclen);
err = crypt(subreq);
out:
ctx = crypto_aead_ctx(tfm);
refcnt = refcount_read(&ctx->refcnt);
local_bh_disable();
aead_request_complete(req, err);
local_bh_enable();
if (err == -EINPROGRESS) {
subreq->base.complete = req->base.complete;
subreq->base.data = req->base.data;
req->base.complete = compl;
req->base.data = req;
} else if (refcnt && refcount_dec_and_test(&ctx->refcnt))
crypto_free_aead(tfm);
}
static void cryptd_aead_encrypt(void *data, int err)
{
struct aead_request *req = data;
struct cryptd_aead_ctx *ctx;
struct crypto_aead *child;
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
child = ctx->child;
cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt,
cryptd_aead_encrypt);
}
static void cryptd_aead_decrypt(void *data, int err)
{
struct aead_request *req = data;
struct cryptd_aead_ctx *ctx;
struct crypto_aead *child;
ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
child = ctx->child;
cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt,
cryptd_aead_decrypt);
}
static int cryptd_aead_enqueue(struct aead_request *req,
crypto_completion_t compl)
{
struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
struct aead_request *subreq = &rctx->req;
subreq->base.complete = req->base.complete;
subreq->base.data = req->base.data;
req->base.complete = compl;
req->base.data = req;
return cryptd_enqueue_request(queue, &req->base);
}
static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}
static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}
static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *cipher;
cipher = crypto_spawn_aead(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
crypto_aead_set_reqsize(
tfm, sizeof(struct cryptd_aead_request_ctx) +
crypto_aead_reqsize(cipher));
return 0;
}
static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
static void cryptd_aead_free(struct aead_instance *inst)
{
struct aead_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_aead(&ctx->aead_spawn);
kfree(inst);
}
static int cryptd_create_aead(struct crypto_template *tmpl,
struct rtattr **tb,
struct crypto_attr_type *algt,
struct cryptd_queue *queue)
{
struct aead_instance_ctx *ctx;
struct aead_instance *inst;
struct aead_alg *alg;
u32 type;
u32 mask;
int err;
cryptd_type_and_mask(algt, &type, &mask);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
ctx->queue = queue;
err = crypto_grab_aead(&ctx->aead_spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), type, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC |
(alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.init = cryptd_aead_init_tfm;
inst->alg.exit = cryptd_aead_exit_tfm;
inst->alg.setkey = cryptd_aead_setkey;
inst->alg.setauthsize = cryptd_aead_setauthsize;
inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
inst->free = cryptd_aead_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
cryptd_aead_free(inst);
}
return err;
}
static struct cryptd_queue queue;
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_SKCIPHER:
return cryptd_create_skcipher(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_HASH:
return cryptd_create_hash(tmpl, tb, algt, &queue);
case CRYPTO_ALG_TYPE_AEAD:
return cryptd_create_aead(tmpl, tb, algt, &queue);
}
return -EINVAL;
}
static struct crypto_template cryptd_tmpl = {
.name = "cryptd",
.create = cryptd_create,
.module = THIS_MODULE,
};
struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct cryptd_skcipher_ctx *ctx;
struct crypto_skcipher *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_skcipher(tfm);
return ERR_PTR(-EINVAL);
}
ctx = crypto_skcipher_ctx(tfm);
refcount_set(&ctx->refcnt, 1);
return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);
struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);
void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
if (refcount_dec_and_test(&ctx->refcnt))
crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);
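/*
 * Illustrative sketch, not part of cryptd.c: wrapping a cipher in
 * cryptd and keying the underlying (child) transform directly. The
 * algorithm name and the all-zero key are examples only.
 */
static int example_use_cryptd(void)
{
	static const u8 example_key[16]; /* all-zero example key */
	struct cryptd_skcipher *ctfm;
	int err;

	ctfm = cryptd_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(ctfm))
		return PTR_ERR(ctfm);

	/* cryptd_skcipher_setkey() would forward to the same child. */
	err = crypto_skcipher_setkey(cryptd_skcipher_child(ctfm),
				     example_key, sizeof(example_key));

	cryptd_free_skcipher(ctfm);
	return err;
}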
struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct cryptd_hash_ctx *ctx;
struct crypto_ahash *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_ahash(tfm);
return ERR_PTR(-EINVAL);
}
ctx = crypto_ahash_ctx(tfm);
refcount_set(&ctx->refcnt, 1);
return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);
struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);
bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);
void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
if (refcount_dec_and_test(&ctx->refcnt))
crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);
struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
u32 type, u32 mask)
{
char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
struct cryptd_aead_ctx *ctx;
struct crypto_aead *tfm;
if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
"cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-EINVAL);
tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
crypto_free_aead(tfm);
return ERR_PTR(-EINVAL);
}
ctx = crypto_aead_ctx(tfm);
refcount_set(&ctx->refcnt, 1);
return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
struct cryptd_aead_ctx *ctx;
ctx = crypto_aead_ctx(&tfm->base);
return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);
bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
return refcount_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);
void cryptd_free_aead(struct cryptd_aead *tfm)
{
struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);
if (refcount_dec_and_test(&ctx->refcnt))
crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);
static int __init cryptd_init(void)
{
int err;
cryptd_wq = alloc_workqueue("cryptd", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
1);
if (!cryptd_wq)
return -ENOMEM;
err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen);
if (err)
goto err_destroy_wq;
err = crypto_register_template(&cryptd_tmpl);
if (err)
goto err_fini_queue;
return 0;
err_fini_queue:
cryptd_fini_queue(&queue);
err_destroy_wq:
destroy_workqueue(cryptd_wq);
return err;
}
static void __exit cryptd_exit(void)
{
destroy_workqueue(cryptd_wq);
cryptd_fini_queue(&queue);
crypto_unregister_template(&cryptd_tmpl);
}
subsys_initcall(cryptd_init);
module_exit(cryptd_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
| linux-master | crypto/cryptd.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Key-agreement Protocol Primitives (KPP)
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <[email protected]>
*/
#include <crypto/internal/kpp.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
static int __maybe_unused crypto_kpp_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_kpp rkpp;
memset(&rkpp, 0, sizeof(rkpp));
strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
return nla_put(skb, CRYPTOCFGA_REPORT_KPP, sizeof(rkpp), &rkpp);
}
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_kpp_show(struct seq_file *m, struct crypto_alg *alg)
{
seq_puts(m, "type : kpp\n");
}
static void crypto_kpp_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
struct kpp_alg *alg = crypto_kpp_alg(kpp);
alg->exit(kpp);
}
static int crypto_kpp_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_kpp *kpp = __crypto_kpp_tfm(tfm);
struct kpp_alg *alg = crypto_kpp_alg(kpp);
if (alg->exit)
kpp->base.exit = crypto_kpp_exit_tfm;
if (alg->init)
return alg->init(kpp);
return 0;
}
static void crypto_kpp_free_instance(struct crypto_instance *inst)
{
struct kpp_instance *kpp = kpp_instance(inst);
kpp->free(kpp);
}
static int __maybe_unused crypto_kpp_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct kpp_alg *kpp = __crypto_kpp_alg(alg);
struct crypto_istat_kpp *istat;
struct crypto_stat_kpp rkpp;
istat = kpp_get_stat(kpp);
memset(&rkpp, 0, sizeof(rkpp));
strscpy(rkpp.type, "kpp", sizeof(rkpp.type));
rkpp.stat_setsecret_cnt = atomic64_read(&istat->setsecret_cnt);
rkpp.stat_generate_public_key_cnt =
atomic64_read(&istat->generate_public_key_cnt);
rkpp.stat_compute_shared_secret_cnt =
atomic64_read(&istat->compute_shared_secret_cnt);
rkpp.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_KPP, sizeof(rkpp), &rkpp);
}
static const struct crypto_type crypto_kpp_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_kpp_init_tfm,
.free = crypto_kpp_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_kpp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_kpp_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_kpp_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_KPP,
.tfmsize = offsetof(struct crypto_kpp, base),
};
struct crypto_kpp *crypto_alloc_kpp(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_kpp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_kpp);
int crypto_grab_kpp(struct crypto_kpp_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_kpp_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_kpp);
int crypto_has_kpp(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_kpp_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_kpp);
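/*
 * Illustrative sketch, not part of kpp.c: allocating and releasing a
 * KPP transform. "ecdh-nist-p256" is one in-kernel implementation;
 * the name is an example and depends on the kernel configuration.
 */
static int example_alloc_kpp(void)
{
	struct crypto_kpp *kpp;

	kpp = crypto_alloc_kpp("ecdh-nist-p256", 0, 0);
	if (IS_ERR(kpp))
		return PTR_ERR(kpp);

	/* ... crypto_kpp_set_secret(), crypto_kpp_generate_public_key() ... */

	crypto_free_kpp(kpp);
	return 0;
}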
static void kpp_prepare_alg(struct kpp_alg *alg)
{
struct crypto_istat_kpp *istat = kpp_get_stat(alg);
struct crypto_alg *base = &alg->base;
base->cra_type = &crypto_kpp_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_KPP;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
}
int crypto_register_kpp(struct kpp_alg *alg)
{
struct crypto_alg *base = &alg->base;
kpp_prepare_alg(alg);
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_kpp);
void crypto_unregister_kpp(struct kpp_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_kpp);
int kpp_register_instance(struct crypto_template *tmpl,
struct kpp_instance *inst)
{
if (WARN_ON(!inst->free))
return -EINVAL;
kpp_prepare_alg(&inst->alg);
return crypto_register_instance(tmpl, kpp_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(kpp_register_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Key-agreement Protocol Primitives");
| linux-master | crypto/kpp.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AEAD: Authenticated Encryption with Associated Data
*
* This file provides API support for AEAD algorithms.
*
* Copyright (c) 2007-2015 Herbert Xu <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <linux/cryptouser.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "internal.h"
static inline struct crypto_istat_aead *aead_get_stat(struct aead_alg *alg)
{
#ifdef CONFIG_CRYPTO_STATS
return &alg->stat;
#else
return NULL;
#endif
}
static int setkey_unaligned(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
int ret;
u8 *buffer, *alignbuffer;
unsigned long absize;
absize = keylen + alignmask;
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
ret = crypto_aead_alg(tfm)->setkey(tfm, alignbuffer, keylen);
memset(alignbuffer, 0, keylen);
kfree(buffer);
return ret;
}
int crypto_aead_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
unsigned long alignmask = crypto_aead_alignmask(tfm);
int err;
if ((unsigned long)key & alignmask)
err = setkey_unaligned(tfm, key, keylen);
else
err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
if (unlikely(err)) {
crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
return err;
}
crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setkey);
int crypto_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
int err;
if ((!authsize && crypto_aead_maxauthsize(tfm)) ||
authsize > crypto_aead_maxauthsize(tfm))
return -EINVAL;
if (crypto_aead_alg(tfm)->setauthsize) {
err = crypto_aead_alg(tfm)->setauthsize(tfm, authsize);
if (err)
return err;
}
tfm->authsize = authsize;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_aead_setauthsize);
static inline int crypto_aead_errstat(struct crypto_istat_aead *istat, int err)
{
if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
return err;
if (err && err != -EINPROGRESS && err != -EBUSY)
atomic64_inc(&istat->err_cnt);
return err;
}
int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct aead_alg *alg = crypto_aead_alg(aead);
struct crypto_istat_aead *istat;
int ret;
istat = aead_get_stat(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
atomic64_inc(&istat->encrypt_cnt);
atomic64_add(req->cryptlen, &istat->encrypt_tlen);
}
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else
ret = alg->encrypt(req);
return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_encrypt);
int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct aead_alg *alg = crypto_aead_alg(aead);
struct crypto_istat_aead *istat;
int ret;
istat = aead_get_stat(alg);
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
atomic64_inc(&istat->decrypt_cnt);
atomic64_add(req->cryptlen, &istat->decrypt_tlen);
}
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
ret = -ENOKEY;
else if (req->cryptlen < crypto_aead_authsize(aead))
ret = -EINVAL;
else
ret = alg->decrypt(req);
return crypto_aead_errstat(istat, ret);
}
EXPORT_SYMBOL_GPL(crypto_aead_decrypt);
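/*
 * Illustrative sketch, not part of aead.c: the caller-side sequence
 * for a one-shot synchronous AEAD encryption. The algorithm name,
 * all-zero key and 16-byte tag size are examples; the scatterlist is
 * assumed to hold the AD followed by the plaintext, with room for the
 * tag appended to the ciphertext.
 */
static int example_aead_encrypt(struct scatterlist *sg, unsigned int assoclen,
				unsigned int ptlen, u8 *iv)
{
	static const u8 key[16]; /* all-zero example key */
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	/* In place: dst == src; sg must leave room for the 16-byte tag. */
	aead_request_set_crypt(req, sg, sg, ptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}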
static void crypto_aead_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
alg->exit(aead);
}
static int crypto_aead_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_aead *aead = __crypto_aead_cast(tfm);
struct aead_alg *alg = crypto_aead_alg(aead);
crypto_aead_set_flags(aead, CRYPTO_TFM_NEED_KEY);
aead->authsize = alg->maxauthsize;
if (alg->exit)
aead->base.exit = crypto_aead_exit_tfm;
if (alg->init)
return alg->init(aead);
return 0;
}
static int __maybe_unused crypto_aead_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_aead raead;
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
memset(&raead, 0, sizeof(raead));
strscpy(raead.type, "aead", sizeof(raead.type));
strscpy(raead.geniv, "<none>", sizeof(raead.geniv));
raead.blocksize = alg->cra_blocksize;
raead.maxauthsize = aead->maxauthsize;
raead.ivsize = aead->ivsize;
return nla_put(skb, CRYPTOCFGA_REPORT_AEAD, sizeof(raead), &raead);
}
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_aead_show(struct seq_file *m, struct crypto_alg *alg)
{
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
seq_printf(m, "type : aead\n");
seq_printf(m, "async : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
"yes" : "no");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "ivsize : %u\n", aead->ivsize);
seq_printf(m, "maxauthsize : %u\n", aead->maxauthsize);
seq_printf(m, "geniv : <none>\n");
}
static void crypto_aead_free_instance(struct crypto_instance *inst)
{
struct aead_instance *aead = aead_instance(inst);
aead->free(aead);
}
static int __maybe_unused crypto_aead_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct aead_alg *aead = container_of(alg, struct aead_alg, base);
struct crypto_istat_aead *istat = aead_get_stat(aead);
struct crypto_stat_aead raead;
memset(&raead, 0, sizeof(raead));
strscpy(raead.type, "aead", sizeof(raead.type));
raead.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
raead.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
raead.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
raead.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
raead.stat_err_cnt = atomic64_read(&istat->err_cnt);
return nla_put(skb, CRYPTOCFGA_STAT_AEAD, sizeof(raead), &raead);
}
static const struct crypto_type crypto_aead_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_aead_init_tfm,
.free = crypto_aead_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_aead_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_aead_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_aead_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_AEAD,
.tfmsize = offsetof(struct crypto_aead, base),
};
int crypto_grab_aead(struct crypto_aead_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_aead_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_aead);
struct crypto_aead *crypto_alloc_aead(const char *alg_name, u32 type, u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_aead_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_aead);
static int aead_prepare_alg(struct aead_alg *alg)
{
struct crypto_istat_aead *istat = aead_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (max3(alg->maxauthsize, alg->ivsize, alg->chunksize) >
PAGE_SIZE / 8)
return -EINVAL;
if (!alg->chunksize)
alg->chunksize = base->cra_blocksize;
base->cra_type = &crypto_aead_type;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
base->cra_flags |= CRYPTO_ALG_TYPE_AEAD;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}
int crypto_register_aead(struct aead_alg *alg)
{
struct crypto_alg *base = &alg->base;
int err;
err = aead_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_aead);
void crypto_unregister_aead(struct aead_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_aead);
int crypto_register_aeads(struct aead_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_aead(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_aead(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_aeads);
void crypto_unregister_aeads(struct aead_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_aeads);
int aead_register_instance(struct crypto_template *tmpl,
struct aead_instance *inst)
{
int err;
if (WARN_ON(!inst->free))
return -EINVAL;
err = aead_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, aead_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(aead_register_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Authenticated Encryption with Associated Data (AEAD)");
| linux-master | crypto/aead.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Crypto user configuration API.
*
* Copyright (C) 2011 secunet Security Networks AG
* Copyright (C) 2011 Steffen Klassert <[email protected]>
*/
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptouser.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <net/netlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/cryptouser.h>
#include "internal.h"
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
static DEFINE_MUTEX(crypto_cfg_mutex);
struct crypto_dump_info {
struct sk_buff *in_skb;
struct sk_buff *out_skb;
u32 nlmsg_seq;
u16 nlmsg_flags;
};
struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
int match = 0;
if (crypto_is_larval(q))
continue;
if ((q->cra_flags ^ p->cru_type) & p->cru_mask)
continue;
if (strlen(p->cru_driver_name))
match = !strcmp(q->cra_driver_name,
p->cru_driver_name);
else if (!exact)
match = !strcmp(q->cra_name, p->cru_name);
if (!match)
continue;
if (unlikely(!crypto_mod_get(q)))
continue;
alg = q;
break;
}
up_read(&crypto_alg_sem);
return alg;
}
static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_cipher rcipher;
memset(&rcipher, 0, sizeof(rcipher));
strscpy(rcipher.type, "cipher", sizeof(rcipher.type));
rcipher.blocksize = alg->cra_blocksize;
rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
return nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
sizeof(rcipher), &rcipher);
}
static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_comp rcomp;
memset(&rcomp, 0, sizeof(rcomp));
strscpy(rcomp.type, "compression", sizeof(rcomp.type));
return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS, sizeof(rcomp), &rcomp);
}
static int crypto_report_one(struct crypto_alg *alg,
struct crypto_user_alg *ualg, struct sk_buff *skb)
{
memset(ualg, 0, sizeof(*ualg));
strscpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
strscpy(ualg->cru_driver_name, alg->cra_driver_name,
sizeof(ualg->cru_driver_name));
strscpy(ualg->cru_module_name, module_name(alg->cra_module),
sizeof(ualg->cru_module_name));
ualg->cru_type = 0;
ualg->cru_mask = 0;
ualg->cru_flags = alg->cra_flags;
ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
goto nla_put_failure;
if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
struct crypto_report_larval rl;
memset(&rl, 0, sizeof(rl));
strscpy(rl.type, "larval", sizeof(rl.type));
if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL, sizeof(rl), &rl))
goto nla_put_failure;
goto out;
}
if (alg->cra_type && alg->cra_type->report) {
if (alg->cra_type->report(skb, alg))
goto nla_put_failure;
goto out;
}
switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
case CRYPTO_ALG_TYPE_CIPHER:
if (crypto_report_cipher(skb, alg))
goto nla_put_failure;
break;
case CRYPTO_ALG_TYPE_COMPRESS:
if (crypto_report_comp(skb, alg))
goto nla_put_failure;
break;
}
out:
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int crypto_report_alg(struct crypto_alg *alg,
struct crypto_dump_info *info)
{
struct sk_buff *in_skb = info->in_skb;
struct sk_buff *skb = info->out_skb;
struct nlmsghdr *nlh;
struct crypto_user_alg *ualg;
int err = 0;
nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
CRYPTO_MSG_GETALG, sizeof(*ualg), info->nlmsg_flags);
if (!nlh) {
err = -EMSGSIZE;
goto out;
}
ualg = nlmsg_data(nlh);
err = crypto_report_one(alg, ualg, skb);
if (err) {
nlmsg_cancel(skb, nlh);
goto out;
}
nlmsg_end(skb, nlh);
out:
return err;
}
static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
struct nlattr **attrs)
{
struct net *net = sock_net(in_skb->sk);
struct crypto_user_alg *p = nlmsg_data(in_nlh);
struct crypto_alg *alg;
struct sk_buff *skb;
struct crypto_dump_info info;
int err;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 0);
if (!alg)
return -ENOENT;
err = -ENOMEM;
skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
goto drop_alg;
info.in_skb = in_skb;
info.out_skb = skb;
info.nlmsg_seq = in_nlh->nlmsg_seq;
info.nlmsg_flags = 0;
err = crypto_report_alg(alg, &info);
drop_alg:
crypto_mod_put(alg);
if (err) {
kfree_skb(skb);
return err;
}
return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
}
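/*
 * Illustrative userspace sketch, not part of this file: issuing the
 * CRYPTO_MSG_GETALG request that crypto_report() above services.
 * Reply parsing is elided and the queried name is an example.
 */
#if 0	/* userspace code, kept out of the kernel build */
#include <linux/cryptouser.h>
#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int query_alg(const char *name)
{
	struct {
		struct nlmsghdr nlh;
		struct crypto_user_alg alg;
	} req;
	int fd, err;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.alg));
	req.nlh.nlmsg_type = CRYPTO_MSG_GETALG;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	strncpy(req.alg.cru_name, name, sizeof(req.alg.cru_name) - 1);

	err = send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
	/* ... recv() the reply and walk the CRYPTOCFGA_* attributes ... */
	close(fd);
	return err;
}
#endif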
static int crypto_dump_report(struct sk_buff *skb, struct netlink_callback *cb)
{
const size_t start_pos = cb->args[0];
size_t pos = 0;
struct crypto_dump_info info;
struct crypto_alg *alg;
int res;
info.in_skb = cb->skb;
info.out_skb = skb;
info.nlmsg_seq = cb->nlh->nlmsg_seq;
info.nlmsg_flags = NLM_F_MULTI;
down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list) {
if (pos >= start_pos) {
res = crypto_report_alg(alg, &info);
if (res == -EMSGSIZE)
break;
if (res)
goto out;
}
pos++;
}
cb->args[0] = pos;
res = skb->len;
out:
up_read(&crypto_alg_sem);
return res;
}
static int crypto_dump_report_done(struct netlink_callback *cb)
{
return 0;
}
static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
LIST_HEAD(list);
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
if (priority && !strlen(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
down_write(&crypto_alg_sem);
crypto_remove_spawns(alg, &list, NULL);
if (priority)
alg->cra_priority = nla_get_u32(priority);
up_write(&crypto_alg_sem);
crypto_mod_put(alg);
crypto_remove_final(&list);
return 0;
}
static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
int err;
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
alg = crypto_alg_match(p, 1);
if (!alg)
return -ENOENT;
/* We cannot unregister core algorithms such as aes-generic.
 * We would lose the reference in crypto_alg_list to this algorithm
 * if we tried to unregister. Unregistering such an algorithm without
 * removing the module is not possible, so we restrict ourselves to
 * crypto instances that are built from templates. */
err = -EINVAL;
if (!(alg->cra_flags & CRYPTO_ALG_INSTANCE))
goto drop_alg;
err = -EBUSY;
if (refcount_read(&alg->cra_refcnt) > 2)
goto drop_alg;
crypto_unregister_instance((struct crypto_instance *)alg);
err = 0;
drop_alg:
crypto_mod_put(alg);
return err;
}
static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
int exact = 0;
const char *name;
struct crypto_alg *alg;
struct crypto_user_alg *p = nlmsg_data(nlh);
struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
return -EINVAL;
if (strlen(p->cru_driver_name))
exact = 1;
if (priority && !exact)
return -EINVAL;
alg = crypto_alg_match(p, exact);
if (alg) {
crypto_mod_put(alg);
return -EEXIST;
}
if (strlen(p->cru_driver_name))
name = p->cru_driver_name;
else
name = p->cru_name;
alg = crypto_alg_mod_lookup(name, p->cru_type, p->cru_mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
down_write(&crypto_alg_sem);
if (priority)
alg->cra_priority = nla_get_u32(priority);
up_write(&crypto_alg_sem);
crypto_mod_put(alg);
return 0;
}
static int crypto_del_rng(struct sk_buff *skb, struct nlmsghdr *nlh,
struct nlattr **attrs)
{
if (!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
return crypto_del_default_rng();
}
#define MSGSIZE(type) sizeof(struct type)
static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
[CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
[CRYPTOCFGA_PRIORITY_VAL] = { .type = NLA_U32},
};
#undef MSGSIZE
static const struct crypto_link {
int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
int (*dump)(struct sk_buff *, struct netlink_callback *);
int (*done)(struct netlink_callback *);
} crypto_dispatch[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_NEWALG - CRYPTO_MSG_BASE] = { .doit = crypto_add_alg},
[CRYPTO_MSG_DELALG - CRYPTO_MSG_BASE] = { .doit = crypto_del_alg},
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = { .doit = crypto_update_alg},
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = { .doit = crypto_report,
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
[CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat},
};
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[CRYPTOCFGA_MAX+1];
const struct crypto_link *link;
int type, err;
type = nlh->nlmsg_type;
if (type > CRYPTO_MSG_MAX)
return -EINVAL;
type -= CRYPTO_MSG_BASE;
link = &crypto_dispatch[type];
if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
(nlh->nlmsg_flags & NLM_F_DUMP))) {
struct crypto_alg *alg;
unsigned long dump_alloc = 0;
if (link->dump == NULL)
return -EINVAL;
down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
up_read(&crypto_alg_sem);
{
struct netlink_dump_control c = {
.dump = link->dump,
.done = link->done,
.min_dump_alloc = min(dump_alloc, 65535UL),
};
err = netlink_dump_start(net->crypto_nlsk, skb, nlh, &c);
}
return err;
}
err = nlmsg_parse_deprecated(nlh, crypto_msg_min[type], attrs,
CRYPTOCFGA_MAX, crypto_policy, extack);
if (err < 0)
return err;
if (link->doit == NULL)
return -EINVAL;
return link->doit(skb, nlh, attrs);
}
static void crypto_netlink_rcv(struct sk_buff *skb)
{
mutex_lock(&crypto_cfg_mutex);
netlink_rcv_skb(skb, &crypto_user_rcv_msg);
mutex_unlock(&crypto_cfg_mutex);
}
static int __net_init crypto_netlink_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = crypto_netlink_rcv,
};
net->crypto_nlsk = netlink_kernel_create(net, NETLINK_CRYPTO, &cfg);
return net->crypto_nlsk == NULL ? -ENOMEM : 0;
}
static void __net_exit crypto_netlink_exit(struct net *net)
{
netlink_kernel_release(net->crypto_nlsk);
net->crypto_nlsk = NULL;
}
static struct pernet_operations crypto_netlink_net_ops = {
.init = crypto_netlink_init,
.exit = crypto_netlink_exit,
};
static int __init crypto_user_init(void)
{
return register_pernet_subsys(&crypto_netlink_net_ops);
}
static void __exit crypto_user_exit(void)
{
unregister_pernet_subsys(&crypto_netlink_net_ops);
}
module_init(crypto_user_init);
module_exit(crypto_user_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_DESCRIPTION("Crypto userspace configuration API");
MODULE_ALIAS("net-pf-16-proto-21");
| linux-master | crypto/crypto_user_base.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* ECDH key-agreement protocol
*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <[email protected]>
*/
#include <linux/module.h>
#include <crypto/internal/ecc.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <crypto/ecdh.h>
#include <linux/scatterlist.h>
struct ecdh_ctx {
unsigned int curve_id;
unsigned int ndigits;
u64 private_key[ECC_MAX_DIGITS];
};
static inline struct ecdh_ctx *ecdh_get_ctx(struct crypto_kpp *tfm)
{
return kpp_tfm_ctx(tfm);
}
static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
struct ecdh params;
if (crypto_ecdh_decode_key(buf, len, ¶ms) < 0 ||
params.key_size > sizeof(u64) * ctx->ndigits)
return -EINVAL;
if (!params.key || !params.key_size)
return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
ctx->private_key);
memcpy(ctx->private_key, params.key, params.key_size);
if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
ctx->private_key, params.key_size) < 0) {
memzero_explicit(ctx->private_key, params.key_size);
return -EINVAL;
}
return 0;
}
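/*
 * Informative sketch (not part of the original file): a KPP consumer
 * would typically pack the raw private key with the ecdh helper
 * functions before handing it to ->set_secret(), roughly:
 *
 *	struct ecdh p = { .key = raw_key, .key_size = raw_key_len };
 *	unsigned int n = crypto_ecdh_key_len(&p);
 *	char *buf = kmalloc(n, GFP_KERNEL);
 *
 *	if (buf && !crypto_ecdh_encode_key(buf, n, &p))
 *		err = crypto_kpp_set_secret(tfm, buf, n);
 *
 * Passing .key = NULL and .key_size = 0 instead asks this
 * implementation to generate a private key via ecc_gen_privkey().
 */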
static int ecdh_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
u64 *public_key;
u64 *shared_secret = NULL;
void *buf;
size_t copied, nbytes, public_key_sz;
int ret = -ENOMEM;
nbytes = ctx->ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
/* Public part is a point thus it has both coordinates */
public_key_sz = 2 * nbytes;
public_key = kmalloc(public_key_sz, GFP_KERNEL);
if (!public_key)
return -ENOMEM;
if (req->src) {
shared_secret = kmalloc(nbytes, GFP_KERNEL);
if (!shared_secret)
goto free_pubkey;
/* from here on it's invalid parameters */
ret = -EINVAL;
/* the public key is a single curve point, i.e. exactly two coordinates */
if (public_key_sz != req->src_len)
goto free_all;
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
public_key_sz),
public_key, public_key_sz);
if (copied != public_key_sz)
goto free_all;
ret = crypto_ecdh_shared_secret(ctx->curve_id, ctx->ndigits,
ctx->private_key, public_key,
shared_secret);
buf = shared_secret;
} else {
ret = ecc_make_pub_key(ctx->curve_id, ctx->ndigits,
ctx->private_key, public_key);
buf = public_key;
nbytes = public_key_sz;
}
if (ret < 0)
goto free_all;
/* might want less than we've got */
nbytes = min_t(size_t, nbytes, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
ret = -EINVAL;
/* fall through */
free_all:
kfree_sensitive(shared_secret);
free_pubkey:
kfree(public_key);
return ret;
}
static unsigned int ecdh_max_size(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
/* Public key is made of two coordinates, add one to the left shift */
return ctx->ndigits << (ECC_DIGITS_TO_BYTES_SHIFT + 1);
}
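/*
 * Worked example (informative): for NIST P-256, ndigits is 4 and a
 * digit is a u64, i.e. ECC_DIGITS_TO_BYTES_SHIFT is 3. The maximum
 * output size is therefore 4 << (3 + 1) = 64 bytes -- the two 32-byte
 * coordinates of the public key point.
 */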
static int ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P192;
ctx->ndigits = ECC_CURVE_NIST_P192_DIGITS;
return 0;
}
static struct kpp_alg ecdh_nist_p192 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
.init = ecdh_nist_p192_init_tfm,
.base = {
.cra_name = "ecdh-nist-p192",
.cra_driver_name = "ecdh-nist-p192-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
},
};
static int ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P256;
ctx->ndigits = ECC_CURVE_NIST_P256_DIGITS;
return 0;
}
static struct kpp_alg ecdh_nist_p256 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
.init = ecdh_nist_p256_init_tfm,
.base = {
.cra_name = "ecdh-nist-p256",
.cra_driver_name = "ecdh-nist-p256-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
},
};
static int ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
{
struct ecdh_ctx *ctx = ecdh_get_ctx(tfm);
ctx->curve_id = ECC_CURVE_NIST_P384;
ctx->ndigits = ECC_CURVE_NIST_P384_DIGITS;
return 0;
}
static struct kpp_alg ecdh_nist_p384 = {
.set_secret = ecdh_set_secret,
.generate_public_key = ecdh_compute_value,
.compute_shared_secret = ecdh_compute_value,
.max_size = ecdh_max_size,
.init = ecdh_nist_p384_init_tfm,
.base = {
.cra_name = "ecdh-nist-p384",
.cra_driver_name = "ecdh-nist-p384-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct ecdh_ctx),
},
};
static bool ecdh_nist_p192_registered;
static int __init ecdh_init(void)
{
int ret;
/* NIST p192 will fail to register in FIPS mode */
ret = crypto_register_kpp(&ecdh_nist_p192);
ecdh_nist_p192_registered = ret == 0;
ret = crypto_register_kpp(&ecdh_nist_p256);
if (ret)
goto nist_p256_error;
ret = crypto_register_kpp(&ecdh_nist_p384);
if (ret)
goto nist_p384_error;
return 0;
nist_p384_error:
crypto_unregister_kpp(&ecdh_nist_p256);
nist_p256_error:
if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
return ret;
}
static void __exit ecdh_exit(void)
{
if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192);
crypto_unregister_kpp(&ecdh_nist_p256);
crypto_unregister_kpp(&ecdh_nist_p384);
}
subsys_initcall(ecdh_init);
module_exit(ecdh_exit);
MODULE_ALIAS_CRYPTO("ecdh");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ECDH generic algorithm");
| linux-master | crypto/ecdh.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SM4 Cipher Algorithm.
*
* Copyright (C) 2018 ARM Limited or its affiliates.
* All rights reserved.
*/
#include <crypto/algapi.h>
#include <crypto/sm4.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
/**
* sm4_setkey - Set the SM4 key.
* @tfm: The %crypto_tfm that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key.
*
* This function uses sm4_expandkey() to expand the key.
* &sm4_ctx _must_ be the private data embedded in @tfm which is
* retrieved with crypto_tfm_ctx().
*
* Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
*/
static int sm4_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
return sm4_expandkey(ctx, in_key, key_len);
}
/* encrypt a block of text */
static void sm4_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
sm4_crypt_block(ctx->rkey_enc, out, in);
}
/* decrypt a block of text */
static void sm4_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct sm4_ctx *ctx = crypto_tfm_ctx(tfm);
sm4_crypt_block(ctx->rkey_dec, out, in);
}
static struct crypto_alg sm4_alg = {
.cra_name = "sm4",
.cra_driver_name = "sm4-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SM4_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct sm4_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = SM4_KEY_SIZE,
.cia_max_keysize = SM4_KEY_SIZE,
.cia_setkey = sm4_setkey,
.cia_encrypt = sm4_encrypt,
.cia_decrypt = sm4_decrypt
}
}
};
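/*
 * Informative usage sketch (assumes a kernel context with access to the
 * single-block cipher API; not part of the original file):
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("sm4", 0, 0);
 *	u8 dst[SM4_BLOCK_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, SM4_KEY_SIZE);
 *		crypto_cipher_encrypt_one(tfm, dst, src);
 *		crypto_free_cipher(tfm);
 *	}
 *
 * Note that recent kernels reserve the bare cipher API for
 * crypto-internal users; general users would wrap SM4 in a mode such as
 * "ecb(sm4)" via the skcipher API instead.
 */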
static int __init sm4_init(void)
{
return crypto_register_alg(&sm4_alg);
}
static void __exit sm4_fini(void)
{
crypto_unregister_alg(&sm4_alg);
}
subsys_initcall(sm4_init);
module_exit(sm4_fini);
MODULE_DESCRIPTION("SM4 Cipher Algorithm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sm4");
MODULE_ALIAS_CRYPTO("sm4-generic");
| linux-master | crypto/sm4_generic.c |
/*
* DRBG: Deterministic Random Bits Generator
* Based on NIST Recommended DRBG from NIST SP800-90A with the following
* properties:
* * CTR DRBG with DF with AES-128, AES-192, AES-256 cores
* * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
* * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
* * with and without prediction resistance
*
* Copyright Stephan Mueller <[email protected]>, 2014
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* DRBG Usage
* ==========
* The SP 800-90A DRBG allows the user to specify a personalization string
* for initialization as well as an additional information string for each
* random number request. The following code fragments show how a caller
* uses the kernel crypto API to use the full functionality of the DRBG.
*
* Usage without any additional data
* ---------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
*
* drng = crypto_alloc_rng(drng_name, 0, 0);
* err = crypto_rng_get_bytes(drng, &data, DATALEN);
* crypto_free_rng(drng);
*
*
* Usage with personalization string during initialization
* -------------------------------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
* struct drbg_string pers;
* char personalization[] = "some-string";
*
* drbg_string_fill(&pers, personalization, strlen(personalization));
* drng = crypto_alloc_rng(drng_name, 0, 0);
* // The reset completely re-initializes the DRBG with the provided
* // personalization string
* err = crypto_rng_reset(drng, personalization, strlen(personalization));
* err = crypto_rng_get_bytes(drng, &data, DATALEN);
* crypto_free_rng(drng);
*
*
* Usage with additional information string during random number request
* ---------------------------------------------------------------------
* struct crypto_rng *drng;
* int err;
* char data[DATALEN];
* char addtl_string[] = "some-string";
* struct drbg_string addtl;
*
* drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
* drng = crypto_alloc_rng(drng_name, 0, 0);
* // The following call is a wrapper to crypto_rng_get_bytes() and returns
* // the same error codes.
* err = crypto_drbg_get_bytes_addtl(drng, &data, DATALEN, &addtl);
* crypto_free_rng(drng);
*
*
* Usage with personalization and additional information strings
* -------------------------------------------------------------
* Just mix both scenarios above.
*/
#include <crypto/drbg.h>
#include <crypto/internal/cipher.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
/***************************************************************
* Backend cipher definitions available to DRBG
***************************************************************/
/*
 * The order of the DRBG definitions here matters: every DRBG is registered
 * as stdrng. Each DRBG receives a higher cra_priority the later it is
 * defined in this array (see drbg_fill_array).
 *
 * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
 * SHA-256 / AES-256 over the other ciphers. Thus, the favored
 * DRBGs are the latest entries in this array.
 */
static const struct drbg_core drbg_cores[] = {
#ifdef CONFIG_CRYPTO_DRBG_CTR
{
.flags = DRBG_CTR | DRBG_STRENGTH128,
.statelen = 32, /* 256 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes128",
.backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH192,
.statelen = 40, /* 320 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes192",
.backend_cra_name = "aes",
}, {
.flags = DRBG_CTR | DRBG_STRENGTH256,
.statelen = 48, /* 384 bits as defined in 10.2.1 */
.blocklen_bytes = 16,
.cra_name = "ctr_aes256",
.backend_cra_name = "aes",
},
#endif /* CONFIG_CRYPTO_DRBG_CTR */
#ifdef CONFIG_CRYPTO_DRBG_HASH
{
.flags = DRBG_HASH | DRBG_STRENGTH128,
.statelen = 55, /* 440 bits */
.blocklen_bytes = 20,
.cra_name = "sha1",
.backend_cra_name = "sha1",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
.blocklen_bytes = 48,
.cra_name = "sha384",
.backend_cra_name = "sha384",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 111, /* 888 bits */
.blocklen_bytes = 64,
.cra_name = "sha512",
.backend_cra_name = "sha512",
}, {
.flags = DRBG_HASH | DRBG_STRENGTH256,
.statelen = 55, /* 440 bits */
.blocklen_bytes = 32,
.cra_name = "sha256",
.backend_cra_name = "sha256",
},
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
{
.flags = DRBG_HMAC | DRBG_STRENGTH128,
.statelen = 20, /* block length of cipher */
.blocklen_bytes = 20,
.cra_name = "hmac_sha1",
.backend_cra_name = "hmac(sha1)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 48, /* block length of cipher */
.blocklen_bytes = 48,
.cra_name = "hmac_sha384",
.backend_cra_name = "hmac(sha384)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 32, /* block length of cipher */
.blocklen_bytes = 32,
.cra_name = "hmac_sha256",
.backend_cra_name = "hmac(sha256)",
}, {
.flags = DRBG_HMAC | DRBG_STRENGTH256,
.statelen = 64, /* block length of cipher */
.blocklen_bytes = 64,
.cra_name = "hmac_sha512",
.backend_cra_name = "hmac(sha512)",
},
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
};
static int drbg_uninstantiate(struct drbg_state *drbg);
/******************************************************************
* Generic helper functions
******************************************************************/
/*
* Return strength of DRBG according to SP800-90A section 8.4
*
* @flags DRBG flags reference
*
* Return: normalized strength in *bytes*, defaulting to 32 to counter
* programming errors
*/
static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
{
switch (flags & DRBG_STRENGTH_MASK) {
case DRBG_STRENGTH128:
return 16;
case DRBG_STRENGTH192:
return 24;
case DRBG_STRENGTH256:
return 32;
default:
return 32;
}
}
/*
* FIPS 140-2 continuous self test for the noise source
* The test is performed on the noise source input data. Thus, the function
* implicitly knows the size of the buffer to be equal to the security
* strength.
*
* Note, this function disregards the nonce trailing the entropy data during
* initial seeding.
*
* drbg->drbg_mutex must have been taken.
*
* @drbg DRBG handle
* @entropy buffer of seed data to be checked
*
* return:
* 0 on success
* -EAGAIN when the continuous test is not yet primed
* < 0 on error
*/
static int drbg_fips_continuous_test(struct drbg_state *drbg,
const unsigned char *entropy)
{
unsigned short entropylen = drbg_sec_strength(drbg->core->flags);
int ret = 0;
if (!IS_ENABLED(CONFIG_CRYPTO_FIPS))
return 0;
/* skip test if we test the overall system */
if (list_empty(&drbg->test_data.list))
return 0;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
if (!drbg->fips_primed) {
/* Priming of FIPS test */
memcpy(drbg->prev, entropy, entropylen);
drbg->fips_primed = true;
/* priming: another round is needed */
return -EAGAIN;
}
ret = memcmp(drbg->prev, entropy, entropylen);
if (!ret)
panic("DRBG continuous self test failed\n");
memcpy(drbg->prev, entropy, entropylen);
/* the test shall pass when the two values are not equal */
return 0;
}
/*
 * Convert an integer into its big-endian byte representation,
 * e.g. val = 0x01020304 yields buf[] = { 0x01, 0x02, 0x03, 0x04 }.
 *
 * @val value to be converted
 * @buf buffer holding the converted integer -- caller must ensure that
 *      the buffer size is at least 32 bits
 */
#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
static inline void drbg_cpu_to_be32(__u32 val, unsigned char *buf)
{
struct s {
__be32 conv;
};
struct s *conversion = (struct s *) buf;
conversion->conv = cpu_to_be32(val);
}
#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
/******************************************************************
* CTR DRBG callback functions
******************************************************************/
#ifdef CONFIG_CRYPTO_DRBG_CTR
#define CRYPTO_DRBG_CTR_STRING "CTR "
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes256");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes192");
MODULE_ALIAS_CRYPTO("drbg_pr_ctr_aes128");
MODULE_ALIAS_CRYPTO("drbg_nopr_ctr_aes128");
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key);
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in);
static int drbg_init_sym_kernel(struct drbg_state *drbg);
static int drbg_fini_sym_kernel(struct drbg_state *drbg);
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inbuflen,
u8 *outbuf, u32 outlen);
#define DRBG_OUTSCRATCHLEN 256
/* BCC function for CTR DRBG as defined in 10.4.3 */
static int drbg_ctr_bcc(struct drbg_state *drbg,
unsigned char *out, const unsigned char *key,
struct list_head *in)
{
int ret = 0;
struct drbg_string *curr = NULL;
struct drbg_string data;
short cnt = 0;
drbg_string_fill(&data, out, drbg_blocklen(drbg));
/* 10.4.3 step 2 / 4 */
drbg_kcapi_symsetkey(drbg, key);
list_for_each_entry(curr, in, list) {
const unsigned char *pos = curr->buf;
size_t len = curr->len;
/* 10.4.3 step 4.1 */
while (len) {
/* 10.4.3 step 4.2 */
if (drbg_blocklen(drbg) == cnt) {
cnt = 0;
ret = drbg_kcapi_sym(drbg, out, &data);
if (ret)
return ret;
}
out[cnt] ^= *pos;
pos++;
cnt++;
len--;
}
}
/* 10.4.3 step 4.2 for last block */
if (cnt)
ret = drbg_kcapi_sym(drbg, out, &data);
return ret;
}
/*
* scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
* (and drbg_ctr_bcc, but this function does not need any temporary buffers),
* the scratchpad is used as follows:
* drbg_ctr_update:
* temp
* start: drbg->scratchpad
* length: drbg_statelen(drbg) + drbg_blocklen(drbg)
* note: the cipher writing into this variable works
* blocklen-wise. Now, when the statelen is not a multiple
* of blocklen, the generation loop below "spills over"
* by at most blocklen. Thus, we need to give sufficient
* memory.
* df_data
* start: drbg->scratchpad +
* drbg_statelen(drbg) + drbg_blocklen(drbg)
* length: drbg_statelen(drbg)
*
* drbg_ctr_df:
* pad
* start: df_data + drbg_statelen(drbg)
* length: drbg_blocklen(drbg)
* iv
* start: pad + drbg_blocklen(drbg)
* length: drbg_blocklen(drbg)
* temp
* start: iv + drbg_blocklen(drbg)
* length: drbg_statelen(drbg) + drbg_blocklen(drbg)
* note: temp is the buffer that the BCC function operates
* on. BCC operates blockwise. drbg_statelen(drbg)
* is sufficient when the DRBG state length is a multiple
* of the block size. For AES192 (and maybe other ciphers)
* this is not correct and the length for temp is
* insufficient (yes, that also means for such ciphers,
* the final output of all BCC rounds is truncated).
* Therefore, add drbg_blocklen(drbg) to cover all
* possibilities.
*/
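/*
 * Worked layout example (informative), for ctr_aes128 where
 * drbg_statelen() = 32 and drbg_blocklen() = 16: the scratchpad spans
 * 160 bytes, carved up as
 *	update temp  [  0,  48)
 *	df_data      [ 48,  80)
 *	pad          [ 80,  96)
 *	iv           [ 96, 112)
 *	df temp      [112, 160)
 * matching the allocation size computed in drbg_alloc_state().
 */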
/* Derivation Function for CTR DRBG as defined in 10.4.2 */
static int drbg_ctr_df(struct drbg_state *drbg,
unsigned char *df_data, size_t bytes_to_return,
struct list_head *seedlist)
{
int ret = -EFAULT;
unsigned char L_N[8];
/* S3 is input */
struct drbg_string S1, S2, S4, cipherin;
LIST_HEAD(bcc_list);
unsigned char *pad = df_data + drbg_statelen(drbg);
unsigned char *iv = pad + drbg_blocklen(drbg);
unsigned char *temp = iv + drbg_blocklen(drbg);
size_t padlen = 0;
unsigned int templen = 0;
/* 10.4.2 step 7 */
unsigned int i = 0;
/* 10.4.2 step 8 */
const unsigned char *K = (unsigned char *)
"\x00\x01\x02\x03\x04\x05\x06\x07"
"\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
"\x10\x11\x12\x13\x14\x15\x16\x17"
"\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
unsigned char *X;
size_t generated_len = 0;
size_t inputlen = 0;
struct drbg_string *seed = NULL;
memset(pad, 0, drbg_blocklen(drbg));
memset(iv, 0, drbg_blocklen(drbg));
/* 10.4.2 step 1 is implicit as we work byte-wise */
/* 10.4.2 step 2 */
if ((512/8) < bytes_to_return)
return -EINVAL;
/* 10.4.2 step 2 -- calculate the entire length of all input data */
list_for_each_entry(seed, seedlist, list)
inputlen += seed->len;
drbg_cpu_to_be32(inputlen, &L_N[0]);
/* 10.4.2 step 3 */
drbg_cpu_to_be32(bytes_to_return, &L_N[4]);
/* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
/* wrap the padlen appropriately */
if (padlen)
padlen = drbg_blocklen(drbg) - padlen;
/*
* pad / padlen contains the 0x80 byte and the following zero bytes.
* As the calculated padlen value only covers the number of zero
* bytes, this value has to be incremented by one for the 0x80 byte.
*/
padlen++;
pad[0] = 0x80;
/* 10.4.2 step 4 -- first fill the linked list and then order it */
drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
list_add_tail(&S1.list, &bcc_list);
drbg_string_fill(&S2, L_N, sizeof(L_N));
list_add_tail(&S2.list, &bcc_list);
list_splice_tail(seedlist, &bcc_list);
drbg_string_fill(&S4, pad, padlen);
list_add_tail(&S4.list, &bcc_list);
/* 10.4.2 step 9 */
while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
/*
* 10.4.2 step 9.1 - the padding is implicit as the buffer
* holds zeros after allocation -- even the increment of i
* is irrelevant as the increment remains within length of i
*/
drbg_cpu_to_be32(i, iv);
/* 10.4.2 step 9.2 -- BCC and concatenation with temp */
ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
if (ret)
goto out;
/* 10.4.2 step 9.3 */
i++;
templen += drbg_blocklen(drbg);
}
/* 10.4.2 step 11 */
X = temp + (drbg_keylen(drbg));
drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
/* 10.4.2 step 12: overwriting of outval is implemented in next step */
/* 10.4.2 step 13 */
drbg_kcapi_symsetkey(drbg, temp);
while (generated_len < bytes_to_return) {
short blocklen = 0;
/*
* 10.4.2 step 13.1: the truncation of the key length is
* implicit as the key is only drbg_blocklen in size based on
* the implementation of the cipher function callback
*/
ret = drbg_kcapi_sym(drbg, X, &cipherin);
if (ret)
goto out;
blocklen = (drbg_blocklen(drbg) <
(bytes_to_return - generated_len)) ?
drbg_blocklen(drbg) :
(bytes_to_return - generated_len);
/* 10.4.2 step 13.2 and 14 */
memcpy(df_data + generated_len, X, blocklen);
generated_len += blocklen;
}
ret = 0;
out:
memset(iv, 0, drbg_blocklen(drbg));
memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
memset(pad, 0, drbg_blocklen(drbg));
return ret;
}
/*
* update function of CTR DRBG as defined in 10.2.1.2
*
* The reseed variable has an enhanced meaning compared to the update
* functions of the other DRBGs as follows:
* 0 => initial seed from initialization
* 1 => reseed via drbg_seed
* 2 => first invocation from drbg_ctr_update when addtl is present. In
* this case, the df_data scratchpad is not deleted so that it is
* available for another calls to prevent calling the DF function
* again.
* 3 => second invocation from drbg_ctr_update. When the update function
* was called with addtl, the df_data memory already contains the
* DFed addtl information and we do not need to call DF again.
*/
static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = -EFAULT;
/* 10.2.1.2 step 1 */
unsigned char *temp = drbg->scratchpad;
unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
drbg_blocklen(drbg);
if (3 > reseed)
memset(df_data, 0, drbg_statelen(drbg));
if (!reseed) {
/*
* The DRBG uses the CTR mode of the underlying AES cipher. The
* CTR mode increments the counter value after the AES operation
* but SP800-90A requires that the counter is incremented before
* the AES operation. Hence, we increment it at the time we set
* it by one.
*/
crypto_inc(drbg->V, drbg_blocklen(drbg));
ret = crypto_skcipher_setkey(drbg->ctr_handle, drbg->C,
drbg_keylen(drbg));
if (ret)
goto out;
}
/* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
if (seed) {
ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
if (ret)
goto out;
}
ret = drbg_kcapi_sym_ctr(drbg, df_data, drbg_statelen(drbg),
temp, drbg_statelen(drbg));
if (ret)
return ret;
/* 10.2.1.2 step 5 */
ret = crypto_skcipher_setkey(drbg->ctr_handle, temp,
drbg_keylen(drbg));
if (ret)
goto out;
/* 10.2.1.2 step 6 */
memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
/* See above: increment counter by one to compensate timing of CTR op */
crypto_inc(drbg->V, drbg_blocklen(drbg));
ret = 0;
out:
memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
if (2 != reseed)
memset(df_data, 0, drbg_statelen(drbg));
return ret;
}
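/*
 * Informative example of the reseed encoding above, as exercised by
 * drbg_ctr_generate() when additional input is present:
 *
 *	drbg_ctr_update(drbg, addtl, 2); // DF the addtl data, keep df_data
 *	drbg_kcapi_sym_ctr(...);         // produce the random bytes
 *	drbg_ctr_update(drbg, NULL, 3);  // reuse df_data, then wipe it
 */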
/*
* scratchpad use: drbg_ctr_update is called independently from
* drbg_ctr_extract_bytes. Therefore, the scratchpad is reused
*/
/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
static int drbg_ctr_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl)
{
int ret;
int len = min_t(int, buflen, INT_MAX);
/* 10.2.1.5.2 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_ctr_update(drbg, addtl, 2);
if (ret)
return 0;
}
/* 10.2.1.5.2 step 4.1 */
ret = drbg_kcapi_sym_ctr(drbg, NULL, 0, buf, len);
if (ret)
return ret;
/* 10.2.1.5.2 step 6 */
ret = drbg_ctr_update(drbg, NULL, 3);
if (ret)
len = ret;
return len;
}
static const struct drbg_state_ops drbg_ctr_ops = {
.update = drbg_ctr_update,
.generate = drbg_ctr_generate,
.crypto_init = drbg_init_sym_kernel,
.crypto_fini = drbg_fini_sym_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_CTR */
/******************************************************************
* HMAC DRBG callback functions
******************************************************************/
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in);
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key);
static int drbg_init_hash_kernel(struct drbg_state *drbg);
static int drbg_fini_hash_kernel(struct drbg_state *drbg);
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
#ifdef CONFIG_CRYPTO_DRBG_HMAC
#define CRYPTO_DRBG_HMAC_STRING "HMAC "
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha512");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha256");
MODULE_ALIAS_CRYPTO("drbg_pr_hmac_sha1");
MODULE_ALIAS_CRYPTO("drbg_nopr_hmac_sha1");
/* update function of HMAC DRBG as defined in 10.1.2.2 */
static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = -EFAULT;
int i = 0;
struct drbg_string seed1, seed2, vdata;
LIST_HEAD(seedlist);
LIST_HEAD(vdatalist);
if (!reseed) {
/* 10.1.2.3 step 2 -- memset(0) of C is implicit with kzalloc */
memset(drbg->V, 1, drbg_statelen(drbg));
drbg_kcapi_hmacsetkey(drbg, drbg->C);
}
drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
list_add_tail(&seed1.list, &seedlist);
/* buffer of seed2 will be filled in for loop below with one byte */
drbg_string_fill(&seed2, NULL, 1);
list_add_tail(&seed2.list, &seedlist);
/* input data of seed is allowed to be NULL at this point */
if (seed)
list_splice_tail(seed, &seedlist);
drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg));
list_add_tail(&vdata.list, &vdatalist);
for (i = 2; 0 < i; i--) {
/* first round uses 0x0, second 0x1 */
unsigned char prefix = DRBG_PREFIX0;
if (1 == i)
prefix = DRBG_PREFIX1;
/* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
seed2.buf = &prefix;
ret = drbg_kcapi_hash(drbg, drbg->C, &seedlist);
if (ret)
return ret;
drbg_kcapi_hmacsetkey(drbg, drbg->C);
/* 10.1.2.2 step 2 and 5 -- HMAC for V */
ret = drbg_kcapi_hash(drbg, drbg->V, &vdatalist);
if (ret)
return ret;
/* 10.1.2.2 step 3 */
if (!seed)
return ret;
}
return 0;
}
/* generate function of HMAC DRBG as defined in 10.1.2.5 */
static int drbg_hmac_generate(struct drbg_state *drbg,
unsigned char *buf,
unsigned int buflen,
struct list_head *addtl)
{
int len = 0;
int ret = 0;
struct drbg_string data;
LIST_HEAD(datalist);
/* 10.1.2.5 step 2 */
if (addtl && !list_empty(addtl)) {
ret = drbg_hmac_update(drbg, addtl, 1);
if (ret)
return ret;
}
drbg_string_fill(&data, drbg->V, drbg_statelen(drbg));
list_add_tail(&data.list, &datalist);
while (len < buflen) {
unsigned int outlen = 0;
/* 10.1.2.5 step 4.1 */
ret = drbg_kcapi_hash(drbg, drbg->V, &datalist);
if (ret)
return ret;
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
/* 10.1.2.5 step 4.2 */
memcpy(buf + len, drbg->V, outlen);
len += outlen;
}
/* 10.1.2.5 step 6 */
if (addtl && !list_empty(addtl))
ret = drbg_hmac_update(drbg, addtl, 1);
else
ret = drbg_hmac_update(drbg, NULL, 1);
if (ret)
return ret;
return len;
}
static const struct drbg_state_ops drbg_hmac_ops = {
.update = drbg_hmac_update,
.generate = drbg_hmac_generate,
.crypto_init = drbg_init_hash_kernel,
.crypto_fini = drbg_fini_hash_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
/******************************************************************
* Hash DRBG callback functions
******************************************************************/
#ifdef CONFIG_CRYPTO_DRBG_HASH
#define CRYPTO_DRBG_HASH_STRING "HASH "
MODULE_ALIAS_CRYPTO("drbg_pr_sha512");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha512");
MODULE_ALIAS_CRYPTO("drbg_pr_sha384");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha384");
MODULE_ALIAS_CRYPTO("drbg_pr_sha256");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha256");
MODULE_ALIAS_CRYPTO("drbg_pr_sha1");
MODULE_ALIAS_CRYPTO("drbg_nopr_sha1");
/*
* Increment buffer
*
* @dst buffer to increment
* @add value to add
*/
static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
const unsigned char *add, size_t addlen)
{
/* implied: dstlen > addlen */
unsigned char *dstptr;
const unsigned char *addptr;
unsigned int remainder = 0;
size_t len = addlen;
dstptr = dst + (dstlen-1);
addptr = add + (addlen-1);
while (len) {
remainder += *dstptr + *addptr;
*dstptr = remainder & 0xff;
remainder >>= 8;
len--; dstptr--; addptr--;
}
len = dstlen - addlen;
while (len && remainder > 0) {
remainder = *dstptr + 1;
*dstptr = remainder & 0xff;
remainder >>= 8;
len--; dstptr--;
}
}
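/*
 * Worked example (informative): with dst = { 0x00, 0xff, 0xff } and
 * add = { 0x01 }, the first loop produces dst[2] = 0x00 with a carry,
 * and the second loop propagates the carry upwards, yielding
 * dst = { 0x01, 0x00, 0x00 }.
 */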
/*
* scratchpad usage: as drbg_hash_update and drbg_hash_df are used
* interlinked, the scratchpad is used as follows:
* drbg_hash_update
* start: drbg->scratchpad
* length: drbg_statelen(drbg)
* drbg_hash_df:
* start: drbg->scratchpad + drbg_statelen(drbg)
* length: drbg_blocklen(drbg)
*
* drbg_hash_process_addtl uses the scratchpad, but fully completes
* before either of the functions mentioned before are invoked. Therefore,
* drbg_hash_process_addtl does not need to be specifically considered.
*/
/* Derivation Function for Hash DRBG as defined in 10.4.1 */
static int drbg_hash_df(struct drbg_state *drbg,
unsigned char *outval, size_t outlen,
struct list_head *entropylist)
{
int ret = 0;
size_t len = 0;
unsigned char input[5];
unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
/* 10.4.1 step 3 */
input[0] = 1;
drbg_cpu_to_be32((outlen * 8), &input[1]);
/* 10.4.1 step 4.1 -- concatenation of data for input into hash */
drbg_string_fill(&data, input, 5);
list_add(&data.list, entropylist);
/* 10.4.1 step 4 */
while (len < outlen) {
short blocklen = 0;
/* 10.4.1 step 4.1 */
ret = drbg_kcapi_hash(drbg, tmp, entropylist);
if (ret)
goto out;
/* 10.4.1 step 4.2 */
input[0]++;
blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
drbg_blocklen(drbg) : (outlen - len);
memcpy(outval + len, tmp, blocklen);
len += blocklen;
}
out:
memset(tmp, 0, drbg_blocklen(drbg));
return ret;
}
/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
int reseed)
{
int ret = 0;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
LIST_HEAD(datalist2);
unsigned char *V = drbg->scratchpad;
unsigned char prefix = DRBG_PREFIX1;
if (!seed)
return -EINVAL;
if (reseed) {
/* 10.1.1.3 step 1 */
memcpy(V, drbg->V, drbg_statelen(drbg));
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist);
drbg_string_fill(&data2, V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist);
}
list_splice_tail(seed, &datalist);
/* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist);
if (ret)
goto out;
/* 10.1.1.2 / 10.1.1.3 step 4 */
prefix = DRBG_PREFIX0;
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist2);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist2);
/* 10.1.1.2 / 10.1.1.3 step 4 */
ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
out:
memset(drbg->scratchpad, 0, drbg_statelen(drbg));
return ret;
}
/* processing of additional information string for Hash DRBG */
static int drbg_hash_process_addtl(struct drbg_state *drbg,
struct list_head *addtl)
{
int ret = 0;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
unsigned char prefix = DRBG_PREFIX2;
/* 10.1.1.4 step 2 */
if (!addtl || list_empty(addtl))
return 0;
/* 10.1.1.4 step 2a */
drbg_string_fill(&data1, &prefix, 1);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data1.list, &datalist);
list_add_tail(&data2.list, &datalist);
list_splice_tail(addtl, &datalist);
ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
if (ret)
goto out;
/* 10.1.1.4 step 2b */
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->scratchpad, drbg_blocklen(drbg));
out:
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return ret;
}
/* Hashgen defined in 10.1.1.4 */
static int drbg_hash_hashgen(struct drbg_state *drbg,
unsigned char *buf,
unsigned int buflen)
{
int len = 0;
int ret = 0;
unsigned char *src = drbg->scratchpad;
unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
struct drbg_string data;
LIST_HEAD(datalist);
/* 10.1.1.4 step hashgen 2 */
memcpy(src, drbg->V, drbg_statelen(drbg));
drbg_string_fill(&data, src, drbg_statelen(drbg));
list_add_tail(&data.list, &datalist);
while (len < buflen) {
unsigned int outlen = 0;
/* 10.1.1.4 step hashgen 4.1 */
ret = drbg_kcapi_hash(drbg, dst, &datalist);
if (ret) {
len = ret;
goto out;
}
outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
drbg_blocklen(drbg) : (buflen - len);
/* 10.1.1.4 step hashgen 4.2 */
memcpy(buf + len, dst, outlen);
len += outlen;
/* 10.1.1.4 hashgen step 4.3 */
if (len < buflen)
crypto_inc(src, drbg_statelen(drbg));
}
out:
memset(drbg->scratchpad, 0,
(drbg_statelen(drbg) + drbg_blocklen(drbg)));
return len;
}
/* generate function for Hash DRBG as defined in 10.1.1.4 */
static int drbg_hash_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct list_head *addtl)
{
int len = 0;
int ret = 0;
union {
unsigned char req[8];
__be64 req_int;
} u;
unsigned char prefix = DRBG_PREFIX3;
struct drbg_string data1, data2;
LIST_HEAD(datalist);
/* 10.1.1.4 step 2 */
ret = drbg_hash_process_addtl(drbg, addtl);
if (ret)
return ret;
/* 10.1.1.4 step 3 */
len = drbg_hash_hashgen(drbg, buf, buflen);
/* this is the value H as documented in 10.1.1.4 */
/* 10.1.1.4 step 4 */
drbg_string_fill(&data1, &prefix, 1);
list_add_tail(&data1.list, &datalist);
drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
list_add_tail(&data2.list, &datalist);
ret = drbg_kcapi_hash(drbg, drbg->scratchpad, &datalist);
if (ret) {
len = ret;
goto out;
}
/* 10.1.1.4 step 5 */
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->scratchpad, drbg_blocklen(drbg));
drbg_add_buf(drbg->V, drbg_statelen(drbg),
drbg->C, drbg_statelen(drbg));
u.req_int = cpu_to_be64(drbg->reseed_ctr);
drbg_add_buf(drbg->V, drbg_statelen(drbg), u.req, 8);
out:
memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
return len;
}
/*
* scratchpad usage: as update and generate are used isolated, both
* can use the scratchpad
*/
static const struct drbg_state_ops drbg_hash_ops = {
.update = drbg_hash_update,
.generate = drbg_hash_generate,
.crypto_init = drbg_init_hash_kernel,
.crypto_fini = drbg_fini_hash_kernel,
};
#endif /* CONFIG_CRYPTO_DRBG_HASH */
/******************************************************************
* Functions common for DRBG implementations
******************************************************************/
static inline int __drbg_seed(struct drbg_state *drbg, struct list_head *seed,
int reseed, enum drbg_seed_state new_seed_state)
{
int ret = drbg->d_ops->update(drbg, seed, reseed);
if (ret)
return ret;
drbg->seeded = new_seed_state;
drbg->last_seed_time = jiffies;
/* 10.1.1.2 / 10.1.1.3 step 5 */
drbg->reseed_ctr = 1;
switch (drbg->seeded) {
case DRBG_SEED_STATE_UNSEEDED:
/* Impossible, but handle it to silence compiler warnings. */
fallthrough;
case DRBG_SEED_STATE_PARTIAL:
/*
* Require frequent reseeds until the seed source is
* fully initialized.
*/
drbg->reseed_threshold = 50;
break;
case DRBG_SEED_STATE_FULL:
/*
* Seed source has become fully initialized, frequent
* reseeds no longer required.
*/
drbg->reseed_threshold = drbg_max_requests(drbg);
break;
}
return ret;
}
static inline int drbg_get_random_bytes(struct drbg_state *drbg,
unsigned char *entropy,
unsigned int entropylen)
{
int ret;
do {
get_random_bytes(entropy, entropylen);
ret = drbg_fips_continuous_test(drbg, entropy);
if (ret && ret != -EAGAIN)
return ret;
} while (ret);
return 0;
}
static int drbg_seed_from_random(struct drbg_state *drbg)
{
struct drbg_string data;
LIST_HEAD(seedlist);
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
unsigned char entropy[32];
int ret;
BUG_ON(!entropylen);
BUG_ON(entropylen > sizeof(entropy));
drbg_string_fill(&data, entropy, entropylen);
list_add_tail(&data.list, &seedlist);
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
ret = __drbg_seed(drbg, &seedlist, true, DRBG_SEED_STATE_FULL);
out:
memzero_explicit(entropy, entropylen);
return ret;
}
static bool drbg_nopr_reseed_interval_elapsed(struct drbg_state *drbg)
{
unsigned long next_reseed;
/*
 * Don't ever reseed from get_random_bytes() in test mode. Note that a
 * zeroed list head does not count as "empty" for list_empty(), so this
 * check fires only once test data has been installed via
 * drbg_kcapi_set_entropy().
 */
if (list_empty(&drbg->test_data.list))
return false;
/*
* Obtain fresh entropy for the nopr DRBGs after 300s have
* elapsed in order to still achieve sort of partial
* prediction resistance over the time domain at least. Note
* that the period of 300s has been chosen to match the
* CRNG_RESEED_INTERVAL of the get_random_bytes()' chacha
* rngs.
*/
next_reseed = drbg->last_seed_time + 300 * HZ;
return time_after(jiffies, next_reseed);
}
/*
* Seeding or reseeding of the DRBG
*
* @drbg: DRBG state struct
* @pers: personalization / additional information buffer
* @reseed: 0 for initial seed process, 1 for reseeding
*
* return:
* 0 on success
* error value otherwise
*/
static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
bool reseed)
{
int ret;
unsigned char entropy[((32 + 16) * 2)];
unsigned int entropylen = drbg_sec_strength(drbg->core->flags);
struct drbg_string data1;
LIST_HEAD(seedlist);
enum drbg_seed_state new_seed_state = DRBG_SEED_STATE_FULL;
/* 9.1 / 9.2 / 9.3.1 step 3 */
if (pers && pers->len > (drbg_max_addtl(drbg))) {
pr_devel("DRBG: personalization string too long %zu\n",
pers->len);
return -EINVAL;
}
if (list_empty(&drbg->test_data.list)) {
drbg_string_fill(&data1, drbg->test_data.buf,
drbg->test_data.len);
pr_devel("DRBG: using test entropy\n");
} else {
/*
* Gather entropy equal to the security strength of the DRBG.
* With a derivation function, a nonce is required in addition
* to the entropy. A nonce must be at least 1/2 of the security
* strength of the DRBG in size. Thus, entropy + nonce is 3/2
* of the strength. The consideration of a nonce is only
* applicable during initial seeding.
*/
BUG_ON(!entropylen);
if (!reseed)
entropylen = ((entropylen + 1) / 2) * 3;
BUG_ON((entropylen * 2) > sizeof(entropy));
/* Get seed from in-kernel /dev/urandom */
if (!rng_is_initialized())
new_seed_state = DRBG_SEED_STATE_PARTIAL;
ret = drbg_get_random_bytes(drbg, entropy, entropylen);
if (ret)
goto out;
if (!drbg->jent) {
drbg_string_fill(&data1, entropy, entropylen);
pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
entropylen);
} else {
/*
* Get seed from Jitter RNG, failures are
* fatal only in FIPS mode.
*/
ret = crypto_rng_get_bytes(drbg->jent,
entropy + entropylen,
entropylen);
if (fips_enabled && ret) {
pr_devel("DRBG: jent failed with %d\n", ret);
/*
* Do not treat the transient failure of the
* Jitter RNG as an error that needs to be
* reported. The combined number of the
* maximum reseed threshold times the maximum
* number of Jitter RNG transient errors is
* less than the reseed threshold required by
* SP800-90A allowing us to treat the
* transient errors as such.
*
* However, we mandate that at least the first
* seeding operation must succeed with the
* Jitter RNG.
*/
if (!reseed || ret != -EAGAIN)
goto out;
}
drbg_string_fill(&data1, entropy, entropylen * 2);
pr_devel("DRBG: (re)seeding with %u bytes of entropy\n",
entropylen * 2);
}
}
list_add_tail(&data1.list, &seedlist);
/*
* Concatenate the entropy with the personalization string / additional
* input. The variable pers is handed in directly by the caller, so
* check whether its contents are appropriate.
*/
if (pers && pers->buf && 0 < pers->len) {
list_add_tail(&pers->list, &seedlist);
pr_devel("DRBG: using personalization string\n");
}
if (!reseed) {
memset(drbg->V, 0, drbg_statelen(drbg));
memset(drbg->C, 0, drbg_statelen(drbg));
}
ret = __drbg_seed(drbg, &seedlist, reseed, new_seed_state);
out:
memzero_explicit(entropy, entropylen * 2);
return ret;
}
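/*
 * Worked entropy sizing example (informative): for a 256-bit strength
 * DRBG, drbg_sec_strength() yields entropylen = 32. Initial seeding
 * scales this to ((32 + 1) / 2) * 3 = 48 bytes to cover the required
 * nonce, and with the Jitter RNG active twice that amount, 96 bytes,
 * is gathered -- exactly the size of the entropy[] buffer above.
 */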
/* Free all substructures in a DRBG state without the DRBG state structure */
static inline void drbg_dealloc_state(struct drbg_state *drbg)
{
if (!drbg)
return;
kfree_sensitive(drbg->Vbuf);
drbg->Vbuf = NULL;
drbg->V = NULL;
kfree_sensitive(drbg->Cbuf);
drbg->Cbuf = NULL;
drbg->C = NULL;
kfree_sensitive(drbg->scratchpadbuf);
drbg->scratchpadbuf = NULL;
drbg->reseed_ctr = 0;
drbg->d_ops = NULL;
drbg->core = NULL;
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
kfree_sensitive(drbg->prev);
drbg->prev = NULL;
drbg->fips_primed = false;
}
}
/*
* Allocate all sub-structures for a DRBG state.
* The DRBG state structure must already be allocated.
*/
static inline int drbg_alloc_state(struct drbg_state *drbg)
{
int ret = -ENOMEM;
unsigned int sb_size = 0;
switch (drbg->core->flags & DRBG_TYPE_MASK) {
#ifdef CONFIG_CRYPTO_DRBG_HMAC
case DRBG_HMAC:
drbg->d_ops = &drbg_hmac_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_HMAC */
#ifdef CONFIG_CRYPTO_DRBG_HASH
case DRBG_HASH:
drbg->d_ops = &drbg_hash_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_HASH */
#ifdef CONFIG_CRYPTO_DRBG_CTR
case DRBG_CTR:
drbg->d_ops = &drbg_ctr_ops;
break;
#endif /* CONFIG_CRYPTO_DRBG_CTR */
default:
ret = -EOPNOTSUPP;
goto err;
}
ret = drbg->d_ops->crypto_init(drbg);
if (ret < 0)
goto err;
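/*
 * On success, crypto_init() returns the backend's alignmask; the
 * buffers below are over-allocated by that amount so that V, C and
 * the scratchpad can be aligned via PTR_ALIGN(..., ret + 1).
 */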
drbg->Vbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Vbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->V = PTR_ALIGN(drbg->Vbuf, ret + 1);
drbg->Cbuf = kmalloc(drbg_statelen(drbg) + ret, GFP_KERNEL);
if (!drbg->Cbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->C = PTR_ALIGN(drbg->Cbuf, ret + 1);
/* scratchpad is only generated for CTR and Hash */
if (drbg->core->flags & DRBG_HMAC)
sb_size = 0;
else if (drbg->core->flags & DRBG_CTR)
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
drbg_statelen(drbg) + /* df_data */
drbg_blocklen(drbg) + /* pad */
drbg_blocklen(drbg) + /* iv */
drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
else
sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
if (0 < sb_size) {
drbg->scratchpadbuf = kzalloc(sb_size + ret, GFP_KERNEL);
if (!drbg->scratchpadbuf) {
ret = -ENOMEM;
goto fini;
}
drbg->scratchpad = PTR_ALIGN(drbg->scratchpadbuf, ret + 1);
}
if (IS_ENABLED(CONFIG_CRYPTO_FIPS)) {
drbg->prev = kzalloc(drbg_sec_strength(drbg->core->flags),
GFP_KERNEL);
if (!drbg->prev) {
ret = -ENOMEM;
goto fini;
}
drbg->fips_primed = false;
}
return 0;
fini:
drbg->d_ops->crypto_fini(drbg);
err:
drbg_dealloc_state(drbg);
return ret;
}
/*************************************************************************
* DRBG interface functions
*************************************************************************/
/*
* DRBG generate function as required by SP800-90A - this function
* generates random numbers
*
* @drbg DRBG state handle
* @buf Buffer where to store the random numbers -- the buffer must already
* be pre-allocated by caller
* @buflen Length of output buffer - this value defines the number of random
* bytes pulled from DRBG
* @addtl Additional input that is mixed into state, may be NULL -- note
* the entropy is pulled by the DRBG internally unconditionally
* as defined in SP800-90A. The additional input is mixed into
* the state in addition to the pulled entropy.
*
* return: 0 when all bytes are generated; < 0 in case of an error
*/
static int drbg_generate(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
{
int len = 0;
LIST_HEAD(addtllist);
if (!drbg->core) {
pr_devel("DRBG: not yet seeded\n");
return -EINVAL;
}
if (0 == buflen || !buf) {
pr_devel("DRBG: no output buffer provided\n");
return -EINVAL;
}
if (addtl && NULL == addtl->buf && 0 < addtl->len) {
pr_devel("DRBG: wrong format of additional information\n");
return -EINVAL;
}
/* 9.3.1 step 2 */
len = -EINVAL;
if (buflen > (drbg_max_request_bytes(drbg))) {
pr_devel("DRBG: requested random numbers too large %u\n",
buflen);
goto err;
}
/* 9.3.1 step 3 is implicit with the chosen DRBG */
/* 9.3.1 step 4 */
if (addtl && addtl->len > (drbg_max_addtl(drbg))) {
pr_devel("DRBG: additional information string too long %zu\n",
addtl->len);
goto err;
}
/* 9.3.1 step 5 is implicit with the chosen DRBG */
/*
* 9.3.1 step 6 and 9 supplemented by 9.3.2 step c is implemented
* here. The spec is a bit convoluted here, we make it simpler.
*/
if (drbg->reseed_threshold < drbg->reseed_ctr)
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
if (drbg->pr || drbg->seeded == DRBG_SEED_STATE_UNSEEDED) {
pr_devel("DRBG: reseeding before generation (prediction "
"resistance: %s, state %s)\n",
drbg->pr ? "true" : "false",
(drbg->seeded == DRBG_SEED_STATE_FULL ?
"seeded" : "unseeded"));
/* 9.3.1 steps 7.1 through 7.3 */
len = drbg_seed(drbg, addtl, true);
if (len)
goto err;
/* 9.3.1 step 7.4 */
addtl = NULL;
} else if (rng_is_initialized() &&
(drbg->seeded == DRBG_SEED_STATE_PARTIAL ||
drbg_nopr_reseed_interval_elapsed(drbg))) {
len = drbg_seed_from_random(drbg);
if (len)
goto err;
}
if (addtl && 0 < addtl->len)
list_add_tail(&addtl->list, &addtllist);
/* 9.3.1 step 8 and 10 */
len = drbg->d_ops->generate(drbg, buf, buflen, &addtllist);
/* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
drbg->reseed_ctr++;
if (0 >= len)
goto err;
/*
 * Section 11.3.3 requires self tests to be re-performed after a
 * certain amount of generated random numbers. The chosen value after
 * which the self test is performed is arbitrary, but it should be
 * reasonable. However, we do not perform the self tests for the
 * following reason: it is practically impossible that the initial
 * self tests were successful and the following ones are not. If the
 * initial tests passed and a later one failed, kernel integrity would
 * be violated. In this case, the entire kernel operation is
 * questionable and it is unlikely that the integrity violation only
 * affects the correct operation of the DRBG.
 *
 * Although the following code is commented out, it is provided in
 * case somebody has a need to implement the test of 11.3.3.
 */
#if 0
if (drbg->reseed_ctr && !(drbg->reseed_ctr % 4096)) {
int err = 0;
pr_devel("DRBG: start to perform self test\n");
if (drbg->core->flags & DRBG_HMAC)
err = alg_test("drbg_pr_hmac_sha256",
"drbg_pr_hmac_sha256", 0, 0);
else if (drbg->core->flags & DRBG_CTR)
err = alg_test("drbg_pr_ctr_aes128",
"drbg_pr_ctr_aes128", 0, 0);
else
err = alg_test("drbg_pr_sha256",
"drbg_pr_sha256", 0, 0);
if (err) {
pr_err("DRBG: periodical self test failed\n");
/*
* uninstantiate implies that from now on, only errors
* are returned when reusing this DRBG cipher handle
*/
drbg_uninstantiate(drbg);
return 0;
} else {
pr_devel("DRBG: self test successful\n");
}
}
#endif
/*
* All operations were successful, return 0 as mandated by
* the kernel crypto API interface.
*/
len = 0;
err:
return len;
}
/*
* Wrapper around drbg_generate which can pull arbitrary long strings
* from the DRBG without hitting the maximum request limitation.
*
* Parameters: see drbg_generate
* Return codes: see drbg_generate -- if one drbg_generate request fails,
* the entire drbg_generate_long request fails
*/
static int drbg_generate_long(struct drbg_state *drbg,
unsigned char *buf, unsigned int buflen,
struct drbg_string *addtl)
{
unsigned int len = 0;
unsigned int slice = 0;
do {
int err = 0;
unsigned int chunk = 0;
slice = ((buflen - len) / drbg_max_request_bytes(drbg));
chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
mutex_lock(&drbg->drbg_mutex);
err = drbg_generate(drbg, buf + len, chunk, addtl);
mutex_unlock(&drbg->drbg_mutex);
if (0 > err)
return err;
len += chunk;
} while (slice > 0 && (len < buflen));
return 0;
}
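/*
 * Worked example (informative): if buflen is 2.5 times
 * drbg_max_request_bytes(), the loop issues three drbg_generate()
 * calls -- two full-sized chunks while slice is non-zero, then one
 * final half-sized chunk after which slice drops to 0 and the loop
 * terminates.
 */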
static int drbg_prepare_hrng(struct drbg_state *drbg)
{
/* We do not need an HRNG in test mode. */
if (list_empty(&drbg->test_data.list))
return 0;
drbg->jent = crypto_alloc_rng("jitterentropy_rng", 0, 0);
if (IS_ERR(drbg->jent)) {
const int err = PTR_ERR(drbg->jent);
drbg->jent = NULL;
if (fips_enabled)
return err;
pr_info("DRBG: Continuing without Jitter RNG\n");
}
return 0;
}
/*
* DRBG instantiation function as required by SP800-90A - this function
* sets up the DRBG handle, performs the initial seeding and all sanity
* checks required by SP800-90A
*
* @drbg memory of state -- if NULL, new memory is allocated
* @pers Personalization string that is mixed into state, may be NULL -- note
* the entropy is pulled by the DRBG internally unconditionally
* as defined in SP800-90A. The additional input is mixed into
* the state in addition to the pulled entropy.
* @coreref reference to core
* @pr prediction resistance enabled
*
* return
* 0 on success
* error value otherwise
*/
static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
int coreref, bool pr)
{
int ret;
bool reseed = true;
pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
"%s\n", coreref, pr ? "enabled" : "disabled");
mutex_lock(&drbg->drbg_mutex);
/* 9.1 step 1 is implicit with the selected DRBG type */
/*
* 9.1 step 2 is implicit as caller can select prediction resistance
* and the flag is copied into drbg->flags --
* all DRBG types support prediction resistance
*/
/* 9.1 step 4 is implicit in drbg_sec_strength */
if (!drbg->core) {
drbg->core = &drbg_cores[coreref];
drbg->pr = pr;
drbg->seeded = DRBG_SEED_STATE_UNSEEDED;
drbg->last_seed_time = 0;
drbg->reseed_threshold = drbg_max_requests(drbg);
ret = drbg_alloc_state(drbg);
if (ret)
goto unlock;
ret = drbg_prepare_hrng(drbg);
if (ret)
goto free_everything;
reseed = false;
}
ret = drbg_seed(drbg, pers, reseed);
if (ret && !reseed)
goto free_everything;
mutex_unlock(&drbg->drbg_mutex);
return ret;
unlock:
mutex_unlock(&drbg->drbg_mutex);
return ret;
free_everything:
mutex_unlock(&drbg->drbg_mutex);
drbg_uninstantiate(drbg);
return ret;
}
/*
* DRBG uninstantiate function as required by SP800-90A - this function
* frees all buffers and the DRBG handle
*
* @drbg DRBG state handle
*
* return
* 0 on success
*/
static int drbg_uninstantiate(struct drbg_state *drbg)
{
if (!IS_ERR_OR_NULL(drbg->jent))
crypto_free_rng(drbg->jent);
drbg->jent = NULL;
if (drbg->d_ops)
drbg->d_ops->crypto_fini(drbg);
drbg_dealloc_state(drbg);
/* no scrubbing of test_data -- this shall survive an uninstantiate */
return 0;
}
/*
* Helper function for setting the test data in the DRBG
*
* @drbg DRBG state handle
* @data test data
* @len test data length
*/
static void drbg_kcapi_set_entropy(struct crypto_rng *tfm,
const u8 *data, unsigned int len)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
mutex_lock(&drbg->drbg_mutex);
drbg_string_fill(&drbg->test_data, data, len);
mutex_unlock(&drbg->drbg_mutex);
}
/***************************************************************
* Kernel crypto API cipher invocations requested by DRBG
***************************************************************/
#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
struct sdesc {
struct shash_desc shash;
char ctx[];
};
static int drbg_init_hash_kernel(struct drbg_state *drbg)
{
struct sdesc *sdesc;
struct crypto_shash *tfm;
tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_info("DRBG: could not allocate digest TFM handle: %s\n",
drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
GFP_KERNEL);
if (!sdesc) {
crypto_free_shash(tfm);
return -ENOMEM;
}
sdesc->shash.tfm = tfm;
drbg->priv_data = sdesc;
return crypto_shash_alignmask(tfm);
}
static int drbg_fini_hash_kernel(struct drbg_state *drbg)
{
struct sdesc *sdesc = drbg->priv_data;
if (sdesc) {
crypto_free_shash(sdesc->shash.tfm);
kfree_sensitive(sdesc);
}
drbg->priv_data = NULL;
return 0;
}
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct sdesc *sdesc = drbg->priv_data;
crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
}
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in)
{
struct sdesc *sdesc = drbg->priv_data;
struct drbg_string *input = NULL;
crypto_shash_init(&sdesc->shash);
list_for_each_entry(input, in, list)
crypto_shash_update(&sdesc->shash, input->buf, input->len);
return crypto_shash_final(&sdesc->shash, outval);
}
#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
#ifdef CONFIG_CRYPTO_DRBG_CTR
static int drbg_fini_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm =
(struct crypto_cipher *)drbg->priv_data;
if (tfm)
crypto_free_cipher(tfm);
drbg->priv_data = NULL;
if (drbg->ctr_handle)
crypto_free_skcipher(drbg->ctr_handle);
drbg->ctr_handle = NULL;
if (drbg->ctr_req)
skcipher_request_free(drbg->ctr_req);
drbg->ctr_req = NULL;
kfree(drbg->outscratchpadbuf);
drbg->outscratchpadbuf = NULL;
return 0;
}
static int drbg_init_sym_kernel(struct drbg_state *drbg)
{
struct crypto_cipher *tfm;
struct crypto_skcipher *sk_tfm;
struct skcipher_request *req;
unsigned int alignmask;
char ctr_name[CRYPTO_MAX_ALG_NAME];
tfm = crypto_alloc_cipher(drbg->core->backend_cra_name, 0, 0);
if (IS_ERR(tfm)) {
pr_info("DRBG: could not allocate cipher TFM handle: %s\n",
drbg->core->backend_cra_name);
return PTR_ERR(tfm);
}
BUG_ON(drbg_blocklen(drbg) != crypto_cipher_blocksize(tfm));
drbg->priv_data = tfm;
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
drbg->core->backend_cra_name) >= CRYPTO_MAX_ALG_NAME) {
drbg_fini_sym_kernel(drbg);
return -EINVAL;
}
sk_tfm = crypto_alloc_skcipher(ctr_name, 0, 0);
if (IS_ERR(sk_tfm)) {
pr_info("DRBG: could not allocate CTR cipher TFM handle: %s\n",
ctr_name);
drbg_fini_sym_kernel(drbg);
return PTR_ERR(sk_tfm);
}
drbg->ctr_handle = sk_tfm;
crypto_init_wait(&drbg->ctr_wait);
req = skcipher_request_alloc(sk_tfm, GFP_KERNEL);
if (!req) {
pr_info("DRBG: could not allocate request queue\n");
drbg_fini_sym_kernel(drbg);
return -ENOMEM;
}
drbg->ctr_req = req;
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
CRYPTO_TFM_REQ_MAY_SLEEP,
crypto_req_done, &drbg->ctr_wait);
alignmask = crypto_skcipher_alignmask(sk_tfm);
drbg->outscratchpadbuf = kmalloc(DRBG_OUTSCRATCHLEN + alignmask,
GFP_KERNEL);
if (!drbg->outscratchpadbuf) {
drbg_fini_sym_kernel(drbg);
return -ENOMEM;
}
drbg->outscratchpad = (u8 *)PTR_ALIGN(drbg->outscratchpadbuf,
alignmask + 1);
sg_init_table(&drbg->sg_in, 1);
sg_init_one(&drbg->sg_out, drbg->outscratchpad, DRBG_OUTSCRATCHLEN);
return alignmask;
}
static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key)
{
struct crypto_cipher *tfm = drbg->priv_data;
crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
}
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in)
{
struct crypto_cipher *tfm = drbg->priv_data;
	/* there is only one component in *in */
BUG_ON(in->len < drbg_blocklen(drbg));
crypto_cipher_encrypt_one(tfm, outval, in->buf);
return 0;
}
static int drbg_kcapi_sym_ctr(struct drbg_state *drbg,
u8 *inbuf, u32 inlen,
u8 *outbuf, u32 outlen)
{
struct scatterlist *sg_in = &drbg->sg_in, *sg_out = &drbg->sg_out;
u32 scratchpad_use = min_t(u32, outlen, DRBG_OUTSCRATCHLEN);
int ret;
if (inbuf) {
/* Use caller-provided input buffer */
sg_set_buf(sg_in, inbuf, inlen);
} else {
/* Use scratchpad for in-place operation */
inlen = scratchpad_use;
memset(drbg->outscratchpad, 0, scratchpad_use);
sg_set_buf(sg_in, drbg->outscratchpad, scratchpad_use);
}
while (outlen) {
u32 cryptlen = min3(inlen, outlen, (u32)DRBG_OUTSCRATCHLEN);
/* Output buffer may not be valid for SGL, use scratchpad */
skcipher_request_set_crypt(drbg->ctr_req, sg_in, sg_out,
cryptlen, drbg->V);
ret = crypto_wait_req(crypto_skcipher_encrypt(drbg->ctr_req),
&drbg->ctr_wait);
if (ret)
goto out;
crypto_init_wait(&drbg->ctr_wait);
memcpy(outbuf, drbg->outscratchpad, cryptlen);
memzero_explicit(drbg->outscratchpad, cryptlen);
outlen -= cryptlen;
outbuf += cryptlen;
}
ret = 0;
out:
return ret;
}
#endif /* CONFIG_CRYPTO_DRBG_CTR */
/***************************************************************
* Kernel crypto API interface to register DRBG
***************************************************************/
/*
 * Look up the DRBG core and prediction resistance flag for a given kernel
 * crypto API cra_driver_name. The code uses the drbg_cores definition to
 * do this.
 *
 * @cra_driver_name kernel crypto API cra_driver_name
 * @coreref reference to integer which is filled with the index of
 *	    the applicable core
 * @pr reference for setting prediction resistance
 *
 * return: nothing; *coreref and *pr are only updated on a match
*/
static inline void drbg_convert_tfm_core(const char *cra_driver_name,
int *coreref, bool *pr)
{
int i = 0;
size_t start = 0;
int len = 0;
*pr = true;
/* disassemble the names */
if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
start = 10;
*pr = false;
} else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
start = 8;
} else {
return;
}
/* remove the first part */
len = strlen(cra_driver_name) - start;
for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) {
if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
len)) {
*coreref = i;
return;
}
}
}
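/*
 * Example (illustrative sketch, not part of the original file): how
 * cra_driver_names are disassembled by drbg_convert_tfm_core(). The
 * names assume the usual drbg_cores[] entries.
 */
static void __maybe_unused drbg_convert_tfm_core_example(void)
{
	int coreref = 0;
	bool pr = false;

	/* prediction resistance enabled, coreref indexes "hmac_sha256" */
	drbg_convert_tfm_core("drbg_pr_hmac_sha256", &coreref, &pr);

	/* prediction resistance disabled, coreref indexes "ctr_aes128" */
	drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
}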
static int drbg_kcapi_init(struct crypto_tfm *tfm)
{
struct drbg_state *drbg = crypto_tfm_ctx(tfm);
mutex_init(&drbg->drbg_mutex);
return 0;
}
static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
{
drbg_uninstantiate(crypto_tfm_ctx(tfm));
}
/*
 * Generate random numbers, invoked by the kernel crypto API.
 * The kernel crypto API RNG interface is extended as follows:
*
* src is additional input supplied to the RNG.
* slen is the length of src.
* dst is the output buffer where random data is to be stored.
* dlen is the length of dst.
*/
static int drbg_kcapi_random(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
struct drbg_string *addtl = NULL;
struct drbg_string string;
if (slen) {
/* linked list variable is now local to allow modification */
drbg_string_fill(&string, src, slen);
addtl = &string;
}
return drbg_generate_long(drbg, dst, dlen, addtl);
}
/*
* Seed the DRBG invoked by the kernel crypto API
*/
static int drbg_kcapi_seed(struct crypto_rng *tfm,
const u8 *seed, unsigned int slen)
{
struct drbg_state *drbg = crypto_rng_ctx(tfm);
struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
bool pr = false;
struct drbg_string string;
struct drbg_string *seed_string = NULL;
int coreref = 0;
drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
&pr);
if (0 < slen) {
drbg_string_fill(&string, seed, slen);
seed_string = &string;
}
return drbg_instantiate(drbg, seed_string, coreref, pr);
}
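/*
 * Example (illustrative sketch, not part of the original file): how a
 * kernel user drives this DRBG through the generic crypto_rng API from
 * <crypto/rng.h>. The personalization string is optional.
 */
static int __maybe_unused drbg_usage_example(void)
{
	static const u8 pers[] = "example personalization";
	struct crypto_rng *rng;
	u8 out[64];
	int ret;

	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* instantiates the DRBG via drbg_kcapi_seed() above */
	ret = crypto_rng_reset(rng, pers, sizeof(pers) - 1);
	if (!ret)
		/* generates via drbg_kcapi_random(), no additional input */
		ret = crypto_rng_get_bytes(rng, out, sizeof(out));

	crypto_free_rng(rng);
	return ret;
}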
/***************************************************************
* Kernel module: code to load the module
***************************************************************/
/*
* Tests as defined in 11.3.2 in addition to the cipher tests: testing
* of the error handling.
*
 * Note: testing of a failing seed source as defined in 11.3.2 is not
 * applicable as the seed source of get_random_bytes does not fail.
*
* Note 2: There is no sensible way of testing the reseed counter
* enforcement, so skip it.
*/
static inline int __init drbg_healthcheck_sanity(void)
{
int len = 0;
#define OUTBUFLEN 16
unsigned char buf[OUTBUFLEN];
struct drbg_state *drbg = NULL;
int ret;
int rc = -EFAULT;
bool pr = false;
int coreref = 0;
struct drbg_string addtl;
size_t max_addtllen, max_request_bytes;
/* only perform test in FIPS mode */
if (!fips_enabled)
return 0;
#ifdef CONFIG_CRYPTO_DRBG_CTR
drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
#elif defined CONFIG_CRYPTO_DRBG_HASH
drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
#else
drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
#endif
drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
if (!drbg)
return -ENOMEM;
mutex_init(&drbg->drbg_mutex);
drbg->core = &drbg_cores[coreref];
drbg->reseed_threshold = drbg_max_requests(drbg);
/*
* if the following tests fail, it is likely that there is a buffer
* overflow as buf is much smaller than the requested or provided
* string lengths -- in case the error handling does not succeed
* we may get an OOPS. And we want to get an OOPS as this is a
* grave bug.
*/
max_addtllen = drbg_max_addtl(drbg);
max_request_bytes = drbg_max_request_bytes(drbg);
drbg_string_fill(&addtl, buf, max_addtllen + 1);
	/* overflow addtllen with additional info string */
len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
BUG_ON(0 < len);
/* overflow max_bits */
len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
BUG_ON(0 < len);
/* overflow max addtllen with personalization string */
ret = drbg_seed(drbg, &addtl, false);
BUG_ON(0 == ret);
/* all tests passed */
rc = 0;
pr_devel("DRBG: Sanity tests for failure code paths successfully "
"completed\n");
kfree(drbg);
return rc;
}
static struct rng_alg drbg_algs[22];
/*
* Fill the array drbg_algs used to register the different DRBGs
* with the kernel crypto API. To fill the array, the information
* from drbg_cores[] is used.
*/
static inline void __init drbg_fill_array(struct rng_alg *alg,
const struct drbg_core *core, int pr)
{
int pos = 0;
static int priority = 200;
memcpy(alg->base.cra_name, "stdrng", 6);
if (pr) {
memcpy(alg->base.cra_driver_name, "drbg_pr_", 8);
pos = 8;
} else {
memcpy(alg->base.cra_driver_name, "drbg_nopr_", 10);
pos = 10;
}
memcpy(alg->base.cra_driver_name + pos, core->cra_name,
strlen(core->cra_name));
alg->base.cra_priority = priority;
priority++;
/*
* If FIPS mode enabled, the selected DRBG shall have the
* highest cra_priority over other stdrng instances to ensure
* it is selected.
*/
if (fips_enabled)
alg->base.cra_priority += 200;
alg->base.cra_ctxsize = sizeof(struct drbg_state);
alg->base.cra_module = THIS_MODULE;
alg->base.cra_init = drbg_kcapi_init;
alg->base.cra_exit = drbg_kcapi_cleanup;
alg->generate = drbg_kcapi_random;
alg->seed = drbg_kcapi_seed;
alg->set_ent = drbg_kcapi_set_entropy;
alg->seedsize = 0;
}
static int __init drbg_init(void)
{
	unsigned int i = 0; /* index into drbg_algs */
	unsigned int j = 0; /* index into drbg_cores */
int ret;
ret = drbg_healthcheck_sanity();
if (ret)
return ret;
if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
pr_info("DRBG: Cannot register all DRBG types"
"(slots needed: %zu, slots available: %zu)\n",
ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
return -EFAULT;
}
/*
* each DRBG definition can be used with PR and without PR, thus
* we instantiate each DRBG in drbg_cores[] twice.
*
* As the order of placing them into the drbg_algs array matters
* (the later DRBGs receive a higher cra_priority) we register the
	 * prediction resistance DRBGs first as they should not be too
* interesting.
*/
for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
return crypto_register_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
static void __exit drbg_exit(void)
{
crypto_unregister_rngs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
}
subsys_initcall(drbg_init);
module_exit(drbg_exit);
#ifndef CRYPTO_DRBG_HASH_STRING
#define CRYPTO_DRBG_HASH_STRING ""
#endif
#ifndef CRYPTO_DRBG_HMAC_STRING
#define CRYPTO_DRBG_HMAC_STRING ""
#endif
#ifndef CRYPTO_DRBG_CTR_STRING
#define CRYPTO_DRBG_CTR_STRING ""
#endif
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
"using following cores: "
CRYPTO_DRBG_HASH_STRING
CRYPTO_DRBG_HMAC_STRING
CRYPTO_DRBG_CTR_STRING);
MODULE_ALIAS_CRYPTO("stdrng");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/drbg.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
 * TEA, XTEA, and XETA crypto algorithms
*
* The TEA and Xtended TEA algorithms were developed by David Wheeler
* and Roger Needham at the Computer Laboratory of Cambridge University.
*
* Due to the order of evaluation in XTEA many people have incorrectly
 * implemented it. XETA (XTEA in the wrong order) exists for
* compatibility with these implementations.
*
* Copyright (c) 2004 Aaron Grothe [email protected]
*/
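/*
 * Illustrative note (not part of the original file): the only difference
 * between the XTEA and XETA round functions implemented below is the
 * placement of parentheses, i.e. whether "sum" is added inside the key
 * mixing term or XORed with the working variable:
 *
 *	XTEA: y += ((z << 4 ^ z >> 5) + z) ^ (sum + KEY[sum & 3]);
 *	XETA: y += (z << 4 ^ z >> 5) + (z ^ sum) + KEY[sum & 3];
 *
 * XETA is kept so that data produced by the widespread incorrect
 * implementations can still be decrypted.
 */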
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define TEA_KEY_SIZE 16
#define TEA_BLOCK_SIZE 8
#define TEA_ROUNDS 32
#define TEA_DELTA 0x9e3779b9
#define XTEA_KEY_SIZE 16
#define XTEA_BLOCK_SIZE 8
#define XTEA_ROUNDS 32
#define XTEA_DELTA 0x9e3779b9
struct tea_ctx {
u32 KEY[4];
};
struct xtea_ctx {
u32 KEY[4];
};
static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
}
static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum = 0;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
n = TEA_ROUNDS;
while (n-- > 0) {
sum += TEA_DELTA;
y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, n, sum;
u32 k0, k1, k2, k3;
struct tea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
k0 = ctx->KEY[0];
k1 = ctx->KEY[1];
k2 = ctx->KEY[2];
k3 = ctx->KEY[3];
sum = TEA_DELTA << 5;
n = TEA_ROUNDS;
while (n-- > 0) {
z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
sum -= TEA_DELTA;
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *key = (const __le32 *)in_key;
ctx->KEY[0] = le32_to_cpu(key[0]);
ctx->KEY[1] = le32_to_cpu(key[1]);
ctx->KEY[2] = le32_to_cpu(key[2]);
ctx->KEY[3] = le32_to_cpu(key[3]);
return 0;
}
static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
while (sum != limit) {
		y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
		sum += XTEA_DELTA;
		z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum >> 11 & 3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
		z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum >> 11 & 3]);
sum -= XTEA_DELTA;
y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]);
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum = 0;
u32 limit = XTEA_DELTA * XTEA_ROUNDS;
struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
while (sum != limit) {
		y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
		sum += XTEA_DELTA;
		z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum >> 11 & 3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
u32 y, z, sum;
	struct xtea_ctx *ctx = crypto_tfm_ctx(tfm);
const __le32 *in = (const __le32 *)src;
__le32 *out = (__le32 *)dst;
y = le32_to_cpu(in[0]);
z = le32_to_cpu(in[1]);
sum = XTEA_DELTA * XTEA_ROUNDS;
while (sum) {
		z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum >> 11 & 3];
sum -= XTEA_DELTA;
y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3];
}
out[0] = cpu_to_le32(y);
out[1] = cpu_to_le32(z);
}
static struct crypto_alg tea_algs[3] = { {
.cra_name = "tea",
.cra_driver_name = "tea-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = TEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct tea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = TEA_KEY_SIZE,
.cia_max_keysize = TEA_KEY_SIZE,
.cia_setkey = tea_setkey,
.cia_encrypt = tea_encrypt,
.cia_decrypt = tea_decrypt } }
}, {
.cra_name = "xtea",
.cra_driver_name = "xtea-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xtea_encrypt,
.cia_decrypt = xtea_decrypt } }
}, {
.cra_name = "xeta",
.cra_driver_name = "xeta-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = XTEA_BLOCK_SIZE,
.cra_ctxsize = sizeof (struct xtea_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = XTEA_KEY_SIZE,
.cia_max_keysize = XTEA_KEY_SIZE,
.cia_setkey = xtea_setkey,
.cia_encrypt = xeta_encrypt,
.cia_decrypt = xeta_decrypt } }
} };
static int __init tea_mod_init(void)
{
return crypto_register_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
static void __exit tea_mod_fini(void)
{
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
subsys_initcall(tea_mod_init);
module_exit(tea_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
| linux-master | crypto/tea.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Shared crypto simd helpers
*
* Copyright (c) 2012 Jussi Kivilinna <[email protected]>
* Copyright (c) 2016 Herbert Xu <[email protected]>
* Copyright (c) 2019 Google LLC
*
* Based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <[email protected]>
*/
/*
* Shared crypto SIMD helpers. These functions dynamically create and register
* an skcipher or AEAD algorithm that wraps another, internal algorithm. The
* wrapper ensures that the internal algorithm is only executed in a context
* where SIMD instructions are usable, i.e. where may_use_simd() returns true.
* If SIMD is already usable, the wrapper directly calls the internal algorithm.
* Otherwise it defers execution to a workqueue via cryptd.
*
* This is an alternative to the internal algorithm implementing a fallback for
* the !may_use_simd() case itself.
*
* Note that the wrapper algorithm is asynchronous, i.e. it has the
* CRYPTO_ALG_ASYNC flag set. Therefore it won't be found by users who
* explicitly allocate a synchronous algorithm.
*/
#include <crypto/cryptd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <asm/simd.h>
/* skcipher support */
struct simd_skcipher_alg {
const char *ialg_name;
struct skcipher_alg alg;
};
struct simd_skcipher_ctx {
struct cryptd_skcipher *cryptd_tfm;
};
static int simd_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child = &ctx->cryptd_tfm->base;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, key_len);
}
static int simd_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq;
struct crypto_skcipher *child;
subreq = skcipher_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
return crypto_skcipher_encrypt(subreq);
}
static int simd_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq;
struct crypto_skcipher *child;
subreq = skcipher_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_skcipher_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_skcipher_child(ctx->cryptd_tfm);
skcipher_request_set_tfm(subreq, child);
return crypto_skcipher_decrypt(subreq);
}
static void simd_skcipher_exit(struct crypto_skcipher *tfm)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
cryptd_free_skcipher(ctx->cryptd_tfm);
}
static int simd_skcipher_init(struct crypto_skcipher *tfm)
{
struct simd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
struct cryptd_skcipher *cryptd_tfm;
struct simd_skcipher_alg *salg;
struct skcipher_alg *alg;
	unsigned int reqsize;
alg = crypto_skcipher_alg(tfm);
salg = container_of(alg, struct simd_skcipher_alg, alg);
cryptd_tfm = cryptd_alloc_skcipher(salg->ialg_name,
CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
reqsize += sizeof(struct skcipher_request);
crypto_skcipher_set_reqsize(tfm, reqsize);
return 0;
}
struct simd_skcipher_alg *simd_skcipher_create_compat(const char *algname,
const char *drvname,
const char *basename)
{
struct simd_skcipher_alg *salg;
struct crypto_skcipher *tfm;
struct skcipher_alg *ialg;
struct skcipher_alg *alg;
int err;
tfm = crypto_alloc_skcipher(basename, CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
ialg = crypto_skcipher_alg(tfm);
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
goto out_put_tfm;
}
salg->ialg_name = basename;
alg = &salg->alg;
err = -ENAMETOOLONG;
if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
alg->base.cra_module = ialg->base.cra_module;
alg->base.cra_ctxsize = sizeof(struct simd_skcipher_ctx);
alg->ivsize = ialg->ivsize;
alg->chunksize = ialg->chunksize;
alg->min_keysize = ialg->min_keysize;
alg->max_keysize = ialg->max_keysize;
alg->init = simd_skcipher_init;
alg->exit = simd_skcipher_exit;
alg->setkey = simd_skcipher_setkey;
alg->encrypt = simd_skcipher_encrypt;
alg->decrypt = simd_skcipher_decrypt;
err = crypto_register_skcipher(alg);
if (err)
goto out_free_salg;
out_put_tfm:
crypto_free_skcipher(tfm);
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_skcipher_create_compat);
struct simd_skcipher_alg *simd_skcipher_create(const char *algname,
const char *basename)
{
char drvname[CRYPTO_MAX_ALG_NAME];
if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return simd_skcipher_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_skcipher_create);
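/*
 * Example (illustrative sketch, not part of this file): wrapping a
 * hypothetical internal implementation "__ctr-aes-myarch" that was
 * registered with CRYPTO_ALG_INTERNAL. The wrapper becomes visible as
 * driver "simd-__ctr-aes-myarch" for algorithm "ctr(aes)".
 */
static int __maybe_unused simd_create_example(struct simd_skcipher_alg **out)
{
	struct simd_skcipher_alg *simd;

	simd = simd_skcipher_create("ctr(aes)", "__ctr-aes-myarch");
	if (IS_ERR(simd))
		return PTR_ERR(simd);
	*out = simd;
	return 0;
}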
void simd_skcipher_free(struct simd_skcipher_alg *salg)
{
crypto_unregister_skcipher(&salg->alg);
kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_skcipher_free);
int simd_register_skciphers_compat(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs)
{
int err;
int i;
const char *algname;
const char *drvname;
const char *basename;
struct simd_skcipher_alg *simd;
err = crypto_register_skciphers(algs, count);
if (err)
return err;
for (i = 0; i < count; i++) {
WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
simd = simd_skcipher_create_compat(algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
simd_algs[i] = simd;
}
return 0;
err_unregister:
simd_unregister_skciphers(algs, count, simd_algs);
return err;
}
EXPORT_SYMBOL_GPL(simd_register_skciphers_compat);
void simd_unregister_skciphers(struct skcipher_alg *algs, int count,
struct simd_skcipher_alg **simd_algs)
{
int i;
crypto_unregister_skciphers(algs, count);
for (i = 0; i < count; i++) {
if (simd_algs[i]) {
simd_skcipher_free(simd_algs[i]);
simd_algs[i] = NULL;
}
}
}
EXPORT_SYMBOL_GPL(simd_unregister_skciphers);
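/*
 * Example (illustrative sketch, not part of this file): typical use of
 * the compat helpers by an arch module. "my_internal_algs" stands for a
 * hypothetical array of internal algorithms whose cra_name and
 * cra_driver_name both carry the "__" prefix checked above.
 */
static struct skcipher_alg my_internal_algs[1];		/* hypothetical */
static struct simd_skcipher_alg *my_simd_algs[ARRAY_SIZE(my_internal_algs)];

static int __maybe_unused my_arch_module_init(void)
{
	return simd_register_skciphers_compat(my_internal_algs,
					      ARRAY_SIZE(my_internal_algs),
					      my_simd_algs);
}

static void __maybe_unused my_arch_module_exit(void)
{
	simd_unregister_skciphers(my_internal_algs,
				  ARRAY_SIZE(my_internal_algs),
				  my_simd_algs);
}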
/* AEAD support */
struct simd_aead_alg {
const char *ialg_name;
struct aead_alg alg;
};
struct simd_aead_ctx {
struct cryptd_aead *cryptd_tfm;
};
static int simd_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int key_len)
{
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *child = &ctx->cryptd_tfm->base;
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(child, key, key_len);
}
static int simd_aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *child = &ctx->cryptd_tfm->base;
return crypto_aead_setauthsize(child, authsize);
}
static int simd_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_request *subreq;
struct crypto_aead *child;
subreq = aead_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_aead_child(ctx->cryptd_tfm);
aead_request_set_tfm(subreq, child);
return crypto_aead_encrypt(subreq);
}
static int simd_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct aead_request *subreq;
struct crypto_aead *child;
subreq = aead_request_ctx(req);
*subreq = *req;
if (!crypto_simd_usable() ||
(in_atomic() && cryptd_aead_queued(ctx->cryptd_tfm)))
child = &ctx->cryptd_tfm->base;
else
child = cryptd_aead_child(ctx->cryptd_tfm);
aead_request_set_tfm(subreq, child);
return crypto_aead_decrypt(subreq);
}
static void simd_aead_exit(struct crypto_aead *tfm)
{
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
cryptd_free_aead(ctx->cryptd_tfm);
}
static int simd_aead_init(struct crypto_aead *tfm)
{
struct simd_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct cryptd_aead *cryptd_tfm;
struct simd_aead_alg *salg;
struct aead_alg *alg;
	unsigned int reqsize;
alg = crypto_aead_alg(tfm);
salg = container_of(alg, struct simd_aead_alg, alg);
cryptd_tfm = cryptd_alloc_aead(salg->ialg_name, CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
reqsize = crypto_aead_reqsize(cryptd_aead_child(cryptd_tfm));
reqsize = max(reqsize, crypto_aead_reqsize(&cryptd_tfm->base));
reqsize += sizeof(struct aead_request);
crypto_aead_set_reqsize(tfm, reqsize);
return 0;
}
struct simd_aead_alg *simd_aead_create_compat(const char *algname,
const char *drvname,
const char *basename)
{
struct simd_aead_alg *salg;
struct crypto_aead *tfm;
struct aead_alg *ialg;
struct aead_alg *alg;
int err;
tfm = crypto_alloc_aead(basename, CRYPTO_ALG_INTERNAL,
CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
ialg = crypto_aead_alg(tfm);
salg = kzalloc(sizeof(*salg), GFP_KERNEL);
if (!salg) {
salg = ERR_PTR(-ENOMEM);
goto out_put_tfm;
}
salg->ialg_name = basename;
alg = &salg->alg;
err = -ENAMETOOLONG;
if (snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", algname) >=
CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
if (snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
drvname) >= CRYPTO_MAX_ALG_NAME)
goto out_free_salg;
alg->base.cra_flags = CRYPTO_ALG_ASYNC |
(ialg->base.cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
alg->base.cra_priority = ialg->base.cra_priority;
alg->base.cra_blocksize = ialg->base.cra_blocksize;
alg->base.cra_alignmask = ialg->base.cra_alignmask;
alg->base.cra_module = ialg->base.cra_module;
alg->base.cra_ctxsize = sizeof(struct simd_aead_ctx);
alg->ivsize = ialg->ivsize;
alg->maxauthsize = ialg->maxauthsize;
alg->chunksize = ialg->chunksize;
alg->init = simd_aead_init;
alg->exit = simd_aead_exit;
alg->setkey = simd_aead_setkey;
alg->setauthsize = simd_aead_setauthsize;
alg->encrypt = simd_aead_encrypt;
alg->decrypt = simd_aead_decrypt;
err = crypto_register_aead(alg);
if (err)
goto out_free_salg;
out_put_tfm:
crypto_free_aead(tfm);
return salg;
out_free_salg:
kfree(salg);
salg = ERR_PTR(err);
goto out_put_tfm;
}
EXPORT_SYMBOL_GPL(simd_aead_create_compat);
struct simd_aead_alg *simd_aead_create(const char *algname,
const char *basename)
{
char drvname[CRYPTO_MAX_ALG_NAME];
if (snprintf(drvname, CRYPTO_MAX_ALG_NAME, "simd-%s", basename) >=
CRYPTO_MAX_ALG_NAME)
return ERR_PTR(-ENAMETOOLONG);
return simd_aead_create_compat(algname, drvname, basename);
}
EXPORT_SYMBOL_GPL(simd_aead_create);
void simd_aead_free(struct simd_aead_alg *salg)
{
crypto_unregister_aead(&salg->alg);
kfree(salg);
}
EXPORT_SYMBOL_GPL(simd_aead_free);
int simd_register_aeads_compat(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs)
{
int err;
int i;
const char *algname;
const char *drvname;
const char *basename;
struct simd_aead_alg *simd;
err = crypto_register_aeads(algs, count);
if (err)
return err;
for (i = 0; i < count; i++) {
WARN_ON(strncmp(algs[i].base.cra_name, "__", 2));
WARN_ON(strncmp(algs[i].base.cra_driver_name, "__", 2));
algname = algs[i].base.cra_name + 2;
drvname = algs[i].base.cra_driver_name + 2;
basename = algs[i].base.cra_driver_name;
simd = simd_aead_create_compat(algname, drvname, basename);
err = PTR_ERR(simd);
if (IS_ERR(simd))
goto err_unregister;
simd_algs[i] = simd;
}
return 0;
err_unregister:
simd_unregister_aeads(algs, count, simd_algs);
return err;
}
EXPORT_SYMBOL_GPL(simd_register_aeads_compat);
void simd_unregister_aeads(struct aead_alg *algs, int count,
struct simd_aead_alg **simd_algs)
{
int i;
crypto_unregister_aeads(algs, count);
for (i = 0; i < count; i++) {
if (simd_algs[i]) {
simd_aead_free(simd_algs[i]);
simd_algs[i] = NULL;
}
}
}
EXPORT_SYMBOL_GPL(simd_unregister_aeads);
MODULE_LICENSE("GPL");
| linux-master | crypto/simd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* POLYVAL: hash function for HCTR2.
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <[email protected]>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <[email protected]>
* Copyright 2021 Google LLC
*/
/*
* Code based on crypto/ghash-generic.c
*
* POLYVAL is a keyed hash function similar to GHASH. POLYVAL uses a different
* modulus for finite field multiplication which makes hardware accelerated
* implementations on little-endian machines faster. POLYVAL is used in the
* kernel to implement HCTR2, but was originally specified for AES-GCM-SIV
* (RFC 8452).
*
* For more information see:
* Length-preserving encryption with HCTR2:
* https://eprint.iacr.org/2021/1441.pdf
* AES-GCM-SIV: Nonce Misuse-Resistant Authenticated Encryption:
* https://datatracker.ietf.org/doc/html/rfc8452
*
* Like GHASH, POLYVAL is not a cryptographic hash function and should
* not be used outside of crypto modes explicitly designed to use POLYVAL.
*
* This implementation uses a convenient trick involving the GHASH and POLYVAL
* fields. This trick allows multiplication in the POLYVAL field to be
* implemented by using multiplication in the GHASH field as a subroutine. An
* element of the POLYVAL field can be converted to an element of the GHASH
* field by computing x*REVERSE(a), where REVERSE reverses the byte-ordering of
* a. Similarly, an element of the GHASH field can be converted back to the
* POLYVAL field by computing REVERSE(x^{-1}*a). For more information, see:
* https://datatracker.ietf.org/doc/html/rfc8452#appendix-A
*
* By using this trick, we do not need to implement the POLYVAL field for the
* generic implementation.
*
* Warning: this generic implementation is not intended to be used in practice
* and is not constant time. For practical use, a hardware accelerated
* implementation of POLYVAL should be used instead.
*
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/polyval.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
struct polyval_tfm_ctx {
struct gf128mul_4k *gf128;
};
struct polyval_desc_ctx {
union {
u8 buffer[POLYVAL_BLOCK_SIZE];
be128 buffer128;
};
u32 bytes;
};
static void copy_and_reverse(u8 dst[POLYVAL_BLOCK_SIZE],
const u8 src[POLYVAL_BLOCK_SIZE])
{
u64 a = get_unaligned((const u64 *)&src[0]);
u64 b = get_unaligned((const u64 *)&src[8]);
put_unaligned(swab64(a), (u64 *)&dst[8]);
put_unaligned(swab64(b), (u64 *)&dst[0]);
}
/*
* Performs multiplication in the POLYVAL field using the GHASH field as a
* subroutine. This function is used as a fallback for hardware accelerated
* implementations when simd registers are unavailable.
*
 * Note: This function is not used for polyval-generic; instead we use the 4k
* lookup table implementation for finite field multiplication.
*/
void polyval_mul_non4k(u8 *op1, const u8 *op2)
{
be128 a, b;
// Assume one argument is in Montgomery form and one is not.
copy_and_reverse((u8 *)&a, op1);
copy_and_reverse((u8 *)&b, op2);
gf128mul_x_lle(&a, &a);
gf128mul_lle(&a, &b);
copy_and_reverse(op1, (u8 *)&a);
}
EXPORT_SYMBOL_GPL(polyval_mul_non4k);
/*
* Perform a POLYVAL update using non4k multiplication. This function is used
* as a fallback for hardware accelerated implementations when simd registers
* are unavailable.
*
 * Note: This function is not used for polyval-generic; instead we use the 4k
* lookup table implementation of finite field multiplication.
*/
void polyval_update_non4k(const u8 *key, const u8 *in,
size_t nblocks, u8 *accumulator)
{
while (nblocks--) {
crypto_xor(accumulator, in, POLYVAL_BLOCK_SIZE);
polyval_mul_non4k(accumulator, key);
in += POLYVAL_BLOCK_SIZE;
}
}
EXPORT_SYMBOL_GPL(polyval_update_non4k);
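/*
 * Example (illustrative sketch, not part of this file): how an
 * accelerated driver might fall back to the helpers above when SIMD
 * registers are unavailable. "key" is the POLYVAL key and "in" holds
 * nblocks full 16-byte blocks.
 */
static void __maybe_unused polyval_fallback_example(const u8 *key,
						    const u8 *in,
						    size_t nblocks)
{
	u8 accumulator[POLYVAL_BLOCK_SIZE] = {};

	polyval_update_non4k(key, in, nblocks, accumulator);
	/* accumulator now holds the running POLYVAL state */
}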
static int polyval_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct polyval_tfm_ctx *ctx = crypto_shash_ctx(tfm);
be128 k;
if (keylen != POLYVAL_BLOCK_SIZE)
return -EINVAL;
gf128mul_free_4k(ctx->gf128);
BUILD_BUG_ON(sizeof(k) != POLYVAL_BLOCK_SIZE);
copy_and_reverse((u8 *)&k, key);
gf128mul_x_lle(&k, &k);
ctx->gf128 = gf128mul_init_4k_lle(&k);
memzero_explicit(&k, POLYVAL_BLOCK_SIZE);
if (!ctx->gf128)
return -ENOMEM;
return 0;
}
static int polyval_init(struct shash_desc *desc)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int polyval_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *pos;
u8 tmp[POLYVAL_BLOCK_SIZE];
int n;
if (dctx->bytes) {
n = min(srclen, dctx->bytes);
pos = dctx->buffer + dctx->bytes - 1;
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos-- ^= *src++;
if (!dctx->bytes)
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
}
while (srclen >= POLYVAL_BLOCK_SIZE) {
copy_and_reverse(tmp, src);
crypto_xor(dctx->buffer, tmp, POLYVAL_BLOCK_SIZE);
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
src += POLYVAL_BLOCK_SIZE;
srclen -= POLYVAL_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = POLYVAL_BLOCK_SIZE - srclen;
pos = dctx->buffer + POLYVAL_BLOCK_SIZE - 1;
while (srclen--)
*pos-- ^= *src++;
}
return 0;
}
static int polyval_final(struct shash_desc *desc, u8 *dst)
{
struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
const struct polyval_tfm_ctx *ctx = crypto_shash_ctx(desc->tfm);
if (dctx->bytes)
gf128mul_4k_lle(&dctx->buffer128, ctx->gf128);
copy_and_reverse(dst, dctx->buffer);
return 0;
}
static void polyval_exit_tfm(struct crypto_tfm *tfm)
{
struct polyval_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
gf128mul_free_4k(ctx->gf128);
}
static struct shash_alg polyval_alg = {
.digestsize = POLYVAL_DIGEST_SIZE,
.init = polyval_init,
.update = polyval_update,
.final = polyval_final,
.setkey = polyval_setkey,
.descsize = sizeof(struct polyval_desc_ctx),
.base = {
.cra_name = "polyval",
.cra_driver_name = "polyval-generic",
.cra_priority = 100,
.cra_blocksize = POLYVAL_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct polyval_tfm_ctx),
.cra_module = THIS_MODULE,
.cra_exit = polyval_exit_tfm,
},
};
static int __init polyval_mod_init(void)
{
return crypto_register_shash(&polyval_alg);
}
static void __exit polyval_mod_exit(void)
{
crypto_unregister_shash(&polyval_alg);
}
subsys_initcall(polyval_mod_init);
module_exit(polyval_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("POLYVAL hash function");
MODULE_ALIAS_CRYPTO("polyval");
MODULE_ALIAS_CRYPTO("polyval-generic");
| linux-master | crypto/polyval-generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ChaCha and XChaCha stream ciphers, including ChaCha20 (RFC7539)
*
* Copyright (C) 2015 Martin Willi
* Copyright (C) 2018 Google LLC
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <crypto/internal/chacha.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>
static int chacha_stream_xor(struct skcipher_request *req,
const struct chacha_ctx *ctx, const u8 *iv)
{
struct skcipher_walk walk;
u32 state[16];
int err;
err = skcipher_walk_virt(&walk, req, false);
chacha_init_generic(state, ctx->key, iv);
while (walk.nbytes > 0) {
unsigned int nbytes = walk.nbytes;
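		/*
		 * For non-final chunks, process only whole ChaCha blocks so
		 * that the keystream position stays block-aligned across
		 * skcipher_walk iterations.
		 */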
if (nbytes < walk.total)
nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE);
chacha_crypt_generic(state, walk.dst.virt.addr,
walk.src.virt.addr, nbytes, ctx->nrounds);
err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
}
return err;
}
static int crypto_chacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
return chacha_stream_xor(req, ctx, req->iv);
}
static int crypto_xchacha_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
struct chacha_ctx subctx;
u32 state[16];
u8 real_iv[16];
/* Compute the subkey given the original key and first 128 nonce bits */
chacha_init_generic(state, ctx->key, req->iv);
hchacha_block_generic(state, subctx.key, ctx->nrounds);
subctx.nrounds = ctx->nrounds;
/* Build the real IV */
memcpy(&real_iv[0], req->iv + 24, 8); /* stream position */
memcpy(&real_iv[8], req->iv + 16, 8); /* remaining 64 nonce bits */
/* Generate the stream and XOR it with the data */
return chacha_stream_xor(req, &subctx, real_iv);
}
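/*
 * Illustrative note (not part of the original file): layout of the
 * 32-byte XChaCha IV consumed by crypto_xchacha_crypt() above:
 *
 *	req->iv[ 0..15]	first 128 nonce bits, fed to HChaCha
 *	req->iv[16..23]	remaining 64 nonce bits
 *	req->iv[24..31]	64-bit stream position
 *
 * real_iv re-packs the stream position first and the remaining nonce
 * bits second, matching the regular ChaCha IV layout expected by
 * chacha_init_generic().
 */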
static struct skcipher_alg algs[] = {
{
.base.cra_name = "chacha20",
.base.cra_driver_name = "chacha20-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = CHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_chacha_crypt,
.decrypt = crypto_chacha_crypt,
}, {
.base.cra_name = "xchacha20",
.base.cra_driver_name = "xchacha20-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha20_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
}, {
.base.cra_name = "xchacha12",
.base.cra_driver_name = "xchacha12-generic",
.base.cra_priority = 100,
.base.cra_blocksize = 1,
.base.cra_ctxsize = sizeof(struct chacha_ctx),
.base.cra_module = THIS_MODULE,
.min_keysize = CHACHA_KEY_SIZE,
.max_keysize = CHACHA_KEY_SIZE,
.ivsize = XCHACHA_IV_SIZE,
.chunksize = CHACHA_BLOCK_SIZE,
.setkey = chacha12_setkey,
.encrypt = crypto_xchacha_crypt,
.decrypt = crypto_xchacha_crypt,
}
};
static int __init chacha_generic_mod_init(void)
{
return crypto_register_skciphers(algs, ARRAY_SIZE(algs));
}
static void __exit chacha_generic_mod_fini(void)
{
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs));
}
subsys_initcall(chacha_generic_mod_init);
module_exit(chacha_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <[email protected]>");
MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (generic)");
MODULE_ALIAS_CRYPTO("chacha20");
MODULE_ALIAS_CRYPTO("chacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha20");
MODULE_ALIAS_CRYPTO("xchacha20-generic");
MODULE_ALIAS_CRYPTO("xchacha12");
MODULE_ALIAS_CRYPTO("xchacha12-generic");
| linux-master | crypto/chacha_generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* HCTR2 length-preserving encryption mode
*
* Copyright 2021 Google LLC
*/
/*
* HCTR2 is a length-preserving encryption mode that is efficient on
* processors with instructions to accelerate AES and carryless
* multiplication, e.g. x86 processors with AES-NI and CLMUL, and ARM
* processors with the ARMv8 crypto extensions.
*
* For more details, see the paper: "Length-preserving encryption with HCTR2"
* (https://eprint.iacr.org/2021/1441.pdf)
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/polyval.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#define BLOCKCIPHER_BLOCK_SIZE 16
/*
* The specification allows variable-length tweaks, but Linux's crypto API
* currently only allows algorithms to support a single length. The "natural"
* tweak length for HCTR2 is 16, since that fits into one POLYVAL block for
* the best performance. But longer tweaks are useful for fscrypt, to avoid
* needing to derive per-file keys. So instead we use two blocks, or 32 bytes.
*/
#define TWEAK_SIZE 32
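/*
 * Example (illustrative sketch, not part of this file): the 32-byte
 * tweak is passed as the IV of an "hctr2(aes)" skcipher request. The
 * message must be at least one block; names and error handling are
 * abbreviated.
 */
static int __maybe_unused hctr2_usage_example(const u8 *key, /* 32 bytes */
					      struct scatterlist *src,
					      struct scatterlist *dst,
					      unsigned int len)
{
	u8 tweak[TWEAK_SIZE] = {};	/* caller-chosen tweak, e.g. per file */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("hctr2(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_skcipher_setkey(tfm, key, 32);	/* AES-256 */
	if (err)
		goto out_free_tfm;
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}
	skcipher_request_set_callback(req, 0, crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, tweak);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}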
struct hctr2_instance_ctx {
struct crypto_cipher_spawn blockcipher_spawn;
struct crypto_skcipher_spawn xctr_spawn;
struct crypto_shash_spawn polyval_spawn;
};
struct hctr2_tfm_ctx {
struct crypto_cipher *blockcipher;
struct crypto_skcipher *xctr;
struct crypto_shash *polyval;
u8 L[BLOCKCIPHER_BLOCK_SIZE];
int hashed_tweak_offset;
/*
* This struct is allocated with extra space for two exported hash
* states. Since the hash state size is not known at compile-time, we
* can't add these to the struct directly.
*
* hashed_tweaklen_divisible;
* hashed_tweaklen_remainder;
*/
};
struct hctr2_request_ctx {
u8 first_block[BLOCKCIPHER_BLOCK_SIZE];
u8 xctr_iv[BLOCKCIPHER_BLOCK_SIZE];
struct scatterlist *bulk_part_dst;
struct scatterlist *bulk_part_src;
struct scatterlist sg_src[2];
struct scatterlist sg_dst[2];
/*
* Sub-request sizes are unknown at compile-time, so they need to go
* after the members with known sizes.
*/
union {
struct shash_desc hash_desc;
struct skcipher_request xctr_req;
} u;
/*
* This struct is allocated with extra space for one exported hash
* state. Since the hash state size is not known at compile-time, we
* can't add it to the struct directly.
*
* hashed_tweak;
*/
};
static inline u8 *hctr2_hashed_tweaklen(const struct hctr2_tfm_ctx *tctx,
bool has_remainder)
{
u8 *p = (u8 *)tctx + sizeof(*tctx);
if (has_remainder) /* For messages not a multiple of block length */
p += crypto_shash_statesize(tctx->polyval);
return p;
}
static inline u8 *hctr2_hashed_tweak(const struct hctr2_tfm_ctx *tctx,
struct hctr2_request_ctx *rctx)
{
return (u8 *)rctx + tctx->hashed_tweak_offset;
}
/*
* The input data for each HCTR2 hash step begins with a 16-byte block that
* contains the tweak length and a flag that indicates whether the input is evenly
* divisible into blocks. Since this implementation only supports one tweak
* length, we precompute the two hash states resulting from hashing the two
 * possible values of this initial block. This reduces the amount of data
 * that needs to be hashed for each encryption/decryption by one block.
*
* These precomputed hashes are stored in hctr2_tfm_ctx.
*/
static int hctr2_hash_tweaklen(struct hctr2_tfm_ctx *tctx, bool has_remainder)
{
	SHASH_DESC_ON_STACK(shash, tctx->polyval);
__le64 tweak_length_block[2];
int err;
shash->tfm = tctx->polyval;
memset(tweak_length_block, 0, sizeof(tweak_length_block));
tweak_length_block[0] = cpu_to_le64(TWEAK_SIZE * 8 * 2 + 2 + has_remainder);
err = crypto_shash_init(shash);
if (err)
return err;
err = crypto_shash_update(shash, (u8 *)tweak_length_block,
POLYVAL_BLOCK_SIZE);
if (err)
return err;
return crypto_shash_export(shash, hctr2_hashed_tweaklen(tctx, has_remainder));
}
static int hctr2_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
u8 hbar[BLOCKCIPHER_BLOCK_SIZE];
int err;
crypto_cipher_clear_flags(tctx->blockcipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->blockcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tctx->blockcipher, key, keylen);
if (err)
return err;
crypto_skcipher_clear_flags(tctx->xctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->xctr,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->xctr, key, keylen);
if (err)
return err;
memset(hbar, 0, sizeof(hbar));
crypto_cipher_encrypt_one(tctx->blockcipher, hbar, hbar);
memset(tctx->L, 0, sizeof(tctx->L));
tctx->L[0] = 0x01;
crypto_cipher_encrypt_one(tctx->blockcipher, tctx->L, tctx->L);
crypto_shash_clear_flags(tctx->polyval, CRYPTO_TFM_REQ_MASK);
crypto_shash_set_flags(tctx->polyval, crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_shash_setkey(tctx->polyval, hbar, BLOCKCIPHER_BLOCK_SIZE);
if (err)
return err;
memzero_explicit(hbar, sizeof(hbar));
return hctr2_hash_tweaklen(tctx, true) ?: hctr2_hash_tweaklen(tctx, false);
}
static int hctr2_hash_tweak(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
struct shash_desc *hash_desc = &rctx->u.hash_desc;
int err;
bool has_remainder = req->cryptlen % POLYVAL_BLOCK_SIZE;
hash_desc->tfm = tctx->polyval;
err = crypto_shash_import(hash_desc, hctr2_hashed_tweaklen(tctx, has_remainder));
if (err)
return err;
err = crypto_shash_update(hash_desc, req->iv, TWEAK_SIZE);
if (err)
return err;
// Store the hashed tweak, since we need it when computing both
// H(T || N) and H(T || V).
return crypto_shash_export(hash_desc, hctr2_hashed_tweak(tctx, rctx));
}
static int hctr2_hash_message(struct skcipher_request *req,
struct scatterlist *sgl,
u8 digest[POLYVAL_DIGEST_SIZE])
{
static const u8 padding[BLOCKCIPHER_BLOCK_SIZE] = { 0x1 };
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
struct shash_desc *hash_desc = &rctx->u.hash_desc;
const unsigned int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
struct sg_mapping_iter miter;
unsigned int remainder = bulk_len % BLOCKCIPHER_BLOCK_SIZE;
int i;
int err = 0;
int n = 0;
sg_miter_start(&miter, sgl, sg_nents(sgl),
SG_MITER_FROM_SG | SG_MITER_ATOMIC);
for (i = 0; i < bulk_len; i += n) {
sg_miter_next(&miter);
n = min_t(unsigned int, miter.length, bulk_len - i);
err = crypto_shash_update(hash_desc, miter.addr, n);
if (err)
break;
}
sg_miter_stop(&miter);
if (err)
return err;
if (remainder) {
err = crypto_shash_update(hash_desc, padding,
BLOCKCIPHER_BLOCK_SIZE - remainder);
if (err)
return err;
}
return crypto_shash_final(hash_desc, digest);
}
static int hctr2_finish(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
u8 digest[POLYVAL_DIGEST_SIZE];
struct shash_desc *hash_desc = &rctx->u.hash_desc;
int err;
// U = UU ^ H(T || V)
// or M = MM ^ H(T || N)
hash_desc->tfm = tctx->polyval;
err = crypto_shash_import(hash_desc, hctr2_hashed_tweak(tctx, rctx));
if (err)
return err;
err = hctr2_hash_message(req, rctx->bulk_part_dst, digest);
if (err)
return err;
crypto_xor(rctx->first_block, digest, BLOCKCIPHER_BLOCK_SIZE);
// Copy U (or M) into dst scatterlist
scatterwalk_map_and_copy(rctx->first_block, req->dst,
0, BLOCKCIPHER_BLOCK_SIZE, 1);
return 0;
}
static void hctr2_xctr_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err)
err = hctr2_finish(req);
skcipher_request_complete(req, err);
}
static int hctr2_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct hctr2_request_ctx *rctx = skcipher_request_ctx(req);
u8 digest[POLYVAL_DIGEST_SIZE];
int bulk_len = req->cryptlen - BLOCKCIPHER_BLOCK_SIZE;
int err;
// Requests must be at least one block
if (req->cryptlen < BLOCKCIPHER_BLOCK_SIZE)
return -EINVAL;
// Copy M (or U) into a temporary buffer
scatterwalk_map_and_copy(rctx->first_block, req->src,
0, BLOCKCIPHER_BLOCK_SIZE, 0);
// Create scatterlists for N and V
rctx->bulk_part_src = scatterwalk_ffwd(rctx->sg_src, req->src,
BLOCKCIPHER_BLOCK_SIZE);
rctx->bulk_part_dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
BLOCKCIPHER_BLOCK_SIZE);
// MM = M ^ H(T || N)
// or UU = U ^ H(T || V)
err = hctr2_hash_tweak(req);
if (err)
return err;
err = hctr2_hash_message(req, rctx->bulk_part_src, digest);
if (err)
return err;
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
// UU = E(MM)
// or MM = D(UU)
if (enc)
crypto_cipher_encrypt_one(tctx->blockcipher, rctx->first_block,
digest);
else
crypto_cipher_decrypt_one(tctx->blockcipher, rctx->first_block,
digest);
// S = MM ^ UU ^ L
crypto_xor(digest, rctx->first_block, BLOCKCIPHER_BLOCK_SIZE);
crypto_xor_cpy(rctx->xctr_iv, digest, tctx->L, BLOCKCIPHER_BLOCK_SIZE);
// V = XCTR(S, N)
// or N = XCTR(S, V)
skcipher_request_set_tfm(&rctx->u.xctr_req, tctx->xctr);
skcipher_request_set_crypt(&rctx->u.xctr_req, rctx->bulk_part_src,
rctx->bulk_part_dst, bulk_len,
rctx->xctr_iv);
skcipher_request_set_callback(&rctx->u.xctr_req,
req->base.flags,
hctr2_xctr_done, req);
return crypto_skcipher_encrypt(&rctx->u.xctr_req) ?:
hctr2_finish(req);
}
static int hctr2_encrypt(struct skcipher_request *req)
{
return hctr2_crypt(req, true);
}
static int hctr2_decrypt(struct skcipher_request *req)
{
return hctr2_crypt(req, false);
}
static int hctr2_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *xctr;
struct crypto_cipher *blockcipher;
struct crypto_shash *polyval;
unsigned int subreq_size;
int err;
xctr = crypto_spawn_skcipher(&ictx->xctr_spawn);
if (IS_ERR(xctr))
return PTR_ERR(xctr);
blockcipher = crypto_spawn_cipher(&ictx->blockcipher_spawn);
if (IS_ERR(blockcipher)) {
err = PTR_ERR(blockcipher);
goto err_free_xctr;
}
polyval = crypto_spawn_shash(&ictx->polyval_spawn);
if (IS_ERR(polyval)) {
err = PTR_ERR(polyval);
goto err_free_blockcipher;
}
tctx->xctr = xctr;
tctx->blockcipher = blockcipher;
tctx->polyval = polyval;
BUILD_BUG_ON(offsetofend(struct hctr2_request_ctx, u) !=
sizeof(struct hctr2_request_ctx));
subreq_size = max(sizeof_field(struct hctr2_request_ctx, u.hash_desc) +
crypto_shash_descsize(polyval),
sizeof_field(struct hctr2_request_ctx, u.xctr_req) +
crypto_skcipher_reqsize(xctr));
tctx->hashed_tweak_offset = offsetof(struct hctr2_request_ctx, u) +
subreq_size;
crypto_skcipher_set_reqsize(tfm, tctx->hashed_tweak_offset +
crypto_shash_statesize(polyval));
return 0;
err_free_blockcipher:
crypto_free_cipher(blockcipher);
err_free_xctr:
crypto_free_skcipher(xctr);
return err;
}
static void hctr2_exit_tfm(struct crypto_skcipher *tfm)
{
struct hctr2_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
crypto_free_cipher(tctx->blockcipher);
crypto_free_skcipher(tctx->xctr);
crypto_free_shash(tctx->polyval);
}
static void hctr2_free_instance(struct skcipher_instance *inst)
{
struct hctr2_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_cipher(&ictx->blockcipher_spawn);
crypto_drop_skcipher(&ictx->xctr_spawn);
crypto_drop_shash(&ictx->polyval_spawn);
kfree(inst);
}
static int hctr2_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *xctr_name,
const char *polyval_name)
{
u32 mask;
struct skcipher_instance *inst;
struct hctr2_instance_ctx *ictx;
struct skcipher_alg *xctr_alg;
struct crypto_alg *blockcipher_alg;
struct shash_alg *polyval_alg;
char blockcipher_name[CRYPTO_MAX_ALG_NAME];
int len;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ictx = skcipher_instance_ctx(inst);
/* Stream cipher, xctr(block_cipher) */
err = crypto_grab_skcipher(&ictx->xctr_spawn,
skcipher_crypto_instance(inst),
xctr_name, 0, mask);
if (err)
goto err_free_inst;
xctr_alg = crypto_spawn_skcipher_alg(&ictx->xctr_spawn);
err = -EINVAL;
if (strncmp(xctr_alg->base.cra_name, "xctr(", 5))
goto err_free_inst;
len = strscpy(blockcipher_name, xctr_alg->base.cra_name + 5,
sizeof(blockcipher_name));
if (len < 1)
goto err_free_inst;
if (blockcipher_name[len - 1] != ')')
goto err_free_inst;
blockcipher_name[len - 1] = 0;
/* Block cipher, e.g. "aes" */
err = crypto_grab_cipher(&ictx->blockcipher_spawn,
skcipher_crypto_instance(inst),
blockcipher_name, 0, mask);
if (err)
goto err_free_inst;
blockcipher_alg = crypto_spawn_cipher_alg(&ictx->blockcipher_spawn);
/* Require blocksize of 16 bytes */
err = -EINVAL;
if (blockcipher_alg->cra_blocksize != BLOCKCIPHER_BLOCK_SIZE)
goto err_free_inst;
/* Polyval ε-∆U hash function */
err = crypto_grab_shash(&ictx->polyval_spawn,
skcipher_crypto_instance(inst),
polyval_name, 0, mask);
if (err)
goto err_free_inst;
polyval_alg = crypto_spawn_shash_alg(&ictx->polyval_spawn);
/* Ensure Polyval is being used */
err = -EINVAL;
if (strcmp(polyval_alg->base.cra_name, "polyval") != 0)
goto err_free_inst;
/* Instance fields */
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, "hctr2(%s)",
blockcipher_alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"hctr2_base(%s,%s)",
xctr_alg->base.cra_driver_name,
polyval_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_blocksize = BLOCKCIPHER_BLOCK_SIZE;
inst->alg.base.cra_ctxsize = sizeof(struct hctr2_tfm_ctx) +
polyval_alg->statesize * 2;
inst->alg.base.cra_alignmask = xctr_alg->base.cra_alignmask |
polyval_alg->base.cra_alignmask;
/*
* The hash function is called twice, so it is weighted higher than the
* xctr and blockcipher.
*/
inst->alg.base.cra_priority = (2 * xctr_alg->base.cra_priority +
4 * polyval_alg->base.cra_priority +
blockcipher_alg->cra_priority) / 7;
inst->alg.setkey = hctr2_setkey;
inst->alg.encrypt = hctr2_encrypt;
inst->alg.decrypt = hctr2_decrypt;
inst->alg.init = hctr2_init_tfm;
inst->alg.exit = hctr2_exit_tfm;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(xctr_alg);
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(xctr_alg);
inst->alg.ivsize = TWEAK_SIZE;
inst->free = hctr2_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
hctr2_free_instance(inst);
}
return err;
}
static int hctr2_create_base(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *xctr_name;
const char *polyval_name;
xctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(xctr_name))
return PTR_ERR(xctr_name);
polyval_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(polyval_name))
return PTR_ERR(polyval_name);
return hctr2_create_common(tmpl, tb, xctr_name, polyval_name);
}
static int hctr2_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *blockcipher_name;
char xctr_name[CRYPTO_MAX_ALG_NAME];
blockcipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(blockcipher_name))
return PTR_ERR(blockcipher_name);
if (snprintf(xctr_name, CRYPTO_MAX_ALG_NAME, "xctr(%s)",
blockcipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return hctr2_create_common(tmpl, tb, xctr_name, "polyval");
}
static struct crypto_template hctr2_tmpls[] = {
{
/* hctr2_base(xctr_name, polyval_name) */
.name = "hctr2_base",
.create = hctr2_create_base,
.module = THIS_MODULE,
}, {
/* hctr2(blockcipher_name) */
.name = "hctr2",
.create = hctr2_create,
.module = THIS_MODULE,
}
};
static int __init hctr2_module_init(void)
{
return crypto_register_templates(hctr2_tmpls, ARRAY_SIZE(hctr2_tmpls));
}
static void __exit hctr2_module_exit(void)
{
crypto_unregister_templates(hctr2_tmpls,
ARRAY_SIZE(hctr2_tmpls));
}
subsys_initcall(hctr2_module_init);
module_exit(hctr2_module_exit);
MODULE_DESCRIPTION("HCTR2 length-preserving encryption mode");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("hctr2");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
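/*
 * Illustrative usage sketch (not part of the upstream file): exercising
 * the "hctr2(aes)" instance constructed by the template above.  The
 * 32-byte key, the 32-byte IV (matching TWEAK_SIZE upstream) and the
 * 64-byte buffer are assumptions of this example; the message must be
 * at least one 16-byte block.
 */
#include <crypto/skcipher.h>
#include <linux/random.h>
#include <linux/scatterlist.h>

static int hctr2_example_encrypt(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	u8 key[32], iv[32] = {}, buf[64] = {};
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("hctr2(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	get_random_bytes(key, sizeof(key));
	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* encrypt buf in place, waiting synchronously for completion */
	sg_init_one(&sg, buf, sizeof(buf));
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, sizeof(buf), iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}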
| linux-master | crypto/hctr2.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/lzo.h>
#include <crypto/internal/scompress.h>
struct lzo_ctx {
void *lzo_comp_mem;
};
static void *lzo_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
ctx = kvmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
static int lzo_init(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->lzo_comp_mem = lzo_alloc_ctx(NULL);
if (IS_ERR(ctx->lzo_comp_mem))
return -ENOMEM;
return 0;
}
static void lzo_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
kvfree(ctx);
}
static void lzo_exit(struct crypto_tfm *tfm)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
lzo_free_ctx(NULL, ctx->lzo_comp_mem);
}
static int __lzo_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
int err;
err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx);
if (err != LZO_E_OK)
return -EINVAL;
*dlen = tmp_len;
return 0;
}
static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
return __lzo_compress(src, slen, dst, dlen, ctx->lzo_comp_mem);
}
static int lzo_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lzo_compress(src, slen, dst, dlen, ctx);
}
static int __lzo_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
int err;
size_t tmp_len = *dlen; /* size_t(ulong) <-> uint on 64 bit */
err = lzo1x_decompress_safe(src, slen, dst, &tmp_len);
if (err != LZO_E_OK)
return -EINVAL;
*dlen = tmp_len;
return 0;
}
static int lzo_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
return __lzo_decompress(src, slen, dst, dlen);
}
static int lzo_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lzo_decompress(src, slen, dst, dlen);
}
static struct crypto_alg alg = {
.cra_name = "lzo",
.cra_driver_name = "lzo-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lzo_ctx),
.cra_module = THIS_MODULE,
.cra_init = lzo_init,
.cra_exit = lzo_exit,
.cra_u = { .compress = {
.coa_compress = lzo_compress,
.coa_decompress = lzo_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = lzo_alloc_ctx,
.free_ctx = lzo_free_ctx,
.compress = lzo_scompress,
.decompress = lzo_sdecompress,
.base = {
.cra_name = "lzo",
.cra_driver_name = "lzo-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init lzo_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg);
return ret;
}
return ret;
}
static void __exit lzo_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
subsys_initcall(lzo_mod_init);
module_exit(lzo_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO Compression Algorithm");
MODULE_ALIAS_CRYPTO("lzo");
| linux-master | crypto/lzo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <crypto/curve25519.h>
#include <crypto/internal/kpp.h>
#include <crypto/kpp.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf,
unsigned int len)
{
u8 *secret = kpp_tfm_ctx(tfm);
if (!len)
curve25519_generate_secret(secret);
else if (len == CURVE25519_KEY_SIZE &&
crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE))
memcpy(secret, buf, CURVE25519_KEY_SIZE);
else
return -EINVAL;
return 0;
}
static int curve25519_compute_value(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
const u8 *secret = kpp_tfm_ctx(tfm);
u8 public_key[CURVE25519_KEY_SIZE];
u8 buf[CURVE25519_KEY_SIZE];
int copied, nbytes;
u8 const *bp;
if (req->src) {
copied = sg_copy_to_buffer(req->src,
sg_nents_for_len(req->src,
CURVE25519_KEY_SIZE),
public_key, CURVE25519_KEY_SIZE);
if (copied != CURVE25519_KEY_SIZE)
return -EINVAL;
bp = public_key;
} else {
bp = curve25519_base_point;
}
curve25519_generic(buf, secret, bp);
/* might want less than we've got */
nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len);
copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst,
nbytes),
buf, nbytes);
if (copied != nbytes)
return -EINVAL;
return 0;
}
static unsigned int curve25519_max_size(struct crypto_kpp *tfm)
{
return CURVE25519_KEY_SIZE;
}
static struct kpp_alg curve25519_alg = {
.base.cra_name = "curve25519",
.base.cra_driver_name = "curve25519-generic",
.base.cra_priority = 100,
.base.cra_module = THIS_MODULE,
.base.cra_ctxsize = CURVE25519_KEY_SIZE,
.set_secret = curve25519_set_secret,
.generate_public_key = curve25519_compute_value,
.compute_shared_secret = curve25519_compute_value,
.max_size = curve25519_max_size,
};
static int __init curve25519_init(void)
{
return crypto_register_kpp(&curve25519_alg);
}
static void __exit curve25519_exit(void)
{
crypto_unregister_kpp(&curve25519_alg);
}
subsys_initcall(curve25519_init);
module_exit(curve25519_exit);
MODULE_ALIAS_CRYPTO("curve25519");
MODULE_ALIAS_CRYPTO("curve25519-generic");
MODULE_LICENSE("GPL");
| linux-master | crypto/curve25519-generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Compression operations.
*
* Copyright (c) 2002 James Morris <[email protected]>
*/
#include <linux/crypto.h>
#include "internal.h"
int crypto_comp_compress(struct crypto_comp *comp,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
struct crypto_tfm *tfm = crypto_comp_tfm(comp);
return tfm->__crt_alg->cra_compress.coa_compress(tfm, src, slen, dst,
dlen);
}
EXPORT_SYMBOL_GPL(crypto_comp_compress);
int crypto_comp_decompress(struct crypto_comp *comp,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
struct crypto_tfm *tfm = crypto_comp_tfm(comp);
return tfm->__crt_alg->cra_compress.coa_decompress(tfm, src, slen, dst,
dlen);
}
EXPORT_SYMBOL_GPL(crypto_comp_decompress);
| linux-master | crypto/compress.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* seqiv: Sequence Number IV Generator
*
* This generator generates an IV based on a sequence number by xoring it
* with a salt. This algorithm is mainly useful for CTR and similar modes.
*
* Copyright (c) 2007 Herbert Xu <[email protected]>
*/
#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
{
struct aead_request *subreq = aead_request_ctx(req);
struct crypto_aead *geniv;
if (err == -EINPROGRESS || err == -EBUSY)
return;
if (err)
goto out;
geniv = crypto_aead_reqtfm(req);
memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
out:
kfree_sensitive(subreq->iv);
}
static void seqiv_aead_encrypt_complete(void *data, int err)
{
struct aead_request *req = data;
seqiv_aead_encrypt_complete2(req, err);
aead_request_complete(req, err);
}
static int seqiv_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
void *data;
u8 *info;
unsigned int ivsize = 8;
int err;
if (req->cryptlen < ivsize)
return -EINVAL;
aead_request_set_tfm(subreq, ctx->child);
compl = req->base.complete;
data = req->base.data;
info = req->iv;
if (req->src != req->dst) {
SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
req->assoclen + req->cryptlen,
NULL);
err = crypto_skcipher_encrypt(nreq);
if (err)
return err;
}
if (unlikely(!IS_ALIGNED((unsigned long)info,
crypto_aead_alignmask(geniv) + 1))) {
info = kmemdup(req->iv, ivsize, req->base.flags &
CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
GFP_ATOMIC);
if (!info)
return -ENOMEM;
compl = seqiv_aead_encrypt_complete;
data = req;
}
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen - ivsize, info);
aead_request_set_ad(subreq, req->assoclen + ivsize);
crypto_xor(info, ctx->salt, ivsize);
scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
err = crypto_aead_encrypt(subreq);
if (unlikely(info != req->iv))
seqiv_aead_encrypt_complete2(req, err);
return err;
}
static int seqiv_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *geniv = crypto_aead_reqtfm(req);
struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
struct aead_request *subreq = aead_request_ctx(req);
crypto_completion_t compl;
void *data;
unsigned int ivsize = 8;
if (req->cryptlen < ivsize + crypto_aead_authsize(geniv))
return -EINVAL;
aead_request_set_tfm(subreq, ctx->child);
compl = req->base.complete;
data = req->base.data;
aead_request_set_callback(subreq, req->base.flags, compl, data);
aead_request_set_crypt(subreq, req->src, req->dst,
req->cryptlen - ivsize, req->iv);
aead_request_set_ad(subreq, req->assoclen + ivsize);
scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
return crypto_aead_decrypt(subreq);
}
static int seqiv_aead_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct aead_instance *inst;
int err;
inst = aead_geniv_alloc(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
err = -EINVAL;
if (inst->alg.ivsize != sizeof(u64))
goto free_inst;
inst->alg.encrypt = seqiv_aead_encrypt;
inst->alg.decrypt = seqiv_aead_decrypt;
inst->alg.init = aead_init_geniv;
inst->alg.exit = aead_exit_geniv;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.base.cra_ctxsize += inst->alg.ivsize;
err = aead_register_instance(tmpl, inst);
if (err) {
free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template seqiv_tmpl = {
.name = "seqiv",
.create = seqiv_aead_create,
.module = THIS_MODULE,
};
static int __init seqiv_module_init(void)
{
return crypto_register_template(&seqiv_tmpl);
}
static void __exit seqiv_module_exit(void)
{
crypto_unregister_template(&seqiv_tmpl);
}
subsys_initcall(seqiv_module_init);
module_exit(seqiv_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Sequence Number IV Generator");
MODULE_ALIAS_CRYPTO("seqiv");
| linux-master | crypto/seqiv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* pcrypt - Parallel crypto wrapper.
*
* Copyright (C) 2009 secunet Security Networks AG
* Copyright (C) 2009 Steffen Klassert <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/cpu.h>
#include <crypto/pcrypt.h>
static struct padata_instance *pencrypt;
static struct padata_instance *pdecrypt;
static struct kset *pcrypt_kset;
struct pcrypt_instance_ctx {
struct crypto_aead_spawn spawn;
struct padata_shell *psenc;
struct padata_shell *psdec;
atomic_t tfm_count;
};
struct pcrypt_aead_ctx {
struct crypto_aead *child;
unsigned int cb_cpu;
};
static inline struct pcrypt_instance_ctx *pcrypt_tfm_ictx(
struct crypto_aead *tfm)
{
return aead_instance_ctx(aead_alg_instance(tfm));
}
static int pcrypt_aead_setkey(struct crypto_aead *parent,
const u8 *key, unsigned int keylen)
{
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
return crypto_aead_setkey(ctx->child, key, keylen);
}
static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
return crypto_aead_setauthsize(ctx->child, authsize);
}
static void pcrypt_aead_serial(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
aead_request_complete(req->base.data, padata->info);
}
static void pcrypt_aead_done(void *data, int err)
{
struct aead_request *req = data;
struct pcrypt_request *preq = aead_request_ctx(req);
struct padata_priv *padata = pcrypt_request_padata(preq);
padata->info = err;
padata_do_serial(padata);
}
static void pcrypt_aead_enc(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
int ret;
ret = crypto_aead_encrypt(req);
if (ret == -EINPROGRESS)
return;
padata->info = ret;
padata_do_serial(padata);
}
static int pcrypt_aead_encrypt(struct aead_request *req)
{
int err;
struct pcrypt_request *preq = aead_request_ctx(req);
struct aead_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
struct pcrypt_instance_ctx *ictx;
ictx = pcrypt_tfm_ictx(aead);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_enc;
padata->serial = pcrypt_aead_serial;
aead_request_set_tfm(creq, ctx->child);
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_ad(creq, req->assoclen);
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
return err;
}
static void pcrypt_aead_dec(struct padata_priv *padata)
{
struct pcrypt_request *preq = pcrypt_padata_request(padata);
struct aead_request *req = pcrypt_request_ctx(preq);
int ret;
ret = crypto_aead_decrypt(req);
if (ret == -EINPROGRESS)
return;
padata->info = ret;
padata_do_serial(padata);
}
static int pcrypt_aead_decrypt(struct aead_request *req)
{
int err;
struct pcrypt_request *preq = aead_request_ctx(req);
struct aead_request *creq = pcrypt_request_ctx(preq);
struct padata_priv *padata = pcrypt_request_padata(preq);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
u32 flags = aead_request_flags(req);
struct pcrypt_instance_ctx *ictx;
ictx = pcrypt_tfm_ictx(aead);
memset(padata, 0, sizeof(struct padata_priv));
padata->parallel = pcrypt_aead_dec;
padata->serial = pcrypt_aead_serial;
aead_request_set_tfm(creq, ctx->child);
aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
pcrypt_aead_done, req);
aead_request_set_crypt(creq, req->src, req->dst,
req->cryptlen, req->iv);
aead_request_set_ad(creq, req->assoclen);
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
return err;
}
static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
{
int cpu, cpu_index;
struct aead_instance *inst = aead_alg_instance(tfm);
struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *cipher;
cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
cpumask_weight(cpu_online_mask);
ctx->cb_cpu = cpumask_first(cpu_online_mask);
for (cpu = 0; cpu < cpu_index; cpu++)
ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
cipher = crypto_spawn_aead(&ictx->spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
sizeof(struct aead_request) +
crypto_aead_reqsize(cipher));
return 0;
}
static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
{
struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
static void pcrypt_free(struct aead_instance *inst)
{
struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_aead(&ctx->spawn);
padata_free_shell(ctx->psdec);
padata_free_shell(ctx->psenc);
kfree(inst);
}
static int pcrypt_init_instance(struct crypto_instance *inst,
struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
inst->alg.cra_priority = alg->cra_priority + 100;
inst->alg.cra_blocksize = alg->cra_blocksize;
inst->alg.cra_alignmask = alg->cra_alignmask;
return 0;
}
static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
struct crypto_attr_type *algt)
{
struct pcrypt_instance_ctx *ctx;
struct aead_instance *inst;
struct aead_alg *alg;
u32 mask = crypto_algt_inherited_mask(algt);
int err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
err = -ENOMEM;
ctx = aead_instance_ctx(inst);
ctx->psenc = padata_alloc_shell(pencrypt);
if (!ctx->psenc)
goto err_free_inst;
ctx->psdec = padata_alloc_shell(pdecrypt);
if (!ctx->psdec)
goto err_free_inst;
err = crypto_grab_aead(&ctx->spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(&ctx->spawn);
err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
if (err)
goto err_free_inst;
inst->alg.base.cra_flags |= CRYPTO_ALG_ASYNC;
inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
inst->alg.init = pcrypt_aead_init_tfm;
inst->alg.exit = pcrypt_aead_exit_tfm;
inst->alg.setkey = pcrypt_aead_setkey;
inst->alg.setauthsize = pcrypt_aead_setauthsize;
inst->alg.encrypt = pcrypt_aead_encrypt;
inst->alg.decrypt = pcrypt_aead_decrypt;
inst->free = pcrypt_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
pcrypt_free(inst);
}
return err;
}
static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
case CRYPTO_ALG_TYPE_AEAD:
return pcrypt_create_aead(tmpl, tb, algt);
}
return -EINVAL;
}
static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
{
int ret;
pinst->kobj.kset = pcrypt_kset;
ret = kobject_add(&pinst->kobj, NULL, "%s", name);
if (!ret)
kobject_uevent(&pinst->kobj, KOBJ_ADD);
return ret;
}
static int pcrypt_init_padata(struct padata_instance **pinst, const char *name)
{
int ret = -ENOMEM;
*pinst = padata_alloc(name);
if (!*pinst)
return ret;
ret = pcrypt_sysfs_add(*pinst, name);
if (ret)
padata_free(*pinst);
return ret;
}
static struct crypto_template pcrypt_tmpl = {
.name = "pcrypt",
.create = pcrypt_create,
.module = THIS_MODULE,
};
static int __init pcrypt_init(void)
{
int err = -ENOMEM;
pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
if (!pcrypt_kset)
goto err;
err = pcrypt_init_padata(&pencrypt, "pencrypt");
if (err)
goto err_unreg_kset;
err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
if (err)
goto err_deinit_pencrypt;
return crypto_register_template(&pcrypt_tmpl);
err_deinit_pencrypt:
padata_free(pencrypt);
err_unreg_kset:
kset_unregister(pcrypt_kset);
err:
return err;
}
static void __exit pcrypt_exit(void)
{
crypto_unregister_template(&pcrypt_tmpl);
padata_free(pencrypt);
padata_free(pdecrypt);
kset_unregister(pcrypt_kset);
}
subsys_initcall(pcrypt_init);
module_exit(pcrypt_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_DESCRIPTION("Parallel crypto wrapper");
MODULE_ALIAS_CRYPTO("pcrypt");
| linux-master | crypto/pcrypt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Deflate algorithm (RFC 1951), implemented here primarily for use
* by IPCOMP (RFC 3173 & RFC 2394).
*
* Copyright (c) 2003 James Morris <[email protected]>
*
* FIXME: deflate transforms will require up to a total of about 436k of kernel
* memory on i386 (390k for compression, the rest for decompression), as the
* current zlib kernel code uses a worst case pre-allocation system by default.
* This needs to be fixed so that the amount of memory required is properly
* related to the winbits and memlevel parameters.
*
* The default winbits of 11 should suit most packets, and it may be something
* to configure on a per-tfm basis in the future.
*
* Currently, compression history is not maintained between tfm calls, as
* it is not needed for IPCOMP and keeps the code simpler. It can be
* implemented if someone wants it.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/zlib.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <crypto/internal/scompress.h>
#define DEFLATE_DEF_LEVEL Z_DEFAULT_COMPRESSION
#define DEFLATE_DEF_WINBITS 11
#define DEFLATE_DEF_MEMLEVEL MAX_MEM_LEVEL
struct deflate_ctx {
struct z_stream_s comp_stream;
struct z_stream_s decomp_stream;
};
static int deflate_comp_init(struct deflate_ctx *ctx, int format)
{
int ret = 0;
struct z_stream_s *stream = &ctx->comp_stream;
stream->workspace = vzalloc(zlib_deflate_workspacesize(
MAX_WBITS, MAX_MEM_LEVEL));
if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
if (format)
ret = zlib_deflateInit(stream, 3);
else
ret = zlib_deflateInit2(stream, DEFLATE_DEF_LEVEL, Z_DEFLATED,
-DEFLATE_DEF_WINBITS,
DEFLATE_DEF_MEMLEVEL,
Z_DEFAULT_STRATEGY);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(stream->workspace);
goto out;
}
static int deflate_decomp_init(struct deflate_ctx *ctx, int format)
{
int ret = 0;
struct z_stream_s *stream = &ctx->decomp_stream;
stream->workspace = vzalloc(zlib_inflate_workspacesize());
if (!stream->workspace) {
ret = -ENOMEM;
goto out;
}
if (format)
ret = zlib_inflateInit(stream);
else
ret = zlib_inflateInit2(stream, -DEFLATE_DEF_WINBITS);
if (ret != Z_OK) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(stream->workspace);
goto out;
}
static void deflate_comp_exit(struct deflate_ctx *ctx)
{
zlib_deflateEnd(&ctx->comp_stream);
vfree(ctx->comp_stream.workspace);
}
static void deflate_decomp_exit(struct deflate_ctx *ctx)
{
zlib_inflateEnd(&ctx->decomp_stream);
vfree(ctx->decomp_stream.workspace);
}
static int __deflate_init(void *ctx, int format)
{
int ret;
ret = deflate_comp_init(ctx, format);
if (ret)
goto out;
ret = deflate_decomp_init(ctx, format);
if (ret)
deflate_comp_exit(ctx);
out:
return ret;
}
static void *gen_deflate_alloc_ctx(struct crypto_scomp *tfm, int format)
{
struct deflate_ctx *ctx;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ret = __deflate_init(ctx, format);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
return ctx;
}
static void *deflate_alloc_ctx(struct crypto_scomp *tfm)
{
return gen_deflate_alloc_ctx(tfm, 0);
}
static void *zlib_deflate_alloc_ctx(struct crypto_scomp *tfm)
{
return gen_deflate_alloc_ctx(tfm, 1);
}
static int deflate_init(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
return __deflate_init(ctx, 0);
}
static void __deflate_exit(void *ctx)
{
deflate_comp_exit(ctx);
deflate_decomp_exit(ctx);
}
static void deflate_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
__deflate_exit(ctx);
kfree_sensitive(ctx);
}
static void deflate_exit(struct crypto_tfm *tfm)
{
struct deflate_ctx *ctx = crypto_tfm_ctx(tfm);
__deflate_exit(ctx);
}
static int __deflate_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->comp_stream;
ret = zlib_deflateReset(stream);
if (ret != Z_OK) {
ret = -EINVAL;
goto out;
}
stream->next_in = (u8 *)src;
stream->avail_in = slen;
stream->next_out = (u8 *)dst;
stream->avail_out = *dlen;
ret = zlib_deflate(stream, Z_FINISH);
if (ret != Z_STREAM_END) {
ret = -EINVAL;
goto out;
}
ret = 0;
*dlen = stream->total_out;
out:
return ret;
}
static int deflate_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
return __deflate_compress(src, slen, dst, dlen, dctx);
}
static int deflate_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __deflate_compress(src, slen, dst, dlen, ctx);
}
static int __deflate_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int ret = 0;
struct deflate_ctx *dctx = ctx;
struct z_stream_s *stream = &dctx->decomp_stream;
ret = zlib_inflateReset(stream);
if (ret != Z_OK) {
ret = -EINVAL;
goto out;
}
stream->next_in = (u8 *)src;
stream->avail_in = slen;
stream->next_out = (u8 *)dst;
stream->avail_out = *dlen;
ret = zlib_inflate(stream, Z_SYNC_FLUSH);
/*
* Work around a bug in zlib, which sometimes wants to taste an extra
* byte when being used in the (undocumented) raw deflate mode.
* (From USAGI).
*/
if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
u8 zerostuff = 0;
stream->next_in = &zerostuff;
stream->avail_in = 1;
ret = zlib_inflate(stream, Z_FINISH);
}
if (ret != Z_STREAM_END) {
ret = -EINVAL;
goto out;
}
ret = 0;
*dlen = stream->total_out;
out:
return ret;
}
static int deflate_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct deflate_ctx *dctx = crypto_tfm_ctx(tfm);
return __deflate_decompress(src, slen, dst, dlen, dctx);
}
static int deflate_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __deflate_decompress(src, slen, dst, dlen, ctx);
}
static struct crypto_alg alg = {
.cra_name = "deflate",
.cra_driver_name = "deflate-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct deflate_ctx),
.cra_module = THIS_MODULE,
.cra_init = deflate_init,
.cra_exit = deflate_exit,
.cra_u = { .compress = {
.coa_compress = deflate_compress,
.coa_decompress = deflate_decompress } }
};
static struct scomp_alg scomp[] = { {
.alloc_ctx = deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
.compress = deflate_scompress,
.decompress = deflate_sdecompress,
.base = {
.cra_name = "deflate",
.cra_driver_name = "deflate-scomp",
.cra_module = THIS_MODULE,
}
}, {
.alloc_ctx = zlib_deflate_alloc_ctx,
.free_ctx = deflate_free_ctx,
.compress = deflate_scompress,
.decompress = deflate_sdecompress,
.base = {
.cra_name = "zlib-deflate",
.cra_driver_name = "zlib-deflate-scomp",
.cra_module = THIS_MODULE,
}
} };
static int __init deflate_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomps(scomp, ARRAY_SIZE(scomp));
if (ret) {
crypto_unregister_alg(&alg);
return ret;
}
return ret;
}
static void __exit deflate_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomps(scomp, ARRAY_SIZE(scomp));
}
subsys_initcall(deflate_mod_init);
module_exit(deflate_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
MODULE_AUTHOR("James Morris <[email protected]>");
MODULE_ALIAS_CRYPTO("deflate");
| linux-master | crypto/deflate.c |
/*
* Cryptographic API.
*
* T10 Data Integrity Field CRC16 Crypto Transform
*
* Copyright (c) 2007 Oracle Corporation. All rights reserved.
* Written by Martin K. Petersen <[email protected]>
* Copyright (C) 2013 Intel Corporation
* Author: Tim Chen <[email protected]>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/crc-t10dif.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/kernel.h>
struct chksum_desc_ctx {
__u16 crc;
};
/*
* Steps through buffer one byte at a time, calculates reflected
* crc using table.
*/
static int chksum_init(struct shash_desc *desc)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = 0;
return 0;
}
static int chksum_update(struct shash_desc *desc, const u8 *data,
unsigned int length)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
ctx->crc = crc_t10dif_generic(ctx->crc, data, length);
return 0;
}
static int chksum_final(struct shash_desc *desc, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
*(__u16 *)out = ctx->crc;
return 0;
}
static int __chksum_finup(__u16 crc, const u8 *data, unsigned int len, u8 *out)
{
*(__u16 *)out = crc_t10dif_generic(crc, data, len);
return 0;
}
static int chksum_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
return __chksum_finup(ctx->crc, data, len, out);
}
static int chksum_digest(struct shash_desc *desc, const u8 *data,
unsigned int length, u8 *out)
{
return __chksum_finup(0, data, length, out);
}
static struct shash_alg alg = {
.digestsize = CRC_T10DIF_DIGEST_SIZE,
.init = chksum_init,
.update = chksum_update,
.final = chksum_final,
.finup = chksum_finup,
.digest = chksum_digest,
.descsize = sizeof(struct chksum_desc_ctx),
.base = {
.cra_name = "crct10dif",
.cra_driver_name = "crct10dif-generic",
.cra_priority = 100,
.cra_blocksize = CRC_T10DIF_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init crct10dif_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit crct10dif_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(crct10dif_mod_init);
module_exit(crct10dif_mod_fini);
MODULE_AUTHOR("Tim Chen <[email protected]>");
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
MODULE_ALIAS_CRYPTO("crct10dif-generic");
| linux-master | crypto/crct10dif_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (c) 2016, Intel Corporation
* Authors: Salvatore Benedetto <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/string.h>
#include <crypto/ecdh.h>
#include <crypto/kpp.h>
#define ECDH_KPP_SECRET_MIN_SIZE (sizeof(struct kpp_secret) + sizeof(short))
static inline u8 *ecdh_pack_data(void *dst, const void *src, size_t sz)
{
memcpy(dst, src, sz);
return dst + sz;
}
static inline const u8 *ecdh_unpack_data(void *dst, const void *src, size_t sz)
{
memcpy(dst, src, sz);
return src + sz;
}
unsigned int crypto_ecdh_key_len(const struct ecdh *params)
{
return ECDH_KPP_SECRET_MIN_SIZE + params->key_size;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_key_len);
int crypto_ecdh_encode_key(char *buf, unsigned int len,
const struct ecdh *params)
{
u8 *ptr = buf;
struct kpp_secret secret = {
.type = CRYPTO_KPP_SECRET_TYPE_ECDH,
.len = len
};
if (unlikely(!buf))
return -EINVAL;
if (len != crypto_ecdh_key_len(params))
return -EINVAL;
ptr = ecdh_pack_data(ptr, &secret, sizeof(secret));
ptr = ecdh_pack_data(ptr, &params->key_size, sizeof(params->key_size));
ecdh_pack_data(ptr, params->key, params->key_size);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_encode_key);
int crypto_ecdh_decode_key(const char *buf, unsigned int len,
struct ecdh *params)
{
const u8 *ptr = buf;
struct kpp_secret secret;
if (unlikely(!buf || len < ECDH_KPP_SECRET_MIN_SIZE))
return -EINVAL;
ptr = ecdh_unpack_data(&secret, ptr, sizeof(secret));
if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
return -EINVAL;
if (unlikely(len < secret.len))
return -EINVAL;
ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
if (secret.len != crypto_ecdh_key_len(params))
return -EINVAL;
/* Don't allocate memory. Set pointer to data
* within the given buffer
*/
params->key = (void *)ptr;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_ecdh_decode_key);
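/*
 * Packing sketch (not part of the upstream file) for the helpers above,
 * as a kpp user would prepare a key before crypto_kpp_set_secret().
 * The 32-byte key size (e.g. a NIST P-256 scalar) and the buffer
 * handling are assumptions of the example.
 */
static int ecdh_example_encode(char *buf, unsigned int buf_len,
			       char *privkey)
{
	struct ecdh params = {
		.key = privkey,
		.key_size = 32,		/* illustrative: P-256 scalar */
	};
	unsigned int need = crypto_ecdh_key_len(&params);

	if (buf_len < need)
		return -EINVAL;

	return crypto_ecdh_encode_key(buf, need, &params);
}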
| linux-master | crypto/ecdh_helper.c |
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described
* at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Copyright (C) 2017 Gilad Ben-Yossef <[email protected]>
* Copyright (C) 2021 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <asm/unaligned.h>
#include <crypto/sm3.h>
static const u32 ____cacheline_aligned K[64] = {
0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb,
0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc,
0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce,
0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6,
0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5,
0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53,
0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d,
0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4,
0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43,
0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c,
0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce,
0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec,
0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5
};
/*
* Transform the message X which consists of 16 32-bit-words. See
* GM/T 004-2012 for details.
*/
#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \
do { \
ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \
ss2 = ss1 ^ rol32((a), 12); \
d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \
h += GG ## i(e, f, g) + ss1 + (w1); \
b = rol32((b), 9); \
f = rol32((f), 19); \
h = P0((h)); \
} while (0)
#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \
R(1, a, b, c, d, e, f, g, h, t, w1, w2)
#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \
R(2, a, b, c, d, e, f, g, h, t, w1, w2)
#define FF1(x, y, z) (x ^ y ^ z)
#define FF2(x, y, z) ((x & y) | (x & z) | (y & z))
#define GG1(x, y, z) FF1(x, y, z)
#define GG2(x, y, z) ((x & y) | (~x & z))
/* Message expansion */
#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17))
#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23))
#define I(i) (W[i] = get_unaligned_be32(data + i * 4))
#define W1(i) (W[i & 0x0f])
#define W2(i) (W[i & 0x0f] = \
P1(W[i & 0x0f] \
^ W[(i-9) & 0x0f] \
^ rol32(W[(i-3) & 0x0f], 15)) \
^ rol32(W[(i-13) & 0x0f], 7) \
^ W[(i-6) & 0x0f])
static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16])
{
u32 a, b, c, d, e, f, g, h, ss1, ss2;
a = sctx->state[0];
b = sctx->state[1];
c = sctx->state[2];
d = sctx->state[3];
e = sctx->state[4];
f = sctx->state[5];
g = sctx->state[6];
h = sctx->state[7];
R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4));
R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5));
R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6));
R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7));
R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8));
R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9));
R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10));
R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11));
R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12));
R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13));
R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14));
R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15));
R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16));
R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17));
R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18));
R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19));
R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20));
R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21));
R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22));
R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23));
R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24));
R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25));
R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26));
R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27));
R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28));
R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29));
R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30));
R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31));
R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32));
R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33));
R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34));
R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35));
R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36));
R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37));
R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38));
R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39));
R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40));
R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41));
R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42));
R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43));
R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44));
R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45));
R2(c, d, a, b, g, h, e, f, K[42], W1(42), W2(46));
R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47));
R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48));
R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49));
R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50));
R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51));
R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52));
R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53));
R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54));
R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55));
R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56));
R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57));
R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58));
R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59));
R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60));
R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61));
R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62));
R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63));
R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64));
R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65));
R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66));
R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67));
sctx->state[0] ^= a;
sctx->state[1] ^= b;
sctx->state[2] ^= c;
sctx->state[3] ^= d;
sctx->state[4] ^= e;
sctx->state[5] ^= f;
sctx->state[6] ^= g;
sctx->state[7] ^= h;
}
#undef R
#undef R1
#undef R2
#undef I
#undef W1
#undef W2
static inline void sm3_block(struct sm3_state *sctx,
u8 const *data, int blocks, u32 W[16])
{
while (blocks--) {
sm3_transform(sctx, data, W);
data += SM3_BLOCK_SIZE;
}
}
void sm3_update(struct sm3_state *sctx, const u8 *data, unsigned int len)
{
unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
u32 W[16];
sctx->count += len;
if ((partial + len) >= SM3_BLOCK_SIZE) {
int blocks;
if (partial) {
int p = SM3_BLOCK_SIZE - partial;
memcpy(sctx->buffer + partial, data, p);
data += p;
len -= p;
sm3_block(sctx, sctx->buffer, 1, W);
}
blocks = len / SM3_BLOCK_SIZE;
len %= SM3_BLOCK_SIZE;
if (blocks) {
sm3_block(sctx, data, blocks, W);
data += blocks * SM3_BLOCK_SIZE;
}
memzero_explicit(W, sizeof(W));
partial = 0;
}
if (len)
memcpy(sctx->buffer + partial, data, len);
}
EXPORT_SYMBOL_GPL(sm3_update);
void sm3_final(struct sm3_state *sctx, u8 *out)
{
const int bit_offset = SM3_BLOCK_SIZE - sizeof(u64);
__be64 *bits = (__be64 *)(sctx->buffer + bit_offset);
__be32 *digest = (__be32 *)out;
unsigned int partial = sctx->count % SM3_BLOCK_SIZE;
u32 W[16];
int i;
sctx->buffer[partial++] = 0x80;
if (partial > bit_offset) {
memset(sctx->buffer + partial, 0, SM3_BLOCK_SIZE - partial);
partial = 0;
sm3_block(sctx, sctx->buffer, 1, W);
}
memset(sctx->buffer + partial, 0, bit_offset - partial);
*bits = cpu_to_be64(sctx->count << 3);
sm3_block(sctx, sctx->buffer, 1, W);
for (i = 0; i < 8; i++)
put_unaligned_be32(sctx->state[i], digest++);
/* Zeroize sensitive information. */
memzero_explicit(W, sizeof(W));
memzero_explicit(sctx, sizeof(*sctx));
}
EXPORT_SYMBOL_GPL(sm3_final);
MODULE_DESCRIPTION("Generic SM3 library");
MODULE_LICENSE("GPL v2");
| linux-master | crypto/sm3.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*
* Copyright (c) 2013 Chanho Min <[email protected]>
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/vmalloc.h>
#include <linux/lz4.h>
#include <crypto/internal/scompress.h>
struct lz4hc_ctx {
void *lz4hc_comp_mem;
};
static void *lz4hc_alloc_ctx(struct crypto_scomp *tfm)
{
void *ctx;
ctx = vmalloc(LZ4HC_MEM_COMPRESS);
if (!ctx)
return ERR_PTR(-ENOMEM);
return ctx;
}
static int lz4hc_init(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
ctx->lz4hc_comp_mem = lz4hc_alloc_ctx(NULL);
if (IS_ERR(ctx->lz4hc_comp_mem))
return -ENOMEM;
return 0;
}
static void lz4hc_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
vfree(ctx);
}
static void lz4hc_exit(struct crypto_tfm *tfm)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
lz4hc_free_ctx(NULL, ctx->lz4hc_comp_mem);
}
static int __lz4hc_compress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int out_len = LZ4_compress_HC(src, dst, slen,
*dlen, LZ4HC_DEFAULT_CLEVEL, ctx);
if (!out_len)
return -EINVAL;
*dlen = out_len;
return 0;
}
static int lz4hc_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lz4hc_compress_crypto(src, slen, dst, dlen, ctx);
}
static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst,
unsigned int *dlen)
{
struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
return __lz4hc_compress_crypto(src, slen, dst, dlen,
ctx->lz4hc_comp_mem);
}
static int __lz4hc_decompress_crypto(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
if (out_len < 0)
return -EINVAL;
*dlen = out_len;
return 0;
}
static int lz4hc_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
}
static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst,
unsigned int *dlen)
{
return __lz4hc_decompress_crypto(src, slen, dst, dlen, NULL);
}
static struct crypto_alg alg_lz4hc = {
.cra_name = "lz4hc",
.cra_driver_name = "lz4hc-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct lz4hc_ctx),
.cra_module = THIS_MODULE,
.cra_init = lz4hc_init,
.cra_exit = lz4hc_exit,
.cra_u = { .compress = {
.coa_compress = lz4hc_compress_crypto,
.coa_decompress = lz4hc_decompress_crypto } }
};
static struct scomp_alg scomp = {
.alloc_ctx = lz4hc_alloc_ctx,
.free_ctx = lz4hc_free_ctx,
.compress = lz4hc_scompress,
.decompress = lz4hc_sdecompress,
.base = {
.cra_name = "lz4hc",
.cra_driver_name = "lz4hc-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init lz4hc_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg_lz4hc);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret) {
crypto_unregister_alg(&alg_lz4hc);
return ret;
}
return ret;
}
static void __exit lz4hc_mod_fini(void)
{
crypto_unregister_alg(&alg_lz4hc);
crypto_unregister_scomp(&scomp);
}
subsys_initcall(lz4hc_mod_init);
module_exit(lz4hc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZ4HC Compression Algorithm");
MODULE_ALIAS_CRYPTO("lz4hc");
| linux-master | crypto/lz4hc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cryptographic API.
*
* Copyright (c) 2017-present, Facebook, Inc.
*/
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/vmalloc.h>
#include <linux/zstd.h>
#include <crypto/internal/scompress.h>
#define ZSTD_DEF_LEVEL 3
struct zstd_ctx {
zstd_cctx *cctx;
zstd_dctx *dctx;
void *cwksp;
void *dwksp;
};
static zstd_parameters zstd_params(void)
{
return zstd_get_params(ZSTD_DEF_LEVEL, 0);
}
static int zstd_comp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const zstd_parameters params = zstd_params();
const size_t wksp_size = zstd_cctx_workspace_bound(&params.cParams);
ctx->cwksp = vzalloc(wksp_size);
if (!ctx->cwksp) {
ret = -ENOMEM;
goto out;
}
ctx->cctx = zstd_init_cctx(ctx->cwksp, wksp_size);
if (!ctx->cctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->cwksp);
goto out;
}
static int zstd_decomp_init(struct zstd_ctx *ctx)
{
int ret = 0;
const size_t wksp_size = zstd_dctx_workspace_bound();
ctx->dwksp = vzalloc(wksp_size);
if (!ctx->dwksp) {
ret = -ENOMEM;
goto out;
}
ctx->dctx = zstd_init_dctx(ctx->dwksp, wksp_size);
if (!ctx->dctx) {
ret = -EINVAL;
goto out_free;
}
out:
return ret;
out_free:
vfree(ctx->dwksp);
goto out;
}
static void zstd_comp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->cwksp);
ctx->cwksp = NULL;
ctx->cctx = NULL;
}
static void zstd_decomp_exit(struct zstd_ctx *ctx)
{
vfree(ctx->dwksp);
ctx->dwksp = NULL;
ctx->dctx = NULL;
}
static int __zstd_init(void *ctx)
{
int ret;
ret = zstd_comp_init(ctx);
if (ret)
return ret;
ret = zstd_decomp_init(ctx);
if (ret)
zstd_comp_exit(ctx);
return ret;
}
static void *zstd_alloc_ctx(struct crypto_scomp *tfm)
{
int ret;
struct zstd_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ret = __zstd_init(ctx);
if (ret) {
kfree(ctx);
return ERR_PTR(ret);
}
return ctx;
}
static int zstd_init(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_init(ctx);
}
static void __zstd_exit(void *ctx)
{
zstd_comp_exit(ctx);
zstd_decomp_exit(ctx);
}
static void zstd_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
__zstd_exit(ctx);
kfree_sensitive(ctx);
}
static void zstd_exit(struct crypto_tfm *tfm)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
__zstd_exit(ctx);
}
static int __zstd_compress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
const zstd_parameters params = zstd_params();
out_len = zstd_compress_cctx(zctx->cctx, dst, *dlen, src, slen, &params);
if (zstd_is_error(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int zstd_scompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_compress(src, slen, dst, dlen, ctx);
}
static int __zstd_decompress(const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen, void *ctx)
{
size_t out_len;
struct zstd_ctx *zctx = ctx;
out_len = zstd_decompress_dctx(zctx->dctx, dst, *dlen, src, slen);
if (zstd_is_error(out_len))
return -EINVAL;
*dlen = out_len;
return 0;
}
static int zstd_decompress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
struct zstd_ctx *ctx = crypto_tfm_ctx(tfm);
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static int zstd_sdecompress(struct crypto_scomp *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen,
void *ctx)
{
return __zstd_decompress(src, slen, dst, dlen, ctx);
}
static struct crypto_alg alg = {
.cra_name = "zstd",
.cra_driver_name = "zstd-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_ctxsize = sizeof(struct zstd_ctx),
.cra_module = THIS_MODULE,
.cra_init = zstd_init,
.cra_exit = zstd_exit,
.cra_u = { .compress = {
.coa_compress = zstd_compress,
.coa_decompress = zstd_decompress } }
};
static struct scomp_alg scomp = {
.alloc_ctx = zstd_alloc_ctx,
.free_ctx = zstd_free_ctx,
.compress = zstd_scompress,
.decompress = zstd_sdecompress,
.base = {
.cra_name = "zstd",
.cra_driver_name = "zstd-scomp",
.cra_module = THIS_MODULE,
}
};
static int __init zstd_mod_init(void)
{
int ret;
ret = crypto_register_alg(&alg);
if (ret)
return ret;
ret = crypto_register_scomp(&scomp);
if (ret)
crypto_unregister_alg(&alg);
return ret;
}
static void __exit zstd_mod_fini(void)
{
crypto_unregister_alg(&alg);
crypto_unregister_scomp(&scomp);
}
subsys_initcall(zstd_mod_init);
module_exit(zstd_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Zstd Compression Algorithm");
MODULE_ALIAS_CRYPTO("zstd");
| linux-master | crypto/zstd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and
* described at https://tools.ietf.org/html/draft-shen-sm3-hash-01
*
* Copyright (C) 2017 ARM Limited or its affiliates.
* Written by Gilad Ben-Yossef <[email protected]>
* Copyright (C) 2021 Tianjia Zhang <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sm3.h>
#include <crypto/sm3_base.h>
#include <linux/bitops.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = {
0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F,
0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F,
0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74,
0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B
};
EXPORT_SYMBOL_GPL(sm3_zero_message_hash);
static int crypto_sm3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
sm3_update(shash_desc_ctx(desc), data, len);
return 0;
}
static int crypto_sm3_final(struct shash_desc *desc, u8 *out)
{
sm3_final(shash_desc_ctx(desc), out);
return 0;
}
static int crypto_sm3_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
struct sm3_state *sctx = shash_desc_ctx(desc);
if (len)
sm3_update(sctx, data, len);
sm3_final(sctx, hash);
return 0;
}
static struct shash_alg sm3_alg = {
.digestsize = SM3_DIGEST_SIZE,
.init = sm3_base_init,
.update = crypto_sm3_update,
.final = crypto_sm3_final,
.finup = crypto_sm3_finup,
.descsize = sizeof(struct sm3_state),
.base = {
.cra_name = "sm3",
.cra_driver_name = "sm3-generic",
.cra_priority = 100,
.cra_blocksize = SM3_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sm3_generic_mod_init(void)
{
return crypto_register_shash(&sm3_alg);
}
static void __exit sm3_generic_mod_fini(void)
{
crypto_unregister_shash(&sm3_alg);
}
subsys_initcall(sm3_generic_mod_init);
module_exit(sm3_generic_mod_fini);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SM3 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sm3");
MODULE_ALIAS_CRYPTO("sm3-generic");
| linux-master | crypto/sm3_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* XTS: as defined in IEEE1619/D16
* http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
*
* Copyright (c) 2007 Rik Snel <[email protected]>
*
* Based on ecb.c
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
struct xts_tfm_ctx {
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
};
struct xts_instance_ctx {
struct crypto_skcipher_spawn spawn;
char name[CRYPTO_MAX_ALG_NAME];
};
struct xts_request_ctx {
le128 t;
struct scatterlist *tail;
struct scatterlist sg[2];
struct skcipher_request subreq;
};
static int xts_setkey(struct crypto_skcipher *parent, const u8 *key,
unsigned int keylen)
{
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(parent);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
int err;
err = xts_verify_key(parent, key, keylen);
if (err)
return err;
keylen /= 2;
/* we need two cipher instances: one to compute the initial 'tweak'
* by encrypting the IV (usually the 'plain' iv) and the other
* one to encrypt and decrypt the data */
/* tweak cipher, uses Key2 i.e. the second half of *key */
tweak = ctx->tweak;
crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
err = crypto_cipher_setkey(tweak, key + keylen, keylen);
if (err)
return err;
/* data cipher, uses Key1 i.e. the first half of *key */
child = ctx->child;
crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_skcipher_setkey(child, key, keylen);
}
/*
* We compute the tweak masks twice (both before and after the ECB encryption or
* decryption) to avoid having to allocate a temporary buffer and/or make
* multiple calls to the 'ecb(..)' instance, which usually would be slower than
* just doing the gf128mul_x_ble() calls again.
*/
static int xts_xor_tweak(struct skcipher_request *req, bool second_pass,
bool enc)
{
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const bool cts = (req->cryptlen % XTS_BLOCK_SIZE);
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
le128 t = rctx->t;
int err;
if (second_pass) {
req = &rctx->subreq;
/* set to our TFM to enforce correct alignment: */
skcipher_request_set_tfm(req, tfm);
}
err = skcipher_walk_virt(&w, req, false);
while (w.nbytes) {
unsigned int avail = w.nbytes;
le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
wdst = w.dst.virt.addr;
do {
if (unlikely(cts) &&
w.total - w.nbytes + avail < 2 * XTS_BLOCK_SIZE) {
if (!enc) {
if (second_pass)
rctx->t = t;
gf128mul_x_ble(&t, &t);
}
le128_xor(wdst, &t, wsrc);
if (enc && second_pass)
gf128mul_x_ble(&rctx->t, &t);
skcipher_walk_done(&w, avail - bs);
return 0;
}
le128_xor(wdst++, &t, wsrc++);
gf128mul_x_ble(&t, &t);
} while ((avail -= bs) >= bs);
err = skcipher_walk_done(&w, avail);
}
return err;
}
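/*
 * Tweak schedule used above, in symbols (a sketch, not part of the
 * upstream file).  With K1 the data key, K2 the tweak key and IV the
 * per-sector tweak:
 *
 *	T_0     = E_K2(IV)
 *	T_(i+1) = T_i * x	(gf128mul_x_ble, little-endian GF(2^128))
 *	C_i     = E_K1(P_i ^ T_i) ^ T_i
 *
 * The first pass XORs T_i into the data, the child ecb(..) instance
 * performs the middle E_K1, and the second pass re-derives the same
 * T_i sequence to XOR again, which is what the comment above means by
 * computing the tweak masks twice.
 */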
static int xts_xor_tweak_pre(struct skcipher_request *req, bool enc)
{
return xts_xor_tweak(req, false, enc);
}
static int xts_xor_tweak_post(struct skcipher_request *req, bool enc)
{
return xts_xor_tweak(req, true, enc);
}
static void xts_cts_done(void *data, int err)
{
struct skcipher_request *req = data;
le128 b;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
le128_xor(&b, &rctx->t, &b);
scatterwalk_map_and_copy(&b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
}
skcipher_request_complete(req, err);
}
static int xts_cts_final(struct skcipher_request *req,
int (*crypt)(struct skcipher_request *req))
{
const struct xts_tfm_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
int offset = req->cryptlen & ~(XTS_BLOCK_SIZE - 1);
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int tail = req->cryptlen % XTS_BLOCK_SIZE;
le128 b[2];
int err;
rctx->tail = scatterwalk_ffwd(rctx->sg, req->dst,
offset - XTS_BLOCK_SIZE);
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
b[1] = b[0];
scatterwalk_map_and_copy(b, req->src, offset, tail, 0);
le128_xor(b, &rctx->t, b);
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE + tail, 1);
skcipher_request_set_tfm(subreq, ctx->child);
skcipher_request_set_callback(subreq, req->base.flags, xts_cts_done,
req);
skcipher_request_set_crypt(subreq, rctx->tail, rctx->tail,
XTS_BLOCK_SIZE, NULL);
err = crypt(subreq);
if (err)
return err;
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 0);
le128_xor(b, &rctx->t, b);
scatterwalk_map_and_copy(b, rctx->tail, 0, XTS_BLOCK_SIZE, 1);
return 0;
}
static void xts_encrypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, true);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_encrypt);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
skcipher_request_complete(req, err);
}
static void xts_decrypt_done(void *data, int err)
{
struct skcipher_request *req = data;
if (!err) {
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
rctx->subreq.base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
err = xts_xor_tweak_post(req, false);
if (!err && unlikely(req->cryptlen % XTS_BLOCK_SIZE)) {
err = xts_cts_final(req, crypto_skcipher_decrypt);
if (err == -EINPROGRESS || err == -EBUSY)
return;
}
}
skcipher_request_complete(req, err);
}
static int xts_init_crypt(struct skcipher_request *req,
crypto_completion_t compl)
{
const struct xts_tfm_ctx *ctx =
crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
if (req->cryptlen < XTS_BLOCK_SIZE)
return -EINVAL;
skcipher_request_set_tfm(subreq, ctx->child);
skcipher_request_set_callback(subreq, req->base.flags, compl, req);
skcipher_request_set_crypt(subreq, req->dst, req->dst,
req->cryptlen & ~(XTS_BLOCK_SIZE - 1), NULL);
/* calculate first value of T */
crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
return 0;
}
static int xts_encrypt(struct skcipher_request *req)
{
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
err = xts_init_crypt(req, xts_encrypt_done) ?:
xts_xor_tweak_pre(req, true) ?:
crypto_skcipher_encrypt(subreq) ?:
xts_xor_tweak_post(req, true);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
return xts_cts_final(req, crypto_skcipher_encrypt);
}
static int xts_decrypt(struct skcipher_request *req)
{
struct xts_request_ctx *rctx = skcipher_request_ctx(req);
struct skcipher_request *subreq = &rctx->subreq;
int err;
err = xts_init_crypt(req, xts_decrypt_done) ?:
xts_xor_tweak_pre(req, false) ?:
crypto_skcipher_decrypt(subreq) ?:
xts_xor_tweak_post(req, false);
if (err || likely((req->cryptlen % XTS_BLOCK_SIZE) == 0))
return err;
return xts_cts_final(req, crypto_skcipher_decrypt);
}
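/*
 * Illustrative sketch only: driving this mode from the caller side through
 * the generic skcipher request API. The 16-byte IV is the per-sector XTS
 * tweak. Synchronous-only error handling is an assumption; callers of
 * async tfms would pass a completion callback or use crypto_wait_req().
 */
static int __maybe_unused xts_demo_encrypt(struct crypto_skcipher *tfm,
					   struct scatterlist *src,
					   struct scatterlist *dst,
					   unsigned int len, u8 iv[16])
{
	struct skcipher_request *req;
	int err;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
	return err;
}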
static int xts_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
child = crypto_spawn_skcipher(&ictx->spawn);
if (IS_ERR(child))
return PTR_ERR(child);
ctx->child = child;
tweak = crypto_alloc_cipher(ictx->name, 0, 0);
if (IS_ERR(tweak)) {
crypto_free_skcipher(ctx->child);
return PTR_ERR(tweak);
}
ctx->tweak = tweak;
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
sizeof(struct xts_request_ctx));
return 0;
}
static void xts_exit_tfm(struct crypto_skcipher *tfm)
{
struct xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(ctx->child);
crypto_free_cipher(ctx->tweak);
}
static void xts_free_instance(struct skcipher_instance *inst)
{
struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->spawn);
kfree(inst);
}
static int xts_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct xts_instance_ctx *ctx;
struct skcipher_alg *alg;
const char *cipher_name;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
if (err)
return err;
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = skcipher_instance_ctx(inst);
err = crypto_grab_skcipher(&ctx->spawn, skcipher_crypto_instance(inst),
cipher_name, 0, mask);
if (err == -ENOENT) {
err = -ENAMETOOLONG;
if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
err = crypto_grab_skcipher(&ctx->spawn,
skcipher_crypto_instance(inst),
ctx->name, 0, mask);
}
if (err)
goto err_free_inst;
alg = crypto_skcipher_spawn_alg(&ctx->spawn);
err = -EINVAL;
if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
goto err_free_inst;
if (crypto_skcipher_alg_ivsize(alg))
goto err_free_inst;
err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
&alg->base);
if (err)
goto err_free_inst;
err = -EINVAL;
cipher_name = alg->base.cra_name;
/* Alas we screwed up the naming so we have to mangle the
* cipher name.
*/
if (!strncmp(cipher_name, "ecb(", 4)) {
int len;
len = strscpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
if (len < 2)
goto err_free_inst;
if (ctx->name[len - 1] != ')')
goto err_free_inst;
ctx->name[len - 1] = 0;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
err = -ENAMETOOLONG;
goto err_free_inst;
}
} else
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
(__alignof__(u64) - 1);
inst->alg.ivsize = XTS_BLOCK_SIZE;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;
inst->alg.base.cra_ctxsize = sizeof(struct xts_tfm_ctx);
inst->alg.init = xts_init_tfm;
inst->alg.exit = xts_exit_tfm;
inst->alg.setkey = xts_setkey;
inst->alg.encrypt = xts_encrypt;
inst->alg.decrypt = xts_decrypt;
inst->free = xts_free_instance;
err = skcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
xts_free_instance(inst);
}
return err;
}
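/*
 * The naming dance above in concrete terms: instantiating "xts(aes)" first
 * tries to grab "aes" directly as the ECB spawn; if that fails with
 * -ENOENT it grabs "ecb(aes)" instead, and afterwards strips the
 * "ecb(...)" wrapper back off so the instance is still registered as
 * "xts(aes)" and the tweak cipher can be allocated as plain "aes".
 */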
static struct crypto_template xts_tmpl = {
.name = "xts",
.create = xts_create,
.module = THIS_MODULE,
};
static int __init xts_module_init(void)
{
return crypto_register_template(&xts_tmpl);
}
static void __exit xts_module_exit(void)
{
crypto_unregister_template(&xts_tmpl);
}
subsys_initcall(xts_module_init);
module_exit(xts_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
MODULE_SOFTDEP("pre: ecb");
| linux-master | crypto/xts.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Blowfish Cipher Algorithm, by Bruce Schneier.
* http://www.counterpane.com/blowfish.html
*
* Adapted from Kerneli implementation.
*
* Copyright (c) Herbert Valerio Riedel <[email protected]>
* Copyright (c) Kyle McMartin <[email protected]>
* Copyright (c) 2002 James Morris <[email protected]>
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/unaligned.h>
#include <linux/types.h>
#include <crypto/blowfish.h>
/*
 * Round loop unrolling macros; S is a pointer to an S-box array
 * organized as four consecutive 256-entry rows of u32 words.
*/
#define GET32_3(x) (((x) & 0xff))
#define GET32_2(x) (((x) >> (8)) & (0xff))
#define GET32_1(x) (((x) >> (16)) & (0xff))
#define GET32_0(x) (((x) >> (24)) & (0xff))
#define bf_F(x) (((S[GET32_0(x)] + S[256 + GET32_1(x)]) ^ \
S[512 + GET32_2(x)]) + S[768 + GET32_3(x)])
#define ROUND(a, b, n) ({ b ^= P[n]; a ^= bf_F(b); })
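/*
 * Illustrative sketch only: ROUND() expanded as a plain function to make
 * the Feistel structure explicit. F() splits one 32-bit half into four
 * bytes, looks each up in its own 256-entry S-box row, and combines the
 * results as ((S0 + S1) ^ S2) + S3 modulo 2^32. The function name is an
 * assumption for demonstration; the cipher itself only uses the macro.
 */
static void __maybe_unused bf_round_sketch(const u32 *P, const u32 *S,
					   u32 *a, u32 *b, unsigned int n)
{
	*b ^= P[n];
	*a ^= ((S[GET32_0(*b)] + S[256 + GET32_1(*b)]) ^
	       S[512 + GET32_2(*b)]) + S[768 + GET32_3(*b)];
}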
static void bf_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = get_unaligned_be32(src);
u32 yr = get_unaligned_be32(src + 4);
ROUND(yr, yl, 0);
ROUND(yl, yr, 1);
ROUND(yr, yl, 2);
ROUND(yl, yr, 3);
ROUND(yr, yl, 4);
ROUND(yl, yr, 5);
ROUND(yr, yl, 6);
ROUND(yl, yr, 7);
ROUND(yr, yl, 8);
ROUND(yl, yr, 9);
ROUND(yr, yl, 10);
ROUND(yl, yr, 11);
ROUND(yr, yl, 12);
ROUND(yl, yr, 13);
ROUND(yr, yl, 14);
ROUND(yl, yr, 15);
yl ^= P[16];
yr ^= P[17];
put_unaligned_be32(yr, dst);
put_unaligned_be32(yl, dst + 4);
}
static void bf_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct bf_ctx *ctx = crypto_tfm_ctx(tfm);
const u32 *P = ctx->p;
const u32 *S = ctx->s;
u32 yl = get_unaligned_be32(src);
u32 yr = get_unaligned_be32(src + 4);
ROUND(yr, yl, 17);
ROUND(yl, yr, 16);
ROUND(yr, yl, 15);
ROUND(yl, yr, 14);
ROUND(yr, yl, 13);
ROUND(yl, yr, 12);
ROUND(yr, yl, 11);
ROUND(yl, yr, 10);
ROUND(yr, yl, 9);
ROUND(yl, yr, 8);
ROUND(yr, yl, 7);
ROUND(yl, yr, 6);
ROUND(yr, yl, 5);
ROUND(yl, yr, 4);
ROUND(yr, yl, 3);
ROUND(yl, yr, 2);
yl ^= P[1];
yr ^= P[0];
put_unaligned_be32(yr, dst);
put_unaligned_be32(yl, dst + 4);
}
static struct crypto_alg alg = {
.cra_name = "blowfish",
.cra_driver_name = "blowfish-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = BF_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct bf_ctx),
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = BF_MIN_KEY_SIZE,
.cia_max_keysize = BF_MAX_KEY_SIZE,
.cia_setkey = blowfish_setkey,
.cia_encrypt = bf_encrypt,
.cia_decrypt = bf_decrypt } }
};
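/*
 * Illustrative sketch only: exercising this cipher through the single-block
 * crypto_cipher API. Assumes <crypto/internal/cipher.h> and <linux/err.h>
 * are available; the helper name and buffers are demonstration-only.
 * BF_BLOCK_SIZE is 8 and the key may be anywhere from 4 to 56 bytes.
 */
static int __maybe_unused bf_demo_encrypt_block(const u8 *key,
						unsigned int keylen,
						const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("blowfish", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);

	crypto_free_cipher(tfm);
	return err;
}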
static int __init blowfish_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit blowfish_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
subsys_initcall(blowfish_mod_init);
module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("blowfish");
MODULE_ALIAS_CRYPTO("blowfish-generic");
| linux-master | crypto/blowfish_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* geniv: Shared IV generator code
*
* This file provides common code to IV generators such as seqiv.
*
* Copyright (c) 2007-2019 Herbert Xu <[email protected]>
*/
#include <crypto/internal/geniv.h>
#include <crypto/internal/rng.h>
#include <crypto/null.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
static int aead_geniv_setkey(struct crypto_aead *tfm,
const u8 *key, unsigned int keylen)
{
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
return crypto_aead_setkey(ctx->child, key, keylen);
}
static int aead_geniv_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
return crypto_aead_setauthsize(ctx->child, authsize);
}
static void aead_geniv_free(struct aead_instance *inst)
{
crypto_drop_aead(aead_instance_ctx(inst));
kfree(inst);
}
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb)
{
struct crypto_aead_spawn *spawn;
struct aead_instance *inst;
struct aead_alg *alg;
unsigned int ivsize;
unsigned int maxauthsize;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return ERR_PTR(err);
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return ERR_PTR(-ENOMEM);
spawn = aead_instance_ctx(inst);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(spawn);
ivsize = crypto_aead_alg_ivsize(alg);
maxauthsize = crypto_aead_alg_maxauthsize(alg);
err = -EINVAL;
if (ivsize < sizeof(u64))
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", tmpl->name, alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"%s(%s)", tmpl->name, alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
inst->alg.setkey = aead_geniv_setkey;
inst->alg.setauthsize = aead_geniv_setauthsize;
inst->alg.ivsize = ivsize;
inst->alg.maxauthsize = maxauthsize;
inst->free = aead_geniv_free;
out:
return inst;
err_free_inst:
aead_geniv_free(inst);
inst = ERR_PTR(err);
goto out;
}
EXPORT_SYMBOL_GPL(aead_geniv_alloc);
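/*
 * Illustrative sketch only: how an IV-generator template such as seqiv
 * would call aead_geniv_alloc() from its ->create() callback, filling in
 * its own ops before registering the instance. Real templates install
 * their own encrypt/decrypt handlers; the ones here are placeholders and
 * the function name is an assumption for demonstration.
 */
static int __maybe_unused geniv_demo_create(struct crypto_template *tmpl,
					    struct rtattr **tb)
{
	struct aead_instance *inst;
	int err;

	inst = aead_geniv_alloc(tmpl, tb);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* a real template also sets inst->alg.encrypt and .decrypt here */
	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	err = aead_register_instance(tmpl, inst);
	if (err)
		aead_geniv_free(inst);
	return err;
}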
int aead_init_geniv(struct crypto_aead *aead)
{
struct aead_geniv_ctx *ctx = crypto_aead_ctx(aead);
struct aead_instance *inst = aead_alg_instance(aead);
struct crypto_aead *child;
int err;
spin_lock_init(&ctx->lock);
err = crypto_get_default_rng();
if (err)
goto out;
err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
crypto_aead_ivsize(aead));
crypto_put_default_rng();
if (err)
goto out;
ctx->sknull = crypto_get_default_null_skcipher();
err = PTR_ERR(ctx->sknull);
if (IS_ERR(ctx->sknull))
goto out;
child = crypto_spawn_aead(aead_instance_ctx(inst));
err = PTR_ERR(child);
if (IS_ERR(child))
goto drop_null;
ctx->child = child;
crypto_aead_set_reqsize(aead, crypto_aead_reqsize(child) +
sizeof(struct aead_request));
err = 0;
out:
return err;
drop_null:
crypto_put_default_null_skcipher();
goto out;
}
EXPORT_SYMBOL_GPL(aead_init_geniv);
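/*
 * Note on the reqsize set above: the child's reqsize plus
 * sizeof(struct aead_request) lets a geniv wrapper embed the complete
 * child subrequest in the tail of the parent request, avoiding a
 * separate allocation per operation.
 */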
void aead_exit_geniv(struct crypto_aead *tfm)
{
struct aead_geniv_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
crypto_put_default_null_skcipher();
}
EXPORT_SYMBOL_GPL(aead_exit_geniv);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Shared IV generator code");
| linux-master | crypto/geniv.c |
/*
* Cryptographic API.
*
* AES Cipher Algorithm.
*
* Based on Brian Gladman's code.
*
* Linux developers:
* Alexander Kjeldaas <[email protected]>
* Herbert Valerio Riedel <[email protected]>
* Kyle McMartin <[email protected]>
* Adam J. Richter <[email protected]> (conversion to 2.5 API).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* ---------------------------------------------------------------------------
* Copyright (c) 2002, Dr Brian Gladman <[email protected]>, Worcester, UK.
* All rights reserved.
*
* LICENSE TERMS
*
* The free distribution and use of this software in both source and binary
* form is allowed (with or without changes) provided that:
*
* 1. distributions of this source code include the above copyright
* notice, this list of conditions and the following disclaimer;
*
* 2. distributions in binary form include the above copyright
* notice, this list of conditions and the following disclaimer
* in the documentation and/or other associated materials;
*
* 3. the copyright holder's name is not used to endorse products
* built using this software without specific written permission.
*
* ALTERNATIVELY, provided that this notice is retained in full, this product
* may be distributed under the terms of the GNU General Public License (GPL),
* in which case the provisions of the GPL apply INSTEAD OF those given above.
*
* DISCLAIMER
*
* This software is provided 'as is' with no explicit or implied warranties
* in respect of its properties, including, but not limited to, correctness
* and/or fitness for purpose.
* ---------------------------------------------------------------------------
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
static inline u8 byte(const u32 x, const unsigned n)
{
return x >> (n << 3);
}
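/*
 * Illustrative note only: byte(x, n) selects the n-th least-significant
 * byte, e.g. byte(0x0a0b0c0d, 0) == 0x0d and byte(0x0a0b0c0d, 3) == 0x0a.
 * The round code indexes the four lookup tables below with these bytes.
 */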
/* cacheline-aligned to facilitate prefetching into cache */
__visible const u32 crypto_ft_tab[4][256] ____cacheline_aligned = {
{
0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6,
0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591,
0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56,
0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec,
0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa,
0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb,
0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45,
0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b,
0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c,
0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83,
0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9,
0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a,
0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d,
0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f,
0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df,
0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea,
0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34,
0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b,
0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d,
0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413,
0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1,
0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6,
0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972,
0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85,
0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed,
0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511,
0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe,
0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b,
0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05,
0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1,
0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142,
0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf,
0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3,
0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e,
0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a,
0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6,
0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3,
0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b,
0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428,
0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad,
0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14,
0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8,
0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4,
0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2,
0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda,
0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949,
0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf,
0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810,
0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c,
0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697,
0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e,
0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f,
0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc,
0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c,
0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969,
0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27,
0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122,
0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433,
0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9,
0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5,
0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a,
0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0,
0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e,
0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c,
}, {
0x6363c6a5, 0x7c7cf884, 0x7777ee99, 0x7b7bf68d,
0xf2f2ff0d, 0x6b6bd6bd, 0x6f6fdeb1, 0xc5c59154,
0x30306050, 0x01010203, 0x6767cea9, 0x2b2b567d,
0xfefee719, 0xd7d7b562, 0xabab4de6, 0x7676ec9a,
0xcaca8f45, 0x82821f9d, 0xc9c98940, 0x7d7dfa87,
0xfafaef15, 0x5959b2eb, 0x47478ec9, 0xf0f0fb0b,
0xadad41ec, 0xd4d4b367, 0xa2a25ffd, 0xafaf45ea,
0x9c9c23bf, 0xa4a453f7, 0x7272e496, 0xc0c09b5b,
0xb7b775c2, 0xfdfde11c, 0x93933dae, 0x26264c6a,
0x36366c5a, 0x3f3f7e41, 0xf7f7f502, 0xcccc834f,
0x3434685c, 0xa5a551f4, 0xe5e5d134, 0xf1f1f908,
0x7171e293, 0xd8d8ab73, 0x31316253, 0x15152a3f,
0x0404080c, 0xc7c79552, 0x23234665, 0xc3c39d5e,
0x18183028, 0x969637a1, 0x05050a0f, 0x9a9a2fb5,
0x07070e09, 0x12122436, 0x80801b9b, 0xe2e2df3d,
0xebebcd26, 0x27274e69, 0xb2b27fcd, 0x7575ea9f,
0x0909121b, 0x83831d9e, 0x2c2c5874, 0x1a1a342e,
0x1b1b362d, 0x6e6edcb2, 0x5a5ab4ee, 0xa0a05bfb,
0x5252a4f6, 0x3b3b764d, 0xd6d6b761, 0xb3b37dce,
0x2929527b, 0xe3e3dd3e, 0x2f2f5e71, 0x84841397,
0x5353a6f5, 0xd1d1b968, 0x00000000, 0xededc12c,
0x20204060, 0xfcfce31f, 0xb1b179c8, 0x5b5bb6ed,
0x6a6ad4be, 0xcbcb8d46, 0xbebe67d9, 0x3939724b,
0x4a4a94de, 0x4c4c98d4, 0x5858b0e8, 0xcfcf854a,
0xd0d0bb6b, 0xefefc52a, 0xaaaa4fe5, 0xfbfbed16,
0x434386c5, 0x4d4d9ad7, 0x33336655, 0x85851194,
0x45458acf, 0xf9f9e910, 0x02020406, 0x7f7ffe81,
0x5050a0f0, 0x3c3c7844, 0x9f9f25ba, 0xa8a84be3,
0x5151a2f3, 0xa3a35dfe, 0x404080c0, 0x8f8f058a,
0x92923fad, 0x9d9d21bc, 0x38387048, 0xf5f5f104,
0xbcbc63df, 0xb6b677c1, 0xdadaaf75, 0x21214263,
0x10102030, 0xffffe51a, 0xf3f3fd0e, 0xd2d2bf6d,
0xcdcd814c, 0x0c0c1814, 0x13132635, 0xececc32f,
0x5f5fbee1, 0x979735a2, 0x444488cc, 0x17172e39,
0xc4c49357, 0xa7a755f2, 0x7e7efc82, 0x3d3d7a47,
0x6464c8ac, 0x5d5dbae7, 0x1919322b, 0x7373e695,
0x6060c0a0, 0x81811998, 0x4f4f9ed1, 0xdcdca37f,
0x22224466, 0x2a2a547e, 0x90903bab, 0x88880b83,
0x46468cca, 0xeeeec729, 0xb8b86bd3, 0x1414283c,
0xdedea779, 0x5e5ebce2, 0x0b0b161d, 0xdbdbad76,
0xe0e0db3b, 0x32326456, 0x3a3a744e, 0x0a0a141e,
0x494992db, 0x06060c0a, 0x2424486c, 0x5c5cb8e4,
0xc2c29f5d, 0xd3d3bd6e, 0xacac43ef, 0x6262c4a6,
0x919139a8, 0x959531a4, 0xe4e4d337, 0x7979f28b,
0xe7e7d532, 0xc8c88b43, 0x37376e59, 0x6d6ddab7,
0x8d8d018c, 0xd5d5b164, 0x4e4e9cd2, 0xa9a949e0,
0x6c6cd8b4, 0x5656acfa, 0xf4f4f307, 0xeaeacf25,
0x6565caaf, 0x7a7af48e, 0xaeae47e9, 0x08081018,
0xbaba6fd5, 0x7878f088, 0x25254a6f, 0x2e2e5c72,
0x1c1c3824, 0xa6a657f1, 0xb4b473c7, 0xc6c69751,
0xe8e8cb23, 0xdddda17c, 0x7474e89c, 0x1f1f3e21,
0x4b4b96dd, 0xbdbd61dc, 0x8b8b0d86, 0x8a8a0f85,
0x7070e090, 0x3e3e7c42, 0xb5b571c4, 0x6666ccaa,
0x484890d8, 0x03030605, 0xf6f6f701, 0x0e0e1c12,
0x6161c2a3, 0x35356a5f, 0x5757aef9, 0xb9b969d0,
0x86861791, 0xc1c19958, 0x1d1d3a27, 0x9e9e27b9,
0xe1e1d938, 0xf8f8eb13, 0x98982bb3, 0x11112233,
0x6969d2bb, 0xd9d9a970, 0x8e8e0789, 0x949433a7,
0x9b9b2db6, 0x1e1e3c22, 0x87871592, 0xe9e9c920,
0xcece8749, 0x5555aaff, 0x28285078, 0xdfdfa57a,
0x8c8c038f, 0xa1a159f8, 0x89890980, 0x0d0d1a17,
0xbfbf65da, 0xe6e6d731, 0x424284c6, 0x6868d0b8,
0x414182c3, 0x999929b0, 0x2d2d5a77, 0x0f0f1e11,
0xb0b07bcb, 0x5454a8fc, 0xbbbb6dd6, 0x16162c3a,
}, {
0x63c6a563, 0x7cf8847c, 0x77ee9977, 0x7bf68d7b,
0xf2ff0df2, 0x6bd6bd6b, 0x6fdeb16f, 0xc59154c5,
0x30605030, 0x01020301, 0x67cea967, 0x2b567d2b,
0xfee719fe, 0xd7b562d7, 0xab4de6ab, 0x76ec9a76,
0xca8f45ca, 0x821f9d82, 0xc98940c9, 0x7dfa877d,
0xfaef15fa, 0x59b2eb59, 0x478ec947, 0xf0fb0bf0,
0xad41ecad, 0xd4b367d4, 0xa25ffda2, 0xaf45eaaf,
0x9c23bf9c, 0xa453f7a4, 0x72e49672, 0xc09b5bc0,
0xb775c2b7, 0xfde11cfd, 0x933dae93, 0x264c6a26,
0x366c5a36, 0x3f7e413f, 0xf7f502f7, 0xcc834fcc,
0x34685c34, 0xa551f4a5, 0xe5d134e5, 0xf1f908f1,
0x71e29371, 0xd8ab73d8, 0x31625331, 0x152a3f15,
0x04080c04, 0xc79552c7, 0x23466523, 0xc39d5ec3,
0x18302818, 0x9637a196, 0x050a0f05, 0x9a2fb59a,
0x070e0907, 0x12243612, 0x801b9b80, 0xe2df3de2,
0xebcd26eb, 0x274e6927, 0xb27fcdb2, 0x75ea9f75,
0x09121b09, 0x831d9e83, 0x2c58742c, 0x1a342e1a,
0x1b362d1b, 0x6edcb26e, 0x5ab4ee5a, 0xa05bfba0,
0x52a4f652, 0x3b764d3b, 0xd6b761d6, 0xb37dceb3,
0x29527b29, 0xe3dd3ee3, 0x2f5e712f, 0x84139784,
0x53a6f553, 0xd1b968d1, 0x00000000, 0xedc12ced,
0x20406020, 0xfce31ffc, 0xb179c8b1, 0x5bb6ed5b,
0x6ad4be6a, 0xcb8d46cb, 0xbe67d9be, 0x39724b39,
0x4a94de4a, 0x4c98d44c, 0x58b0e858, 0xcf854acf,
0xd0bb6bd0, 0xefc52aef, 0xaa4fe5aa, 0xfbed16fb,
0x4386c543, 0x4d9ad74d, 0x33665533, 0x85119485,
0x458acf45, 0xf9e910f9, 0x02040602, 0x7ffe817f,
0x50a0f050, 0x3c78443c, 0x9f25ba9f, 0xa84be3a8,
0x51a2f351, 0xa35dfea3, 0x4080c040, 0x8f058a8f,
0x923fad92, 0x9d21bc9d, 0x38704838, 0xf5f104f5,
0xbc63dfbc, 0xb677c1b6, 0xdaaf75da, 0x21426321,
0x10203010, 0xffe51aff, 0xf3fd0ef3, 0xd2bf6dd2,
0xcd814ccd, 0x0c18140c, 0x13263513, 0xecc32fec,
0x5fbee15f, 0x9735a297, 0x4488cc44, 0x172e3917,
0xc49357c4, 0xa755f2a7, 0x7efc827e, 0x3d7a473d,
0x64c8ac64, 0x5dbae75d, 0x19322b19, 0x73e69573,
0x60c0a060, 0x81199881, 0x4f9ed14f, 0xdca37fdc,
0x22446622, 0x2a547e2a, 0x903bab90, 0x880b8388,
0x468cca46, 0xeec729ee, 0xb86bd3b8, 0x14283c14,
0xdea779de, 0x5ebce25e, 0x0b161d0b, 0xdbad76db,
0xe0db3be0, 0x32645632, 0x3a744e3a, 0x0a141e0a,
0x4992db49, 0x060c0a06, 0x24486c24, 0x5cb8e45c,
0xc29f5dc2, 0xd3bd6ed3, 0xac43efac, 0x62c4a662,
0x9139a891, 0x9531a495, 0xe4d337e4, 0x79f28b79,
0xe7d532e7, 0xc88b43c8, 0x376e5937, 0x6ddab76d,
0x8d018c8d, 0xd5b164d5, 0x4e9cd24e, 0xa949e0a9,
0x6cd8b46c, 0x56acfa56, 0xf4f307f4, 0xeacf25ea,
0x65caaf65, 0x7af48e7a, 0xae47e9ae, 0x08101808,
0xba6fd5ba, 0x78f08878, 0x254a6f25, 0x2e5c722e,
0x1c38241c, 0xa657f1a6, 0xb473c7b4, 0xc69751c6,
0xe8cb23e8, 0xdda17cdd, 0x74e89c74, 0x1f3e211f,
0x4b96dd4b, 0xbd61dcbd, 0x8b0d868b, 0x8a0f858a,
0x70e09070, 0x3e7c423e, 0xb571c4b5, 0x66ccaa66,
0x4890d848, 0x03060503, 0xf6f701f6, 0x0e1c120e,
0x61c2a361, 0x356a5f35, 0x57aef957, 0xb969d0b9,
0x86179186, 0xc19958c1, 0x1d3a271d, 0x9e27b99e,
0xe1d938e1, 0xf8eb13f8, 0x982bb398, 0x11223311,
0x69d2bb69, 0xd9a970d9, 0x8e07898e, 0x9433a794,
0x9b2db69b, 0x1e3c221e, 0x87159287, 0xe9c920e9,
0xce8749ce, 0x55aaff55, 0x28507828, 0xdfa57adf,
0x8c038f8c, 0xa159f8a1, 0x89098089, 0x0d1a170d,
0xbf65dabf, 0xe6d731e6, 0x4284c642, 0x68d0b868,
0x4182c341, 0x9929b099, 0x2d5a772d, 0x0f1e110f,
0xb07bcbb0, 0x54a8fc54, 0xbb6dd6bb, 0x162c3a16,
}, {
0xc6a56363, 0xf8847c7c, 0xee997777, 0xf68d7b7b,
0xff0df2f2, 0xd6bd6b6b, 0xdeb16f6f, 0x9154c5c5,
0x60503030, 0x02030101, 0xcea96767, 0x567d2b2b,
0xe719fefe, 0xb562d7d7, 0x4de6abab, 0xec9a7676,
0x8f45caca, 0x1f9d8282, 0x8940c9c9, 0xfa877d7d,
0xef15fafa, 0xb2eb5959, 0x8ec94747, 0xfb0bf0f0,
0x41ecadad, 0xb367d4d4, 0x5ffda2a2, 0x45eaafaf,
0x23bf9c9c, 0x53f7a4a4, 0xe4967272, 0x9b5bc0c0,
0x75c2b7b7, 0xe11cfdfd, 0x3dae9393, 0x4c6a2626,
0x6c5a3636, 0x7e413f3f, 0xf502f7f7, 0x834fcccc,
0x685c3434, 0x51f4a5a5, 0xd134e5e5, 0xf908f1f1,
0xe2937171, 0xab73d8d8, 0x62533131, 0x2a3f1515,
0x080c0404, 0x9552c7c7, 0x46652323, 0x9d5ec3c3,
0x30281818, 0x37a19696, 0x0a0f0505, 0x2fb59a9a,
0x0e090707, 0x24361212, 0x1b9b8080, 0xdf3de2e2,
0xcd26ebeb, 0x4e692727, 0x7fcdb2b2, 0xea9f7575,
0x121b0909, 0x1d9e8383, 0x58742c2c, 0x342e1a1a,
0x362d1b1b, 0xdcb26e6e, 0xb4ee5a5a, 0x5bfba0a0,
0xa4f65252, 0x764d3b3b, 0xb761d6d6, 0x7dceb3b3,
0x527b2929, 0xdd3ee3e3, 0x5e712f2f, 0x13978484,
0xa6f55353, 0xb968d1d1, 0x00000000, 0xc12ceded,
0x40602020, 0xe31ffcfc, 0x79c8b1b1, 0xb6ed5b5b,
0xd4be6a6a, 0x8d46cbcb, 0x67d9bebe, 0x724b3939,
0x94de4a4a, 0x98d44c4c, 0xb0e85858, 0x854acfcf,
0xbb6bd0d0, 0xc52aefef, 0x4fe5aaaa, 0xed16fbfb,
0x86c54343, 0x9ad74d4d, 0x66553333, 0x11948585,
0x8acf4545, 0xe910f9f9, 0x04060202, 0xfe817f7f,
0xa0f05050, 0x78443c3c, 0x25ba9f9f, 0x4be3a8a8,
0xa2f35151, 0x5dfea3a3, 0x80c04040, 0x058a8f8f,
0x3fad9292, 0x21bc9d9d, 0x70483838, 0xf104f5f5,
0x63dfbcbc, 0x77c1b6b6, 0xaf75dada, 0x42632121,
0x20301010, 0xe51affff, 0xfd0ef3f3, 0xbf6dd2d2,
0x814ccdcd, 0x18140c0c, 0x26351313, 0xc32fecec,
0xbee15f5f, 0x35a29797, 0x88cc4444, 0x2e391717,
0x9357c4c4, 0x55f2a7a7, 0xfc827e7e, 0x7a473d3d,
0xc8ac6464, 0xbae75d5d, 0x322b1919, 0xe6957373,
0xc0a06060, 0x19988181, 0x9ed14f4f, 0xa37fdcdc,
0x44662222, 0x547e2a2a, 0x3bab9090, 0x0b838888,
0x8cca4646, 0xc729eeee, 0x6bd3b8b8, 0x283c1414,
0xa779dede, 0xbce25e5e, 0x161d0b0b, 0xad76dbdb,
0xdb3be0e0, 0x64563232, 0x744e3a3a, 0x141e0a0a,
0x92db4949, 0x0c0a0606, 0x486c2424, 0xb8e45c5c,
0x9f5dc2c2, 0xbd6ed3d3, 0x43efacac, 0xc4a66262,
0x39a89191, 0x31a49595, 0xd337e4e4, 0xf28b7979,
0xd532e7e7, 0x8b43c8c8, 0x6e593737, 0xdab76d6d,
0x018c8d8d, 0xb164d5d5, 0x9cd24e4e, 0x49e0a9a9,
0xd8b46c6c, 0xacfa5656, 0xf307f4f4, 0xcf25eaea,
0xcaaf6565, 0xf48e7a7a, 0x47e9aeae, 0x10180808,
0x6fd5baba, 0xf0887878, 0x4a6f2525, 0x5c722e2e,
0x38241c1c, 0x57f1a6a6, 0x73c7b4b4, 0x9751c6c6,
0xcb23e8e8, 0xa17cdddd, 0xe89c7474, 0x3e211f1f,
0x96dd4b4b, 0x61dcbdbd, 0x0d868b8b, 0x0f858a8a,
0xe0907070, 0x7c423e3e, 0x71c4b5b5, 0xccaa6666,
0x90d84848, 0x06050303, 0xf701f6f6, 0x1c120e0e,
0xc2a36161, 0x6a5f3535, 0xaef95757, 0x69d0b9b9,
0x17918686, 0x9958c1c1, 0x3a271d1d, 0x27b99e9e,
0xd938e1e1, 0xeb13f8f8, 0x2bb39898, 0x22331111,
0xd2bb6969, 0xa970d9d9, 0x07898e8e, 0x33a79494,
0x2db69b9b, 0x3c221e1e, 0x15928787, 0xc920e9e9,
0x8749cece, 0xaaff5555, 0x50782828, 0xa57adfdf,
0x038f8c8c, 0x59f8a1a1, 0x09808989, 0x1a170d0d,
0x65dabfbf, 0xd731e6e6, 0x84c64242, 0xd0b86868,
0x82c34141, 0x29b09999, 0x5a772d2d, 0x1e110f0f,
0x7bcbb0b0, 0xa8fc5454, 0x6dd6bbbb, 0x2c3a1616,
}
};
static const u32 crypto_fl_tab[4][256] ____cacheline_aligned = {
{
0x00000063, 0x0000007c, 0x00000077, 0x0000007b,
0x000000f2, 0x0000006b, 0x0000006f, 0x000000c5,
0x00000030, 0x00000001, 0x00000067, 0x0000002b,
0x000000fe, 0x000000d7, 0x000000ab, 0x00000076,
0x000000ca, 0x00000082, 0x000000c9, 0x0000007d,
0x000000fa, 0x00000059, 0x00000047, 0x000000f0,
0x000000ad, 0x000000d4, 0x000000a2, 0x000000af,
0x0000009c, 0x000000a4, 0x00000072, 0x000000c0,
0x000000b7, 0x000000fd, 0x00000093, 0x00000026,
0x00000036, 0x0000003f, 0x000000f7, 0x000000cc,
0x00000034, 0x000000a5, 0x000000e5, 0x000000f1,
0x00000071, 0x000000d8, 0x00000031, 0x00000015,
0x00000004, 0x000000c7, 0x00000023, 0x000000c3,
0x00000018, 0x00000096, 0x00000005, 0x0000009a,
0x00000007, 0x00000012, 0x00000080, 0x000000e2,
0x000000eb, 0x00000027, 0x000000b2, 0x00000075,
0x00000009, 0x00000083, 0x0000002c, 0x0000001a,
0x0000001b, 0x0000006e, 0x0000005a, 0x000000a0,
0x00000052, 0x0000003b, 0x000000d6, 0x000000b3,
0x00000029, 0x000000e3, 0x0000002f, 0x00000084,
0x00000053, 0x000000d1, 0x00000000, 0x000000ed,
0x00000020, 0x000000fc, 0x000000b1, 0x0000005b,
0x0000006a, 0x000000cb, 0x000000be, 0x00000039,
0x0000004a, 0x0000004c, 0x00000058, 0x000000cf,
0x000000d0, 0x000000ef, 0x000000aa, 0x000000fb,
0x00000043, 0x0000004d, 0x00000033, 0x00000085,
0x00000045, 0x000000f9, 0x00000002, 0x0000007f,
0x00000050, 0x0000003c, 0x0000009f, 0x000000a8,
0x00000051, 0x000000a3, 0x00000040, 0x0000008f,
0x00000092, 0x0000009d, 0x00000038, 0x000000f5,
0x000000bc, 0x000000b6, 0x000000da, 0x00000021,
0x00000010, 0x000000ff, 0x000000f3, 0x000000d2,
0x000000cd, 0x0000000c, 0x00000013, 0x000000ec,
0x0000005f, 0x00000097, 0x00000044, 0x00000017,
0x000000c4, 0x000000a7, 0x0000007e, 0x0000003d,
0x00000064, 0x0000005d, 0x00000019, 0x00000073,
0x00000060, 0x00000081, 0x0000004f, 0x000000dc,
0x00000022, 0x0000002a, 0x00000090, 0x00000088,
0x00000046, 0x000000ee, 0x000000b8, 0x00000014,
0x000000de, 0x0000005e, 0x0000000b, 0x000000db,
0x000000e0, 0x00000032, 0x0000003a, 0x0000000a,
0x00000049, 0x00000006, 0x00000024, 0x0000005c,
0x000000c2, 0x000000d3, 0x000000ac, 0x00000062,
0x00000091, 0x00000095, 0x000000e4, 0x00000079,
0x000000e7, 0x000000c8, 0x00000037, 0x0000006d,
0x0000008d, 0x000000d5, 0x0000004e, 0x000000a9,
0x0000006c, 0x00000056, 0x000000f4, 0x000000ea,
0x00000065, 0x0000007a, 0x000000ae, 0x00000008,
0x000000ba, 0x00000078, 0x00000025, 0x0000002e,
0x0000001c, 0x000000a6, 0x000000b4, 0x000000c6,
0x000000e8, 0x000000dd, 0x00000074, 0x0000001f,
0x0000004b, 0x000000bd, 0x0000008b, 0x0000008a,
0x00000070, 0x0000003e, 0x000000b5, 0x00000066,
0x00000048, 0x00000003, 0x000000f6, 0x0000000e,
0x00000061, 0x00000035, 0x00000057, 0x000000b9,
0x00000086, 0x000000c1, 0x0000001d, 0x0000009e,
0x000000e1, 0x000000f8, 0x00000098, 0x00000011,
0x00000069, 0x000000d9, 0x0000008e, 0x00000094,
0x0000009b, 0x0000001e, 0x00000087, 0x000000e9,
0x000000ce, 0x00000055, 0x00000028, 0x000000df,
0x0000008c, 0x000000a1, 0x00000089, 0x0000000d,
0x000000bf, 0x000000e6, 0x00000042, 0x00000068,
0x00000041, 0x00000099, 0x0000002d, 0x0000000f,
0x000000b0, 0x00000054, 0x000000bb, 0x00000016,
}, {
0x00006300, 0x00007c00, 0x00007700, 0x00007b00,
0x0000f200, 0x00006b00, 0x00006f00, 0x0000c500,
0x00003000, 0x00000100, 0x00006700, 0x00002b00,
0x0000fe00, 0x0000d700, 0x0000ab00, 0x00007600,
0x0000ca00, 0x00008200, 0x0000c900, 0x00007d00,
0x0000fa00, 0x00005900, 0x00004700, 0x0000f000,
0x0000ad00, 0x0000d400, 0x0000a200, 0x0000af00,
0x00009c00, 0x0000a400, 0x00007200, 0x0000c000,
0x0000b700, 0x0000fd00, 0x00009300, 0x00002600,
0x00003600, 0x00003f00, 0x0000f700, 0x0000cc00,
0x00003400, 0x0000a500, 0x0000e500, 0x0000f100,
0x00007100, 0x0000d800, 0x00003100, 0x00001500,
0x00000400, 0x0000c700, 0x00002300, 0x0000c300,
0x00001800, 0x00009600, 0x00000500, 0x00009a00,
0x00000700, 0x00001200, 0x00008000, 0x0000e200,
0x0000eb00, 0x00002700, 0x0000b200, 0x00007500,
0x00000900, 0x00008300, 0x00002c00, 0x00001a00,
0x00001b00, 0x00006e00, 0x00005a00, 0x0000a000,
0x00005200, 0x00003b00, 0x0000d600, 0x0000b300,
0x00002900, 0x0000e300, 0x00002f00, 0x00008400,
0x00005300, 0x0000d100, 0x00000000, 0x0000ed00,
0x00002000, 0x0000fc00, 0x0000b100, 0x00005b00,
0x00006a00, 0x0000cb00, 0x0000be00, 0x00003900,
0x00004a00, 0x00004c00, 0x00005800, 0x0000cf00,
0x0000d000, 0x0000ef00, 0x0000aa00, 0x0000fb00,
0x00004300, 0x00004d00, 0x00003300, 0x00008500,
0x00004500, 0x0000f900, 0x00000200, 0x00007f00,
0x00005000, 0x00003c00, 0x00009f00, 0x0000a800,
0x00005100, 0x0000a300, 0x00004000, 0x00008f00,
0x00009200, 0x00009d00, 0x00003800, 0x0000f500,
0x0000bc00, 0x0000b600, 0x0000da00, 0x00002100,
0x00001000, 0x0000ff00, 0x0000f300, 0x0000d200,
0x0000cd00, 0x00000c00, 0x00001300, 0x0000ec00,
0x00005f00, 0x00009700, 0x00004400, 0x00001700,
0x0000c400, 0x0000a700, 0x00007e00, 0x00003d00,
0x00006400, 0x00005d00, 0x00001900, 0x00007300,
0x00006000, 0x00008100, 0x00004f00, 0x0000dc00,
0x00002200, 0x00002a00, 0x00009000, 0x00008800,
0x00004600, 0x0000ee00, 0x0000b800, 0x00001400,
0x0000de00, 0x00005e00, 0x00000b00, 0x0000db00,
0x0000e000, 0x00003200, 0x00003a00, 0x00000a00,
0x00004900, 0x00000600, 0x00002400, 0x00005c00,
0x0000c200, 0x0000d300, 0x0000ac00, 0x00006200,
0x00009100, 0x00009500, 0x0000e400, 0x00007900,
0x0000e700, 0x0000c800, 0x00003700, 0x00006d00,
0x00008d00, 0x0000d500, 0x00004e00, 0x0000a900,
0x00006c00, 0x00005600, 0x0000f400, 0x0000ea00,
0x00006500, 0x00007a00, 0x0000ae00, 0x00000800,
0x0000ba00, 0x00007800, 0x00002500, 0x00002e00,
0x00001c00, 0x0000a600, 0x0000b400, 0x0000c600,
0x0000e800, 0x0000dd00, 0x00007400, 0x00001f00,
0x00004b00, 0x0000bd00, 0x00008b00, 0x00008a00,
0x00007000, 0x00003e00, 0x0000b500, 0x00006600,
0x00004800, 0x00000300, 0x0000f600, 0x00000e00,
0x00006100, 0x00003500, 0x00005700, 0x0000b900,
0x00008600, 0x0000c100, 0x00001d00, 0x00009e00,
0x0000e100, 0x0000f800, 0x00009800, 0x00001100,
0x00006900, 0x0000d900, 0x00008e00, 0x00009400,
0x00009b00, 0x00001e00, 0x00008700, 0x0000e900,
0x0000ce00, 0x00005500, 0x00002800, 0x0000df00,
0x00008c00, 0x0000a100, 0x00008900, 0x00000d00,
0x0000bf00, 0x0000e600, 0x00004200, 0x00006800,
0x00004100, 0x00009900, 0x00002d00, 0x00000f00,
0x0000b000, 0x00005400, 0x0000bb00, 0x00001600,
}, {
0x00630000, 0x007c0000, 0x00770000, 0x007b0000,
0x00f20000, 0x006b0000, 0x006f0000, 0x00c50000,
0x00300000, 0x00010000, 0x00670000, 0x002b0000,
0x00fe0000, 0x00d70000, 0x00ab0000, 0x00760000,
0x00ca0000, 0x00820000, 0x00c90000, 0x007d0000,
0x00fa0000, 0x00590000, 0x00470000, 0x00f00000,
0x00ad0000, 0x00d40000, 0x00a20000, 0x00af0000,
0x009c0000, 0x00a40000, 0x00720000, 0x00c00000,
0x00b70000, 0x00fd0000, 0x00930000, 0x00260000,
0x00360000, 0x003f0000, 0x00f70000, 0x00cc0000,
0x00340000, 0x00a50000, 0x00e50000, 0x00f10000,
0x00710000, 0x00d80000, 0x00310000, 0x00150000,
0x00040000, 0x00c70000, 0x00230000, 0x00c30000,
0x00180000, 0x00960000, 0x00050000, 0x009a0000,
0x00070000, 0x00120000, 0x00800000, 0x00e20000,
0x00eb0000, 0x00270000, 0x00b20000, 0x00750000,
0x00090000, 0x00830000, 0x002c0000, 0x001a0000,
0x001b0000, 0x006e0000, 0x005a0000, 0x00a00000,
0x00520000, 0x003b0000, 0x00d60000, 0x00b30000,
0x00290000, 0x00e30000, 0x002f0000, 0x00840000,
0x00530000, 0x00d10000, 0x00000000, 0x00ed0000,
0x00200000, 0x00fc0000, 0x00b10000, 0x005b0000,
0x006a0000, 0x00cb0000, 0x00be0000, 0x00390000,
0x004a0000, 0x004c0000, 0x00580000, 0x00cf0000,
0x00d00000, 0x00ef0000, 0x00aa0000, 0x00fb0000,
0x00430000, 0x004d0000, 0x00330000, 0x00850000,
0x00450000, 0x00f90000, 0x00020000, 0x007f0000,
0x00500000, 0x003c0000, 0x009f0000, 0x00a80000,
0x00510000, 0x00a30000, 0x00400000, 0x008f0000,
0x00920000, 0x009d0000, 0x00380000, 0x00f50000,
0x00bc0000, 0x00b60000, 0x00da0000, 0x00210000,
0x00100000, 0x00ff0000, 0x00f30000, 0x00d20000,
0x00cd0000, 0x000c0000, 0x00130000, 0x00ec0000,
0x005f0000, 0x00970000, 0x00440000, 0x00170000,
0x00c40000, 0x00a70000, 0x007e0000, 0x003d0000,
0x00640000, 0x005d0000, 0x00190000, 0x00730000,
0x00600000, 0x00810000, 0x004f0000, 0x00dc0000,
0x00220000, 0x002a0000, 0x00900000, 0x00880000,
0x00460000, 0x00ee0000, 0x00b80000, 0x00140000,
0x00de0000, 0x005e0000, 0x000b0000, 0x00db0000,
0x00e00000, 0x00320000, 0x003a0000, 0x000a0000,
0x00490000, 0x00060000, 0x00240000, 0x005c0000,
0x00c20000, 0x00d30000, 0x00ac0000, 0x00620000,
0x00910000, 0x00950000, 0x00e40000, 0x00790000,
0x00e70000, 0x00c80000, 0x00370000, 0x006d0000,
0x008d0000, 0x00d50000, 0x004e0000, 0x00a90000,
0x006c0000, 0x00560000, 0x00f40000, 0x00ea0000,
0x00650000, 0x007a0000, 0x00ae0000, 0x00080000,
0x00ba0000, 0x00780000, 0x00250000, 0x002e0000,
0x001c0000, 0x00a60000, 0x00b40000, 0x00c60000,
0x00e80000, 0x00dd0000, 0x00740000, 0x001f0000,
0x004b0000, 0x00bd0000, 0x008b0000, 0x008a0000,
0x00700000, 0x003e0000, 0x00b50000, 0x00660000,
0x00480000, 0x00030000, 0x00f60000, 0x000e0000,
0x00610000, 0x00350000, 0x00570000, 0x00b90000,
0x00860000, 0x00c10000, 0x001d0000, 0x009e0000,
0x00e10000, 0x00f80000, 0x00980000, 0x00110000,
0x00690000, 0x00d90000, 0x008e0000, 0x00940000,
0x009b0000, 0x001e0000, 0x00870000, 0x00e90000,
0x00ce0000, 0x00550000, 0x00280000, 0x00df0000,
0x008c0000, 0x00a10000, 0x00890000, 0x000d0000,
0x00bf0000, 0x00e60000, 0x00420000, 0x00680000,
0x00410000, 0x00990000, 0x002d0000, 0x000f0000,
0x00b00000, 0x00540000, 0x00bb0000, 0x00160000,
}, {
0x63000000, 0x7c000000, 0x77000000, 0x7b000000,
0xf2000000, 0x6b000000, 0x6f000000, 0xc5000000,
0x30000000, 0x01000000, 0x67000000, 0x2b000000,
0xfe000000, 0xd7000000, 0xab000000, 0x76000000,
0xca000000, 0x82000000, 0xc9000000, 0x7d000000,
0xfa000000, 0x59000000, 0x47000000, 0xf0000000,
0xad000000, 0xd4000000, 0xa2000000, 0xaf000000,
0x9c000000, 0xa4000000, 0x72000000, 0xc0000000,
0xb7000000, 0xfd000000, 0x93000000, 0x26000000,
0x36000000, 0x3f000000, 0xf7000000, 0xcc000000,
0x34000000, 0xa5000000, 0xe5000000, 0xf1000000,
0x71000000, 0xd8000000, 0x31000000, 0x15000000,
0x04000000, 0xc7000000, 0x23000000, 0xc3000000,
0x18000000, 0x96000000, 0x05000000, 0x9a000000,
0x07000000, 0x12000000, 0x80000000, 0xe2000000,
0xeb000000, 0x27000000, 0xb2000000, 0x75000000,
0x09000000, 0x83000000, 0x2c000000, 0x1a000000,
0x1b000000, 0x6e000000, 0x5a000000, 0xa0000000,
0x52000000, 0x3b000000, 0xd6000000, 0xb3000000,
0x29000000, 0xe3000000, 0x2f000000, 0x84000000,
0x53000000, 0xd1000000, 0x00000000, 0xed000000,
0x20000000, 0xfc000000, 0xb1000000, 0x5b000000,
0x6a000000, 0xcb000000, 0xbe000000, 0x39000000,
0x4a000000, 0x4c000000, 0x58000000, 0xcf000000,
0xd0000000, 0xef000000, 0xaa000000, 0xfb000000,
0x43000000, 0x4d000000, 0x33000000, 0x85000000,
0x45000000, 0xf9000000, 0x02000000, 0x7f000000,
0x50000000, 0x3c000000, 0x9f000000, 0xa8000000,
0x51000000, 0xa3000000, 0x40000000, 0x8f000000,
0x92000000, 0x9d000000, 0x38000000, 0xf5000000,
0xbc000000, 0xb6000000, 0xda000000, 0x21000000,
0x10000000, 0xff000000, 0xf3000000, 0xd2000000,
0xcd000000, 0x0c000000, 0x13000000, 0xec000000,
0x5f000000, 0x97000000, 0x44000000, 0x17000000,
0xc4000000, 0xa7000000, 0x7e000000, 0x3d000000,
0x64000000, 0x5d000000, 0x19000000, 0x73000000,
0x60000000, 0x81000000, 0x4f000000, 0xdc000000,
0x22000000, 0x2a000000, 0x90000000, 0x88000000,
0x46000000, 0xee000000, 0xb8000000, 0x14000000,
0xde000000, 0x5e000000, 0x0b000000, 0xdb000000,
0xe0000000, 0x32000000, 0x3a000000, 0x0a000000,
0x49000000, 0x06000000, 0x24000000, 0x5c000000,
0xc2000000, 0xd3000000, 0xac000000, 0x62000000,
0x91000000, 0x95000000, 0xe4000000, 0x79000000,
0xe7000000, 0xc8000000, 0x37000000, 0x6d000000,
0x8d000000, 0xd5000000, 0x4e000000, 0xa9000000,
0x6c000000, 0x56000000, 0xf4000000, 0xea000000,
0x65000000, 0x7a000000, 0xae000000, 0x08000000,
0xba000000, 0x78000000, 0x25000000, 0x2e000000,
0x1c000000, 0xa6000000, 0xb4000000, 0xc6000000,
0xe8000000, 0xdd000000, 0x74000000, 0x1f000000,
0x4b000000, 0xbd000000, 0x8b000000, 0x8a000000,
0x70000000, 0x3e000000, 0xb5000000, 0x66000000,
0x48000000, 0x03000000, 0xf6000000, 0x0e000000,
0x61000000, 0x35000000, 0x57000000, 0xb9000000,
0x86000000, 0xc1000000, 0x1d000000, 0x9e000000,
0xe1000000, 0xf8000000, 0x98000000, 0x11000000,
0x69000000, 0xd9000000, 0x8e000000, 0x94000000,
0x9b000000, 0x1e000000, 0x87000000, 0xe9000000,
0xce000000, 0x55000000, 0x28000000, 0xdf000000,
0x8c000000, 0xa1000000, 0x89000000, 0x0d000000,
0xbf000000, 0xe6000000, 0x42000000, 0x68000000,
0x41000000, 0x99000000, 0x2d000000, 0x0f000000,
0xb0000000, 0x54000000, 0xbb000000, 0x16000000,
}
};
__visible const u32 crypto_it_tab[4][256] ____cacheline_aligned = {
{
0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a,
0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b,
0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5,
0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5,
0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d,
0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b,
0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295,
0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e,
0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927,
0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d,
0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362,
0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9,
0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52,
0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566,
0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3,
0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed,
0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e,
0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4,
0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4,
0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd,
0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d,
0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060,
0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967,
0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879,
0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000,
0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c,
0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36,
0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624,
0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b,
0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c,
0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12,
0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14,
0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3,
0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b,
0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8,
0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684,
0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7,
0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177,
0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947,
0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322,
0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498,
0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f,
0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54,
0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382,
0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf,
0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb,
0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83,
0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef,
0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029,
0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235,
0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733,
0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117,
0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4,
0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546,
0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb,
0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d,
0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb,
0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a,
0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773,
0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478,
0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2,
0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff,
0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664,
0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0,
}, {
0xa7f45150, 0x65417e53, 0xa4171ac3, 0x5e273a96,
0x6bab3bcb, 0x459d1ff1, 0x58faacab, 0x03e34b93,
0xfa302055, 0x6d76adf6, 0x76cc8891, 0x4c02f525,
0xd7e54ffc, 0xcb2ac5d7, 0x44352680, 0xa362b58f,
0x5ab1de49, 0x1bba2567, 0x0eea4598, 0xc0fe5de1,
0x752fc302, 0xf04c8112, 0x97468da3, 0xf9d36bc6,
0x5f8f03e7, 0x9c921595, 0x7a6dbfeb, 0x595295da,
0x83bed42d, 0x217458d3, 0x69e04929, 0xc8c98e44,
0x89c2756a, 0x798ef478, 0x3e58996b, 0x71b927dd,
0x4fe1beb6, 0xad88f017, 0xac20c966, 0x3ace7db4,
0x4adf6318, 0x311ae582, 0x33519760, 0x7f536245,
0x7764b1e0, 0xae6bbb84, 0xa081fe1c, 0x2b08f994,
0x68487058, 0xfd458f19, 0x6cde9487, 0xf87b52b7,
0xd373ab23, 0x024b72e2, 0x8f1fe357, 0xab55662a,
0x28ebb207, 0xc2b52f03, 0x7bc5869a, 0x0837d3a5,
0x872830f2, 0xa5bf23b2, 0x6a0302ba, 0x8216ed5c,
0x1ccf8a2b, 0xb479a792, 0xf207f3f0, 0xe2694ea1,
0xf4da65cd, 0xbe0506d5, 0x6234d11f, 0xfea6c48a,
0x532e349d, 0x55f3a2a0, 0xe18a0532, 0xebf6a475,
0xec830b39, 0xef6040aa, 0x9f715e06, 0x106ebd51,
0x8a213ef9, 0x06dd963d, 0x053eddae, 0xbde64d46,
0x8d5491b5, 0x5dc47105, 0xd406046f, 0x155060ff,
0xfb981924, 0xe9bdd697, 0x434089cc, 0x9ed96777,
0x42e8b0bd, 0x8b890788, 0x5b19e738, 0xeec879db,
0x0a7ca147, 0x0f427ce9, 0x1e84f8c9, 0x00000000,
0x86800983, 0xed2b3248, 0x70111eac, 0x725a6c4e,
0xff0efdfb, 0x38850f56, 0xd5ae3d1e, 0x392d3627,
0xd90f0a64, 0xa65c6821, 0x545b9bd1, 0x2e36243a,
0x670a0cb1, 0xe757930f, 0x96eeb4d2, 0x919b1b9e,
0xc5c0804f, 0x20dc61a2, 0x4b775a69, 0x1a121c16,
0xba93e20a, 0x2aa0c0e5, 0xe0223c43, 0x171b121d,
0x0d090e0b, 0xc78bf2ad, 0xa8b62db9, 0xa91e14c8,
0x19f15785, 0x0775af4c, 0xdd99eebb, 0x607fa3fd,
0x2601f79f, 0xf5725cbc, 0x3b6644c5, 0x7efb5b34,
0x29438b76, 0xc623cbdc, 0xfcedb668, 0xf1e4b863,
0xdc31d7ca, 0x85634210, 0x22971340, 0x11c68420,
0x244a857d, 0x3dbbd2f8, 0x32f9ae11, 0xa129c76d,
0x2f9e1d4b, 0x30b2dcf3, 0x52860dec, 0xe3c177d0,
0x16b32b6c, 0xb970a999, 0x489411fa, 0x64e94722,
0x8cfca8c4, 0x3ff0a01a, 0x2c7d56d8, 0x903322ef,
0x4e4987c7, 0xd138d9c1, 0xa2ca8cfe, 0x0bd49836,
0x81f5a6cf, 0xde7aa528, 0x8eb7da26, 0xbfad3fa4,
0x9d3a2ce4, 0x9278500d, 0xcc5f6a9b, 0x467e5462,
0x138df6c2, 0xb8d890e8, 0xf7392e5e, 0xafc382f5,
0x805d9fbe, 0x93d0697c, 0x2dd56fa9, 0x1225cfb3,
0x99acc83b, 0x7d1810a7, 0x639ce86e, 0xbb3bdb7b,
0x7826cd09, 0x18596ef4, 0xb79aec01, 0x9a4f83a8,
0x6e95e665, 0xe6ffaa7e, 0xcfbc2108, 0xe815efe6,
0x9be7bad9, 0x366f4ace, 0x099fead4, 0x7cb029d6,
0xb2a431af, 0x233f2a31, 0x94a5c630, 0x66a235c0,
0xbc4e7437, 0xca82fca6, 0xd090e0b0, 0xd8a73315,
0x9804f14a, 0xdaec41f7, 0x50cd7f0e, 0xf691172f,
0xd64d768d, 0xb0ef434d, 0x4daacc54, 0x0496e4df,
0xb5d19ee3, 0x886a4c1b, 0x1f2cc1b8, 0x5165467f,
0xea5e9d04, 0x358c015d, 0x7487fa73, 0x410bfb2e,
0x1d67b35a, 0xd2db9252, 0x5610e933, 0x47d66d13,
0x61d79a8c, 0x0ca1377a, 0x14f8598e, 0x3c13eb89,
0x27a9ceee, 0xc961b735, 0xe51ce1ed, 0xb1477a3c,
0xdfd29c59, 0x73f2553f, 0xce141879, 0x37c773bf,
0xcdf753ea, 0xaafd5f5b, 0x6f3ddf14, 0xdb447886,
0xf3afca81, 0xc468b93e, 0x3424382c, 0x40a3c25f,
0xc31d1672, 0x25e2bc0c, 0x493c288b, 0x950dff41,
0x01a83971, 0xb30c08de, 0xe4b4d89c, 0xc1566490,
0x84cb7b61, 0xb632d570, 0x5c6c4874, 0x57b8d042,
}, {
0xf45150a7, 0x417e5365, 0x171ac3a4, 0x273a965e,
0xab3bcb6b, 0x9d1ff145, 0xfaacab58, 0xe34b9303,
0x302055fa, 0x76adf66d, 0xcc889176, 0x02f5254c,
0xe54ffcd7, 0x2ac5d7cb, 0x35268044, 0x62b58fa3,
0xb1de495a, 0xba25671b, 0xea45980e, 0xfe5de1c0,
0x2fc30275, 0x4c8112f0, 0x468da397, 0xd36bc6f9,
0x8f03e75f, 0x9215959c, 0x6dbfeb7a, 0x5295da59,
0xbed42d83, 0x7458d321, 0xe0492969, 0xc98e44c8,
0xc2756a89, 0x8ef47879, 0x58996b3e, 0xb927dd71,
0xe1beb64f, 0x88f017ad, 0x20c966ac, 0xce7db43a,
0xdf63184a, 0x1ae58231, 0x51976033, 0x5362457f,
0x64b1e077, 0x6bbb84ae, 0x81fe1ca0, 0x08f9942b,
0x48705868, 0x458f19fd, 0xde94876c, 0x7b52b7f8,
0x73ab23d3, 0x4b72e202, 0x1fe3578f, 0x55662aab,
0xebb20728, 0xb52f03c2, 0xc5869a7b, 0x37d3a508,
0x2830f287, 0xbf23b2a5, 0x0302ba6a, 0x16ed5c82,
0xcf8a2b1c, 0x79a792b4, 0x07f3f0f2, 0x694ea1e2,
0xda65cdf4, 0x0506d5be, 0x34d11f62, 0xa6c48afe,
0x2e349d53, 0xf3a2a055, 0x8a0532e1, 0xf6a475eb,
0x830b39ec, 0x6040aaef, 0x715e069f, 0x6ebd5110,
0x213ef98a, 0xdd963d06, 0x3eddae05, 0xe64d46bd,
0x5491b58d, 0xc471055d, 0x06046fd4, 0x5060ff15,
0x981924fb, 0xbdd697e9, 0x4089cc43, 0xd967779e,
0xe8b0bd42, 0x8907888b, 0x19e7385b, 0xc879dbee,
0x7ca1470a, 0x427ce90f, 0x84f8c91e, 0x00000000,
0x80098386, 0x2b3248ed, 0x111eac70, 0x5a6c4e72,
0x0efdfbff, 0x850f5638, 0xae3d1ed5, 0x2d362739,
0x0f0a64d9, 0x5c6821a6, 0x5b9bd154, 0x36243a2e,
0x0a0cb167, 0x57930fe7, 0xeeb4d296, 0x9b1b9e91,
0xc0804fc5, 0xdc61a220, 0x775a694b, 0x121c161a,
0x93e20aba, 0xa0c0e52a, 0x223c43e0, 0x1b121d17,
0x090e0b0d, 0x8bf2adc7, 0xb62db9a8, 0x1e14c8a9,
0xf1578519, 0x75af4c07, 0x99eebbdd, 0x7fa3fd60,
0x01f79f26, 0x725cbcf5, 0x6644c53b, 0xfb5b347e,
0x438b7629, 0x23cbdcc6, 0xedb668fc, 0xe4b863f1,
0x31d7cadc, 0x63421085, 0x97134022, 0xc6842011,
0x4a857d24, 0xbbd2f83d, 0xf9ae1132, 0x29c76da1,
0x9e1d4b2f, 0xb2dcf330, 0x860dec52, 0xc177d0e3,
0xb32b6c16, 0x70a999b9, 0x9411fa48, 0xe9472264,
0xfca8c48c, 0xf0a01a3f, 0x7d56d82c, 0x3322ef90,
0x4987c74e, 0x38d9c1d1, 0xca8cfea2, 0xd498360b,
0xf5a6cf81, 0x7aa528de, 0xb7da268e, 0xad3fa4bf,
0x3a2ce49d, 0x78500d92, 0x5f6a9bcc, 0x7e546246,
0x8df6c213, 0xd890e8b8, 0x392e5ef7, 0xc382f5af,
0x5d9fbe80, 0xd0697c93, 0xd56fa92d, 0x25cfb312,
0xacc83b99, 0x1810a77d, 0x9ce86e63, 0x3bdb7bbb,
0x26cd0978, 0x596ef418, 0x9aec01b7, 0x4f83a89a,
0x95e6656e, 0xffaa7ee6, 0xbc2108cf, 0x15efe6e8,
0xe7bad99b, 0x6f4ace36, 0x9fead409, 0xb029d67c,
0xa431afb2, 0x3f2a3123, 0xa5c63094, 0xa235c066,
0x4e7437bc, 0x82fca6ca, 0x90e0b0d0, 0xa73315d8,
0x04f14a98, 0xec41f7da, 0xcd7f0e50, 0x91172ff6,
0x4d768dd6, 0xef434db0, 0xaacc544d, 0x96e4df04,
0xd19ee3b5, 0x6a4c1b88, 0x2cc1b81f, 0x65467f51,
0x5e9d04ea, 0x8c015d35, 0x87fa7374, 0x0bfb2e41,
0x67b35a1d, 0xdb9252d2, 0x10e93356, 0xd66d1347,
0xd79a8c61, 0xa1377a0c, 0xf8598e14, 0x13eb893c,
0xa9ceee27, 0x61b735c9, 0x1ce1ede5, 0x477a3cb1,
0xd29c59df, 0xf2553f73, 0x141879ce, 0xc773bf37,
0xf753eacd, 0xfd5f5baa, 0x3ddf146f, 0x447886db,
0xafca81f3, 0x68b93ec4, 0x24382c34, 0xa3c25f40,
0x1d1672c3, 0xe2bc0c25, 0x3c288b49, 0x0dff4195,
0xa8397101, 0x0c08deb3, 0xb4d89ce4, 0x566490c1,
0xcb7b6184, 0x32d570b6, 0x6c48745c, 0xb8d04257,
}, {
0x5150a7f4, 0x7e536541, 0x1ac3a417, 0x3a965e27,
0x3bcb6bab, 0x1ff1459d, 0xacab58fa, 0x4b9303e3,
0x2055fa30, 0xadf66d76, 0x889176cc, 0xf5254c02,
0x4ffcd7e5, 0xc5d7cb2a, 0x26804435, 0xb58fa362,
0xde495ab1, 0x25671bba, 0x45980eea, 0x5de1c0fe,
0xc302752f, 0x8112f04c, 0x8da39746, 0x6bc6f9d3,
0x03e75f8f, 0x15959c92, 0xbfeb7a6d, 0x95da5952,
0xd42d83be, 0x58d32174, 0x492969e0, 0x8e44c8c9,
0x756a89c2, 0xf478798e, 0x996b3e58, 0x27dd71b9,
0xbeb64fe1, 0xf017ad88, 0xc966ac20, 0x7db43ace,
0x63184adf, 0xe582311a, 0x97603351, 0x62457f53,
0xb1e07764, 0xbb84ae6b, 0xfe1ca081, 0xf9942b08,
0x70586848, 0x8f19fd45, 0x94876cde, 0x52b7f87b,
0xab23d373, 0x72e2024b, 0xe3578f1f, 0x662aab55,
0xb20728eb, 0x2f03c2b5, 0x869a7bc5, 0xd3a50837,
0x30f28728, 0x23b2a5bf, 0x02ba6a03, 0xed5c8216,
0x8a2b1ccf, 0xa792b479, 0xf3f0f207, 0x4ea1e269,
0x65cdf4da, 0x06d5be05, 0xd11f6234, 0xc48afea6,
0x349d532e, 0xa2a055f3, 0x0532e18a, 0xa475ebf6,
0x0b39ec83, 0x40aaef60, 0x5e069f71, 0xbd51106e,
0x3ef98a21, 0x963d06dd, 0xddae053e, 0x4d46bde6,
0x91b58d54, 0x71055dc4, 0x046fd406, 0x60ff1550,
0x1924fb98, 0xd697e9bd, 0x89cc4340, 0x67779ed9,
0xb0bd42e8, 0x07888b89, 0xe7385b19, 0x79dbeec8,
0xa1470a7c, 0x7ce90f42, 0xf8c91e84, 0x00000000,
0x09838680, 0x3248ed2b, 0x1eac7011, 0x6c4e725a,
0xfdfbff0e, 0x0f563885, 0x3d1ed5ae, 0x3627392d,
0x0a64d90f, 0x6821a65c, 0x9bd1545b, 0x243a2e36,
0x0cb1670a, 0x930fe757, 0xb4d296ee, 0x1b9e919b,
0x804fc5c0, 0x61a220dc, 0x5a694b77, 0x1c161a12,
0xe20aba93, 0xc0e52aa0, 0x3c43e022, 0x121d171b,
0x0e0b0d09, 0xf2adc78b, 0x2db9a8b6, 0x14c8a91e,
0x578519f1, 0xaf4c0775, 0xeebbdd99, 0xa3fd607f,
0xf79f2601, 0x5cbcf572, 0x44c53b66, 0x5b347efb,
0x8b762943, 0xcbdcc623, 0xb668fced, 0xb863f1e4,
0xd7cadc31, 0x42108563, 0x13402297, 0x842011c6,
0x857d244a, 0xd2f83dbb, 0xae1132f9, 0xc76da129,
0x1d4b2f9e, 0xdcf330b2, 0x0dec5286, 0x77d0e3c1,
0x2b6c16b3, 0xa999b970, 0x11fa4894, 0x472264e9,
0xa8c48cfc, 0xa01a3ff0, 0x56d82c7d, 0x22ef9033,
0x87c74e49, 0xd9c1d138, 0x8cfea2ca, 0x98360bd4,
0xa6cf81f5, 0xa528de7a, 0xda268eb7, 0x3fa4bfad,
0x2ce49d3a, 0x500d9278, 0x6a9bcc5f, 0x5462467e,
0xf6c2138d, 0x90e8b8d8, 0x2e5ef739, 0x82f5afc3,
0x9fbe805d, 0x697c93d0, 0x6fa92dd5, 0xcfb31225,
0xc83b99ac, 0x10a77d18, 0xe86e639c, 0xdb7bbb3b,
0xcd097826, 0x6ef41859, 0xec01b79a, 0x83a89a4f,
0xe6656e95, 0xaa7ee6ff, 0x2108cfbc, 0xefe6e815,
0xbad99be7, 0x4ace366f, 0xead4099f, 0x29d67cb0,
0x31afb2a4, 0x2a31233f, 0xc63094a5, 0x35c066a2,
0x7437bc4e, 0xfca6ca82, 0xe0b0d090, 0x3315d8a7,
0xf14a9804, 0x41f7daec, 0x7f0e50cd, 0x172ff691,
0x768dd64d, 0x434db0ef, 0xcc544daa, 0xe4df0496,
0x9ee3b5d1, 0x4c1b886a, 0xc1b81f2c, 0x467f5165,
0x9d04ea5e, 0x015d358c, 0xfa737487, 0xfb2e410b,
0xb35a1d67, 0x9252d2db, 0xe9335610, 0x6d1347d6,
0x9a8c61d7, 0x377a0ca1, 0x598e14f8, 0xeb893c13,
0xceee27a9, 0xb735c961, 0xe1ede51c, 0x7a3cb147,
0x9c59dfd2, 0x553f73f2, 0x1879ce14, 0x73bf37c7,
0x53eacdf7, 0x5f5baafd, 0xdf146f3d, 0x7886db44,
0xca81f3af, 0xb93ec468, 0x382c3424, 0xc25f40a3,
0x1672c31d, 0xbc0c25e2, 0x288b493c, 0xff41950d,
0x397101a8, 0x08deb30c, 0xd89ce4b4, 0x6490c156,
0x7b6184cb, 0xd570b632, 0x48745c6c, 0xd04257b8,
}
};
static const u32 crypto_il_tab[4][256] ____cacheline_aligned = {
{
0x00000052, 0x00000009, 0x0000006a, 0x000000d5,
0x00000030, 0x00000036, 0x000000a5, 0x00000038,
0x000000bf, 0x00000040, 0x000000a3, 0x0000009e,
0x00000081, 0x000000f3, 0x000000d7, 0x000000fb,
0x0000007c, 0x000000e3, 0x00000039, 0x00000082,
0x0000009b, 0x0000002f, 0x000000ff, 0x00000087,
0x00000034, 0x0000008e, 0x00000043, 0x00000044,
0x000000c4, 0x000000de, 0x000000e9, 0x000000cb,
0x00000054, 0x0000007b, 0x00000094, 0x00000032,
0x000000a6, 0x000000c2, 0x00000023, 0x0000003d,
0x000000ee, 0x0000004c, 0x00000095, 0x0000000b,
0x00000042, 0x000000fa, 0x000000c3, 0x0000004e,
0x00000008, 0x0000002e, 0x000000a1, 0x00000066,
0x00000028, 0x000000d9, 0x00000024, 0x000000b2,
0x00000076, 0x0000005b, 0x000000a2, 0x00000049,
0x0000006d, 0x0000008b, 0x000000d1, 0x00000025,
0x00000072, 0x000000f8, 0x000000f6, 0x00000064,
0x00000086, 0x00000068, 0x00000098, 0x00000016,
0x000000d4, 0x000000a4, 0x0000005c, 0x000000cc,
0x0000005d, 0x00000065, 0x000000b6, 0x00000092,
0x0000006c, 0x00000070, 0x00000048, 0x00000050,
0x000000fd, 0x000000ed, 0x000000b9, 0x000000da,
0x0000005e, 0x00000015, 0x00000046, 0x00000057,
0x000000a7, 0x0000008d, 0x0000009d, 0x00000084,
0x00000090, 0x000000d8, 0x000000ab, 0x00000000,
0x0000008c, 0x000000bc, 0x000000d3, 0x0000000a,
0x000000f7, 0x000000e4, 0x00000058, 0x00000005,
0x000000b8, 0x000000b3, 0x00000045, 0x00000006,
0x000000d0, 0x0000002c, 0x0000001e, 0x0000008f,
0x000000ca, 0x0000003f, 0x0000000f, 0x00000002,
0x000000c1, 0x000000af, 0x000000bd, 0x00000003,
0x00000001, 0x00000013, 0x0000008a, 0x0000006b,
0x0000003a, 0x00000091, 0x00000011, 0x00000041,
0x0000004f, 0x00000067, 0x000000dc, 0x000000ea,
0x00000097, 0x000000f2, 0x000000cf, 0x000000ce,
0x000000f0, 0x000000b4, 0x000000e6, 0x00000073,
0x00000096, 0x000000ac, 0x00000074, 0x00000022,
0x000000e7, 0x000000ad, 0x00000035, 0x00000085,
0x000000e2, 0x000000f9, 0x00000037, 0x000000e8,
0x0000001c, 0x00000075, 0x000000df, 0x0000006e,
0x00000047, 0x000000f1, 0x0000001a, 0x00000071,
0x0000001d, 0x00000029, 0x000000c5, 0x00000089,
0x0000006f, 0x000000b7, 0x00000062, 0x0000000e,
0x000000aa, 0x00000018, 0x000000be, 0x0000001b,
0x000000fc, 0x00000056, 0x0000003e, 0x0000004b,
0x000000c6, 0x000000d2, 0x00000079, 0x00000020,
0x0000009a, 0x000000db, 0x000000c0, 0x000000fe,
0x00000078, 0x000000cd, 0x0000005a, 0x000000f4,
0x0000001f, 0x000000dd, 0x000000a8, 0x00000033,
0x00000088, 0x00000007, 0x000000c7, 0x00000031,
0x000000b1, 0x00000012, 0x00000010, 0x00000059,
0x00000027, 0x00000080, 0x000000ec, 0x0000005f,
0x00000060, 0x00000051, 0x0000007f, 0x000000a9,
0x00000019, 0x000000b5, 0x0000004a, 0x0000000d,
0x0000002d, 0x000000e5, 0x0000007a, 0x0000009f,
0x00000093, 0x000000c9, 0x0000009c, 0x000000ef,
0x000000a0, 0x000000e0, 0x0000003b, 0x0000004d,
0x000000ae, 0x0000002a, 0x000000f5, 0x000000b0,
0x000000c8, 0x000000eb, 0x000000bb, 0x0000003c,
0x00000083, 0x00000053, 0x00000099, 0x00000061,
0x00000017, 0x0000002b, 0x00000004, 0x0000007e,
0x000000ba, 0x00000077, 0x000000d6, 0x00000026,
0x000000e1, 0x00000069, 0x00000014, 0x00000063,
0x00000055, 0x00000021, 0x0000000c, 0x0000007d,
}, {
0x00005200, 0x00000900, 0x00006a00, 0x0000d500,
0x00003000, 0x00003600, 0x0000a500, 0x00003800,
0x0000bf00, 0x00004000, 0x0000a300, 0x00009e00,
0x00008100, 0x0000f300, 0x0000d700, 0x0000fb00,
0x00007c00, 0x0000e300, 0x00003900, 0x00008200,
0x00009b00, 0x00002f00, 0x0000ff00, 0x00008700,
0x00003400, 0x00008e00, 0x00004300, 0x00004400,
0x0000c400, 0x0000de00, 0x0000e900, 0x0000cb00,
0x00005400, 0x00007b00, 0x00009400, 0x00003200,
0x0000a600, 0x0000c200, 0x00002300, 0x00003d00,
0x0000ee00, 0x00004c00, 0x00009500, 0x00000b00,
0x00004200, 0x0000fa00, 0x0000c300, 0x00004e00,
0x00000800, 0x00002e00, 0x0000a100, 0x00006600,
0x00002800, 0x0000d900, 0x00002400, 0x0000b200,
0x00007600, 0x00005b00, 0x0000a200, 0x00004900,
0x00006d00, 0x00008b00, 0x0000d100, 0x00002500,
0x00007200, 0x0000f800, 0x0000f600, 0x00006400,
0x00008600, 0x00006800, 0x00009800, 0x00001600,
0x0000d400, 0x0000a400, 0x00005c00, 0x0000cc00,
0x00005d00, 0x00006500, 0x0000b600, 0x00009200,
0x00006c00, 0x00007000, 0x00004800, 0x00005000,
0x0000fd00, 0x0000ed00, 0x0000b900, 0x0000da00,
0x00005e00, 0x00001500, 0x00004600, 0x00005700,
0x0000a700, 0x00008d00, 0x00009d00, 0x00008400,
0x00009000, 0x0000d800, 0x0000ab00, 0x00000000,
0x00008c00, 0x0000bc00, 0x0000d300, 0x00000a00,
0x0000f700, 0x0000e400, 0x00005800, 0x00000500,
0x0000b800, 0x0000b300, 0x00004500, 0x00000600,
0x0000d000, 0x00002c00, 0x00001e00, 0x00008f00,
0x0000ca00, 0x00003f00, 0x00000f00, 0x00000200,
0x0000c100, 0x0000af00, 0x0000bd00, 0x00000300,
0x00000100, 0x00001300, 0x00008a00, 0x00006b00,
0x00003a00, 0x00009100, 0x00001100, 0x00004100,
0x00004f00, 0x00006700, 0x0000dc00, 0x0000ea00,
0x00009700, 0x0000f200, 0x0000cf00, 0x0000ce00,
0x0000f000, 0x0000b400, 0x0000e600, 0x00007300,
0x00009600, 0x0000ac00, 0x00007400, 0x00002200,
0x0000e700, 0x0000ad00, 0x00003500, 0x00008500,
0x0000e200, 0x0000f900, 0x00003700, 0x0000e800,
0x00001c00, 0x00007500, 0x0000df00, 0x00006e00,
0x00004700, 0x0000f100, 0x00001a00, 0x00007100,
0x00001d00, 0x00002900, 0x0000c500, 0x00008900,
0x00006f00, 0x0000b700, 0x00006200, 0x00000e00,
0x0000aa00, 0x00001800, 0x0000be00, 0x00001b00,
0x0000fc00, 0x00005600, 0x00003e00, 0x00004b00,
0x0000c600, 0x0000d200, 0x00007900, 0x00002000,
0x00009a00, 0x0000db00, 0x0000c000, 0x0000fe00,
0x00007800, 0x0000cd00, 0x00005a00, 0x0000f400,
0x00001f00, 0x0000dd00, 0x0000a800, 0x00003300,
0x00008800, 0x00000700, 0x0000c700, 0x00003100,
0x0000b100, 0x00001200, 0x00001000, 0x00005900,
0x00002700, 0x00008000, 0x0000ec00, 0x00005f00,
0x00006000, 0x00005100, 0x00007f00, 0x0000a900,
0x00001900, 0x0000b500, 0x00004a00, 0x00000d00,
0x00002d00, 0x0000e500, 0x00007a00, 0x00009f00,
0x00009300, 0x0000c900, 0x00009c00, 0x0000ef00,
0x0000a000, 0x0000e000, 0x00003b00, 0x00004d00,
0x0000ae00, 0x00002a00, 0x0000f500, 0x0000b000,
0x0000c800, 0x0000eb00, 0x0000bb00, 0x00003c00,
0x00008300, 0x00005300, 0x00009900, 0x00006100,
0x00001700, 0x00002b00, 0x00000400, 0x00007e00,
0x0000ba00, 0x00007700, 0x0000d600, 0x00002600,
0x0000e100, 0x00006900, 0x00001400, 0x00006300,
0x00005500, 0x00002100, 0x00000c00, 0x00007d00,
}, {
0x00520000, 0x00090000, 0x006a0000, 0x00d50000,
0x00300000, 0x00360000, 0x00a50000, 0x00380000,
0x00bf0000, 0x00400000, 0x00a30000, 0x009e0000,
0x00810000, 0x00f30000, 0x00d70000, 0x00fb0000,
0x007c0000, 0x00e30000, 0x00390000, 0x00820000,
0x009b0000, 0x002f0000, 0x00ff0000, 0x00870000,
0x00340000, 0x008e0000, 0x00430000, 0x00440000,
0x00c40000, 0x00de0000, 0x00e90000, 0x00cb0000,
0x00540000, 0x007b0000, 0x00940000, 0x00320000,
0x00a60000, 0x00c20000, 0x00230000, 0x003d0000,
0x00ee0000, 0x004c0000, 0x00950000, 0x000b0000,
0x00420000, 0x00fa0000, 0x00c30000, 0x004e0000,
0x00080000, 0x002e0000, 0x00a10000, 0x00660000,
0x00280000, 0x00d90000, 0x00240000, 0x00b20000,
0x00760000, 0x005b0000, 0x00a20000, 0x00490000,
0x006d0000, 0x008b0000, 0x00d10000, 0x00250000,
0x00720000, 0x00f80000, 0x00f60000, 0x00640000,
0x00860000, 0x00680000, 0x00980000, 0x00160000,
0x00d40000, 0x00a40000, 0x005c0000, 0x00cc0000,
0x005d0000, 0x00650000, 0x00b60000, 0x00920000,
0x006c0000, 0x00700000, 0x00480000, 0x00500000,
0x00fd0000, 0x00ed0000, 0x00b90000, 0x00da0000,
0x005e0000, 0x00150000, 0x00460000, 0x00570000,
0x00a70000, 0x008d0000, 0x009d0000, 0x00840000,
0x00900000, 0x00d80000, 0x00ab0000, 0x00000000,
0x008c0000, 0x00bc0000, 0x00d30000, 0x000a0000,
0x00f70000, 0x00e40000, 0x00580000, 0x00050000,
0x00b80000, 0x00b30000, 0x00450000, 0x00060000,
0x00d00000, 0x002c0000, 0x001e0000, 0x008f0000,
0x00ca0000, 0x003f0000, 0x000f0000, 0x00020000,
0x00c10000, 0x00af0000, 0x00bd0000, 0x00030000,
0x00010000, 0x00130000, 0x008a0000, 0x006b0000,
0x003a0000, 0x00910000, 0x00110000, 0x00410000,
0x004f0000, 0x00670000, 0x00dc0000, 0x00ea0000,
0x00970000, 0x00f20000, 0x00cf0000, 0x00ce0000,
0x00f00000, 0x00b40000, 0x00e60000, 0x00730000,
0x00960000, 0x00ac0000, 0x00740000, 0x00220000,
0x00e70000, 0x00ad0000, 0x00350000, 0x00850000,
0x00e20000, 0x00f90000, 0x00370000, 0x00e80000,
0x001c0000, 0x00750000, 0x00df0000, 0x006e0000,
0x00470000, 0x00f10000, 0x001a0000, 0x00710000,
0x001d0000, 0x00290000, 0x00c50000, 0x00890000,
0x006f0000, 0x00b70000, 0x00620000, 0x000e0000,
0x00aa0000, 0x00180000, 0x00be0000, 0x001b0000,
0x00fc0000, 0x00560000, 0x003e0000, 0x004b0000,
0x00c60000, 0x00d20000, 0x00790000, 0x00200000,
0x009a0000, 0x00db0000, 0x00c00000, 0x00fe0000,
0x00780000, 0x00cd0000, 0x005a0000, 0x00f40000,
0x001f0000, 0x00dd0000, 0x00a80000, 0x00330000,
0x00880000, 0x00070000, 0x00c70000, 0x00310000,
0x00b10000, 0x00120000, 0x00100000, 0x00590000,
0x00270000, 0x00800000, 0x00ec0000, 0x005f0000,
0x00600000, 0x00510000, 0x007f0000, 0x00a90000,
0x00190000, 0x00b50000, 0x004a0000, 0x000d0000,
0x002d0000, 0x00e50000, 0x007a0000, 0x009f0000,
0x00930000, 0x00c90000, 0x009c0000, 0x00ef0000,
0x00a00000, 0x00e00000, 0x003b0000, 0x004d0000,
0x00ae0000, 0x002a0000, 0x00f50000, 0x00b00000,
0x00c80000, 0x00eb0000, 0x00bb0000, 0x003c0000,
0x00830000, 0x00530000, 0x00990000, 0x00610000,
0x00170000, 0x002b0000, 0x00040000, 0x007e0000,
0x00ba0000, 0x00770000, 0x00d60000, 0x00260000,
0x00e10000, 0x00690000, 0x00140000, 0x00630000,
0x00550000, 0x00210000, 0x000c0000, 0x007d0000,
}, {
0x52000000, 0x09000000, 0x6a000000, 0xd5000000,
0x30000000, 0x36000000, 0xa5000000, 0x38000000,
0xbf000000, 0x40000000, 0xa3000000, 0x9e000000,
0x81000000, 0xf3000000, 0xd7000000, 0xfb000000,
0x7c000000, 0xe3000000, 0x39000000, 0x82000000,
0x9b000000, 0x2f000000, 0xff000000, 0x87000000,
0x34000000, 0x8e000000, 0x43000000, 0x44000000,
0xc4000000, 0xde000000, 0xe9000000, 0xcb000000,
0x54000000, 0x7b000000, 0x94000000, 0x32000000,
0xa6000000, 0xc2000000, 0x23000000, 0x3d000000,
0xee000000, 0x4c000000, 0x95000000, 0x0b000000,
0x42000000, 0xfa000000, 0xc3000000, 0x4e000000,
0x08000000, 0x2e000000, 0xa1000000, 0x66000000,
0x28000000, 0xd9000000, 0x24000000, 0xb2000000,
0x76000000, 0x5b000000, 0xa2000000, 0x49000000,
0x6d000000, 0x8b000000, 0xd1000000, 0x25000000,
0x72000000, 0xf8000000, 0xf6000000, 0x64000000,
0x86000000, 0x68000000, 0x98000000, 0x16000000,
0xd4000000, 0xa4000000, 0x5c000000, 0xcc000000,
0x5d000000, 0x65000000, 0xb6000000, 0x92000000,
0x6c000000, 0x70000000, 0x48000000, 0x50000000,
0xfd000000, 0xed000000, 0xb9000000, 0xda000000,
0x5e000000, 0x15000000, 0x46000000, 0x57000000,
0xa7000000, 0x8d000000, 0x9d000000, 0x84000000,
0x90000000, 0xd8000000, 0xab000000, 0x00000000,
0x8c000000, 0xbc000000, 0xd3000000, 0x0a000000,
0xf7000000, 0xe4000000, 0x58000000, 0x05000000,
0xb8000000, 0xb3000000, 0x45000000, 0x06000000,
0xd0000000, 0x2c000000, 0x1e000000, 0x8f000000,
0xca000000, 0x3f000000, 0x0f000000, 0x02000000,
0xc1000000, 0xaf000000, 0xbd000000, 0x03000000,
0x01000000, 0x13000000, 0x8a000000, 0x6b000000,
0x3a000000, 0x91000000, 0x11000000, 0x41000000,
0x4f000000, 0x67000000, 0xdc000000, 0xea000000,
0x97000000, 0xf2000000, 0xcf000000, 0xce000000,
0xf0000000, 0xb4000000, 0xe6000000, 0x73000000,
0x96000000, 0xac000000, 0x74000000, 0x22000000,
0xe7000000, 0xad000000, 0x35000000, 0x85000000,
0xe2000000, 0xf9000000, 0x37000000, 0xe8000000,
0x1c000000, 0x75000000, 0xdf000000, 0x6e000000,
0x47000000, 0xf1000000, 0x1a000000, 0x71000000,
0x1d000000, 0x29000000, 0xc5000000, 0x89000000,
0x6f000000, 0xb7000000, 0x62000000, 0x0e000000,
0xaa000000, 0x18000000, 0xbe000000, 0x1b000000,
0xfc000000, 0x56000000, 0x3e000000, 0x4b000000,
0xc6000000, 0xd2000000, 0x79000000, 0x20000000,
0x9a000000, 0xdb000000, 0xc0000000, 0xfe000000,
0x78000000, 0xcd000000, 0x5a000000, 0xf4000000,
0x1f000000, 0xdd000000, 0xa8000000, 0x33000000,
0x88000000, 0x07000000, 0xc7000000, 0x31000000,
0xb1000000, 0x12000000, 0x10000000, 0x59000000,
0x27000000, 0x80000000, 0xec000000, 0x5f000000,
0x60000000, 0x51000000, 0x7f000000, 0xa9000000,
0x19000000, 0xb5000000, 0x4a000000, 0x0d000000,
0x2d000000, 0xe5000000, 0x7a000000, 0x9f000000,
0x93000000, 0xc9000000, 0x9c000000, 0xef000000,
0xa0000000, 0xe0000000, 0x3b000000, 0x4d000000,
0xae000000, 0x2a000000, 0xf5000000, 0xb0000000,
0xc8000000, 0xeb000000, 0xbb000000, 0x3c000000,
0x83000000, 0x53000000, 0x99000000, 0x61000000,
0x17000000, 0x2b000000, 0x04000000, 0x7e000000,
0xba000000, 0x77000000, 0xd6000000, 0x26000000,
0xe1000000, 0x69000000, 0x14000000, 0x63000000,
0x55000000, 0x21000000, 0x0c000000, 0x7d000000,
}
};
EXPORT_SYMBOL_GPL(crypto_ft_tab);
EXPORT_SYMBOL_GPL(crypto_it_tab);
/**
* crypto_aes_set_key - Set the AES key.
* @tfm: The %crypto_tfm that is used in the context.
* @in_key: The input key.
* @key_len: The size of the key.
*
* This function uses aes_expandkey() to expand the key. &crypto_aes_ctx
* _must_ be the private data embedded in @tfm which is retrieved with
* crypto_tfm_ctx().
*
* Return: 0 on success; -EINVAL on failure (only happens for bad key lengths)
*/
int crypto_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
return aes_expandkey(ctx, in_key, key_len);
}
EXPORT_SYMBOL_GPL(crypto_aes_set_key);
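/*
 * Usage sketch (illustrative only, error handling elided): in-kernel
 * users normally drive this cipher through the single-block cipher
 * API rather than calling crypto_aes_set_key() directly:
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *	u8 key[AES_KEYSIZE_128] = { }, in[AES_BLOCK_SIZE] = { };
 *	u8 out[AES_BLOCK_SIZE];
 *
 *	crypto_cipher_setkey(tfm, key, sizeof(key));
 *	crypto_cipher_encrypt_one(tfm, out, in);
 *	crypto_free_cipher(tfm);
 */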
/* encrypt a block of text */
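/*
 * Each f_rn() step below assembles one 32-bit output column from four
 * table lookups -- one byte of the row-shifted input state per
 * table -- XORed with one round-key word. The four crypto_ft_tab
 * tables are byte-rotated copies of one another, so the rotations of
 * the textbook T-table formulation are folded into the tables.
 */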
#define f_rn(bo, bi, n, k) do { \
bo[n] = crypto_ft_tab[0][byte(bi[n], 0)] ^ \
crypto_ft_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
crypto_ft_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_ft_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
} while (0)
#define f_nround(bo, bi, k) do {\
f_rn(bo, bi, 0, k); \
f_rn(bo, bi, 1, k); \
f_rn(bo, bi, 2, k); \
f_rn(bo, bi, 3, k); \
k += 4; \
} while (0)
#define f_rl(bo, bi, n, k) do { \
bo[n] = crypto_fl_tab[0][byte(bi[n], 0)] ^ \
crypto_fl_tab[1][byte(bi[(n + 1) & 3], 1)] ^ \
crypto_fl_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_fl_tab[3][byte(bi[(n + 3) & 3], 3)] ^ *(k + n); \
} while (0)
#define f_lround(bo, bi, k) do {\
f_rl(bo, bi, 0, k); \
f_rl(bo, bi, 1, k); \
f_rl(bo, bi, 2, k); \
f_rl(bo, bi, 3, k); \
} while (0)
static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
u32 b0[4], b1[4];
const u32 *kp = ctx->key_enc + 4;
const int key_len = ctx->key_length;
b0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
b0[1] = ctx->key_enc[1] ^ get_unaligned_le32(in + 4);
b0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
b0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
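/*
 * Round ladder: the two key_len tests below add the extra round pairs
 * for AES-256 (14 rounds) and AES-192 (12 rounds); the nine
 * unconditional f_nround() calls plus the final f_lround() provide
 * the 10 rounds common to all key sizes.
 */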
if (key_len > 24) {
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
}
if (key_len > 16) {
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
}
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_nround(b0, b1, kp);
f_nround(b1, b0, kp);
f_lround(b0, b1, kp);
put_unaligned_le32(b0[0], out);
put_unaligned_le32(b0[1], out + 4);
put_unaligned_le32(b0[2], out + 8);
put_unaligned_le32(b0[3], out + 12);
}
/* decrypt a block of text */
#define i_rn(bo, bi, n, k) do { \
bo[n] = crypto_it_tab[0][byte(bi[n], 0)] ^ \
crypto_it_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
crypto_it_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_it_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
} while (0)
#define i_nround(bo, bi, k) do {\
i_rn(bo, bi, 0, k); \
i_rn(bo, bi, 1, k); \
i_rn(bo, bi, 2, k); \
i_rn(bo, bi, 3, k); \
k += 4; \
} while (0)
#define i_rl(bo, bi, n, k) do { \
bo[n] = crypto_il_tab[0][byte(bi[n], 0)] ^ \
crypto_il_tab[1][byte(bi[(n + 3) & 3], 1)] ^ \
crypto_il_tab[2][byte(bi[(n + 2) & 3], 2)] ^ \
crypto_il_tab[3][byte(bi[(n + 1) & 3], 3)] ^ *(k + n); \
} while (0)
#define i_lround(bo, bi, k) do {\
i_rl(bo, bi, 0, k); \
i_rl(bo, bi, 1, k); \
i_rl(bo, bi, 2, k); \
i_rl(bo, bi, 3, k); \
} while (0)
static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
u32 b0[4], b1[4];
const int key_len = ctx->key_length;
const u32 *kp = ctx->key_dec + 4;
b0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
b0[1] = ctx->key_dec[1] ^ get_unaligned_le32(in + 4);
b0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
b0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
if (key_len > 24) {
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
}
if (key_len > 16) {
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
}
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_nround(b0, b1, kp);
i_nround(b1, b0, kp);
i_lround(b0, b1, kp);
put_unaligned_le32(b0[0], out);
put_unaligned_le32(b0[1], out + 4);
put_unaligned_le32(b0[2], out + 8);
put_unaligned_le32(b0[3], out + 12);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = crypto_aes_set_key,
.cia_encrypt = crypto_aes_encrypt,
.cia_decrypt = crypto_aes_decrypt
}
}
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
subsys_initcall(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_CRYPTO("aes");
MODULE_ALIAS_CRYPTO("aes-generic");
| linux-master | crypto/aes_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* ARIA Cipher Algorithm.
*
* Documentation of ARIA can be found in RFC 5794.
* Copyright (c) 2022 Taehee Yoo <[email protected]>
*
* Information for ARIA
* http://210.104.33.10/ARIA/index-e.html (English)
* http://seed.kisa.or.kr/ (Korean)
*
* Public domain version is distributed above.
*/
#include <crypto/aria.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0,
0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e,
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0
};
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
int rkidx = 0;
ck = &key_rc[(key_len - 16) / 2];
w0[0] = be32_to_cpu(key[0]);
w0[1] = be32_to_cpu(key[1]);
w0[2] = be32_to_cpu(key[2]);
w0[3] = be32_to_cpu(key[3]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
reg2 = w0[2] ^ ck[2];
reg3 = w0[3] ^ ck[3];
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
w1[0] = be32_to_cpu(key[4]);
w1[1] = be32_to_cpu(key[5]);
if (key_len > 24) {
w1[2] = be32_to_cpu(key[6]);
w1[3] = be32_to_cpu(key[7]);
} else {
w1[2] = 0;
w1[3] = 0;
}
} else {
w1[0] = 0;
w1[1] = 0;
w1[2] = 0;
w1[3] = 0;
}
w1[0] ^= reg0;
w1[1] ^= reg1;
w1[2] ^= reg2;
w1[3] ^= reg3;
reg0 = w1[0];
reg1 = w1[1];
reg2 = w1[2];
reg3 = w1[3];
reg0 ^= ck[4];
reg1 ^= ck[5];
reg2 ^= ck[6];
reg3 ^= ck[7];
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
reg0 ^= w0[0];
reg1 ^= w0[1];
reg2 ^= w0[2];
reg3 ^= w0[3];
w2[0] = reg0;
w2[1] = reg1;
w2[2] = reg2;
w2[3] = reg3;
reg0 ^= ck[8];
reg1 ^= ck[9];
reg2 ^= ck[10];
reg3 ^= ck[11];
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
w3[0] = reg0 ^ w1[0];
w3[1] = reg1 ^ w1[1];
w3[2] = reg2 ^ w1[2];
w3[3] = reg3 ^ w1[3];
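/*
 * The encryption round keys are now derived from the intermediate
 * words: each one combines two of W0..W3, one rotated right by 19,
 * 31, 67, 97 or 109 bits as prescribed by RFC 5794 section 2.4.2.
 * Only as many keys are generated as the key size needs: 13, 15 or
 * 17 (rounds + 1) for 128-, 192- and 256-bit keys.
 */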
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97);
if (key_len > 16) {
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97);
if (key_len > 24) {
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109);
}
}
}
static void aria_set_decrypt_key(struct aria_ctx *ctx)
{
int i;
for (i = 0; i < 4; i++) {
ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i];
ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i];
}
for (i = 1; i < ctx->rounds; i++) {
ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]);
ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]);
ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]);
ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]);
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
aria_diff_byte(&ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
}
}
int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
BUILD_BUG_ON(sizeof(ctx->enc_key) != 272);
BUILD_BUG_ON(sizeof(ctx->dec_key) != 272);
BUILD_BUG_ON(sizeof(int) != sizeof(ctx->rounds));
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;
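/* 12, 14 or 16 rounds for 128-, 192- and 256-bit keys respectively */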
aria_set_encrypt_key(ctx, in_key, key_len);
aria_set_decrypt_key(ctx);
return 0;
}
EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
reg0 = be32_to_cpu(src[0]);
reg1 = be32_to_cpu(src[1]);
reg2 = be32_to_cpu(src[2]);
reg3 = be32_to_cpu(src[3]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
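/*
 * The remaining rounds run in pairs of an even-type and an odd-type
 * substitution/diffusion layer. The loop stops one round early: the
 * last round omits the diffusion layer, so it is performed below as
 * a bare S-box substitution XORed with the final round key.
 */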
while ((rounds -= 2) > 0) {
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
}
reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]),
(u8)(x2[get_u8(reg0, 1)] >> 8),
(u8)(s1[get_u8(reg0, 2)]),
(u8)(s2[get_u8(reg0, 3)]));
reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]),
(u8)(x2[get_u8(reg1, 1)] >> 8),
(u8)(s1[get_u8(reg1, 2)]),
(u8)(s2[get_u8(reg1, 3)]));
reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]),
(u8)(x2[get_u8(reg2, 1)] >> 8),
(u8)(s1[get_u8(reg2, 2)]),
(u8)(s2[get_u8(reg2, 3)]));
reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]),
(u8)(x2[get_u8(reg3, 1)] >> 8),
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
dst[0] = cpu_to_be32(reg0);
dst[1] = cpu_to_be32(reg1);
dst[2] = cpu_to_be32(reg2);
dst[3] = cpu_to_be32(reg3);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->enc_key);
}
EXPORT_SYMBOL_GPL(aria_encrypt);
void aria_decrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->dec_key);
}
EXPORT_SYMBOL_GPL(aria_decrypt);
static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->enc_key);
}
static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->dec_key);
}
static struct crypto_alg aria_alg = {
.cra_name = "aria",
.cra_driver_name = "aria-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
.cia_setkey = aria_set_key,
.cia_encrypt = __aria_encrypt,
.cia_decrypt = __aria_decrypt
}
}
};
static int __init aria_init(void)
{
return crypto_register_alg(&aria_alg);
}
static void __exit aria_fini(void)
{
crypto_unregister_alg(&aria_alg);
}
subsys_initcall(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <[email protected]>");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-generic");
| linux-master | crypto/aria_generic.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Scalar fixed time AES core transform
*
* Copyright (C) 2017 Linaro Ltd <[email protected]>
*/
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
static int aesti_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
return aes_expandkey(ctx, in_key, key_len);
}
static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags;
/*
* Temporarily disable interrupts to avoid races where cachelines are
* evicted when the CPU is interrupted to do something else.
*/
local_irq_save(flags);
aes_encrypt(ctx, out, in);
local_irq_restore(flags);
}
static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned long flags;
/*
* Temporarily disable interrupts to avoid races where cachelines are
* evicted when the CPU is interrupted to do something else.
*/
local_irq_save(flags);
aes_decrypt(ctx, out, in);
local_irq_restore(flags);
}
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-fixed-time",
.cra_priority = 100 + 1,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
.cra_module = THIS_MODULE,
.cra_cipher.cia_min_keysize = AES_MIN_KEY_SIZE,
.cra_cipher.cia_max_keysize = AES_MAX_KEY_SIZE,
.cra_cipher.cia_setkey = aesti_set_key,
.cra_cipher.cia_encrypt = aesti_encrypt,
.cra_cipher.cia_decrypt = aesti_decrypt
};
static int __init aes_init(void)
{
return crypto_register_alg(&aes_alg);
}
static void __exit aes_fini(void)
{
crypto_unregister_alg(&aes_alg);
}
module_init(aes_init);
module_exit(aes_fini);
MODULE_DESCRIPTION("Generic fixed time AES");
MODULE_AUTHOR("Ard Biesheuvel <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | crypto/aes_ti.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SP800-108 Key-derivation function
*
* Copyright (C) 2021, Stephan Mueller <[email protected]>
*/
#include <linux/fips.h>
#include <linux/module.h>
#include <crypto/kdf_sp800108.h>
#include <crypto/internal/kdf_selftest.h>
/*
* SP800-108 CTR KDF implementation
*/
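/*
 * Each PRF invocation below yields one output block:
 *
 *	K(i) = PRF(key, [i]_32 || info[0] || ... || info[n-1]), i = 1, 2, ...
 *
 * The caller packs the fixed input data (label, 0x00 separator,
 * context, [L]_2) into the info kvecs; the blocks are concatenated
 * into dst and a partial final block is truncated.
 */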
int crypto_kdf108_ctr_generate(struct crypto_shash *kmd,
const struct kvec *info, unsigned int info_nvec,
u8 *dst, unsigned int dlen)
{
SHASH_DESC_ON_STACK(desc, kmd);
__be32 counter = cpu_to_be32(1);
const unsigned int h = crypto_shash_digestsize(kmd), dlen_orig = dlen;
unsigned int i;
int err = 0;
u8 *dst_orig = dst;
desc->tfm = kmd;
while (dlen) {
err = crypto_shash_init(desc);
if (err)
goto out;
err = crypto_shash_update(desc, (u8 *)&counter, sizeof(__be32));
if (err)
goto out;
for (i = 0; i < info_nvec; i++) {
err = crypto_shash_update(desc, info[i].iov_base,
info[i].iov_len);
if (err)
goto out;
}
if (dlen < h) {
u8 tmpbuffer[HASH_MAX_DIGESTSIZE];
err = crypto_shash_final(desc, tmpbuffer);
if (err)
goto out;
memcpy(dst, tmpbuffer, dlen);
memzero_explicit(tmpbuffer, h);
goto out;
}
err = crypto_shash_final(desc, dst);
if (err)
goto out;
dlen -= h;
dst += h;
counter = cpu_to_be32(be32_to_cpu(counter) + 1);
}
out:
if (err)
memzero_explicit(dst_orig, dlen_orig);
shash_desc_zero(desc);
return err;
}
EXPORT_SYMBOL(crypto_kdf108_ctr_generate);
/*
* The seeding of the KDF
*/
int crypto_kdf108_setkey(struct crypto_shash *kmd,
const u8 *key, size_t keylen,
const u8 *ikm, size_t ikmlen)
{
unsigned int ds = crypto_shash_digestsize(kmd);
/* SP800-108 does not support IKM */
if (ikm || ikmlen)
return -EINVAL;
/* Check according to SP800-108 section 7.2 */
if (ds > keylen)
return -EINVAL;
/* Set the key for the MAC used for the KDF. */
return crypto_shash_setkey(kmd, key, keylen);
}
EXPORT_SYMBOL(crypto_kdf108_setkey);
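/*
 * Usage sketch (illustrative only, error handling elided; "context"
 * and "context_len" stand for caller-provided fixed input data):
 *
 *	struct crypto_shash *kmd = crypto_alloc_shash("hmac(sha256)", 0, 0);
 *	struct kvec info = { .iov_base = context, .iov_len = context_len };
 *	u8 out[32];
 *
 *	crypto_kdf108_setkey(kmd, key, keylen, NULL, 0);
 *	crypto_kdf108_ctr_generate(kmd, &info, 1, out, sizeof(out));
 *	crypto_free_shash(kmd);
 */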
/*
* Test vector obtained from
* http://csrc.nist.gov/groups/STM/cavp/documents/KBKDF800-108/CounterMode.zip
*/
static const struct kdf_testvec kdf_ctr_hmac_sha256_tv_template[] = {
{
.key = "\xdd\x1d\x91\xb7\xd9\x0b\x2b\xd3"
"\x13\x85\x33\xce\x92\xb2\x72\xfb"
"\xf8\xa3\x69\x31\x6a\xef\xe2\x42"
"\xe6\x59\xcc\x0a\xe2\x38\xaf\xe0",
.keylen = 32,
.ikm = NULL,
.ikmlen = 0,
.info = {
.iov_base = "\x01\x32\x2b\x96\xb3\x0a\xcd\x19"
"\x79\x79\x44\x4e\x46\x8e\x1c\x5c"
"\x68\x59\xbf\x1b\x1c\xf9\x51\xb7"
"\xe7\x25\x30\x3e\x23\x7e\x46\xb8"
"\x64\xa1\x45\xfa\xb2\x5e\x51\x7b"
"\x08\xf8\x68\x3d\x03\x15\xbb\x29"
"\x11\xd8\x0a\x0e\x8a\xba\x17\xf3"
"\xb4\x13\xfa\xac",
.iov_len = 60
},
.expected = "\x10\x62\x13\x42\xbf\xb0\xfd\x40"
"\x04\x6c\x0e\x29\xf2\xcf\xdb\xf0",
.expectedlen = 16
}
};
static int __init crypto_kdf108_init(void)
{
int ret;
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return 0;
ret = kdf_test(&kdf_ctr_hmac_sha256_tv_template[0], "hmac(sha256)",
crypto_kdf108_setkey, crypto_kdf108_ctr_generate);
if (ret) {
if (fips_enabled)
panic("alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
ret);
WARN(1,
"alg: self-tests for CTR-KDF (hmac(sha256)) failed (rc=%d)\n",
ret);
} else if (fips_enabled) {
pr_info("alg: self-tests for CTR-KDF (hmac(sha256)) passed\n");
}
return ret;
}
static void __exit crypto_kdf108_exit(void) { }
module_init(crypto_kdf108_init);
module_exit(crypto_kdf108_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("Key Derivation Function conformant to SP800-108");
| linux-master | crypto/kdf_sp800108.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* algif_aead: User-space interface for AEAD algorithms
*
* Copyright (C) 2014, Stephan Mueller <[email protected]>
*
* This file provides the user-space API for AEAD ciphers.
*
* The following concept of the memory management is used:
*
* The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
* filled by user space with the data submitted via sendmsg (maybe with
* MSG_SPLICE_PAGES). Filling up the TX SGL does not cause a crypto operation
* -- the data will only be tracked by the kernel. Upon receipt of one recvmsg
* call, the caller must provide a buffer which is tracked with the RX SGL.
*
* During the processing of the recvmsg operation, the cipher request is
* allocated and prepared. As part of the recvmsg operation, the processed
* TX buffers are extracted from the TX SGL into a separate SGL.
*
* After the completion of the crypto operation, the RX SGL and the cipher
* request is released. The extracted TX SGL parts are released together with
* the RX SGL release.
*/
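/*
 * A minimal user-space sketch of this interface (illustrative only,
 * error handling elided; "gcm(aes)" is just one example algorithm):
 *
 *	struct sockaddr_alg sa = {
 *		.salg_family = AF_ALG,
 *		.salg_type = "aead",
 *		.salg_name = "gcm(aes)"
 *	};
 *	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
 *
 *	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 16);
 *	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
 *	int opfd = accept(tfmfd, NULL, 0);
 *
 * The operation (ALG_OP_ENCRYPT or ALG_OP_DECRYPT), the IV and the
 * AAD length then travel as ALG_SET_OP, ALG_SET_IV and
 * ALG_SET_AEAD_ASSOCLEN control messages on sendmsg(opfd, ...), and
 * the result is collected with recvmsg(opfd, ...).
 */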
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/net.h>
#include <net/sock.h>
struct aead_tfm {
struct crypto_aead *aead;
struct crypto_sync_skcipher *null_tfm;
};
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int as = crypto_aead_authsize(tfm);
/*
* The minimum amount of memory needed for an AEAD cipher is
* the AAD and in case of decryption the tag.
*/
return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
}
static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_sync_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
size_t usedpages = 0; /* [in] RX bufs to be used from user */
size_t processed = 0; /* [in] TX bufs to be consumed */
if (!ctx->init || ctx->more) {
err = af_alg_wait_for_data(sk, flags, 0);
if (err)
return err;
}
/*
* Data length provided by caller via sendmsg that has not yet been
* processed.
*/
used = ctx->used;
/*
* Make sure sufficient data is present -- note, the same check is also
* present in sendmsg. The checks in sendmsg inform the data sender
* that something is wrong, but they are irrelevant for maintaining
* kernel integrity. We need this check
* here too in case user space decides to not honor the error message
* in sendmsg and still call recvmsg. This check here protects the
* kernel integrity.
*/
if (!aead_sufficient_data(sk))
return -EINVAL;
/*
* Calculate the minimum output buffer size holding the result of the
* cipher operation. When encrypting data, the receiving buffer is
* larger by the tag length compared to the input buffer as the
* encryption operation generates the tag. For decryption, the input
* buffer provides the tag which is consumed resulting in only the
* plaintext without a buffer for the tag returned to the caller.
*/
if (ctx->enc)
outlen = used + as;
else
outlen = used - as;
/*
* The cipher operation input data is reduced by the associated data
* length as this data is processed separately later on.
*/
used -= ctx->aead_assoclen;
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_aead_reqsize(tfm));
if (IS_ERR(areq))
return PTR_ERR(areq);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
if (err)
goto free;
/*
* Ensure output buffer is sufficiently large. If the caller provides
* less buffer space, only use the relative required input size. This
* allows AIO operation where the caller sent all data to be processed
* and the AIO operation performs the operation on the different chunks
* of the input data.
*/
if (usedpages < outlen) {
size_t less = outlen - usedpages;
if (used < less) {
err = -EINVAL;
goto free;
}
used -= less;
outlen -= less;
}
processed = used + ctx->aead_assoclen;
list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
for (i = 0; i < tsgl->cur; i++) {
struct scatterlist *process_sg = tsgl->sg + i;
if (!(process_sg->length) || !sg_page(process_sg))
continue;
tsgl_src = process_sg;
break;
}
if (tsgl_src)
break;
}
if (processed && !tsgl_src) {
err = -EFAULT;
goto free;
}
/*
* Copy of AAD from source to destination
*
* The AAD is copied to the destination buffer without change. Even
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such an in-place
* operation is initiated.
*
* To ensure efficiency, the following implementation ensures that the
* ciphers are invoked to perform the crypto operation in-place. This
* is achieved by the memory management specified as follows.
*/
/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
if (ctx->enc) {
/*
* Encryption operation - The in-place cipher operation is
* achieved by the following operation:
*
* TX SGL: AAD || PT
* | |
* | copy |
* v v
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sgt.sgl,
processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
* Decryption operation - To achieve an in-place cipher
* operation, the following SGL structure is used:
*
* TX SGL: AAD || CT || Tag
* | | ^
* | copy | | Create SGL link.
* v v |
* RX SGL: AAD || CT ----+
*/
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sgt.sgl,
outlen);
if (err)
goto free;
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
processed - as);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
/* Release TX SGL, except for tag data and reassign tag data. */
af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
/* chain the areq TX SGL holding the tag with RX SGL */
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
struct scatterlist *sg = sgl_prev->sgt.sgl;
sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
rsgl_src = areq->tsgl;
}
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
sock_hold(sk);
areq->iocb = msg->msg_iocb;
/* Remember output size that will be generated. */
areq->outlen = outlen;
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
/* AIO operation in progress */
if (err == -EINPROGRESS)
return -EIOCBQUEUED;
sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &ctx->wait);
err = crypto_wait_req(ctx->enc ?
crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req),
&ctx->wait);
}
free:
af_alg_free_resources(areq);
return err ? err : outlen;
}
static int aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
struct sock *sk = sock->sk;
int ret = 0;
lock_sock(sk);
while (msg_data_left(msg)) {
int err = _aead_recvmsg(sock, msg, ignored, flags);
/*
* This error covers -EIOCBQUEUED which implies that we can
* only handle one AIO request. If the caller wants to have
* multiple AIO requests in parallel, it must make multiple
* separate AIO calls.
*
* Also return the error if no data has been processed so far.
*/
if (err <= 0) {
if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
ret = err;
goto out;
}
ret += err;
}
out:
af_alg_wmem_wakeup(sk);
release_sock(sk);
return ret;
}
static struct proto_ops algif_aead_ops = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = aead_sendmsg,
.recvmsg = aead_recvmsg,
.poll = af_alg_poll,
};
static int aead_check_key(struct socket *sock)
{
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct aead_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
lock_sock(sk);
if (!atomic_read(&ask->nokey_refcnt))
goto unlock_child;
psk = ask->parent;
pask = alg_sk(ask->parent);
tfm = pask->private;
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
atomic_dec(&pask->nokey_refcnt);
atomic_set(&ask->nokey_refcnt, 0);
err = 0;
unlock:
release_sock(psk);
unlock_child:
release_sock(sk);
return err;
}
static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t size)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_sendmsg(sock, msg, size);
}
static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
int err;
err = aead_check_key(sock);
if (err)
return err;
return aead_recvmsg(sock, msg, ignored, flags);
}
static struct proto_ops algif_aead_ops_nokey = {
.family = PF_ALG,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.getname = sock_no_getname,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.mmap = sock_no_mmap,
.bind = sock_no_bind,
.accept = sock_no_accept,
.release = af_alg_release,
.sendmsg = aead_sendmsg_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
struct crypto_sync_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
return ERR_CAST(aead);
}
null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
return ERR_CAST(null_tfm);
}
tfm->aead = aead;
tfm->null_tfm = null_tfm;
return tfm;
}
static void aead_release(void *private)
{
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
crypto_put_default_null_skcipher();
kfree(tfm);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
struct aead_tfm *tfm = private;
return crypto_aead_setauthsize(tfm->aead, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
unsigned int len = sizeof(*ctx);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
memset(ctx, 0, len);
ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}
memset(ctx->iv, 0, ivlen);
INIT_LIST_HEAD(&ctx->tsgl_list);
ctx->len = len;
crypto_init_wait(&ctx->wait);
ask->private = ctx;
sk->sk_destruct = aead_sock_destruct;
return 0;
}
static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
}
static const struct af_alg_type algif_type_aead = {
.bind = aead_bind,
.release = aead_release,
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.accept = aead_accept_parent,
.accept_nokey = aead_accept_parent_nokey,
.ops = &algif_aead_ops,
.ops_nokey = &algif_aead_ops_nokey,
.name = "aead",
.owner = THIS_MODULE
};
static int __init algif_aead_init(void)
{
return af_alg_register_type(&algif_type_aead);
}
static void __exit algif_aead_exit(void)
{
int err = af_alg_unregister_type(&algif_type_aead);
BUG_ON(err);
}
module_init(algif_aead_init);
module_exit(algif_aead_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Stephan Mueller <[email protected]>");
MODULE_DESCRIPTION("AEAD kernel crypto API user space interface");
| linux-master | crypto/algif_aead.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* RSA asymmetric public-key algorithm [RFC3447]
*
* Copyright (c) 2015, Intel Corporation
* Authors: Tadeusz Struk <[email protected]>
*/
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/mpi.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/algapi.h>
struct rsa_mpi_key {
MPI n;
MPI e;
MPI d;
MPI p;
MPI q;
MPI dp;
MPI dq;
MPI qinv;
};
/*
* RSAEP function [RFC3447 sec 5.1.1]
* c = m^e mod n;
*/
static int _rsa_enc(const struct rsa_mpi_key *key, MPI c, MPI m)
{
/* (1) Validate 0 <= m < n */
if (mpi_cmp_ui(m, 0) < 0 || mpi_cmp(m, key->n) >= 0)
return -EINVAL;
/* (2) c = m^e mod n */
return mpi_powm(c, m, key->e, key->n);
}
/*
* RSADP function [RFC3447 sec 5.1.2]
* m_1 = c^dP mod p;
* m_2 = c^dQ mod q;
* h = (m_1 - m_2) * qInv mod p;
* m = m_2 + q * h;
*/
static int _rsa_dec_crt(const struct rsa_mpi_key *key, MPI m_or_m1_or_h, MPI c)
{
MPI m2, m12_or_qh;
int ret = -ENOMEM;
/* (1) Validate 0 <= c < n */
if (mpi_cmp_ui(c, 0) < 0 || mpi_cmp(c, key->n) >= 0)
return -EINVAL;
m2 = mpi_alloc(0);
m12_or_qh = mpi_alloc(0);
if (!m2 || !m12_or_qh)
goto err_free_mpi;
/* (2i) m_1 = c^dP mod p */
ret = mpi_powm(m_or_m1_or_h, c, key->dp, key->p);
if (ret)
goto err_free_mpi;
/* (2i) m_2 = c^dQ mod q */
ret = mpi_powm(m2, c, key->dq, key->q);
if (ret)
goto err_free_mpi;
/* (2iii) h = (m_1 - m_2) * qInv mod p */
mpi_sub(m12_or_qh, m_or_m1_or_h, m2);
mpi_mulm(m_or_m1_or_h, m12_or_qh, key->qinv, key->p);
/* (2iv) m = m_2 + q * h */
mpi_mul(m12_or_qh, key->q, m_or_m1_or_h);
mpi_addm(m_or_m1_or_h, m2, m12_or_qh, key->n);
ret = 0;
err_free_mpi:
mpi_free(m12_or_qh);
mpi_free(m2);
return ret;
}
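/*
 * Worked example with tiny textbook numbers (illustration only):
 * p = 5, q = 11, n = 55, e = 3, d = 27, hence dP = 3, dQ = 7, qInv = 1.
 * Decrypting c = 8: m_1 = 8^3 mod 5 = 2, m_2 = 8^7 mod 11 = 2,
 * h = (2 - 2) * 1 mod 5 = 0, m = 2 + 11 * 0 = 2 -- matching the
 * encryption 2^3 mod 55 = 8.
 */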
static inline struct rsa_mpi_key *rsa_get_key(struct crypto_akcipher *tfm)
{
return akcipher_tfm_ctx(tfm);
}
static int rsa_enc(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI m, c = mpi_alloc(0);
int ret = 0;
int sign;
if (!c)
return -ENOMEM;
if (unlikely(!pkey->n || !pkey->e)) {
ret = -EINVAL;
goto err_free_c;
}
ret = -ENOMEM;
m = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!m)
goto err_free_c;
ret = _rsa_enc(pkey, c, m);
if (ret)
goto err_free_m;
ret = mpi_write_to_sgl(c, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_m;
if (sign < 0)
ret = -EBADMSG;
err_free_m:
mpi_free(m);
err_free_c:
mpi_free(c);
return ret;
}
static int rsa_dec(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
const struct rsa_mpi_key *pkey = rsa_get_key(tfm);
MPI c, m = mpi_alloc(0);
int ret = 0;
int sign;
if (!m)
return -ENOMEM;
if (unlikely(!pkey->n || !pkey->d)) {
ret = -EINVAL;
goto err_free_m;
}
ret = -ENOMEM;
c = mpi_read_raw_from_sgl(req->src, req->src_len);
if (!c)
goto err_free_m;
ret = _rsa_dec_crt(pkey, m, c);
if (ret)
goto err_free_c;
ret = mpi_write_to_sgl(m, req->dst, req->dst_len, &sign);
if (ret)
goto err_free_c;
if (sign < 0)
ret = -EBADMSG;
err_free_c:
mpi_free(c);
err_free_m:
mpi_free(m);
return ret;
}
static void rsa_free_mpi_key(struct rsa_mpi_key *key)
{
mpi_free(key->d);
mpi_free(key->e);
mpi_free(key->n);
mpi_free(key->p);
mpi_free(key->q);
mpi_free(key->dp);
mpi_free(key->dq);
mpi_free(key->qinv);
key->d = NULL;
key->e = NULL;
key->n = NULL;
key->p = NULL;
key->q = NULL;
key->dp = NULL;
key->dq = NULL;
key->qinv = NULL;
}
static int rsa_check_key_length(unsigned int len)
{
switch (len) {
case 512:
case 1024:
case 1536:
if (fips_enabled)
return -EINVAL;
fallthrough;
case 2048:
case 3072:
case 4096:
return 0;
}
return -EINVAL;
}
static int rsa_check_exponent_fips(MPI e)
{
MPI e_max = NULL;
/* check if odd */
if (!mpi_test_bit(e, 0)) {
return -EINVAL;
}
/* check if 2^16 < e < 2^256. */
if (mpi_cmp_ui(e, 65536) <= 0) {
return -EINVAL;
}
e_max = mpi_alloc(0);
mpi_set_bit(e_max, 256);
if (mpi_cmp(e, e_max) >= 0) {
mpi_free(e_max);
return -EINVAL;
}
mpi_free(e_max);
return 0;
}
static int rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
struct rsa_key raw_key = {0};
int ret;
/* Free the old MPI key if any */
rsa_free_mpi_key(mpi_key);
ret = rsa_parse_pub_key(&raw_key, key, keylen);
if (ret)
return ret;
mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
if (!mpi_key->e)
goto err;
mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
if (!mpi_key->n)
goto err;
if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
rsa_free_mpi_key(mpi_key);
return -EINVAL;
}
if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
rsa_free_mpi_key(mpi_key);
return -EINVAL;
}
return 0;
err:
rsa_free_mpi_key(mpi_key);
return -ENOMEM;
}
static int rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct rsa_mpi_key *mpi_key = akcipher_tfm_ctx(tfm);
struct rsa_key raw_key = {0};
int ret;
/* Free the old MPI key if any */
rsa_free_mpi_key(mpi_key);
ret = rsa_parse_priv_key(&raw_key, key, keylen);
if (ret)
return ret;
mpi_key->d = mpi_read_raw_data(raw_key.d, raw_key.d_sz);
if (!mpi_key->d)
goto err;
mpi_key->e = mpi_read_raw_data(raw_key.e, raw_key.e_sz);
if (!mpi_key->e)
goto err;
mpi_key->n = mpi_read_raw_data(raw_key.n, raw_key.n_sz);
if (!mpi_key->n)
goto err;
mpi_key->p = mpi_read_raw_data(raw_key.p, raw_key.p_sz);
if (!mpi_key->p)
goto err;
mpi_key->q = mpi_read_raw_data(raw_key.q, raw_key.q_sz);
if (!mpi_key->q)
goto err;
mpi_key->dp = mpi_read_raw_data(raw_key.dp, raw_key.dp_sz);
if (!mpi_key->dp)
goto err;
mpi_key->dq = mpi_read_raw_data(raw_key.dq, raw_key.dq_sz);
if (!mpi_key->dq)
goto err;
mpi_key->qinv = mpi_read_raw_data(raw_key.qinv, raw_key.qinv_sz);
if (!mpi_key->qinv)
goto err;
if (rsa_check_key_length(mpi_get_size(mpi_key->n) << 3)) {
rsa_free_mpi_key(mpi_key);
return -EINVAL;
}
if (fips_enabled && rsa_check_exponent_fips(mpi_key->e)) {
rsa_free_mpi_key(mpi_key);
return -EINVAL;
}
return 0;
err:
rsa_free_mpi_key(mpi_key);
return -ENOMEM;
}
static unsigned int rsa_max_size(struct crypto_akcipher *tfm)
{
struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
return mpi_get_size(pkey->n);
}
static void rsa_exit_tfm(struct crypto_akcipher *tfm)
{
struct rsa_mpi_key *pkey = akcipher_tfm_ctx(tfm);
rsa_free_mpi_key(pkey);
}
static struct akcipher_alg rsa = {
.encrypt = rsa_enc,
.decrypt = rsa_dec,
.set_priv_key = rsa_set_priv_key,
.set_pub_key = rsa_set_pub_key,
.max_size = rsa_max_size,
.exit = rsa_exit_tfm,
.base = {
.cra_name = "rsa",
.cra_driver_name = "rsa-generic",
.cra_priority = 100,
.cra_module = THIS_MODULE,
.cra_ctxsize = sizeof(struct rsa_mpi_key),
},
};
static int __init rsa_init(void)
{
int err;
err = crypto_register_akcipher(&rsa);
if (err)
return err;
err = crypto_register_template(&rsa_pkcs1pad_tmpl);
if (err) {
crypto_unregister_akcipher(&rsa);
return err;
}
return 0;
}
static void __exit rsa_exit(void)
{
crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa);
}
subsys_initcall(rsa_init);
module_exit(rsa_exit);
MODULE_ALIAS_CRYPTO("rsa");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RSA generic algorithm");
| linux-master | crypto/rsa.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CBC: Cipher Block Chaining mode
*
* Copyright (c) 2006-2016 Herbert Xu <[email protected]>
*/
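/*
 * For a block cipher E/D with key K and initialisation vector C_0:
 *
 *	encryption: C_i = E_K(P_i xor C_{i-1})
 *	decryption: P_i = D_K(C_i) xor C_{i-1}
 *
 * Encryption is therefore inherently serial, while decrypting any
 * block only needs the previous ciphertext block.
 */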
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/module.h>
static int crypto_cbc_encrypt_segment(struct skcipher_walk *walk,
struct crypto_skcipher *skcipher)
{
unsigned int bsize = crypto_skcipher_blocksize(skcipher);
void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
struct crypto_cipher *cipher;
struct crypto_tfm *tfm;
u8 *iv = walk->iv;
cipher = skcipher_cipher_simple(skcipher);
tfm = crypto_cipher_tfm(cipher);
fn = crypto_cipher_alg(cipher)->cia_encrypt;
do {
crypto_xor(iv, src, bsize);
fn(tfm, dst, iv);
memcpy(iv, dst, bsize);
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
}
static int crypto_cbc_encrypt_inplace(struct skcipher_walk *walk,
struct crypto_skcipher *skcipher)
{
unsigned int bsize = crypto_skcipher_blocksize(skcipher);
void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
struct crypto_cipher *cipher;
struct crypto_tfm *tfm;
u8 *iv = walk->iv;
cipher = skcipher_cipher_simple(skcipher);
tfm = crypto_cipher_tfm(cipher);
fn = crypto_cipher_alg(cipher)->cia_encrypt;
do {
crypto_xor(src, iv, bsize);
fn(tfm, src, src);
iv = src;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cbc_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
if (walk.src.virt.addr == walk.dst.virt.addr)
err = crypto_cbc_encrypt_inplace(&walk, skcipher);
else
err = crypto_cbc_encrypt_segment(&walk, skcipher);
err = skcipher_walk_done(&walk, err);
}
return err;
}
static int crypto_cbc_decrypt_segment(struct skcipher_walk *walk,
struct crypto_skcipher *skcipher)
{
unsigned int bsize = crypto_skcipher_blocksize(skcipher);
void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
struct crypto_cipher *cipher;
struct crypto_tfm *tfm;
u8 *iv = walk->iv;
cipher = skcipher_cipher_simple(skcipher);
tfm = crypto_cipher_tfm(cipher);
fn = crypto_cipher_alg(cipher)->cia_decrypt;
do {
fn(tfm, dst, src);
crypto_xor(dst, iv, bsize);
iv = src;
src += bsize;
dst += bsize;
} while ((nbytes -= bsize) >= bsize);
memcpy(walk->iv, iv, bsize);
return nbytes;
}
static int crypto_cbc_decrypt_inplace(struct skcipher_walk *walk,
struct crypto_skcipher *skcipher)
{
unsigned int bsize = crypto_skcipher_blocksize(skcipher);
void (*fn)(struct crypto_tfm *, u8 *, const u8 *);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
u8 last_iv[MAX_CIPHER_BLOCKSIZE];
struct crypto_cipher *cipher;
struct crypto_tfm *tfm;
cipher = skcipher_cipher_simple(skcipher);
tfm = crypto_cipher_tfm(cipher);
fn = crypto_cipher_alg(cipher)->cia_decrypt;
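/*
 * Walk backwards from the last block: each plaintext block needs the
 * previous *ciphertext* block, which a forward in-place pass would
 * already have overwritten. The final ciphertext block is saved
 * first since it becomes the IV for the next request.
 */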
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
memcpy(last_iv, src, bsize);
for (;;) {
fn(tfm, src, src);
if ((nbytes -= bsize) < bsize)
break;
crypto_xor(src, src - bsize, bsize);
src -= bsize;
}
crypto_xor(src, walk->iv, bsize);
memcpy(walk->iv, last_iv, bsize);
return nbytes;
}
static int crypto_cbc_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
if (walk.src.virt.addr == walk.dst.virt.addr)
err = crypto_cbc_decrypt_inplace(&walk, skcipher);
else
err = crypto_cbc_decrypt_segment(&walk, skcipher);
err = skcipher_walk_done(&walk, err);
}
return err;
}
static int crypto_cbc_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
err = -EINVAL;
if (!is_power_of_2(alg->cra_blocksize))
goto out_free_inst;
inst->alg.encrypt = crypto_cbc_encrypt;
inst->alg.decrypt = crypto_cbc_decrypt;
err = skcipher_register_instance(tmpl, inst);
if (err) {
out_free_inst:
inst->free(inst);
}
return err;
}
static struct crypto_template crypto_cbc_tmpl = {
.name = "cbc",
.create = crypto_cbc_create,
.module = THIS_MODULE,
};
static int __init crypto_cbc_module_init(void)
{
return crypto_register_template(&crypto_cbc_tmpl);
}
static void __exit crypto_cbc_module_exit(void)
{
crypto_unregister_template(&crypto_cbc_tmpl);
}
subsys_initcall(crypto_cbc_module_init);
module_exit(crypto_cbc_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CBC block cipher mode of operation");
MODULE_ALIAS_CRYPTO("cbc");
| linux-master | crypto/cbc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2012 Xyratex Technology Limited
*/
/*
* This is a crypto API shash wrapper for crc32_le.
*/
#include <asm/unaligned.h>
#include <linux/crc32.h>
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/kernel.h>
#define CHKSUM_BLOCK_SIZE 1
#define CHKSUM_DIGEST_SIZE 4
/* No default init with ~0 */
static int crc32_cra_init(struct crypto_tfm *tfm)
{
u32 *key = crypto_tfm_ctx(tfm);
*key = 0;
return 0;
}
/*
* Setting the seed allows arbitrary accumulators and a flexible XOR policy.
* If your algorithm starts with ~0, then XOR with ~0 before you set
* the seed.
*/
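/*
 * Example: the conventional CRC-32 (as in IEEE 802.3) of a buffer is
 * obtained by setting the key to 0xffffffff and XORing the resulting
 * digest with 0xffffffff, since this wrapper applies neither the
 * initial nor the final inversion itself. Key and digest are both
 * stored little-endian.
 */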
static int crc32_setkey(struct crypto_shash *hash, const u8 *key,
unsigned int keylen)
{
u32 *mctx = crypto_shash_ctx(hash);
if (keylen != sizeof(u32))
return -EINVAL;
*mctx = get_unaligned_le32(key);
return 0;
}
static int crc32_init(struct shash_desc *desc)
{
u32 *mctx = crypto_shash_ctx(desc->tfm);
u32 *crcp = shash_desc_ctx(desc);
*crcp = *mctx;
return 0;
}
static int crc32_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
u32 *crcp = shash_desc_ctx(desc);
*crcp = crc32_le(*crcp, data, len);
return 0;
}
/* No final XOR 0xFFFFFFFF, like crc32_le */
static int __crc32_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
put_unaligned_le32(crc32_le(*crcp, data, len), out);
return 0;
}
static int crc32_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32_finup(shash_desc_ctx(desc), data, len, out);
}
static int crc32_final(struct shash_desc *desc, u8 *out)
{
u32 *crcp = shash_desc_ctx(desc);
put_unaligned_le32(*crcp, out);
return 0;
}
static int crc32_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len,
out);
}
static struct shash_alg alg = {
.setkey = crc32_setkey,
.init = crc32_init,
.update = crc32_update,
.final = crc32_final,
.finup = crc32_finup,
.digest = crc32_digest,
.descsize = sizeof(u32),
.digestsize = CHKSUM_DIGEST_SIZE,
.base = {
.cra_name = "crc32",
.cra_driver_name = "crc32-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
.cra_blocksize = CHKSUM_BLOCK_SIZE,
.cra_ctxsize = sizeof(u32),
.cra_module = THIS_MODULE,
.cra_init = crc32_cra_init,
}
};
static int __init crc32_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit crc32_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(crc32_mod_init);
module_exit(crc32_mod_fini);
MODULE_AUTHOR("Alexander Boyko <[email protected]>");
MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32");
MODULE_ALIAS_CRYPTO("crc32-generic");
| linux-master | crypto/crc32_generic.c |
/*
* Cryptographic API.
*
* Khazad Algorithm
*
* The Khazad algorithm was developed by Paulo S. L. M. Barreto and
* Vincent Rijmen. It was a finalist in the NESSIE encryption contest.
*
* The original authors have disclaimed all copyright interest in this
* code and thus put it in the public domain. The subsequent authors
* have put this under the GNU General Public License.
*
* By Aaron Grothe [email protected], August 1, 2004
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/byteorder.h>
#include <linux/types.h>
#define KHAZAD_KEY_SIZE 16
#define KHAZAD_BLOCK_SIZE 8
#define KHAZAD_ROUNDS 8
struct khazad_ctx {
u64 E[KHAZAD_ROUNDS + 1];
u64 D[KHAZAD_ROUNDS + 1];
};
static const u64 T0[256] = {
0xbad3d268bbb96a01ULL, 0x54fc4d19e59a66b1ULL, 0x2f71bc93e26514cdULL,
0x749ccdb925871b51ULL, 0x53f55102f7a257a4ULL, 0xd3686bb8d0d6be03ULL,
0xd26b6fbdd6deb504ULL, 0x4dd72964b35285feULL, 0x50f05d0dfdba4aadULL,
0xace98a26cf09e063ULL, 0x8d8a0e83091c9684ULL, 0xbfdcc679a5914d1aULL,
0x7090ddad3da7374dULL, 0x52f65507f1aa5ca3ULL, 0x9ab352c87ba417e1ULL,
0x4cd42d61b55a8ef9ULL, 0xea238f65460320acULL, 0xd56273a6c4e68411ULL,
0x97a466f155cc68c2ULL, 0xd16e63b2dcc6a80dULL, 0x3355ccffaa85d099ULL,
0x51f35908fbb241aaULL, 0x5bed712ac7e20f9cULL, 0xa6f7a204f359ae55ULL,
0xde7f5f81febec120ULL, 0x48d83d75ad7aa2e5ULL, 0xa8e59a32d729cc7fULL,
0x99b65ec771bc0ae8ULL, 0xdb704b90e096e63bULL, 0x3256c8faac8ddb9eULL,
0xb7c4e65195d11522ULL, 0xfc19d72b32b3aaceULL, 0xe338ab48704b7393ULL,
0x9ebf42dc63843bfdULL, 0x91ae7eef41fc52d0ULL, 0x9bb056cd7dac1ce6ULL,
0xe23baf4d76437894ULL, 0xbbd0d66dbdb16106ULL, 0x41c319589b32f1daULL,
0x6eb2a5cb7957e517ULL, 0xa5f2ae0bf941b35cULL, 0xcb400bc08016564bULL,
0x6bbdb1da677fc20cULL, 0x95a26efb59dc7eccULL, 0xa1febe1fe1619f40ULL,
0xf308eb1810cbc3e3ULL, 0xb1cefe4f81e12f30ULL, 0x0206080a0c10160eULL,
0xcc4917db922e675eULL, 0xc45137f3a26e3f66ULL, 0x1d2774694ee8cf53ULL,
0x143c504478a09c6cULL, 0xc3582be8b0560e73ULL, 0x63a591f2573f9a34ULL,
0xda734f95e69eed3cULL, 0x5de76934d3d2358eULL, 0x5fe1613edfc22380ULL,
0xdc79578bf2aed72eULL, 0x7d87e99413cf486eULL, 0xcd4a13de94266c59ULL,
0x7f81e19e1fdf5e60ULL, 0x5aee752fc1ea049bULL, 0x6cb4adc17547f319ULL,
0x5ce46d31d5da3e89ULL, 0xf704fb0c08ebefffULL, 0x266a98bed42d47f2ULL,
0xff1cdb2438abb7c7ULL, 0xed2a937e543b11b9ULL, 0xe825876f4a1336a2ULL,
0x9dba4ed3699c26f4ULL, 0x6fb1a1ce7f5fee10ULL, 0x8e8f028c03048b8dULL,
0x192b647d56c8e34fULL, 0xa0fdba1ae7699447ULL, 0xf00de7171ad3deeaULL,
0x89861e97113cba98ULL, 0x0f113c332278692dULL, 0x07091c1b12383115ULL,
0xafec8629c511fd6aULL, 0xfb10cb30208b9bdbULL, 0x0818202830405838ULL,
0x153f54417ea8976bULL, 0x0d1734392e687f23ULL, 0x040c101418202c1cULL,
0x0103040506080b07ULL, 0x64ac8de94507ab21ULL, 0xdf7c5b84f8b6ca27ULL,
0x769ac5b329970d5fULL, 0x798bf9800bef6472ULL, 0xdd7a538ef4a6dc29ULL,
0x3d47f4c98ef5b2b3ULL, 0x163a584e74b08a62ULL, 0x3f41fcc382e5a4bdULL,
0x3759dcebb2a5fc85ULL, 0x6db7a9c4734ff81eULL, 0x3848e0d890dd95a8ULL,
0xb9d6de67b1a17708ULL, 0x7395d1a237bf2a44ULL, 0xe926836a4c1b3da5ULL,
0x355fd4e1beb5ea8bULL, 0x55ff491ce3926db6ULL, 0x7193d9a83baf3c4aULL,
0x7b8df18a07ff727cULL, 0x8c890a860f149d83ULL, 0x7296d5a731b72143ULL,
0x88851a921734b19fULL, 0xf607ff090ee3e4f8ULL, 0x2a7ea882fc4d33d6ULL,
0x3e42f8c684edafbaULL, 0x5ee2653bd9ca2887ULL, 0x27699cbbd2254cf5ULL,
0x46ca0543890ac0cfULL, 0x0c14303c28607424ULL, 0x65af89ec430fa026ULL,
0x68b8bdd56d67df05ULL, 0x61a399f85b2f8c3aULL, 0x03050c0f0a181d09ULL,
0xc15e23e2bc46187dULL, 0x57f94116ef827bb8ULL, 0xd6677fa9cefe9918ULL,
0xd976439aec86f035ULL, 0x58e87d25cdfa1295ULL, 0xd875479fea8efb32ULL,
0x66aa85e34917bd2fULL, 0xd7647bacc8f6921fULL, 0x3a4ee8d29ccd83a6ULL,
0xc84507cf8a0e4b42ULL, 0x3c44f0cc88fdb9b4ULL, 0xfa13cf35268390dcULL,
0x96a762f453c463c5ULL, 0xa7f4a601f551a552ULL, 0x98b55ac277b401efULL,
0xec29977b52331abeULL, 0xb8d5da62b7a97c0fULL, 0xc7543bfca876226fULL,
0xaeef822cc319f66dULL, 0x69bbb9d06b6fd402ULL, 0x4bdd317aa762bfecULL,
0xabe0963ddd31d176ULL, 0xa9e69e37d121c778ULL, 0x67a981e64f1fb628ULL,
0x0a1e28223c504e36ULL, 0x47c901468f02cbc8ULL, 0xf20bef1d16c3c8e4ULL,
0xb5c2ee5b99c1032cULL, 0x226688aacc0d6beeULL, 0xe532b356647b4981ULL,
0xee2f9f715e230cb0ULL, 0xbedfc27ca399461dULL, 0x2b7dac87fa4538d1ULL,
0x819e3ebf217ce2a0ULL, 0x1236485a6c90a67eULL, 0x839836b52d6cf4aeULL,
0x1b2d6c775ad8f541ULL, 0x0e1238362470622aULL, 0x23658cafca0560e9ULL,
0xf502f30604fbf9f1ULL, 0x45cf094c8312ddc6ULL, 0x216384a5c61576e7ULL,
0xce4f1fd19e3e7150ULL, 0x49db3970ab72a9e2ULL, 0x2c74b09ce87d09c4ULL,
0xf916c33a2c9b8dd5ULL, 0xe637bf596e635488ULL, 0xb6c7e25493d91e25ULL,
0x2878a088f05d25d8ULL, 0x17395c4b72b88165ULL, 0x829b32b02b64ffa9ULL,
0x1a2e68725cd0fe46ULL, 0x8b80169d1d2cac96ULL, 0xfe1fdf213ea3bcc0ULL,
0x8a8312981b24a791ULL, 0x091b242d3648533fULL, 0xc94603ca8c064045ULL,
0x879426a1354cd8b2ULL, 0x4ed2256bb94a98f7ULL, 0xe13ea3427c5b659dULL,
0x2e72b896e46d1fcaULL, 0xe431b75362734286ULL, 0xe03da7477a536e9aULL,
0xeb208b60400b2babULL, 0x90ad7aea47f459d7ULL, 0xa4f1aa0eff49b85bULL,
0x1e22786644f0d25aULL, 0x85922eab395ccebcULL, 0x60a09dfd5d27873dULL,
0x0000000000000000ULL, 0x256f94b1de355afbULL, 0xf401f70302f3f2f6ULL,
0xf10ee3121cdbd5edULL, 0x94a16afe5fd475cbULL, 0x0b1d2c273a584531ULL,
0xe734bb5c686b5f8fULL, 0x759fc9bc238f1056ULL, 0xef2c9b74582b07b7ULL,
0x345cd0e4b8bde18cULL, 0x3153c4f5a695c697ULL, 0xd46177a3c2ee8f16ULL,
0xd06d67b7dacea30aULL, 0x869722a43344d3b5ULL, 0x7e82e59b19d75567ULL,
0xadea8e23c901eb64ULL, 0xfd1ad32e34bba1c9ULL, 0x297ba48df6552edfULL,
0x3050c0f0a09dcd90ULL, 0x3b4decd79ac588a1ULL, 0x9fbc46d9658c30faULL,
0xf815c73f2a9386d2ULL, 0xc6573ff9ae7e2968ULL, 0x13354c5f6a98ad79ULL,
0x060a181e14303a12ULL, 0x050f14111e28271bULL, 0xc55233f6a4663461ULL,
0x113344556688bb77ULL, 0x7799c1b62f9f0658ULL, 0x7c84ed9115c74369ULL,
0x7a8ef58f01f7797bULL, 0x7888fd850de76f75ULL, 0x365ad8eeb4adf782ULL,
0x1c24706c48e0c454ULL, 0x394be4dd96d59eafULL, 0x59eb7920cbf21992ULL,
0x1828607850c0e848ULL, 0x56fa4513e98a70bfULL, 0xb3c8f6458df1393eULL,
0xb0cdfa4a87e92437ULL, 0x246c90b4d83d51fcULL, 0x206080a0c01d7de0ULL,
0xb2cbf2408bf93239ULL, 0x92ab72e04be44fd9ULL, 0xa3f8b615ed71894eULL,
0xc05d27e7ba4e137aULL, 0x44cc0d49851ad6c1ULL, 0x62a695f751379133ULL,
0x103040506080b070ULL, 0xb4c1ea5e9fc9082bULL, 0x84912aae3f54c5bbULL,
0x43c511529722e7d4ULL, 0x93a876e54dec44deULL, 0xc25b2fedb65e0574ULL,
0x4ade357fa16ab4ebULL, 0xbddace73a9815b14ULL, 0x8f8c0689050c808aULL,
0x2d77b499ee7502c3ULL, 0xbcd9ca76af895013ULL, 0x9cb94ad66f942df3ULL,
0x6abeb5df6177c90bULL, 0x40c01d5d9d3afaddULL, 0xcf4c1bd498367a57ULL,
0xa2fbb210eb798249ULL, 0x809d3aba2774e9a7ULL, 0x4fd1216ebf4293f0ULL,
0x1f217c6342f8d95dULL, 0xca430fc5861e5d4cULL, 0xaae39238db39da71ULL,
0x42c61557912aecd3ULL
};
static const u64 T1[256] = {
0xd3ba68d2b9bb016aULL, 0xfc54194d9ae5b166ULL, 0x712f93bc65e2cd14ULL,
0x9c74b9cd8725511bULL, 0xf5530251a2f7a457ULL, 0x68d3b86bd6d003beULL,
0x6bd2bd6fded604b5ULL, 0xd74d642952b3fe85ULL, 0xf0500d5dbafdad4aULL,
0xe9ac268a09cf63e0ULL, 0x8a8d830e1c098496ULL, 0xdcbf79c691a51a4dULL,
0x9070addda73d4d37ULL, 0xf6520755aaf1a35cULL, 0xb39ac852a47be117ULL,
0xd44c612d5ab5f98eULL, 0x23ea658f0346ac20ULL, 0x62d5a673e6c41184ULL,
0xa497f166cc55c268ULL, 0x6ed1b263c6dc0da8ULL, 0x5533ffcc85aa99d0ULL,
0xf3510859b2fbaa41ULL, 0xed5b2a71e2c79c0fULL, 0xf7a604a259f355aeULL,
0x7fde815fbefe20c1ULL, 0xd848753d7aade5a2ULL, 0xe5a8329a29d77fccULL,
0xb699c75ebc71e80aULL, 0x70db904b96e03be6ULL, 0x5632fac88dac9edbULL,
0xc4b751e6d1952215ULL, 0x19fc2bd7b332ceaaULL, 0x38e348ab4b709373ULL,
0xbf9edc428463fd3bULL, 0xae91ef7efc41d052ULL, 0xb09bcd56ac7de61cULL,
0x3be24daf43769478ULL, 0xd0bb6dd6b1bd0661ULL, 0xc3415819329bdaf1ULL,
0xb26ecba5577917e5ULL, 0xf2a50bae41f95cb3ULL, 0x40cbc00b16804b56ULL,
0xbd6bdab17f670cc2ULL, 0xa295fb6edc59cc7eULL, 0xfea11fbe61e1409fULL,
0x08f318ebcb10e3c3ULL, 0xceb14ffee181302fULL, 0x06020a08100c0e16ULL,
0x49ccdb172e925e67ULL, 0x51c4f3376ea2663fULL, 0x271d6974e84e53cfULL,
0x3c144450a0786c9cULL, 0x58c3e82b56b0730eULL, 0xa563f2913f57349aULL,
0x73da954f9ee63cedULL, 0xe75d3469d2d38e35ULL, 0xe15f3e61c2df8023ULL,
0x79dc8b57aef22ed7ULL, 0x877d94e9cf136e48ULL, 0x4acdde132694596cULL,
0x817f9ee1df1f605eULL, 0xee5a2f75eac19b04ULL, 0xb46cc1ad477519f3ULL,
0xe45c316ddad5893eULL, 0x04f70cfbeb08ffefULL, 0x6a26be982dd4f247ULL,
0x1cff24dbab38c7b7ULL, 0x2aed7e933b54b911ULL, 0x25e86f87134aa236ULL,
0xba9dd34e9c69f426ULL, 0xb16fcea15f7f10eeULL, 0x8f8e8c0204038d8bULL,
0x2b197d64c8564fe3ULL, 0xfda01aba69e74794ULL, 0x0df017e7d31aeadeULL,
0x8689971e3c1198baULL, 0x110f333c78222d69ULL, 0x09071b1c38121531ULL,
0xecaf298611c56afdULL, 0x10fb30cb8b20db9bULL, 0x1808282040303858ULL,
0x3f154154a87e6b97ULL, 0x170d3934682e237fULL, 0x0c04141020181c2cULL,
0x030105040806070bULL, 0xac64e98d074521abULL, 0x7cdf845bb6f827caULL,
0x9a76b3c597295f0dULL, 0x8b7980f9ef0b7264ULL, 0x7add8e53a6f429dcULL,
0x473dc9f4f58eb3b2ULL, 0x3a164e58b074628aULL, 0x413fc3fce582bda4ULL,
0x5937ebdca5b285fcULL, 0xb76dc4a94f731ef8ULL, 0x4838d8e0dd90a895ULL,
0xd6b967dea1b10877ULL, 0x9573a2d1bf37442aULL, 0x26e96a831b4ca53dULL,
0x5f35e1d4b5be8beaULL, 0xff551c4992e3b66dULL, 0x9371a8d9af3b4a3cULL,
0x8d7b8af1ff077c72ULL, 0x898c860a140f839dULL, 0x9672a7d5b7314321ULL,
0x8588921a34179fb1ULL, 0x07f609ffe30ef8e4ULL, 0x7e2a82a84dfcd633ULL,
0x423ec6f8ed84baafULL, 0xe25e3b65cad98728ULL, 0x6927bb9c25d2f54cULL,
0xca4643050a89cfc0ULL, 0x140c3c3060282474ULL, 0xaf65ec890f4326a0ULL,
0xb868d5bd676d05dfULL, 0xa361f8992f5b3a8cULL, 0x05030f0c180a091dULL,
0x5ec1e22346bc7d18ULL, 0xf957164182efb87bULL, 0x67d6a97ffece1899ULL,
0x76d99a4386ec35f0ULL, 0xe858257dfacd9512ULL, 0x75d89f478eea32fbULL,
0xaa66e38517492fbdULL, 0x64d7ac7bf6c81f92ULL, 0x4e3ad2e8cd9ca683ULL,
0x45c8cf070e8a424bULL, 0x443cccf0fd88b4b9ULL, 0x13fa35cf8326dc90ULL,
0xa796f462c453c563ULL, 0xf4a701a651f552a5ULL, 0xb598c25ab477ef01ULL,
0x29ec7b973352be1aULL, 0xd5b862daa9b70f7cULL, 0x54c7fc3b76a86f22ULL,
0xefae2c8219c36df6ULL, 0xbb69d0b96f6b02d4ULL, 0xdd4b7a3162a7ecbfULL,
0xe0ab3d9631dd76d1ULL, 0xe6a9379e21d178c7ULL, 0xa967e6811f4f28b6ULL,
0x1e0a2228503c364eULL, 0xc9474601028fc8cbULL, 0x0bf21defc316e4c8ULL,
0xc2b55beec1992c03ULL, 0x6622aa880dccee6bULL, 0x32e556b37b648149ULL,
0x2fee719f235eb00cULL, 0xdfbe7cc299a31d46ULL, 0x7d2b87ac45fad138ULL,
0x9e81bf3e7c21a0e2ULL, 0x36125a48906c7ea6ULL, 0x9883b5366c2daef4ULL,
0x2d1b776cd85a41f5ULL, 0x120e363870242a62ULL, 0x6523af8c05cae960ULL,
0x02f506f3fb04f1f9ULL, 0xcf454c091283c6ddULL, 0x6321a58415c6e776ULL,
0x4fced11f3e9e5071ULL, 0xdb49703972abe2a9ULL, 0x742c9cb07de8c409ULL,
0x16f93ac39b2cd58dULL, 0x37e659bf636e8854ULL, 0xc7b654e2d993251eULL,
0x782888a05df0d825ULL, 0x39174b5cb8726581ULL, 0x9b82b032642ba9ffULL,
0x2e1a7268d05c46feULL, 0x808b9d162c1d96acULL, 0x1ffe21dfa33ec0bcULL,
0x838a9812241b91a7ULL, 0x1b092d2448363f53ULL, 0x46c9ca03068c4540ULL,
0x9487a1264c35b2d8ULL, 0xd24e6b254ab9f798ULL, 0x3ee142a35b7c9d65ULL,
0x722e96b86de4ca1fULL, 0x31e453b773628642ULL, 0x3de047a7537a9a6eULL,
0x20eb608b0b40ab2bULL, 0xad90ea7af447d759ULL, 0xf1a40eaa49ff5bb8ULL,
0x221e6678f0445ad2ULL, 0x9285ab2e5c39bcceULL, 0xa060fd9d275d3d87ULL,
0x0000000000000000ULL, 0x6f25b19435defb5aULL, 0x01f403f7f302f6f2ULL,
0x0ef112e3db1cedd5ULL, 0xa194fe6ad45fcb75ULL, 0x1d0b272c583a3145ULL,
0x34e75cbb6b688f5fULL, 0x9f75bcc98f235610ULL, 0x2cef749b2b58b707ULL,
0x5c34e4d0bdb88ce1ULL, 0x5331f5c495a697c6ULL, 0x61d4a377eec2168fULL,
0x6dd0b767ceda0aa3ULL, 0x9786a4224433b5d3ULL, 0x827e9be5d7196755ULL,
0xeaad238e01c964ebULL, 0x1afd2ed3bb34c9a1ULL, 0x7b298da455f6df2eULL,
0x5030f0c09da090cdULL, 0x4d3bd7ecc59aa188ULL, 0xbc9fd9468c65fa30ULL,
0x15f83fc7932ad286ULL, 0x57c6f93f7eae6829ULL, 0x35135f4c986a79adULL,
0x0a061e183014123aULL, 0x0f051114281e1b27ULL, 0x52c5f63366a46134ULL,
0x33115544886677bbULL, 0x9977b6c19f2f5806ULL, 0x847c91edc7156943ULL,
0x8e7a8ff5f7017b79ULL, 0x887885fde70d756fULL, 0x5a36eed8adb482f7ULL,
0x241c6c70e04854c4ULL, 0x4b39dde4d596af9eULL, 0xeb592079f2cb9219ULL,
0x28187860c05048e8ULL, 0xfa5613458ae9bf70ULL, 0xc8b345f6f18d3e39ULL,
0xcdb04afae9873724ULL, 0x6c24b4903dd8fc51ULL, 0x6020a0801dc0e07dULL,
0xcbb240f2f98b3932ULL, 0xab92e072e44bd94fULL, 0xf8a315b671ed4e89ULL,
0x5dc0e7274eba7a13ULL, 0xcc44490d1a85c1d6ULL, 0xa662f79537513391ULL,
0x30105040806070b0ULL, 0xc1b45eeac99f2b08ULL, 0x9184ae2a543fbbc5ULL,
0xc54352112297d4e7ULL, 0xa893e576ec4dde44ULL, 0x5bc2ed2f5eb67405ULL,
0xde4a7f356aa1ebb4ULL, 0xdabd73ce81a9145bULL, 0x8c8f89060c058a80ULL,
0x772d99b475eec302ULL, 0xd9bc76ca89af1350ULL, 0xb99cd64a946ff32dULL,
0xbe6adfb577610bc9ULL, 0xc0405d1d3a9dddfaULL, 0x4ccfd41b3698577aULL,
0xfba210b279eb4982ULL, 0x9d80ba3a7427a7e9ULL, 0xd14f6e2142bff093ULL,
0x211f637cf8425dd9ULL, 0x43cac50f1e864c5dULL, 0xe3aa389239db71daULL,
0xc64257152a91d3ecULL
};
static const u64 T2[256] = {
0xd268bad36a01bbb9ULL, 0x4d1954fc66b1e59aULL, 0xbc932f7114cde265ULL,
0xcdb9749c1b512587ULL, 0x510253f557a4f7a2ULL, 0x6bb8d368be03d0d6ULL,
0x6fbdd26bb504d6deULL, 0x29644dd785feb352ULL, 0x5d0d50f04aadfdbaULL,
0x8a26ace9e063cf09ULL, 0x0e838d8a9684091cULL, 0xc679bfdc4d1aa591ULL,
0xddad7090374d3da7ULL, 0x550752f65ca3f1aaULL, 0x52c89ab317e17ba4ULL,
0x2d614cd48ef9b55aULL, 0x8f65ea2320ac4603ULL, 0x73a6d5628411c4e6ULL,
0x66f197a468c255ccULL, 0x63b2d16ea80ddcc6ULL, 0xccff3355d099aa85ULL,
0x590851f341aafbb2ULL, 0x712a5bed0f9cc7e2ULL, 0xa204a6f7ae55f359ULL,
0x5f81de7fc120febeULL, 0x3d7548d8a2e5ad7aULL, 0x9a32a8e5cc7fd729ULL,
0x5ec799b60ae871bcULL, 0x4b90db70e63be096ULL, 0xc8fa3256db9eac8dULL,
0xe651b7c4152295d1ULL, 0xd72bfc19aace32b3ULL, 0xab48e3387393704bULL,
0x42dc9ebf3bfd6384ULL, 0x7eef91ae52d041fcULL, 0x56cd9bb01ce67dacULL,
0xaf4de23b78947643ULL, 0xd66dbbd06106bdb1ULL, 0x195841c3f1da9b32ULL,
0xa5cb6eb2e5177957ULL, 0xae0ba5f2b35cf941ULL, 0x0bc0cb40564b8016ULL,
0xb1da6bbdc20c677fULL, 0x6efb95a27ecc59dcULL, 0xbe1fa1fe9f40e161ULL,
0xeb18f308c3e310cbULL, 0xfe4fb1ce2f3081e1ULL, 0x080a0206160e0c10ULL,
0x17dbcc49675e922eULL, 0x37f3c4513f66a26eULL, 0x74691d27cf534ee8ULL,
0x5044143c9c6c78a0ULL, 0x2be8c3580e73b056ULL, 0x91f263a59a34573fULL,
0x4f95da73ed3ce69eULL, 0x69345de7358ed3d2ULL, 0x613e5fe12380dfc2ULL,
0x578bdc79d72ef2aeULL, 0xe9947d87486e13cfULL, 0x13decd4a6c599426ULL,
0xe19e7f815e601fdfULL, 0x752f5aee049bc1eaULL, 0xadc16cb4f3197547ULL,
0x6d315ce43e89d5daULL, 0xfb0cf704efff08ebULL, 0x98be266a47f2d42dULL,
0xdb24ff1cb7c738abULL, 0x937eed2a11b9543bULL, 0x876fe82536a24a13ULL,
0x4ed39dba26f4699cULL, 0xa1ce6fb1ee107f5fULL, 0x028c8e8f8b8d0304ULL,
0x647d192be34f56c8ULL, 0xba1aa0fd9447e769ULL, 0xe717f00ddeea1ad3ULL,
0x1e978986ba98113cULL, 0x3c330f11692d2278ULL, 0x1c1b070931151238ULL,
0x8629afecfd6ac511ULL, 0xcb30fb109bdb208bULL, 0x2028081858383040ULL,
0x5441153f976b7ea8ULL, 0x34390d177f232e68ULL, 0x1014040c2c1c1820ULL,
0x040501030b070608ULL, 0x8de964acab214507ULL, 0x5b84df7cca27f8b6ULL,
0xc5b3769a0d5f2997ULL, 0xf980798b64720befULL, 0x538edd7adc29f4a6ULL,
0xf4c93d47b2b38ef5ULL, 0x584e163a8a6274b0ULL, 0xfcc33f41a4bd82e5ULL,
0xdceb3759fc85b2a5ULL, 0xa9c46db7f81e734fULL, 0xe0d8384895a890ddULL,
0xde67b9d67708b1a1ULL, 0xd1a273952a4437bfULL, 0x836ae9263da54c1bULL,
0xd4e1355fea8bbeb5ULL, 0x491c55ff6db6e392ULL, 0xd9a871933c4a3bafULL,
0xf18a7b8d727c07ffULL, 0x0a868c899d830f14ULL, 0xd5a77296214331b7ULL,
0x1a928885b19f1734ULL, 0xff09f607e4f80ee3ULL, 0xa8822a7e33d6fc4dULL,
0xf8c63e42afba84edULL, 0x653b5ee22887d9caULL, 0x9cbb27694cf5d225ULL,
0x054346cac0cf890aULL, 0x303c0c1474242860ULL, 0x89ec65afa026430fULL,
0xbdd568b8df056d67ULL, 0x99f861a38c3a5b2fULL, 0x0c0f03051d090a18ULL,
0x23e2c15e187dbc46ULL, 0x411657f97bb8ef82ULL, 0x7fa9d6679918cefeULL,
0x439ad976f035ec86ULL, 0x7d2558e81295cdfaULL, 0x479fd875fb32ea8eULL,
0x85e366aabd2f4917ULL, 0x7bacd764921fc8f6ULL, 0xe8d23a4e83a69ccdULL,
0x07cfc8454b428a0eULL, 0xf0cc3c44b9b488fdULL, 0xcf35fa1390dc2683ULL,
0x62f496a763c553c4ULL, 0xa601a7f4a552f551ULL, 0x5ac298b501ef77b4ULL,
0x977bec291abe5233ULL, 0xda62b8d57c0fb7a9ULL, 0x3bfcc754226fa876ULL,
0x822caeeff66dc319ULL, 0xb9d069bbd4026b6fULL, 0x317a4bddbfeca762ULL,
0x963dabe0d176dd31ULL, 0x9e37a9e6c778d121ULL, 0x81e667a9b6284f1fULL,
0x28220a1e4e363c50ULL, 0x014647c9cbc88f02ULL, 0xef1df20bc8e416c3ULL,
0xee5bb5c2032c99c1ULL, 0x88aa22666beecc0dULL, 0xb356e5324981647bULL,
0x9f71ee2f0cb05e23ULL, 0xc27cbedf461da399ULL, 0xac872b7d38d1fa45ULL,
0x3ebf819ee2a0217cULL, 0x485a1236a67e6c90ULL, 0x36b58398f4ae2d6cULL,
0x6c771b2df5415ad8ULL, 0x38360e12622a2470ULL, 0x8caf236560e9ca05ULL,
0xf306f502f9f104fbULL, 0x094c45cfddc68312ULL, 0x84a5216376e7c615ULL,
0x1fd1ce4f71509e3eULL, 0x397049dba9e2ab72ULL, 0xb09c2c7409c4e87dULL,
0xc33af9168dd52c9bULL, 0xbf59e63754886e63ULL, 0xe254b6c71e2593d9ULL,
0xa088287825d8f05dULL, 0x5c4b1739816572b8ULL, 0x32b0829bffa92b64ULL,
0x68721a2efe465cd0ULL, 0x169d8b80ac961d2cULL, 0xdf21fe1fbcc03ea3ULL,
0x12988a83a7911b24ULL, 0x242d091b533f3648ULL, 0x03cac94640458c06ULL,
0x26a18794d8b2354cULL, 0x256b4ed298f7b94aULL, 0xa342e13e659d7c5bULL,
0xb8962e721fcae46dULL, 0xb753e43142866273ULL, 0xa747e03d6e9a7a53ULL,
0x8b60eb202bab400bULL, 0x7aea90ad59d747f4ULL, 0xaa0ea4f1b85bff49ULL,
0x78661e22d25a44f0ULL, 0x2eab8592cebc395cULL, 0x9dfd60a0873d5d27ULL,
0x0000000000000000ULL, 0x94b1256f5afbde35ULL, 0xf703f401f2f602f3ULL,
0xe312f10ed5ed1cdbULL, 0x6afe94a175cb5fd4ULL, 0x2c270b1d45313a58ULL,
0xbb5ce7345f8f686bULL, 0xc9bc759f1056238fULL, 0x9b74ef2c07b7582bULL,
0xd0e4345ce18cb8bdULL, 0xc4f53153c697a695ULL, 0x77a3d4618f16c2eeULL,
0x67b7d06da30adaceULL, 0x22a48697d3b53344ULL, 0xe59b7e82556719d7ULL,
0x8e23adeaeb64c901ULL, 0xd32efd1aa1c934bbULL, 0xa48d297b2edff655ULL,
0xc0f03050cd90a09dULL, 0xecd73b4d88a19ac5ULL, 0x46d99fbc30fa658cULL,
0xc73ff81586d22a93ULL, 0x3ff9c6572968ae7eULL, 0x4c5f1335ad796a98ULL,
0x181e060a3a121430ULL, 0x1411050f271b1e28ULL, 0x33f6c5523461a466ULL,
0x44551133bb776688ULL, 0xc1b6779906582f9fULL, 0xed917c84436915c7ULL,
0xf58f7a8e797b01f7ULL, 0xfd8578886f750de7ULL, 0xd8ee365af782b4adULL,
0x706c1c24c45448e0ULL, 0xe4dd394b9eaf96d5ULL, 0x792059eb1992cbf2ULL,
0x60781828e84850c0ULL, 0x451356fa70bfe98aULL, 0xf645b3c8393e8df1ULL,
0xfa4ab0cd243787e9ULL, 0x90b4246c51fcd83dULL, 0x80a020607de0c01dULL,
0xf240b2cb32398bf9ULL, 0x72e092ab4fd94be4ULL, 0xb615a3f8894eed71ULL,
0x27e7c05d137aba4eULL, 0x0d4944ccd6c1851aULL, 0x95f762a691335137ULL,
0x40501030b0706080ULL, 0xea5eb4c1082b9fc9ULL, 0x2aae8491c5bb3f54ULL,
0x115243c5e7d49722ULL, 0x76e593a844de4decULL, 0x2fedc25b0574b65eULL,
0x357f4adeb4eba16aULL, 0xce73bdda5b14a981ULL, 0x06898f8c808a050cULL,
0xb4992d7702c3ee75ULL, 0xca76bcd95013af89ULL, 0x4ad69cb92df36f94ULL,
0xb5df6abec90b6177ULL, 0x1d5d40c0fadd9d3aULL, 0x1bd4cf4c7a579836ULL,
0xb210a2fb8249eb79ULL, 0x3aba809de9a72774ULL, 0x216e4fd193f0bf42ULL,
0x7c631f21d95d42f8ULL, 0x0fc5ca435d4c861eULL, 0x9238aae3da71db39ULL,
0x155742c6ecd3912aULL
};
static const u64 T3[256] = {
0x68d2d3ba016ab9bbULL, 0x194dfc54b1669ae5ULL, 0x93bc712fcd1465e2ULL,
0xb9cd9c74511b8725ULL, 0x0251f553a457a2f7ULL, 0xb86b68d303bed6d0ULL,
0xbd6f6bd204b5ded6ULL, 0x6429d74dfe8552b3ULL, 0x0d5df050ad4abafdULL,
0x268ae9ac63e009cfULL, 0x830e8a8d84961c09ULL, 0x79c6dcbf1a4d91a5ULL,
0xaddd90704d37a73dULL, 0x0755f652a35caaf1ULL, 0xc852b39ae117a47bULL,
0x612dd44cf98e5ab5ULL, 0x658f23eaac200346ULL, 0xa67362d51184e6c4ULL,
0xf166a497c268cc55ULL, 0xb2636ed10da8c6dcULL, 0xffcc553399d085aaULL,
0x0859f351aa41b2fbULL, 0x2a71ed5b9c0fe2c7ULL, 0x04a2f7a655ae59f3ULL,
0x815f7fde20c1befeULL, 0x753dd848e5a27aadULL, 0x329ae5a87fcc29d7ULL,
0xc75eb699e80abc71ULL, 0x904b70db3be696e0ULL, 0xfac856329edb8dacULL,
0x51e6c4b72215d195ULL, 0x2bd719fcceaab332ULL, 0x48ab38e393734b70ULL,
0xdc42bf9efd3b8463ULL, 0xef7eae91d052fc41ULL, 0xcd56b09be61cac7dULL,
0x4daf3be294784376ULL, 0x6dd6d0bb0661b1bdULL, 0x5819c341daf1329bULL,
0xcba5b26e17e55779ULL, 0x0baef2a55cb341f9ULL, 0xc00b40cb4b561680ULL,
0xdab1bd6b0cc27f67ULL, 0xfb6ea295cc7edc59ULL, 0x1fbefea1409f61e1ULL,
0x18eb08f3e3c3cb10ULL, 0x4ffeceb1302fe181ULL, 0x0a0806020e16100cULL,
0xdb1749cc5e672e92ULL, 0xf33751c4663f6ea2ULL, 0x6974271d53cfe84eULL,
0x44503c146c9ca078ULL, 0xe82b58c3730e56b0ULL, 0xf291a563349a3f57ULL,
0x954f73da3ced9ee6ULL, 0x3469e75d8e35d2d3ULL, 0x3e61e15f8023c2dfULL,
0x8b5779dc2ed7aef2ULL, 0x94e9877d6e48cf13ULL, 0xde134acd596c2694ULL,
0x9ee1817f605edf1fULL, 0x2f75ee5a9b04eac1ULL, 0xc1adb46c19f34775ULL,
0x316de45c893edad5ULL, 0x0cfb04f7ffefeb08ULL, 0xbe986a26f2472dd4ULL,
0x24db1cffc7b7ab38ULL, 0x7e932aedb9113b54ULL, 0x6f8725e8a236134aULL,
0xd34eba9df4269c69ULL, 0xcea1b16f10ee5f7fULL, 0x8c028f8e8d8b0403ULL,
0x7d642b194fe3c856ULL, 0x1abafda0479469e7ULL, 0x17e70df0eaded31aULL,
0x971e868998ba3c11ULL, 0x333c110f2d697822ULL, 0x1b1c090715313812ULL,
0x2986ecaf6afd11c5ULL, 0x30cb10fbdb9b8b20ULL, 0x2820180838584030ULL,
0x41543f156b97a87eULL, 0x3934170d237f682eULL, 0x14100c041c2c2018ULL,
0x05040301070b0806ULL, 0xe98dac6421ab0745ULL, 0x845b7cdf27cab6f8ULL,
0xb3c59a765f0d9729ULL, 0x80f98b797264ef0bULL, 0x8e537add29dca6f4ULL,
0xc9f4473db3b2f58eULL, 0x4e583a16628ab074ULL, 0xc3fc413fbda4e582ULL,
0xebdc593785fca5b2ULL, 0xc4a9b76d1ef84f73ULL, 0xd8e04838a895dd90ULL,
0x67ded6b90877a1b1ULL, 0xa2d19573442abf37ULL, 0x6a8326e9a53d1b4cULL,
0xe1d45f358beab5beULL, 0x1c49ff55b66d92e3ULL, 0xa8d993714a3caf3bULL,
0x8af18d7b7c72ff07ULL, 0x860a898c839d140fULL, 0xa7d596724321b731ULL,
0x921a85889fb13417ULL, 0x09ff07f6f8e4e30eULL, 0x82a87e2ad6334dfcULL,
0xc6f8423ebaafed84ULL, 0x3b65e25e8728cad9ULL, 0xbb9c6927f54c25d2ULL,
0x4305ca46cfc00a89ULL, 0x3c30140c24746028ULL, 0xec89af6526a00f43ULL,
0xd5bdb86805df676dULL, 0xf899a3613a8c2f5bULL, 0x0f0c0503091d180aULL,
0xe2235ec17d1846bcULL, 0x1641f957b87b82efULL, 0xa97f67d61899feceULL,
0x9a4376d935f086ecULL, 0x257de8589512facdULL, 0x9f4775d832fb8eeaULL,
0xe385aa662fbd1749ULL, 0xac7b64d71f92f6c8ULL, 0xd2e84e3aa683cd9cULL,
0xcf0745c8424b0e8aULL, 0xccf0443cb4b9fd88ULL, 0x35cf13fadc908326ULL,
0xf462a796c563c453ULL, 0x01a6f4a752a551f5ULL, 0xc25ab598ef01b477ULL,
0x7b9729ecbe1a3352ULL, 0x62dad5b80f7ca9b7ULL, 0xfc3b54c76f2276a8ULL,
0x2c82efae6df619c3ULL, 0xd0b9bb6902d46f6bULL, 0x7a31dd4becbf62a7ULL,
0x3d96e0ab76d131ddULL, 0x379ee6a978c721d1ULL, 0xe681a96728b61f4fULL,
0x22281e0a364e503cULL, 0x4601c947c8cb028fULL, 0x1def0bf2e4c8c316ULL,
0x5beec2b52c03c199ULL, 0xaa886622ee6b0dccULL, 0x56b332e581497b64ULL,
0x719f2feeb00c235eULL, 0x7cc2dfbe1d4699a3ULL, 0x87ac7d2bd13845faULL,
0xbf3e9e81a0e27c21ULL, 0x5a4836127ea6906cULL, 0xb5369883aef46c2dULL,
0x776c2d1b41f5d85aULL, 0x3638120e2a627024ULL, 0xaf8c6523e96005caULL,
0x06f302f5f1f9fb04ULL, 0x4c09cf45c6dd1283ULL, 0xa5846321e77615c6ULL,
0xd11f4fce50713e9eULL, 0x7039db49e2a972abULL, 0x9cb0742cc4097de8ULL,
0x3ac316f9d58d9b2cULL, 0x59bf37e68854636eULL, 0x54e2c7b6251ed993ULL,
0x88a07828d8255df0ULL, 0x4b5c39176581b872ULL, 0xb0329b82a9ff642bULL,
0x72682e1a46fed05cULL, 0x9d16808b96ac2c1dULL, 0x21df1ffec0bca33eULL,
0x9812838a91a7241bULL, 0x2d241b093f534836ULL, 0xca0346c94540068cULL,
0xa1269487b2d84c35ULL, 0x6b25d24ef7984ab9ULL, 0x42a33ee19d655b7cULL,
0x96b8722eca1f6de4ULL, 0x53b731e486427362ULL, 0x47a73de09a6e537aULL,
0x608b20ebab2b0b40ULL, 0xea7aad90d759f447ULL, 0x0eaaf1a45bb849ffULL,
0x6678221e5ad2f044ULL, 0xab2e9285bcce5c39ULL, 0xfd9da0603d87275dULL,
0x0000000000000000ULL, 0xb1946f25fb5a35deULL, 0x03f701f4f6f2f302ULL,
0x12e30ef1edd5db1cULL, 0xfe6aa194cb75d45fULL, 0x272c1d0b3145583aULL,
0x5cbb34e78f5f6b68ULL, 0xbcc99f7556108f23ULL, 0x749b2cefb7072b58ULL,
0xe4d05c348ce1bdb8ULL, 0xf5c4533197c695a6ULL, 0xa37761d4168feec2ULL,
0xb7676dd00aa3cedaULL, 0xa4229786b5d34433ULL, 0x9be5827e6755d719ULL,
0x238eeaad64eb01c9ULL, 0x2ed31afdc9a1bb34ULL, 0x8da47b29df2e55f6ULL,
0xf0c0503090cd9da0ULL, 0xd7ec4d3ba188c59aULL, 0xd946bc9ffa308c65ULL,
0x3fc715f8d286932aULL, 0xf93f57c668297eaeULL, 0x5f4c351379ad986aULL,
0x1e180a06123a3014ULL, 0x11140f051b27281eULL, 0xf63352c5613466a4ULL,
0x5544331177bb8866ULL, 0xb6c1997758069f2fULL, 0x91ed847c6943c715ULL,
0x8ff58e7a7b79f701ULL, 0x85fd8878756fe70dULL, 0xeed85a3682f7adb4ULL,
0x6c70241c54c4e048ULL, 0xdde44b39af9ed596ULL, 0x2079eb599219f2cbULL,
0x7860281848e8c050ULL, 0x1345fa56bf708ae9ULL, 0x45f6c8b33e39f18dULL,
0x4afacdb03724e987ULL, 0xb4906c24fc513dd8ULL, 0xa0806020e07d1dc0ULL,
0x40f2cbb23932f98bULL, 0xe072ab92d94fe44bULL, 0x15b6f8a34e8971edULL,
0xe7275dc07a134ebaULL, 0x490dcc44c1d61a85ULL, 0xf795a66233913751ULL,
0x5040301070b08060ULL, 0x5eeac1b42b08c99fULL, 0xae2a9184bbc5543fULL,
0x5211c543d4e72297ULL, 0xe576a893de44ec4dULL, 0xed2f5bc274055eb6ULL,
0x7f35de4aebb46aa1ULL, 0x73cedabd145b81a9ULL, 0x89068c8f8a800c05ULL,
0x99b4772dc30275eeULL, 0x76cad9bc135089afULL, 0xd64ab99cf32d946fULL,
0xdfb5be6a0bc97761ULL, 0x5d1dc040ddfa3a9dULL, 0xd41b4ccf577a3698ULL,
0x10b2fba2498279ebULL, 0xba3a9d80a7e97427ULL, 0x6e21d14ff09342bfULL,
0x637c211f5dd9f842ULL, 0xc50f43ca4c5d1e86ULL, 0x3892e3aa71da39dbULL,
0x5715c642d3ec2a91ULL
};
static const u64 T4[256] = {
0xbbb96a01bad3d268ULL, 0xe59a66b154fc4d19ULL, 0xe26514cd2f71bc93ULL,
0x25871b51749ccdb9ULL, 0xf7a257a453f55102ULL, 0xd0d6be03d3686bb8ULL,
0xd6deb504d26b6fbdULL, 0xb35285fe4dd72964ULL, 0xfdba4aad50f05d0dULL,
0xcf09e063ace98a26ULL, 0x091c96848d8a0e83ULL, 0xa5914d1abfdcc679ULL,
0x3da7374d7090ddadULL, 0xf1aa5ca352f65507ULL, 0x7ba417e19ab352c8ULL,
0xb55a8ef94cd42d61ULL, 0x460320acea238f65ULL, 0xc4e68411d56273a6ULL,
0x55cc68c297a466f1ULL, 0xdcc6a80dd16e63b2ULL, 0xaa85d0993355ccffULL,
0xfbb241aa51f35908ULL, 0xc7e20f9c5bed712aULL, 0xf359ae55a6f7a204ULL,
0xfebec120de7f5f81ULL, 0xad7aa2e548d83d75ULL, 0xd729cc7fa8e59a32ULL,
0x71bc0ae899b65ec7ULL, 0xe096e63bdb704b90ULL, 0xac8ddb9e3256c8faULL,
0x95d11522b7c4e651ULL, 0x32b3aacefc19d72bULL, 0x704b7393e338ab48ULL,
0x63843bfd9ebf42dcULL, 0x41fc52d091ae7eefULL, 0x7dac1ce69bb056cdULL,
0x76437894e23baf4dULL, 0xbdb16106bbd0d66dULL, 0x9b32f1da41c31958ULL,
0x7957e5176eb2a5cbULL, 0xf941b35ca5f2ae0bULL, 0x8016564bcb400bc0ULL,
0x677fc20c6bbdb1daULL, 0x59dc7ecc95a26efbULL, 0xe1619f40a1febe1fULL,
0x10cbc3e3f308eb18ULL, 0x81e12f30b1cefe4fULL, 0x0c10160e0206080aULL,
0x922e675ecc4917dbULL, 0xa26e3f66c45137f3ULL, 0x4ee8cf531d277469ULL,
0x78a09c6c143c5044ULL, 0xb0560e73c3582be8ULL, 0x573f9a3463a591f2ULL,
0xe69eed3cda734f95ULL, 0xd3d2358e5de76934ULL, 0xdfc223805fe1613eULL,
0xf2aed72edc79578bULL, 0x13cf486e7d87e994ULL, 0x94266c59cd4a13deULL,
0x1fdf5e607f81e19eULL, 0xc1ea049b5aee752fULL, 0x7547f3196cb4adc1ULL,
0xd5da3e895ce46d31ULL, 0x08ebeffff704fb0cULL, 0xd42d47f2266a98beULL,
0x38abb7c7ff1cdb24ULL, 0x543b11b9ed2a937eULL, 0x4a1336a2e825876fULL,
0x699c26f49dba4ed3ULL, 0x7f5fee106fb1a1ceULL, 0x03048b8d8e8f028cULL,
0x56c8e34f192b647dULL, 0xe7699447a0fdba1aULL, 0x1ad3deeaf00de717ULL,
0x113cba9889861e97ULL, 0x2278692d0f113c33ULL, 0x1238311507091c1bULL,
0xc511fd6aafec8629ULL, 0x208b9bdbfb10cb30ULL, 0x3040583808182028ULL,
0x7ea8976b153f5441ULL, 0x2e687f230d173439ULL, 0x18202c1c040c1014ULL,
0x06080b0701030405ULL, 0x4507ab2164ac8de9ULL, 0xf8b6ca27df7c5b84ULL,
0x29970d5f769ac5b3ULL, 0x0bef6472798bf980ULL, 0xf4a6dc29dd7a538eULL,
0x8ef5b2b33d47f4c9ULL, 0x74b08a62163a584eULL, 0x82e5a4bd3f41fcc3ULL,
0xb2a5fc853759dcebULL, 0x734ff81e6db7a9c4ULL, 0x90dd95a83848e0d8ULL,
0xb1a17708b9d6de67ULL, 0x37bf2a447395d1a2ULL, 0x4c1b3da5e926836aULL,
0xbeb5ea8b355fd4e1ULL, 0xe3926db655ff491cULL, 0x3baf3c4a7193d9a8ULL,
0x07ff727c7b8df18aULL, 0x0f149d838c890a86ULL, 0x31b721437296d5a7ULL,
0x1734b19f88851a92ULL, 0x0ee3e4f8f607ff09ULL, 0xfc4d33d62a7ea882ULL,
0x84edafba3e42f8c6ULL, 0xd9ca28875ee2653bULL, 0xd2254cf527699cbbULL,
0x890ac0cf46ca0543ULL, 0x286074240c14303cULL, 0x430fa02665af89ecULL,
0x6d67df0568b8bdd5ULL, 0x5b2f8c3a61a399f8ULL, 0x0a181d0903050c0fULL,
0xbc46187dc15e23e2ULL, 0xef827bb857f94116ULL, 0xcefe9918d6677fa9ULL,
0xec86f035d976439aULL, 0xcdfa129558e87d25ULL, 0xea8efb32d875479fULL,
0x4917bd2f66aa85e3ULL, 0xc8f6921fd7647bacULL, 0x9ccd83a63a4ee8d2ULL,
0x8a0e4b42c84507cfULL, 0x88fdb9b43c44f0ccULL, 0x268390dcfa13cf35ULL,
0x53c463c596a762f4ULL, 0xf551a552a7f4a601ULL, 0x77b401ef98b55ac2ULL,
0x52331abeec29977bULL, 0xb7a97c0fb8d5da62ULL, 0xa876226fc7543bfcULL,
0xc319f66daeef822cULL, 0x6b6fd40269bbb9d0ULL, 0xa762bfec4bdd317aULL,
0xdd31d176abe0963dULL, 0xd121c778a9e69e37ULL, 0x4f1fb62867a981e6ULL,
0x3c504e360a1e2822ULL, 0x8f02cbc847c90146ULL, 0x16c3c8e4f20bef1dULL,
0x99c1032cb5c2ee5bULL, 0xcc0d6bee226688aaULL, 0x647b4981e532b356ULL,
0x5e230cb0ee2f9f71ULL, 0xa399461dbedfc27cULL, 0xfa4538d12b7dac87ULL,
0x217ce2a0819e3ebfULL, 0x6c90a67e1236485aULL, 0x2d6cf4ae839836b5ULL,
0x5ad8f5411b2d6c77ULL, 0x2470622a0e123836ULL, 0xca0560e923658cafULL,
0x04fbf9f1f502f306ULL, 0x8312ddc645cf094cULL, 0xc61576e7216384a5ULL,
0x9e3e7150ce4f1fd1ULL, 0xab72a9e249db3970ULL, 0xe87d09c42c74b09cULL,
0x2c9b8dd5f916c33aULL, 0x6e635488e637bf59ULL, 0x93d91e25b6c7e254ULL,
0xf05d25d82878a088ULL, 0x72b8816517395c4bULL, 0x2b64ffa9829b32b0ULL,
0x5cd0fe461a2e6872ULL, 0x1d2cac968b80169dULL, 0x3ea3bcc0fe1fdf21ULL,
0x1b24a7918a831298ULL, 0x3648533f091b242dULL, 0x8c064045c94603caULL,
0x354cd8b2879426a1ULL, 0xb94a98f74ed2256bULL, 0x7c5b659de13ea342ULL,
0xe46d1fca2e72b896ULL, 0x62734286e431b753ULL, 0x7a536e9ae03da747ULL,
0x400b2babeb208b60ULL, 0x47f459d790ad7aeaULL, 0xff49b85ba4f1aa0eULL,
0x44f0d25a1e227866ULL, 0x395ccebc85922eabULL, 0x5d27873d60a09dfdULL,
0x0000000000000000ULL, 0xde355afb256f94b1ULL, 0x02f3f2f6f401f703ULL,
0x1cdbd5edf10ee312ULL, 0x5fd475cb94a16afeULL, 0x3a5845310b1d2c27ULL,
0x686b5f8fe734bb5cULL, 0x238f1056759fc9bcULL, 0x582b07b7ef2c9b74ULL,
0xb8bde18c345cd0e4ULL, 0xa695c6973153c4f5ULL, 0xc2ee8f16d46177a3ULL,
0xdacea30ad06d67b7ULL, 0x3344d3b5869722a4ULL, 0x19d755677e82e59bULL,
0xc901eb64adea8e23ULL, 0x34bba1c9fd1ad32eULL, 0xf6552edf297ba48dULL,
0xa09dcd903050c0f0ULL, 0x9ac588a13b4decd7ULL, 0x658c30fa9fbc46d9ULL,
0x2a9386d2f815c73fULL, 0xae7e2968c6573ff9ULL, 0x6a98ad7913354c5fULL,
0x14303a12060a181eULL, 0x1e28271b050f1411ULL, 0xa4663461c55233f6ULL,
0x6688bb7711334455ULL, 0x2f9f06587799c1b6ULL, 0x15c743697c84ed91ULL,
0x01f7797b7a8ef58fULL, 0x0de76f757888fd85ULL, 0xb4adf782365ad8eeULL,
0x48e0c4541c24706cULL, 0x96d59eaf394be4ddULL, 0xcbf2199259eb7920ULL,
0x50c0e84818286078ULL, 0xe98a70bf56fa4513ULL, 0x8df1393eb3c8f645ULL,
0x87e92437b0cdfa4aULL, 0xd83d51fc246c90b4ULL, 0xc01d7de0206080a0ULL,
0x8bf93239b2cbf240ULL, 0x4be44fd992ab72e0ULL, 0xed71894ea3f8b615ULL,
0xba4e137ac05d27e7ULL, 0x851ad6c144cc0d49ULL, 0x5137913362a695f7ULL,
0x6080b07010304050ULL, 0x9fc9082bb4c1ea5eULL, 0x3f54c5bb84912aaeULL,
0x9722e7d443c51152ULL, 0x4dec44de93a876e5ULL, 0xb65e0574c25b2fedULL,
0xa16ab4eb4ade357fULL, 0xa9815b14bddace73ULL, 0x050c808a8f8c0689ULL,
0xee7502c32d77b499ULL, 0xaf895013bcd9ca76ULL, 0x6f942df39cb94ad6ULL,
0x6177c90b6abeb5dfULL, 0x9d3afadd40c01d5dULL, 0x98367a57cf4c1bd4ULL,
0xeb798249a2fbb210ULL, 0x2774e9a7809d3abaULL, 0xbf4293f04fd1216eULL,
0x42f8d95d1f217c63ULL, 0x861e5d4cca430fc5ULL, 0xdb39da71aae39238ULL,
0x912aecd342c61557ULL
};
static const u64 T5[256] = {
0xb9bb016ad3ba68d2ULL, 0x9ae5b166fc54194dULL, 0x65e2cd14712f93bcULL,
0x8725511b9c74b9cdULL, 0xa2f7a457f5530251ULL, 0xd6d003be68d3b86bULL,
0xded604b56bd2bd6fULL, 0x52b3fe85d74d6429ULL, 0xbafdad4af0500d5dULL,
0x09cf63e0e9ac268aULL, 0x1c0984968a8d830eULL, 0x91a51a4ddcbf79c6ULL,
0xa73d4d379070adddULL, 0xaaf1a35cf6520755ULL, 0xa47be117b39ac852ULL,
0x5ab5f98ed44c612dULL, 0x0346ac2023ea658fULL, 0xe6c4118462d5a673ULL,
0xcc55c268a497f166ULL, 0xc6dc0da86ed1b263ULL, 0x85aa99d05533ffccULL,
0xb2fbaa41f3510859ULL, 0xe2c79c0fed5b2a71ULL, 0x59f355aef7a604a2ULL,
0xbefe20c17fde815fULL, 0x7aade5a2d848753dULL, 0x29d77fcce5a8329aULL,
0xbc71e80ab699c75eULL, 0x96e03be670db904bULL, 0x8dac9edb5632fac8ULL,
0xd1952215c4b751e6ULL, 0xb332ceaa19fc2bd7ULL, 0x4b70937338e348abULL,
0x8463fd3bbf9edc42ULL, 0xfc41d052ae91ef7eULL, 0xac7de61cb09bcd56ULL,
0x437694783be24dafULL, 0xb1bd0661d0bb6dd6ULL, 0x329bdaf1c3415819ULL,
0x577917e5b26ecba5ULL, 0x41f95cb3f2a50baeULL, 0x16804b5640cbc00bULL,
0x7f670cc2bd6bdab1ULL, 0xdc59cc7ea295fb6eULL, 0x61e1409ffea11fbeULL,
0xcb10e3c308f318ebULL, 0xe181302fceb14ffeULL, 0x100c0e1606020a08ULL,
0x2e925e6749ccdb17ULL, 0x6ea2663f51c4f337ULL, 0xe84e53cf271d6974ULL,
0xa0786c9c3c144450ULL, 0x56b0730e58c3e82bULL, 0x3f57349aa563f291ULL,
0x9ee63ced73da954fULL, 0xd2d38e35e75d3469ULL, 0xc2df8023e15f3e61ULL,
0xaef22ed779dc8b57ULL, 0xcf136e48877d94e9ULL, 0x2694596c4acdde13ULL,
0xdf1f605e817f9ee1ULL, 0xeac19b04ee5a2f75ULL, 0x477519f3b46cc1adULL,
0xdad5893ee45c316dULL, 0xeb08ffef04f70cfbULL, 0x2dd4f2476a26be98ULL,
0xab38c7b71cff24dbULL, 0x3b54b9112aed7e93ULL, 0x134aa23625e86f87ULL,
0x9c69f426ba9dd34eULL, 0x5f7f10eeb16fcea1ULL, 0x04038d8b8f8e8c02ULL,
0xc8564fe32b197d64ULL, 0x69e74794fda01abaULL, 0xd31aeade0df017e7ULL,
0x3c1198ba8689971eULL, 0x78222d69110f333cULL, 0x3812153109071b1cULL,
0x11c56afdecaf2986ULL, 0x8b20db9b10fb30cbULL, 0x4030385818082820ULL,
0xa87e6b973f154154ULL, 0x682e237f170d3934ULL, 0x20181c2c0c041410ULL,
0x0806070b03010504ULL, 0x074521abac64e98dULL, 0xb6f827ca7cdf845bULL,
0x97295f0d9a76b3c5ULL, 0xef0b72648b7980f9ULL, 0xa6f429dc7add8e53ULL,
0xf58eb3b2473dc9f4ULL, 0xb074628a3a164e58ULL, 0xe582bda4413fc3fcULL,
0xa5b285fc5937ebdcULL, 0x4f731ef8b76dc4a9ULL, 0xdd90a8954838d8e0ULL,
0xa1b10877d6b967deULL, 0xbf37442a9573a2d1ULL, 0x1b4ca53d26e96a83ULL,
0xb5be8bea5f35e1d4ULL, 0x92e3b66dff551c49ULL, 0xaf3b4a3c9371a8d9ULL,
0xff077c728d7b8af1ULL, 0x140f839d898c860aULL, 0xb73143219672a7d5ULL,
0x34179fb18588921aULL, 0xe30ef8e407f609ffULL, 0x4dfcd6337e2a82a8ULL,
0xed84baaf423ec6f8ULL, 0xcad98728e25e3b65ULL, 0x25d2f54c6927bb9cULL,
0x0a89cfc0ca464305ULL, 0x60282474140c3c30ULL, 0x0f4326a0af65ec89ULL,
0x676d05dfb868d5bdULL, 0x2f5b3a8ca361f899ULL, 0x180a091d05030f0cULL,
0x46bc7d185ec1e223ULL, 0x82efb87bf9571641ULL, 0xfece189967d6a97fULL,
0x86ec35f076d99a43ULL, 0xfacd9512e858257dULL, 0x8eea32fb75d89f47ULL,
0x17492fbdaa66e385ULL, 0xf6c81f9264d7ac7bULL, 0xcd9ca6834e3ad2e8ULL,
0x0e8a424b45c8cf07ULL, 0xfd88b4b9443cccf0ULL, 0x8326dc9013fa35cfULL,
0xc453c563a796f462ULL, 0x51f552a5f4a701a6ULL, 0xb477ef01b598c25aULL,
0x3352be1a29ec7b97ULL, 0xa9b70f7cd5b862daULL, 0x76a86f2254c7fc3bULL,
0x19c36df6efae2c82ULL, 0x6f6b02d4bb69d0b9ULL, 0x62a7ecbfdd4b7a31ULL,
0x31dd76d1e0ab3d96ULL, 0x21d178c7e6a9379eULL, 0x1f4f28b6a967e681ULL,
0x503c364e1e0a2228ULL, 0x028fc8cbc9474601ULL, 0xc316e4c80bf21defULL,
0xc1992c03c2b55beeULL, 0x0dccee6b6622aa88ULL, 0x7b64814932e556b3ULL,
0x235eb00c2fee719fULL, 0x99a31d46dfbe7cc2ULL, 0x45fad1387d2b87acULL,
0x7c21a0e29e81bf3eULL, 0x906c7ea636125a48ULL, 0x6c2daef49883b536ULL,
0xd85a41f52d1b776cULL, 0x70242a62120e3638ULL, 0x05cae9606523af8cULL,
0xfb04f1f902f506f3ULL, 0x1283c6ddcf454c09ULL, 0x15c6e7766321a584ULL,
0x3e9e50714fced11fULL, 0x72abe2a9db497039ULL, 0x7de8c409742c9cb0ULL,
0x9b2cd58d16f93ac3ULL, 0x636e885437e659bfULL, 0xd993251ec7b654e2ULL,
0x5df0d825782888a0ULL, 0xb872658139174b5cULL, 0x642ba9ff9b82b032ULL,
0xd05c46fe2e1a7268ULL, 0x2c1d96ac808b9d16ULL, 0xa33ec0bc1ffe21dfULL,
0x241b91a7838a9812ULL, 0x48363f531b092d24ULL, 0x068c454046c9ca03ULL,
0x4c35b2d89487a126ULL, 0x4ab9f798d24e6b25ULL, 0x5b7c9d653ee142a3ULL,
0x6de4ca1f722e96b8ULL, 0x7362864231e453b7ULL, 0x537a9a6e3de047a7ULL,
0x0b40ab2b20eb608bULL, 0xf447d759ad90ea7aULL, 0x49ff5bb8f1a40eaaULL,
0xf0445ad2221e6678ULL, 0x5c39bcce9285ab2eULL, 0x275d3d87a060fd9dULL,
0x0000000000000000ULL, 0x35defb5a6f25b194ULL, 0xf302f6f201f403f7ULL,
0xdb1cedd50ef112e3ULL, 0xd45fcb75a194fe6aULL, 0x583a31451d0b272cULL,
0x6b688f5f34e75cbbULL, 0x8f2356109f75bcc9ULL, 0x2b58b7072cef749bULL,
0xbdb88ce15c34e4d0ULL, 0x95a697c65331f5c4ULL, 0xeec2168f61d4a377ULL,
0xceda0aa36dd0b767ULL, 0x4433b5d39786a422ULL, 0xd7196755827e9be5ULL,
0x01c964ebeaad238eULL, 0xbb34c9a11afd2ed3ULL, 0x55f6df2e7b298da4ULL,
0x9da090cd5030f0c0ULL, 0xc59aa1884d3bd7ecULL, 0x8c65fa30bc9fd946ULL,
0x932ad28615f83fc7ULL, 0x7eae682957c6f93fULL, 0x986a79ad35135f4cULL,
0x3014123a0a061e18ULL, 0x281e1b270f051114ULL, 0x66a4613452c5f633ULL,
0x886677bb33115544ULL, 0x9f2f58069977b6c1ULL, 0xc7156943847c91edULL,
0xf7017b798e7a8ff5ULL, 0xe70d756f887885fdULL, 0xadb482f75a36eed8ULL,
0xe04854c4241c6c70ULL, 0xd596af9e4b39dde4ULL, 0xf2cb9219eb592079ULL,
0xc05048e828187860ULL, 0x8ae9bf70fa561345ULL, 0xf18d3e39c8b345f6ULL,
0xe9873724cdb04afaULL, 0x3dd8fc516c24b490ULL, 0x1dc0e07d6020a080ULL,
0xf98b3932cbb240f2ULL, 0xe44bd94fab92e072ULL, 0x71ed4e89f8a315b6ULL,
0x4eba7a135dc0e727ULL, 0x1a85c1d6cc44490dULL, 0x37513391a662f795ULL,
0x806070b030105040ULL, 0xc99f2b08c1b45eeaULL, 0x543fbbc59184ae2aULL,
0x2297d4e7c5435211ULL, 0xec4dde44a893e576ULL, 0x5eb674055bc2ed2fULL,
0x6aa1ebb4de4a7f35ULL, 0x81a9145bdabd73ceULL, 0x0c058a808c8f8906ULL,
0x75eec302772d99b4ULL, 0x89af1350d9bc76caULL, 0x946ff32db99cd64aULL,
0x77610bc9be6adfb5ULL, 0x3a9dddfac0405d1dULL, 0x3698577a4ccfd41bULL,
0x79eb4982fba210b2ULL, 0x7427a7e99d80ba3aULL, 0x42bff093d14f6e21ULL,
0xf8425dd9211f637cULL, 0x1e864c5d43cac50fULL, 0x39db71dae3aa3892ULL,
0x2a91d3ecc6425715ULL
};
static const u64 T6[256] = {
0x6a01bbb9d268bad3ULL, 0x66b1e59a4d1954fcULL, 0x14cde265bc932f71ULL,
0x1b512587cdb9749cULL, 0x57a4f7a2510253f5ULL, 0xbe03d0d66bb8d368ULL,
0xb504d6de6fbdd26bULL, 0x85feb35229644dd7ULL, 0x4aadfdba5d0d50f0ULL,
0xe063cf098a26ace9ULL, 0x9684091c0e838d8aULL, 0x4d1aa591c679bfdcULL,
0x374d3da7ddad7090ULL, 0x5ca3f1aa550752f6ULL, 0x17e17ba452c89ab3ULL,
0x8ef9b55a2d614cd4ULL, 0x20ac46038f65ea23ULL, 0x8411c4e673a6d562ULL,
0x68c255cc66f197a4ULL, 0xa80ddcc663b2d16eULL, 0xd099aa85ccff3355ULL,
0x41aafbb2590851f3ULL, 0x0f9cc7e2712a5bedULL, 0xae55f359a204a6f7ULL,
0xc120febe5f81de7fULL, 0xa2e5ad7a3d7548d8ULL, 0xcc7fd7299a32a8e5ULL,
0x0ae871bc5ec799b6ULL, 0xe63be0964b90db70ULL, 0xdb9eac8dc8fa3256ULL,
0x152295d1e651b7c4ULL, 0xaace32b3d72bfc19ULL, 0x7393704bab48e338ULL,
0x3bfd638442dc9ebfULL, 0x52d041fc7eef91aeULL, 0x1ce67dac56cd9bb0ULL,
0x78947643af4de23bULL, 0x6106bdb1d66dbbd0ULL, 0xf1da9b32195841c3ULL,
0xe5177957a5cb6eb2ULL, 0xb35cf941ae0ba5f2ULL, 0x564b80160bc0cb40ULL,
0xc20c677fb1da6bbdULL, 0x7ecc59dc6efb95a2ULL, 0x9f40e161be1fa1feULL,
0xc3e310cbeb18f308ULL, 0x2f3081e1fe4fb1ceULL, 0x160e0c10080a0206ULL,
0x675e922e17dbcc49ULL, 0x3f66a26e37f3c451ULL, 0xcf534ee874691d27ULL,
0x9c6c78a05044143cULL, 0x0e73b0562be8c358ULL, 0x9a34573f91f263a5ULL,
0xed3ce69e4f95da73ULL, 0x358ed3d269345de7ULL, 0x2380dfc2613e5fe1ULL,
0xd72ef2ae578bdc79ULL, 0x486e13cfe9947d87ULL, 0x6c59942613decd4aULL,
0x5e601fdfe19e7f81ULL, 0x049bc1ea752f5aeeULL, 0xf3197547adc16cb4ULL,
0x3e89d5da6d315ce4ULL, 0xefff08ebfb0cf704ULL, 0x47f2d42d98be266aULL,
0xb7c738abdb24ff1cULL, 0x11b9543b937eed2aULL, 0x36a24a13876fe825ULL,
0x26f4699c4ed39dbaULL, 0xee107f5fa1ce6fb1ULL, 0x8b8d0304028c8e8fULL,
0xe34f56c8647d192bULL, 0x9447e769ba1aa0fdULL, 0xdeea1ad3e717f00dULL,
0xba98113c1e978986ULL, 0x692d22783c330f11ULL, 0x311512381c1b0709ULL,
0xfd6ac5118629afecULL, 0x9bdb208bcb30fb10ULL, 0x5838304020280818ULL,
0x976b7ea85441153fULL, 0x7f232e6834390d17ULL, 0x2c1c18201014040cULL,
0x0b07060804050103ULL, 0xab2145078de964acULL, 0xca27f8b65b84df7cULL,
0x0d5f2997c5b3769aULL, 0x64720beff980798bULL, 0xdc29f4a6538edd7aULL,
0xb2b38ef5f4c93d47ULL, 0x8a6274b0584e163aULL, 0xa4bd82e5fcc33f41ULL,
0xfc85b2a5dceb3759ULL, 0xf81e734fa9c46db7ULL, 0x95a890dde0d83848ULL,
0x7708b1a1de67b9d6ULL, 0x2a4437bfd1a27395ULL, 0x3da54c1b836ae926ULL,
0xea8bbeb5d4e1355fULL, 0x6db6e392491c55ffULL, 0x3c4a3bafd9a87193ULL,
0x727c07fff18a7b8dULL, 0x9d830f140a868c89ULL, 0x214331b7d5a77296ULL,
0xb19f17341a928885ULL, 0xe4f80ee3ff09f607ULL, 0x33d6fc4da8822a7eULL,
0xafba84edf8c63e42ULL, 0x2887d9ca653b5ee2ULL, 0x4cf5d2259cbb2769ULL,
0xc0cf890a054346caULL, 0x74242860303c0c14ULL, 0xa026430f89ec65afULL,
0xdf056d67bdd568b8ULL, 0x8c3a5b2f99f861a3ULL, 0x1d090a180c0f0305ULL,
0x187dbc4623e2c15eULL, 0x7bb8ef82411657f9ULL, 0x9918cefe7fa9d667ULL,
0xf035ec86439ad976ULL, 0x1295cdfa7d2558e8ULL, 0xfb32ea8e479fd875ULL,
0xbd2f491785e366aaULL, 0x921fc8f67bacd764ULL, 0x83a69ccde8d23a4eULL,
0x4b428a0e07cfc845ULL, 0xb9b488fdf0cc3c44ULL, 0x90dc2683cf35fa13ULL,
0x63c553c462f496a7ULL, 0xa552f551a601a7f4ULL, 0x01ef77b45ac298b5ULL,
0x1abe5233977bec29ULL, 0x7c0fb7a9da62b8d5ULL, 0x226fa8763bfcc754ULL,
0xf66dc319822caeefULL, 0xd4026b6fb9d069bbULL, 0xbfeca762317a4bddULL,
0xd176dd31963dabe0ULL, 0xc778d1219e37a9e6ULL, 0xb6284f1f81e667a9ULL,
0x4e363c5028220a1eULL, 0xcbc88f02014647c9ULL, 0xc8e416c3ef1df20bULL,
0x032c99c1ee5bb5c2ULL, 0x6beecc0d88aa2266ULL, 0x4981647bb356e532ULL,
0x0cb05e239f71ee2fULL, 0x461da399c27cbedfULL, 0x38d1fa45ac872b7dULL,
0xe2a0217c3ebf819eULL, 0xa67e6c90485a1236ULL, 0xf4ae2d6c36b58398ULL,
0xf5415ad86c771b2dULL, 0x622a247038360e12ULL, 0x60e9ca058caf2365ULL,
0xf9f104fbf306f502ULL, 0xddc68312094c45cfULL, 0x76e7c61584a52163ULL,
0x71509e3e1fd1ce4fULL, 0xa9e2ab72397049dbULL, 0x09c4e87db09c2c74ULL,
0x8dd52c9bc33af916ULL, 0x54886e63bf59e637ULL, 0x1e2593d9e254b6c7ULL,
0x25d8f05da0882878ULL, 0x816572b85c4b1739ULL, 0xffa92b6432b0829bULL,
0xfe465cd068721a2eULL, 0xac961d2c169d8b80ULL, 0xbcc03ea3df21fe1fULL,
0xa7911b2412988a83ULL, 0x533f3648242d091bULL, 0x40458c0603cac946ULL,
0xd8b2354c26a18794ULL, 0x98f7b94a256b4ed2ULL, 0x659d7c5ba342e13eULL,
0x1fcae46db8962e72ULL, 0x42866273b753e431ULL, 0x6e9a7a53a747e03dULL,
0x2bab400b8b60eb20ULL, 0x59d747f47aea90adULL, 0xb85bff49aa0ea4f1ULL,
0xd25a44f078661e22ULL, 0xcebc395c2eab8592ULL, 0x873d5d279dfd60a0ULL,
0x0000000000000000ULL, 0x5afbde3594b1256fULL, 0xf2f602f3f703f401ULL,
0xd5ed1cdbe312f10eULL, 0x75cb5fd46afe94a1ULL, 0x45313a582c270b1dULL,
0x5f8f686bbb5ce734ULL, 0x1056238fc9bc759fULL, 0x07b7582b9b74ef2cULL,
0xe18cb8bdd0e4345cULL, 0xc697a695c4f53153ULL, 0x8f16c2ee77a3d461ULL,
0xa30adace67b7d06dULL, 0xd3b5334422a48697ULL, 0x556719d7e59b7e82ULL,
0xeb64c9018e23adeaULL, 0xa1c934bbd32efd1aULL, 0x2edff655a48d297bULL,
0xcd90a09dc0f03050ULL, 0x88a19ac5ecd73b4dULL, 0x30fa658c46d99fbcULL,
0x86d22a93c73ff815ULL, 0x2968ae7e3ff9c657ULL, 0xad796a984c5f1335ULL,
0x3a121430181e060aULL, 0x271b1e281411050fULL, 0x3461a46633f6c552ULL,
0xbb77668844551133ULL, 0x06582f9fc1b67799ULL, 0x436915c7ed917c84ULL,
0x797b01f7f58f7a8eULL, 0x6f750de7fd857888ULL, 0xf782b4add8ee365aULL,
0xc45448e0706c1c24ULL, 0x9eaf96d5e4dd394bULL, 0x1992cbf2792059ebULL,
0xe84850c060781828ULL, 0x70bfe98a451356faULL, 0x393e8df1f645b3c8ULL,
0x243787e9fa4ab0cdULL, 0x51fcd83d90b4246cULL, 0x7de0c01d80a02060ULL,
0x32398bf9f240b2cbULL, 0x4fd94be472e092abULL, 0x894eed71b615a3f8ULL,
0x137aba4e27e7c05dULL, 0xd6c1851a0d4944ccULL, 0x9133513795f762a6ULL,
0xb070608040501030ULL, 0x082b9fc9ea5eb4c1ULL, 0xc5bb3f542aae8491ULL,
0xe7d49722115243c5ULL, 0x44de4dec76e593a8ULL, 0x0574b65e2fedc25bULL,
0xb4eba16a357f4adeULL, 0x5b14a981ce73bddaULL, 0x808a050c06898f8cULL,
0x02c3ee75b4992d77ULL, 0x5013af89ca76bcd9ULL, 0x2df36f944ad69cb9ULL,
0xc90b6177b5df6abeULL, 0xfadd9d3a1d5d40c0ULL, 0x7a5798361bd4cf4cULL,
0x8249eb79b210a2fbULL, 0xe9a727743aba809dULL, 0x93f0bf42216e4fd1ULL,
0xd95d42f87c631f21ULL, 0x5d4c861e0fc5ca43ULL, 0xda71db399238aae3ULL,
0xecd3912a155742c6ULL
};
static const u64 T7[256] = {
0x016ab9bb68d2d3baULL, 0xb1669ae5194dfc54ULL, 0xcd1465e293bc712fULL,
0x511b8725b9cd9c74ULL, 0xa457a2f70251f553ULL, 0x03bed6d0b86b68d3ULL,
0x04b5ded6bd6f6bd2ULL, 0xfe8552b36429d74dULL, 0xad4abafd0d5df050ULL,
0x63e009cf268ae9acULL, 0x84961c09830e8a8dULL, 0x1a4d91a579c6dcbfULL,
0x4d37a73daddd9070ULL, 0xa35caaf10755f652ULL, 0xe117a47bc852b39aULL,
0xf98e5ab5612dd44cULL, 0xac200346658f23eaULL, 0x1184e6c4a67362d5ULL,
0xc268cc55f166a497ULL, 0x0da8c6dcb2636ed1ULL, 0x99d085aaffcc5533ULL,
0xaa41b2fb0859f351ULL, 0x9c0fe2c72a71ed5bULL, 0x55ae59f304a2f7a6ULL,
0x20c1befe815f7fdeULL, 0xe5a27aad753dd848ULL, 0x7fcc29d7329ae5a8ULL,
0xe80abc71c75eb699ULL, 0x3be696e0904b70dbULL, 0x9edb8dacfac85632ULL,
0x2215d19551e6c4b7ULL, 0xceaab3322bd719fcULL, 0x93734b7048ab38e3ULL,
0xfd3b8463dc42bf9eULL, 0xd052fc41ef7eae91ULL, 0xe61cac7dcd56b09bULL,
0x947843764daf3be2ULL, 0x0661b1bd6dd6d0bbULL, 0xdaf1329b5819c341ULL,
0x17e55779cba5b26eULL, 0x5cb341f90baef2a5ULL, 0x4b561680c00b40cbULL,
0x0cc27f67dab1bd6bULL, 0xcc7edc59fb6ea295ULL, 0x409f61e11fbefea1ULL,
0xe3c3cb1018eb08f3ULL, 0x302fe1814ffeceb1ULL, 0x0e16100c0a080602ULL,
0x5e672e92db1749ccULL, 0x663f6ea2f33751c4ULL, 0x53cfe84e6974271dULL,
0x6c9ca07844503c14ULL, 0x730e56b0e82b58c3ULL, 0x349a3f57f291a563ULL,
0x3ced9ee6954f73daULL, 0x8e35d2d33469e75dULL, 0x8023c2df3e61e15fULL,
0x2ed7aef28b5779dcULL, 0x6e48cf1394e9877dULL, 0x596c2694de134acdULL,
0x605edf1f9ee1817fULL, 0x9b04eac12f75ee5aULL, 0x19f34775c1adb46cULL,
0x893edad5316de45cULL, 0xffefeb080cfb04f7ULL, 0xf2472dd4be986a26ULL,
0xc7b7ab3824db1cffULL, 0xb9113b547e932aedULL, 0xa236134a6f8725e8ULL,
0xf4269c69d34eba9dULL, 0x10ee5f7fcea1b16fULL, 0x8d8b04038c028f8eULL,
0x4fe3c8567d642b19ULL, 0x479469e71abafda0ULL, 0xeaded31a17e70df0ULL,
0x98ba3c11971e8689ULL, 0x2d697822333c110fULL, 0x153138121b1c0907ULL,
0x6afd11c52986ecafULL, 0xdb9b8b2030cb10fbULL, 0x3858403028201808ULL,
0x6b97a87e41543f15ULL, 0x237f682e3934170dULL, 0x1c2c201814100c04ULL,
0x070b080605040301ULL, 0x21ab0745e98dac64ULL, 0x27cab6f8845b7cdfULL,
0x5f0d9729b3c59a76ULL, 0x7264ef0b80f98b79ULL, 0x29dca6f48e537addULL,
0xb3b2f58ec9f4473dULL, 0x628ab0744e583a16ULL, 0xbda4e582c3fc413fULL,
0x85fca5b2ebdc5937ULL, 0x1ef84f73c4a9b76dULL, 0xa895dd90d8e04838ULL,
0x0877a1b167ded6b9ULL, 0x442abf37a2d19573ULL, 0xa53d1b4c6a8326e9ULL,
0x8beab5bee1d45f35ULL, 0xb66d92e31c49ff55ULL, 0x4a3caf3ba8d99371ULL,
0x7c72ff078af18d7bULL, 0x839d140f860a898cULL, 0x4321b731a7d59672ULL,
0x9fb13417921a8588ULL, 0xf8e4e30e09ff07f6ULL, 0xd6334dfc82a87e2aULL,
0xbaafed84c6f8423eULL, 0x8728cad93b65e25eULL, 0xf54c25d2bb9c6927ULL,
0xcfc00a894305ca46ULL, 0x247460283c30140cULL, 0x26a00f43ec89af65ULL,
0x05df676dd5bdb868ULL, 0x3a8c2f5bf899a361ULL, 0x091d180a0f0c0503ULL,
0x7d1846bce2235ec1ULL, 0xb87b82ef1641f957ULL, 0x1899fecea97f67d6ULL,
0x35f086ec9a4376d9ULL, 0x9512facd257de858ULL, 0x32fb8eea9f4775d8ULL,
0x2fbd1749e385aa66ULL, 0x1f92f6c8ac7b64d7ULL, 0xa683cd9cd2e84e3aULL,
0x424b0e8acf0745c8ULL, 0xb4b9fd88ccf0443cULL, 0xdc90832635cf13faULL,
0xc563c453f462a796ULL, 0x52a551f501a6f4a7ULL, 0xef01b477c25ab598ULL,
0xbe1a33527b9729ecULL, 0x0f7ca9b762dad5b8ULL, 0x6f2276a8fc3b54c7ULL,
0x6df619c32c82efaeULL, 0x02d46f6bd0b9bb69ULL, 0xecbf62a77a31dd4bULL,
0x76d131dd3d96e0abULL, 0x78c721d1379ee6a9ULL, 0x28b61f4fe681a967ULL,
0x364e503c22281e0aULL, 0xc8cb028f4601c947ULL, 0xe4c8c3161def0bf2ULL,
0x2c03c1995beec2b5ULL, 0xee6b0dccaa886622ULL, 0x81497b6456b332e5ULL,
0xb00c235e719f2feeULL, 0x1d4699a37cc2dfbeULL, 0xd13845fa87ac7d2bULL,
0xa0e27c21bf3e9e81ULL, 0x7ea6906c5a483612ULL, 0xaef46c2db5369883ULL,
0x41f5d85a776c2d1bULL, 0x2a6270243638120eULL, 0xe96005caaf8c6523ULL,
0xf1f9fb0406f302f5ULL, 0xc6dd12834c09cf45ULL, 0xe77615c6a5846321ULL,
0x50713e9ed11f4fceULL, 0xe2a972ab7039db49ULL, 0xc4097de89cb0742cULL,
0xd58d9b2c3ac316f9ULL, 0x8854636e59bf37e6ULL, 0x251ed99354e2c7b6ULL,
0xd8255df088a07828ULL, 0x6581b8724b5c3917ULL, 0xa9ff642bb0329b82ULL,
0x46fed05c72682e1aULL, 0x96ac2c1d9d16808bULL, 0xc0bca33e21df1ffeULL,
0x91a7241b9812838aULL, 0x3f5348362d241b09ULL, 0x4540068cca0346c9ULL,
0xb2d84c35a1269487ULL, 0xf7984ab96b25d24eULL, 0x9d655b7c42a33ee1ULL,
0xca1f6de496b8722eULL, 0x8642736253b731e4ULL, 0x9a6e537a47a73de0ULL,
0xab2b0b40608b20ebULL, 0xd759f447ea7aad90ULL, 0x5bb849ff0eaaf1a4ULL,
0x5ad2f0446678221eULL, 0xbcce5c39ab2e9285ULL, 0x3d87275dfd9da060ULL,
0x0000000000000000ULL, 0xfb5a35deb1946f25ULL, 0xf6f2f30203f701f4ULL,
0xedd5db1c12e30ef1ULL, 0xcb75d45ffe6aa194ULL, 0x3145583a272c1d0bULL,
0x8f5f6b685cbb34e7ULL, 0x56108f23bcc99f75ULL, 0xb7072b58749b2cefULL,
0x8ce1bdb8e4d05c34ULL, 0x97c695a6f5c45331ULL, 0x168feec2a37761d4ULL,
0x0aa3cedab7676dd0ULL, 0xb5d34433a4229786ULL, 0x6755d7199be5827eULL,
0x64eb01c9238eeaadULL, 0xc9a1bb342ed31afdULL, 0xdf2e55f68da47b29ULL,
0x90cd9da0f0c05030ULL, 0xa188c59ad7ec4d3bULL, 0xfa308c65d946bc9fULL,
0xd286932a3fc715f8ULL, 0x68297eaef93f57c6ULL, 0x79ad986a5f4c3513ULL,
0x123a30141e180a06ULL, 0x1b27281e11140f05ULL, 0x613466a4f63352c5ULL,
0x77bb886655443311ULL, 0x58069f2fb6c19977ULL, 0x6943c71591ed847cULL,
0x7b79f7018ff58e7aULL, 0x756fe70d85fd8878ULL, 0x82f7adb4eed85a36ULL,
0x54c4e0486c70241cULL, 0xaf9ed596dde44b39ULL, 0x9219f2cb2079eb59ULL,
0x48e8c05078602818ULL, 0xbf708ae91345fa56ULL, 0x3e39f18d45f6c8b3ULL,
0x3724e9874afacdb0ULL, 0xfc513dd8b4906c24ULL, 0xe07d1dc0a0806020ULL,
0x3932f98b40f2cbb2ULL, 0xd94fe44be072ab92ULL, 0x4e8971ed15b6f8a3ULL,
0x7a134ebae7275dc0ULL, 0xc1d61a85490dcc44ULL, 0x33913751f795a662ULL,
0x70b0806050403010ULL, 0x2b08c99f5eeac1b4ULL, 0xbbc5543fae2a9184ULL,
0xd4e722975211c543ULL, 0xde44ec4de576a893ULL, 0x74055eb6ed2f5bc2ULL,
0xebb46aa17f35de4aULL, 0x145b81a973cedabdULL, 0x8a800c0589068c8fULL,
0xc30275ee99b4772dULL, 0x135089af76cad9bcULL, 0xf32d946fd64ab99cULL,
0x0bc97761dfb5be6aULL, 0xddfa3a9d5d1dc040ULL, 0x577a3698d41b4ccfULL,
0x498279eb10b2fba2ULL, 0xa7e97427ba3a9d80ULL, 0xf09342bf6e21d14fULL,
0x5dd9f842637c211fULL, 0x4c5d1e86c50f43caULL, 0x71da39db3892e3aaULL,
0xd3ec2a915715c642ULL
};
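/*
 * Round constants: per the Khazad specification these are drawn from
 * the S-box itself, i.e. byte j of c[r] equals S[8*r + j] (note c[0]
 * begins 0xba 0x54 0x2f ..., the first S-box entries).
 */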
static const u64 c[KHAZAD_ROUNDS + 1] = {
0xba542f7453d3d24dULL, 0x50ac8dbf70529a4cULL, 0xead597d133515ba6ULL,
0xde48a899db32b7fcULL, 0xe39e919be2bb416eULL, 0xa5cb6b95a1f3b102ULL,
0xccc41d14c363da5dULL, 0x5fdc7dcd7f5a6c5cULL, 0xf726ffede89d6f8eULL
};
static int khazad_setkey(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *key = (const __be32 *)in_key;
int r;
const u64 *S = T7;
u64 K2, K1;
/* key is supposed to be 32-bit aligned */
K2 = ((u64)be32_to_cpu(key[0]) << 32) | be32_to_cpu(key[1]);
K1 = ((u64)be32_to_cpu(key[2]) << 32) | be32_to_cpu(key[3]);
	/*
	 * Set up the encrypt key schedule: each E[r] is the unkeyed round
	 * function of the previous round key, XORed with the round
	 * constant c[r] and the key from two steps back, seeded by the
	 * two 64-bit halves of the user key.
	 */
for (r = 0; r <= KHAZAD_ROUNDS; r++) {
ctx->E[r] = T0[(int)(K1 >> 56) ] ^
T1[(int)(K1 >> 48) & 0xff] ^
T2[(int)(K1 >> 40) & 0xff] ^
T3[(int)(K1 >> 32) & 0xff] ^
T4[(int)(K1 >> 24) & 0xff] ^
T5[(int)(K1 >> 16) & 0xff] ^
T6[(int)(K1 >> 8) & 0xff] ^
T7[(int)(K1 ) & 0xff] ^
c[r] ^ K2;
K2 = K1;
K1 = ctx->E[r];
}
	/*
	 * Set up the decrypt key schedule: D[r] = theta(E[ROUNDS - r]).
	 * The S-box (the low byte of each T7 entry) is an involution, so
	 * indexing the T tables through it cancels the substitution and
	 * leaves only the linear diffusion layer theta.
	 */
ctx->D[0] = ctx->E[KHAZAD_ROUNDS];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
K1 = ctx->E[KHAZAD_ROUNDS - r];
ctx->D[r] = T0[(int)S[(int)(K1 >> 56) ] & 0xff] ^
T1[(int)S[(int)(K1 >> 48) & 0xff] & 0xff] ^
T2[(int)S[(int)(K1 >> 40) & 0xff] & 0xff] ^
T3[(int)S[(int)(K1 >> 32) & 0xff] & 0xff] ^
T4[(int)S[(int)(K1 >> 24) & 0xff] & 0xff] ^
T5[(int)S[(int)(K1 >> 16) & 0xff] & 0xff] ^
T6[(int)S[(int)(K1 >> 8) & 0xff] & 0xff] ^
T7[(int)S[(int)(K1 ) & 0xff] & 0xff];
}
ctx->D[KHAZAD_ROUNDS] = ctx->E[0];
return 0;
}
static void khazad_crypt(const u64 roundKey[KHAZAD_ROUNDS + 1],
u8 *ciphertext, const u8 *plaintext)
{
const __be64 *src = (const __be64 *)plaintext;
__be64 *dst = (__be64 *)ciphertext;
int r;
u64 state;
state = be64_to_cpu(*src) ^ roundKey[0];
for (r = 1; r < KHAZAD_ROUNDS; r++) {
state = T0[(int)(state >> 56) ] ^
T1[(int)(state >> 48) & 0xff] ^
T2[(int)(state >> 40) & 0xff] ^
T3[(int)(state >> 32) & 0xff] ^
T4[(int)(state >> 24) & 0xff] ^
T5[(int)(state >> 16) & 0xff] ^
T6[(int)(state >> 8) & 0xff] ^
T7[(int)(state ) & 0xff] ^
roundKey[r];
}
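	/*
	 * Final round: the diffusion matrix is a Hadamard matrix with 1s
	 * on its diagonal, so masking byte i of T_i[x] yields the bare
	 * S-box output, i.e. substitution without the linear layer.
	 */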
state = (T0[(int)(state >> 56) ] & 0xff00000000000000ULL) ^
(T1[(int)(state >> 48) & 0xff] & 0x00ff000000000000ULL) ^
(T2[(int)(state >> 40) & 0xff] & 0x0000ff0000000000ULL) ^
(T3[(int)(state >> 32) & 0xff] & 0x000000ff00000000ULL) ^
(T4[(int)(state >> 24) & 0xff] & 0x00000000ff000000ULL) ^
(T5[(int)(state >> 16) & 0xff] & 0x0000000000ff0000ULL) ^
(T6[(int)(state >> 8) & 0xff] & 0x000000000000ff00ULL) ^
(T7[(int)(state ) & 0xff] & 0x00000000000000ffULL) ^
roundKey[KHAZAD_ROUNDS];
*dst = cpu_to_be64(state);
}
static void khazad_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->E, dst, src);
}
static void khazad_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
struct khazad_ctx *ctx = crypto_tfm_ctx(tfm);
khazad_crypt(ctx->D, dst, src);
}
static struct crypto_alg khazad_alg = {
.cra_name = "khazad",
.cra_driver_name = "khazad-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = KHAZAD_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct khazad_ctx),
.cra_alignmask = 7,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = KHAZAD_KEY_SIZE,
.cia_max_keysize = KHAZAD_KEY_SIZE,
.cia_setkey = khazad_setkey,
.cia_encrypt = khazad_encrypt,
.cia_decrypt = khazad_decrypt } }
};
static int __init khazad_mod_init(void)
{
	return crypto_register_alg(&khazad_alg);
}
static void __exit khazad_mod_fini(void)
{
crypto_unregister_alg(&khazad_alg);
}
subsys_initcall(khazad_mod_init);
module_exit(khazad_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
MODULE_ALIAS_CRYPTO("khazad");
| linux-master | crypto/khazad.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SEED Cipher Algorithm.
*
* Documentation of SEED can be found in RFC 4269.
* Copyright (C) 2007 Korea Information Security Agency (KISA).
*/
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/byteorder.h>
#define SEED_NUM_KCONSTANTS 16
#define SEED_KEY_SIZE 16
#define SEED_BLOCK_SIZE 16
#define SEED_KEYSCHED_LEN 32
/*
 * Macro equivalent of the byte() helper below:
 * #define byte(x, nr) ((unsigned char)((x) >> (nr*8)))
 */
static inline u8
byte(const u32 x, const unsigned n)
{
return x >> (n << 3);
}
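/* e.g. byte(0x0a0b0c0d, 0) == 0x0d, byte(0x0a0b0c0d, 3) == 0x0a */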
struct seed_ctx {
u32 keysched[SEED_KEYSCHED_LEN];
};
static const u32 SS0[256] = {
0x2989a1a8, 0x05858184, 0x16c6d2d4, 0x13c3d3d0,
0x14445054, 0x1d0d111c, 0x2c8ca0ac, 0x25052124,
0x1d4d515c, 0x03434340, 0x18081018, 0x1e0e121c,
0x11415150, 0x3cccf0fc, 0x0acac2c8, 0x23436360,
0x28082028, 0x04444044, 0x20002020, 0x1d8d919c,
0x20c0e0e0, 0x22c2e2e0, 0x08c8c0c8, 0x17071314,
0x2585a1a4, 0x0f8f838c, 0x03030300, 0x3b4b7378,
0x3b8bb3b8, 0x13031310, 0x12c2d2d0, 0x2ecee2ec,
0x30407070, 0x0c8c808c, 0x3f0f333c, 0x2888a0a8,
0x32023230, 0x1dcdd1dc, 0x36c6f2f4, 0x34447074,
0x2ccce0ec, 0x15859194, 0x0b0b0308, 0x17475354,
0x1c4c505c, 0x1b4b5358, 0x3d8db1bc, 0x01010100,
0x24042024, 0x1c0c101c, 0x33437370, 0x18889098,
0x10001010, 0x0cccc0cc, 0x32c2f2f0, 0x19c9d1d8,
0x2c0c202c, 0x27c7e3e4, 0x32427270, 0x03838380,
0x1b8b9398, 0x11c1d1d0, 0x06868284, 0x09c9c1c8,
0x20406060, 0x10405050, 0x2383a3a0, 0x2bcbe3e8,
0x0d0d010c, 0x3686b2b4, 0x1e8e929c, 0x0f4f434c,
0x3787b3b4, 0x1a4a5258, 0x06c6c2c4, 0x38487078,
0x2686a2a4, 0x12021210, 0x2f8fa3ac, 0x15c5d1d4,
0x21416160, 0x03c3c3c0, 0x3484b0b4, 0x01414140,
0x12425250, 0x3d4d717c, 0x0d8d818c, 0x08080008,
0x1f0f131c, 0x19899198, 0x00000000, 0x19091118,
0x04040004, 0x13435350, 0x37c7f3f4, 0x21c1e1e0,
0x3dcdf1fc, 0x36467274, 0x2f0f232c, 0x27072324,
0x3080b0b0, 0x0b8b8388, 0x0e0e020c, 0x2b8ba3a8,
0x2282a2a0, 0x2e4e626c, 0x13839390, 0x0d4d414c,
0x29496168, 0x3c4c707c, 0x09090108, 0x0a0a0208,
0x3f8fb3bc, 0x2fcfe3ec, 0x33c3f3f0, 0x05c5c1c4,
0x07878384, 0x14041014, 0x3ecef2fc, 0x24446064,
0x1eced2dc, 0x2e0e222c, 0x0b4b4348, 0x1a0a1218,
0x06060204, 0x21012120, 0x2b4b6368, 0x26466264,
0x02020200, 0x35c5f1f4, 0x12829290, 0x0a8a8288,
0x0c0c000c, 0x3383b3b0, 0x3e4e727c, 0x10c0d0d0,
0x3a4a7278, 0x07474344, 0x16869294, 0x25c5e1e4,
0x26062224, 0x00808080, 0x2d8da1ac, 0x1fcfd3dc,
0x2181a1a0, 0x30003030, 0x37073334, 0x2e8ea2ac,
0x36063234, 0x15051114, 0x22022220, 0x38083038,
0x34c4f0f4, 0x2787a3a4, 0x05454144, 0x0c4c404c,
0x01818180, 0x29c9e1e8, 0x04848084, 0x17879394,
0x35053134, 0x0bcbc3c8, 0x0ecec2cc, 0x3c0c303c,
0x31417170, 0x11011110, 0x07c7c3c4, 0x09898188,
0x35457174, 0x3bcbf3f8, 0x1acad2d8, 0x38c8f0f8,
0x14849094, 0x19495158, 0x02828280, 0x04c4c0c4,
0x3fcff3fc, 0x09494148, 0x39093138, 0x27476364,
0x00c0c0c0, 0x0fcfc3cc, 0x17c7d3d4, 0x3888b0b8,
0x0f0f030c, 0x0e8e828c, 0x02424240, 0x23032320,
0x11819190, 0x2c4c606c, 0x1bcbd3d8, 0x2484a0a4,
0x34043034, 0x31c1f1f0, 0x08484048, 0x02c2c2c0,
0x2f4f636c, 0x3d0d313c, 0x2d0d212c, 0x00404040,
0x3e8eb2bc, 0x3e0e323c, 0x3c8cb0bc, 0x01c1c1c0,
0x2a8aa2a8, 0x3a8ab2b8, 0x0e4e424c, 0x15455154,
0x3b0b3338, 0x1cccd0dc, 0x28486068, 0x3f4f737c,
0x1c8c909c, 0x18c8d0d8, 0x0a4a4248, 0x16465254,
0x37477374, 0x2080a0a0, 0x2dcde1ec, 0x06464244,
0x3585b1b4, 0x2b0b2328, 0x25456164, 0x3acaf2f8,
0x23c3e3e0, 0x3989b1b8, 0x3181b1b0, 0x1f8f939c,
0x1e4e525c, 0x39c9f1f8, 0x26c6e2e4, 0x3282b2b0,
0x31013130, 0x2acae2e8, 0x2d4d616c, 0x1f4f535c,
0x24c4e0e4, 0x30c0f0f0, 0x0dcdc1cc, 0x08888088,
0x16061214, 0x3a0a3238, 0x18485058, 0x14c4d0d4,
0x22426260, 0x29092128, 0x07070304, 0x33033330,
0x28c8e0e8, 0x1b0b1318, 0x05050104, 0x39497178,
0x10809090, 0x2a4a6268, 0x2a0a2228, 0x1a8a9298,
};
static const u32 SS1[256] = {
0x38380830, 0xe828c8e0, 0x2c2d0d21, 0xa42686a2,
0xcc0fcfc3, 0xdc1eced2, 0xb03383b3, 0xb83888b0,
0xac2f8fa3, 0x60204060, 0x54154551, 0xc407c7c3,
0x44044440, 0x6c2f4f63, 0x682b4b63, 0x581b4b53,
0xc003c3c3, 0x60224262, 0x30330333, 0xb43585b1,
0x28290921, 0xa02080a0, 0xe022c2e2, 0xa42787a3,
0xd013c3d3, 0x90118191, 0x10110111, 0x04060602,
0x1c1c0c10, 0xbc3c8cb0, 0x34360632, 0x480b4b43,
0xec2fcfe3, 0x88088880, 0x6c2c4c60, 0xa82888a0,
0x14170713, 0xc404c4c0, 0x14160612, 0xf434c4f0,
0xc002c2c2, 0x44054541, 0xe021c1e1, 0xd416c6d2,
0x3c3f0f33, 0x3c3d0d31, 0x8c0e8e82, 0x98188890,
0x28280820, 0x4c0e4e42, 0xf436c6f2, 0x3c3e0e32,
0xa42585a1, 0xf839c9f1, 0x0c0d0d01, 0xdc1fcfd3,
0xd818c8d0, 0x282b0b23, 0x64264662, 0x783a4a72,
0x24270723, 0x2c2f0f23, 0xf031c1f1, 0x70324272,
0x40024242, 0xd414c4d0, 0x40014141, 0xc000c0c0,
0x70334373, 0x64274763, 0xac2c8ca0, 0x880b8b83,
0xf437c7f3, 0xac2d8da1, 0x80008080, 0x1c1f0f13,
0xc80acac2, 0x2c2c0c20, 0xa82a8aa2, 0x34340430,
0xd012c2d2, 0x080b0b03, 0xec2ecee2, 0xe829c9e1,
0x5c1d4d51, 0x94148490, 0x18180810, 0xf838c8f0,
0x54174753, 0xac2e8ea2, 0x08080800, 0xc405c5c1,
0x10130313, 0xcc0dcdc1, 0x84068682, 0xb83989b1,
0xfc3fcff3, 0x7c3d4d71, 0xc001c1c1, 0x30310131,
0xf435c5f1, 0x880a8a82, 0x682a4a62, 0xb03181b1,
0xd011c1d1, 0x20200020, 0xd417c7d3, 0x00020202,
0x20220222, 0x04040400, 0x68284860, 0x70314171,
0x04070703, 0xd81bcbd3, 0x9c1d8d91, 0x98198991,
0x60214161, 0xbc3e8eb2, 0xe426c6e2, 0x58194951,
0xdc1dcdd1, 0x50114151, 0x90108090, 0xdc1cccd0,
0x981a8a92, 0xa02383a3, 0xa82b8ba3, 0xd010c0d0,
0x80018181, 0x0c0f0f03, 0x44074743, 0x181a0a12,
0xe023c3e3, 0xec2ccce0, 0x8c0d8d81, 0xbc3f8fb3,
0x94168692, 0x783b4b73, 0x5c1c4c50, 0xa02282a2,
0xa02181a1, 0x60234363, 0x20230323, 0x4c0d4d41,
0xc808c8c0, 0x9c1e8e92, 0x9c1c8c90, 0x383a0a32,
0x0c0c0c00, 0x2c2e0e22, 0xb83a8ab2, 0x6c2e4e62,
0x9c1f8f93, 0x581a4a52, 0xf032c2f2, 0x90128292,
0xf033c3f3, 0x48094941, 0x78384870, 0xcc0cccc0,
0x14150511, 0xf83bcbf3, 0x70304070, 0x74354571,
0x7c3f4f73, 0x34350531, 0x10100010, 0x00030303,
0x64244460, 0x6c2d4d61, 0xc406c6c2, 0x74344470,
0xd415c5d1, 0xb43484b0, 0xe82acae2, 0x08090901,
0x74364672, 0x18190911, 0xfc3ecef2, 0x40004040,
0x10120212, 0xe020c0e0, 0xbc3d8db1, 0x04050501,
0xf83acaf2, 0x00010101, 0xf030c0f0, 0x282a0a22,
0x5c1e4e52, 0xa82989a1, 0x54164652, 0x40034343,
0x84058581, 0x14140410, 0x88098981, 0x981b8b93,
0xb03080b0, 0xe425c5e1, 0x48084840, 0x78394971,
0x94178793, 0xfc3cccf0, 0x1c1e0e12, 0x80028282,
0x20210121, 0x8c0c8c80, 0x181b0b13, 0x5c1f4f53,
0x74374773, 0x54144450, 0xb03282b2, 0x1c1d0d11,
0x24250521, 0x4c0f4f43, 0x00000000, 0x44064642,
0xec2dcde1, 0x58184850, 0x50124252, 0xe82bcbe3,
0x7c3e4e72, 0xd81acad2, 0xc809c9c1, 0xfc3dcdf1,
0x30300030, 0x94158591, 0x64254561, 0x3c3c0c30,
0xb43686b2, 0xe424c4e0, 0xb83b8bb3, 0x7c3c4c70,
0x0c0e0e02, 0x50104050, 0x38390931, 0x24260622,
0x30320232, 0x84048480, 0x68294961, 0x90138393,
0x34370733, 0xe427c7e3, 0x24240420, 0xa42484a0,
0xc80bcbc3, 0x50134353, 0x080a0a02, 0x84078783,
0xd819c9d1, 0x4c0c4c40, 0x80038383, 0x8c0f8f83,
0xcc0ecec2, 0x383b0b33, 0x480a4a42, 0xb43787b3,
};
static const u32 SS2[256] = {
0xa1a82989, 0x81840585, 0xd2d416c6, 0xd3d013c3,
0x50541444, 0x111c1d0d, 0xa0ac2c8c, 0x21242505,
0x515c1d4d, 0x43400343, 0x10181808, 0x121c1e0e,
0x51501141, 0xf0fc3ccc, 0xc2c80aca, 0x63602343,
0x20282808, 0x40440444, 0x20202000, 0x919c1d8d,
0xe0e020c0, 0xe2e022c2, 0xc0c808c8, 0x13141707,
0xa1a42585, 0x838c0f8f, 0x03000303, 0x73783b4b,
0xb3b83b8b, 0x13101303, 0xd2d012c2, 0xe2ec2ece,
0x70703040, 0x808c0c8c, 0x333c3f0f, 0xa0a82888,
0x32303202, 0xd1dc1dcd, 0xf2f436c6, 0x70743444,
0xe0ec2ccc, 0x91941585, 0x03080b0b, 0x53541747,
0x505c1c4c, 0x53581b4b, 0xb1bc3d8d, 0x01000101,
0x20242404, 0x101c1c0c, 0x73703343, 0x90981888,
0x10101000, 0xc0cc0ccc, 0xf2f032c2, 0xd1d819c9,
0x202c2c0c, 0xe3e427c7, 0x72703242, 0x83800383,
0x93981b8b, 0xd1d011c1, 0x82840686, 0xc1c809c9,
0x60602040, 0x50501040, 0xa3a02383, 0xe3e82bcb,
0x010c0d0d, 0xb2b43686, 0x929c1e8e, 0x434c0f4f,
0xb3b43787, 0x52581a4a, 0xc2c406c6, 0x70783848,
0xa2a42686, 0x12101202, 0xa3ac2f8f, 0xd1d415c5,
0x61602141, 0xc3c003c3, 0xb0b43484, 0x41400141,
0x52501242, 0x717c3d4d, 0x818c0d8d, 0x00080808,
0x131c1f0f, 0x91981989, 0x00000000, 0x11181909,
0x00040404, 0x53501343, 0xf3f437c7, 0xe1e021c1,
0xf1fc3dcd, 0x72743646, 0x232c2f0f, 0x23242707,
0xb0b03080, 0x83880b8b, 0x020c0e0e, 0xa3a82b8b,
0xa2a02282, 0x626c2e4e, 0x93901383, 0x414c0d4d,
0x61682949, 0x707c3c4c, 0x01080909, 0x02080a0a,
0xb3bc3f8f, 0xe3ec2fcf, 0xf3f033c3, 0xc1c405c5,
0x83840787, 0x10141404, 0xf2fc3ece, 0x60642444,
0xd2dc1ece, 0x222c2e0e, 0x43480b4b, 0x12181a0a,
0x02040606, 0x21202101, 0x63682b4b, 0x62642646,
0x02000202, 0xf1f435c5, 0x92901282, 0x82880a8a,
0x000c0c0c, 0xb3b03383, 0x727c3e4e, 0xd0d010c0,
0x72783a4a, 0x43440747, 0x92941686, 0xe1e425c5,
0x22242606, 0x80800080, 0xa1ac2d8d, 0xd3dc1fcf,
0xa1a02181, 0x30303000, 0x33343707, 0xa2ac2e8e,
0x32343606, 0x11141505, 0x22202202, 0x30383808,
0xf0f434c4, 0xa3a42787, 0x41440545, 0x404c0c4c,
0x81800181, 0xe1e829c9, 0x80840484, 0x93941787,
0x31343505, 0xc3c80bcb, 0xc2cc0ece, 0x303c3c0c,
0x71703141, 0x11101101, 0xc3c407c7, 0x81880989,
0x71743545, 0xf3f83bcb, 0xd2d81aca, 0xf0f838c8,
0x90941484, 0x51581949, 0x82800282, 0xc0c404c4,
0xf3fc3fcf, 0x41480949, 0x31383909, 0x63642747,
0xc0c000c0, 0xc3cc0fcf, 0xd3d417c7, 0xb0b83888,
0x030c0f0f, 0x828c0e8e, 0x42400242, 0x23202303,
0x91901181, 0x606c2c4c, 0xd3d81bcb, 0xa0a42484,
0x30343404, 0xf1f031c1, 0x40480848, 0xc2c002c2,
0x636c2f4f, 0x313c3d0d, 0x212c2d0d, 0x40400040,
0xb2bc3e8e, 0x323c3e0e, 0xb0bc3c8c, 0xc1c001c1,
0xa2a82a8a, 0xb2b83a8a, 0x424c0e4e, 0x51541545,
0x33383b0b, 0xd0dc1ccc, 0x60682848, 0x737c3f4f,
0x909c1c8c, 0xd0d818c8, 0x42480a4a, 0x52541646,
0x73743747, 0xa0a02080, 0xe1ec2dcd, 0x42440646,
0xb1b43585, 0x23282b0b, 0x61642545, 0xf2f83aca,
0xe3e023c3, 0xb1b83989, 0xb1b03181, 0x939c1f8f,
0x525c1e4e, 0xf1f839c9, 0xe2e426c6, 0xb2b03282,
0x31303101, 0xe2e82aca, 0x616c2d4d, 0x535c1f4f,
0xe0e424c4, 0xf0f030c0, 0xc1cc0dcd, 0x80880888,
0x12141606, 0x32383a0a, 0x50581848, 0xd0d414c4,
0x62602242, 0x21282909, 0x03040707, 0x33303303,
0xe0e828c8, 0x13181b0b, 0x01040505, 0x71783949,
0x90901080, 0x62682a4a, 0x22282a0a, 0x92981a8a,
};
static const u32 SS3[256] = {
0x08303838, 0xc8e0e828, 0x0d212c2d, 0x86a2a426,
0xcfc3cc0f, 0xced2dc1e, 0x83b3b033, 0x88b0b838,
0x8fa3ac2f, 0x40606020, 0x45515415, 0xc7c3c407,
0x44404404, 0x4f636c2f, 0x4b63682b, 0x4b53581b,
0xc3c3c003, 0x42626022, 0x03333033, 0x85b1b435,
0x09212829, 0x80a0a020, 0xc2e2e022, 0x87a3a427,
0xc3d3d013, 0x81919011, 0x01111011, 0x06020406,
0x0c101c1c, 0x8cb0bc3c, 0x06323436, 0x4b43480b,
0xcfe3ec2f, 0x88808808, 0x4c606c2c, 0x88a0a828,
0x07131417, 0xc4c0c404, 0x06121416, 0xc4f0f434,
0xc2c2c002, 0x45414405, 0xc1e1e021, 0xc6d2d416,
0x0f333c3f, 0x0d313c3d, 0x8e828c0e, 0x88909818,
0x08202828, 0x4e424c0e, 0xc6f2f436, 0x0e323c3e,
0x85a1a425, 0xc9f1f839, 0x0d010c0d, 0xcfd3dc1f,
0xc8d0d818, 0x0b23282b, 0x46626426, 0x4a72783a,
0x07232427, 0x0f232c2f, 0xc1f1f031, 0x42727032,
0x42424002, 0xc4d0d414, 0x41414001, 0xc0c0c000,
0x43737033, 0x47636427, 0x8ca0ac2c, 0x8b83880b,
0xc7f3f437, 0x8da1ac2d, 0x80808000, 0x0f131c1f,
0xcac2c80a, 0x0c202c2c, 0x8aa2a82a, 0x04303434,
0xc2d2d012, 0x0b03080b, 0xcee2ec2e, 0xc9e1e829,
0x4d515c1d, 0x84909414, 0x08101818, 0xc8f0f838,
0x47535417, 0x8ea2ac2e, 0x08000808, 0xc5c1c405,
0x03131013, 0xcdc1cc0d, 0x86828406, 0x89b1b839,
0xcff3fc3f, 0x4d717c3d, 0xc1c1c001, 0x01313031,
0xc5f1f435, 0x8a82880a, 0x4a62682a, 0x81b1b031,
0xc1d1d011, 0x00202020, 0xc7d3d417, 0x02020002,
0x02222022, 0x04000404, 0x48606828, 0x41717031,
0x07030407, 0xcbd3d81b, 0x8d919c1d, 0x89919819,
0x41616021, 0x8eb2bc3e, 0xc6e2e426, 0x49515819,
0xcdd1dc1d, 0x41515011, 0x80909010, 0xccd0dc1c,
0x8a92981a, 0x83a3a023, 0x8ba3a82b, 0xc0d0d010,
0x81818001, 0x0f030c0f, 0x47434407, 0x0a12181a,
0xc3e3e023, 0xcce0ec2c, 0x8d818c0d, 0x8fb3bc3f,
0x86929416, 0x4b73783b, 0x4c505c1c, 0x82a2a022,
0x81a1a021, 0x43636023, 0x03232023, 0x4d414c0d,
0xc8c0c808, 0x8e929c1e, 0x8c909c1c, 0x0a32383a,
0x0c000c0c, 0x0e222c2e, 0x8ab2b83a, 0x4e626c2e,
0x8f939c1f, 0x4a52581a, 0xc2f2f032, 0x82929012,
0xc3f3f033, 0x49414809, 0x48707838, 0xccc0cc0c,
0x05111415, 0xcbf3f83b, 0x40707030, 0x45717435,
0x4f737c3f, 0x05313435, 0x00101010, 0x03030003,
0x44606424, 0x4d616c2d, 0xc6c2c406, 0x44707434,
0xc5d1d415, 0x84b0b434, 0xcae2e82a, 0x09010809,
0x46727436, 0x09111819, 0xcef2fc3e, 0x40404000,
0x02121012, 0xc0e0e020, 0x8db1bc3d, 0x05010405,
0xcaf2f83a, 0x01010001, 0xc0f0f030, 0x0a22282a,
0x4e525c1e, 0x89a1a829, 0x46525416, 0x43434003,
0x85818405, 0x04101414, 0x89818809, 0x8b93981b,
0x80b0b030, 0xc5e1e425, 0x48404808, 0x49717839,
0x87939417, 0xccf0fc3c, 0x0e121c1e, 0x82828002,
0x01212021, 0x8c808c0c, 0x0b13181b, 0x4f535c1f,
0x47737437, 0x44505414, 0x82b2b032, 0x0d111c1d,
0x05212425, 0x4f434c0f, 0x00000000, 0x46424406,
0xcde1ec2d, 0x48505818, 0x42525012, 0xcbe3e82b,
0x4e727c3e, 0xcad2d81a, 0xc9c1c809, 0xcdf1fc3d,
0x00303030, 0x85919415, 0x45616425, 0x0c303c3c,
0x86b2b436, 0xc4e0e424, 0x8bb3b83b, 0x4c707c3c,
0x0e020c0e, 0x40505010, 0x09313839, 0x06222426,
0x02323032, 0x84808404, 0x49616829, 0x83939013,
0x07333437, 0xc7e3e427, 0x04202424, 0x84a0a424,
0xcbc3c80b, 0x43535013, 0x0a02080a, 0x87838407,
0xc9d1d819, 0x4c404c0c, 0x83838003, 0x8f838c0f,
0xcec2cc0e, 0x0b33383b, 0x4a42480a, 0x87b3b437,
};
static const u32 KC[SEED_NUM_KCONSTANTS] = {
0x9e3779b9, 0x3c6ef373, 0x78dde6e6, 0xf1bbcdcc,
0xe3779b99, 0xc6ef3733, 0x8dde6e67, 0x1bbcdccf,
0x3779b99e, 0x6ef3733c, 0xdde6e678, 0xbbcdccf1,
0x779b99e3, 0xef3733c6, 0xde6e678d, 0xbcdccf1b,
};
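/*
 * One SEED round on the Feistel halves (X1,X2) and (X3,X4): mix two
 * round-key words into the right half, then apply the G-function
 * (the four SS-box lookups XORed together) three times, alternating
 * XOR and modular addition, and fold the result into the left half.
 */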
#define OP(X1, X2, X3, X4, rbase) \
t0 = X3 ^ ks[rbase]; \
t1 = X4 ^ ks[rbase+1]; \
t1 ^= t0; \
t1 = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^ \
SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; \
t0 += t1; \
t0 = SS0[byte(t0, 0)] ^ SS1[byte(t0, 1)] ^ \
SS2[byte(t0, 2)] ^ SS3[byte(t0, 3)]; \
t1 += t0; \
t1 = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^ \
SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)]; \
t0 += t1; \
X1 ^= t0; \
X2 ^= t1
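/*
 * Key schedule: expand the 128-bit key into 16 pairs of round-key
 * words. Each iteration combines the key halves with a round
 * constant, runs both words through the G-function, then rotates
 * the (x1,x2) half right by 8 bits on even rounds and the (x3,x4)
 * half left by 8 bits on odd rounds.
 */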
static int seed_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
u32 *keyout = ctx->keysched;
const __be32 *key = (const __be32 *)in_key;
u32 i, t0, t1, x1, x2, x3, x4;
x1 = be32_to_cpu(key[0]);
x2 = be32_to_cpu(key[1]);
x3 = be32_to_cpu(key[2]);
x4 = be32_to_cpu(key[3]);
for (i = 0; i < SEED_NUM_KCONSTANTS; i++) {
t0 = x1 + x3 - KC[i];
t1 = x2 + KC[i] - x4;
*(keyout++) = SS0[byte(t0, 0)] ^ SS1[byte(t0, 1)] ^
SS2[byte(t0, 2)] ^ SS3[byte(t0, 3)];
*(keyout++) = SS0[byte(t1, 0)] ^ SS1[byte(t1, 1)] ^
SS2[byte(t1, 2)] ^ SS3[byte(t1, 3)];
if (i % 2 == 0) {
t0 = x1;
x1 = (x1 >> 8) ^ (x2 << 24);
x2 = (x2 >> 8) ^ (t0 << 24);
} else {
t0 = x3;
x3 = (x3 << 8) ^ (x4 >> 24);
x4 = (x4 << 8) ^ (t0 >> 24);
}
}
return 0;
}
/* encrypt one 16-byte block */
static void seed_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
x1 = be32_to_cpu(src[0]);
x2 = be32_to_cpu(src[1]);
x3 = be32_to_cpu(src[2]);
x4 = be32_to_cpu(src[3]);
OP(x1, x2, x3, x4, 0);
OP(x3, x4, x1, x2, 2);
OP(x1, x2, x3, x4, 4);
OP(x3, x4, x1, x2, 6);
OP(x1, x2, x3, x4, 8);
OP(x3, x4, x1, x2, 10);
OP(x1, x2, x3, x4, 12);
OP(x3, x4, x1, x2, 14);
OP(x1, x2, x3, x4, 16);
OP(x3, x4, x1, x2, 18);
OP(x1, x2, x3, x4, 20);
OP(x3, x4, x1, x2, 22);
OP(x1, x2, x3, x4, 24);
OP(x3, x4, x1, x2, 26);
OP(x1, x2, x3, x4, 28);
OP(x3, x4, x1, x2, 30);
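/* the final Feistel half-swap is folded into the output word order */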
dst[0] = cpu_to_be32(x3);
dst[1] = cpu_to_be32(x4);
dst[2] = cpu_to_be32(x1);
dst[3] = cpu_to_be32(x2);
}
/* decrypt one 16-byte block */
static void seed_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
const struct seed_ctx *ctx = crypto_tfm_ctx(tfm);
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 x1, x2, x3, x4, t0, t1;
const u32 *ks = ctx->keysched;
x1 = be32_to_cpu(src[0]);
x2 = be32_to_cpu(src[1]);
x3 = be32_to_cpu(src[2]);
x4 = be32_to_cpu(src[3]);
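/* same round structure as encryption, with the round keys in reverse order */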
OP(x1, x2, x3, x4, 30);
OP(x3, x4, x1, x2, 28);
OP(x1, x2, x3, x4, 26);
OP(x3, x4, x1, x2, 24);
OP(x1, x2, x3, x4, 22);
OP(x3, x4, x1, x2, 20);
OP(x1, x2, x3, x4, 18);
OP(x3, x4, x1, x2, 16);
OP(x1, x2, x3, x4, 14);
OP(x3, x4, x1, x2, 12);
OP(x1, x2, x3, x4, 10);
OP(x3, x4, x1, x2, 8);
OP(x1, x2, x3, x4, 6);
OP(x3, x4, x1, x2, 4);
OP(x1, x2, x3, x4, 2);
OP(x3, x4, x1, x2, 0);
dst[0] = cpu_to_be32(x3);
dst[1] = cpu_to_be32(x4);
dst[2] = cpu_to_be32(x1);
dst[3] = cpu_to_be32(x2);
}
static struct crypto_alg seed_alg = {
.cra_name = "seed",
.cra_driver_name = "seed-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = SEED_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct seed_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = SEED_KEY_SIZE,
.cia_max_keysize = SEED_KEY_SIZE,
.cia_setkey = seed_set_key,
.cia_encrypt = seed_encrypt,
.cia_decrypt = seed_decrypt
}
}
};
static int __init seed_init(void)
{
return crypto_register_alg(&seed_alg);
}
static void __exit seed_fini(void)
{
crypto_unregister_alg(&seed_alg);
}
subsys_initcall(seed_init);
module_exit(seed_fini);
MODULE_DESCRIPTION("SEED Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Hye-Shik Chang <[email protected]>, Kim Hyun <[email protected]>");
MODULE_ALIAS_CRYPTO("seed");
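/*
 * Illustrative sketch (not part of the original file): one way a
 * kernel caller could exercise this cipher a block at a time through
 * the single-block cipher API, assuming that API is reachable from
 * the caller's context. Error handling is omitted for brevity.
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("seed", 0, 0);
 *	u8 key[SEED_KEY_SIZE] = {};
 *	u8 in[SEED_BLOCK_SIZE] = {}, out[SEED_BLOCK_SIZE];
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, SEED_KEY_SIZE);
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *		crypto_free_cipher(tfm);
 *	}
 */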
| linux-master | crypto/seed.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Quick & dirty crypto testing module.
*
* This will only exist until we have a better testing mechanism
* (e.g. a char device).
*
* Copyright (c) 2002 James Morris <[email protected]>
* Copyright (c) 2002 Jean-Francois Dive <[email protected]>
* Copyright (c) 2007 Nokia Siemens Networks
*
* Updated RFC4106 AES-GCM testing.
* Authors: Aidan O'Mahony ([email protected])
* Adrian Hoban <[email protected]>
* Gabriele Paoloni <[email protected]>
* Tadeusz Struk ([email protected])
* Copyright (c) 2010, Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timex.h>
#include "internal.h"
#include "tcrypt.h"
/*
* Need slab memory for testing (size in number of pages).
*/
#define TVMEMSIZE 4
/*
* Used by test_cipher_speed()
*/
#define ENCRYPT 1
#define DECRYPT 0
#define MAX_DIGEST_SIZE 64
/*
* return a string with the driver name
*/
#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
/*
* Used by test_cipher_speed()
*/
static unsigned int sec;
static char *alg;
static u32 type;
static u32 mask;
static int mode;
static u32 num_mb = 8;
static unsigned int klen;
static char *tvmem[TVMEMSIZE];
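/* zero-terminated lists of buffer sizes exercised by the speed tests */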
static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
#define XBUFSIZE 8
#define MAX_IVLEN 32
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++) {
buf[i] = (void *)__get_free_page(GFP_KERNEL);
if (!buf[i])
goto err_free_buf;
}
return 0;
err_free_buf:
while (i-- > 0)
free_page((unsigned long)buf[i]);
return -ENOMEM;
}
static void testmgr_free_buf(char *buf[XBUFSIZE])
{
int i;
for (i = 0; i < XBUFSIZE; i++)
free_page((unsigned long)buf[i]);
}
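/*
 * Build an AEAD scatterlist: entry 0 holds the associated data, the
 * remaining entries map buflen bytes of payload in page-sized chunks
 * (capped at XBUFSIZE pages, with any sub-page remainder in the last
 * entry).
 */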
static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
unsigned int buflen, const void *assoc,
unsigned int aad_size)
{
int np = (buflen + PAGE_SIZE - 1)/PAGE_SIZE;
int k, rem;
if (np > XBUFSIZE) {
rem = PAGE_SIZE;
np = XBUFSIZE;
} else {
rem = buflen % PAGE_SIZE;
}
sg_init_table(sg, np + 1);
sg_set_buf(&sg[0], assoc, aad_size);
if (rem)
np--;
for (k = 0; k < np; k++)
sg_set_buf(&sg[k + 1], xbuf[k], PAGE_SIZE);
if (rem)
sg_set_buf(&sg[k + 1], xbuf[k], rem);
}
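/* finish one request synchronously, waiting if it completes asynchronously */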
static inline int do_one_aead_op(struct aead_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
return crypto_wait_req(ret, wait);
}
struct test_mb_aead_data {
struct scatterlist sg[XBUFSIZE];
struct scatterlist sgout[XBUFSIZE];
struct aead_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
};
static int do_mult_aead_op(struct test_mb_aead_data *data, int enc,
u32 num_mb, int *rc)
{
int i, err = 0;
/* Fire up a bunch of concurrent requests */
for (i = 0; i < num_mb; i++) {
if (enc == ENCRYPT)
rc[i] = crypto_aead_encrypt(data[i].req);
else
rc[i] = crypto_aead_decrypt(data[i].req);
}
/* Wait for all requests to finish */
for (i = 0; i < num_mb; i++) {
rc[i] = crypto_wait_req(rc[i], &data[i].wait);
if (rc[i]) {
pr_info("concurrent request %d error %d\n", i, rc[i]);
err = rc[i];
}
}
return err;
}
static int test_mb_aead_jiffies(struct test_mb_aead_data *data, int enc,
int blen, int secs, u32 num_mb)
{
unsigned long start, end;
int bcount;
int ret = 0;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_mult_aead_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
return ret;
}
static int test_mb_aead_cycles(struct test_mb_aead_data *data, int enc,
int blen, u32 num_mb)
{
unsigned long cycles = 0;
int ret = 0;
int i;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_mult_aead_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_mult_aead_op(data, enc, num_mb, rc);
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
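/* report the mean cycles per request over the 8 timed batches */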
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
out:
kfree(rc);
return ret;
}
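/*
 * Multibuffer speed test: build num_mb identical requests and submit
 * them concurrently, so that drivers which batch or parallelize
 * requests can be measured at full utilization.
 */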
static void test_mb_aead_speed(const char *algo, int enc, int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
unsigned int aad_size, u8 *keysize, u32 num_mb)
{
struct test_mb_aead_data *data;
struct crypto_aead *tfm;
unsigned int i, j, iv_len;
const int *b_size;
const char *key;
const char *e;
void *assoc;
char *iv;
int ret;
if (aad_size >= PAGE_SIZE) {
pr_err("associate data length (%u) too big\n", aad_size);
return;
}
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
if (!iv)
return;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
goto out_free_iv;
tfm = crypto_alloc_aead(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
goto out_free_data;
}
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
ret);
goto out_free_tfm;
}
for (i = 0; i < num_mb; ++i)
if (testmgr_alloc_buf(data[i].xbuf)) {
while (i--)
testmgr_free_buf(data[i].xbuf);
goto out_free_tfm;
}
for (i = 0; i < num_mb; ++i)
if (testmgr_alloc_buf(data[i].axbuf)) {
while (i--)
testmgr_free_buf(data[i].axbuf);
goto out_free_xbuf;
}
for (i = 0; i < num_mb; ++i)
if (testmgr_alloc_buf(data[i].xoutbuf)) {
while (i--)
testmgr_free_buf(data[i].xoutbuf);
goto out_free_axbuf;
}
for (i = 0; i < num_mb; ++i) {
data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
pr_err("alg: aead: Failed to allocate request for %s\n",
algo);
while (i--)
aead_request_free(data[i].req);
goto out_free_xoutbuf;
}
}
for (i = 0; i < num_mb; ++i) {
crypto_init_wait(&data[i].wait);
aead_request_set_callback(data[i].req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &data[i].wait);
}
pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
i = 0;
do {
b_size = aead_sizes;
do {
int bs = round_up(*b_size, crypto_aead_blocksize(tfm));
if (bs + authsize > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for buffer (%lu)\n",
authsize + bs,
XBUFSIZE * PAGE_SIZE);
goto out;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, bs);
/* Set up tfm global state, i.e. the key */
memset(tvmem[0], 0xff, PAGE_SIZE);
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
crypto_aead_clear_flags(tfm, ~0);
ret = crypto_aead_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_aead_get_flags(tfm));
goto out;
}
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
memset(iv, 0xff, iv_len);
/* Now set up the per-request state, i.e. the buffers */
for (j = 0; j < num_mb; ++j) {
struct test_mb_aead_data *cur = &data[j];
assoc = cur->axbuf[0];
memset(assoc, 0xff, aad_size);
sg_init_aead(cur->sg, cur->xbuf,
bs + (enc ? 0 : authsize),
assoc, aad_size);
sg_init_aead(cur->sgout, cur->xoutbuf,
bs + (enc ? authsize : 0),
assoc, aad_size);
aead_request_set_ad(cur->req, aad_size);
if (!enc) {
aead_request_set_crypt(cur->req,
cur->sgout,
cur->sg,
bs, iv);
ret = crypto_aead_encrypt(cur->req);
ret = do_one_aead_op(cur->req, ret);
if (ret) {
pr_err("calculating auth failed (%d)\n",
ret);
break;
}
}
aead_request_set_crypt(cur->req, cur->sg,
cur->sgout, bs +
(enc ? 0 : authsize),
iv);
}
if (secs) {
ret = test_mb_aead_jiffies(data, enc, bs,
secs, num_mb);
cond_resched();
} else {
ret = test_mb_aead_cycles(data, enc, bs,
num_mb);
}
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out:
for (i = 0; i < num_mb; ++i)
aead_request_free(data[i].req);
out_free_xoutbuf:
for (i = 0; i < num_mb; ++i)
testmgr_free_buf(data[i].xoutbuf);
out_free_axbuf:
for (i = 0; i < num_mb; ++i)
testmgr_free_buf(data[i].axbuf);
out_free_xbuf:
for (i = 0; i < num_mb; ++i)
testmgr_free_buf(data[i].xbuf);
out_free_tfm:
crypto_free_aead(tfm);
out_free_data:
kfree(data);
out_free_iv:
kfree(iv);
}
static int test_aead_jiffies(struct aead_request *req, int enc,
int blen, int secs)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
else
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
if (ret)
return ret;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount, secs, (u64)bcount * blen);
return 0;
}
static int test_aead_cycles(struct aead_request *req, int enc, int blen)
{
unsigned long cycles = 0;
int ret = 0;
int i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
else
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
if (enc)
ret = do_one_aead_op(req, crypto_aead_encrypt(req));
else
ret = do_one_aead_op(req, crypto_aead_decrypt(req));
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
return ret;
}
static void test_aead_speed(const char *algo, int enc, unsigned int secs,
struct aead_speed_template *template,
unsigned int tcount, u8 authsize,
unsigned int aad_size, u8 *keysize)
{
unsigned int i, j;
struct crypto_aead *tfm;
int ret = -ENOMEM;
const char *key;
struct aead_request *req;
struct scatterlist *sg;
struct scatterlist *sgout;
const char *e;
void *assoc;
char *iv;
char *xbuf[XBUFSIZE];
char *xoutbuf[XBUFSIZE];
char *axbuf[XBUFSIZE];
const int *b_size;
unsigned int iv_len;
struct crypto_wait wait;
iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
if (!iv)
return;
if (aad_size >= PAGE_SIZE) {
pr_err("associate data length (%u) too big\n", aad_size);
goto out_noxbuf;
}
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
if (testmgr_alloc_buf(xbuf))
goto out_noxbuf;
if (testmgr_alloc_buf(axbuf))
goto out_noaxbuf;
if (testmgr_alloc_buf(xoutbuf))
goto out_nooutbuf;
sg = kmalloc(sizeof(*sg) * 9 * 2, GFP_KERNEL);
if (!sg)
goto out_nosg;
sgout = &sg[9];
tfm = crypto_alloc_aead(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
goto out_notfm;
}
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
ret);
goto out_noreq;
}
crypto_init_wait(&wait);
pr_info("testing speed of %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
req = aead_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("alg: aead: Failed to allocate request for %s\n",
algo);
goto out_noreq;
}
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
i = 0;
do {
b_size = aead_sizes;
do {
u32 bs = round_up(*b_size, crypto_aead_blocksize(tfm));
assoc = axbuf[0];
memset(assoc, 0xff, aad_size);
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
*keysize + bs,
TVMEMSIZE * PAGE_SIZE);
goto out;
}
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
ret = crypto_aead_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x: %d\n",
crypto_aead_get_flags(tfm), ret);
goto out;
}
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
memset(iv, 0xff, iv_len);
crypto_aead_clear_flags(tfm, ~0);
pr_info("test %u (%d bit key, %d byte blocks): ",
i, *keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
assoc, aad_size);
sg_init_aead(sgout, xoutbuf,
bs + (enc ? authsize : 0), assoc,
aad_size);
aead_request_set_ad(req, aad_size);
if (!enc) {
/*
* For decryption we need a valid auth tag, so run the
* encryption path once with the buffers reversed
* (input <-> output) to compute it.
*/
aead_request_set_crypt(req, sgout, sg,
bs, iv);
ret = do_one_aead_op(req,
crypto_aead_encrypt(req));
if (ret) {
pr_err("calculating auth failed (%d)\n",
ret);
break;
}
}
aead_request_set_crypt(req, sg, sgout,
bs + (enc ? 0 : authsize),
iv);
if (secs) {
ret = test_aead_jiffies(req, enc, bs,
secs);
cond_resched();
} else {
ret = test_aead_cycles(req, enc, bs);
}
if (ret) {
pr_err("%s() failed return code=%d\n", e, ret);
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out:
aead_request_free(req);
out_noreq:
crypto_free_aead(tfm);
out_notfm:
kfree(sg);
out_nosg:
testmgr_free_buf(xoutbuf);
out_nooutbuf:
testmgr_free_buf(axbuf);
out_noaxbuf:
testmgr_free_buf(xbuf);
out_noxbuf:
kfree(iv);
}
static void test_hash_sg_init(struct scatterlist *sg)
{
int i;
sg_init_table(sg, TVMEMSIZE);
for (i = 0; i < TVMEMSIZE; i++) {
sg_set_buf(sg + i, tvmem[i], PAGE_SIZE);
memset(tvmem[i], 0xff, PAGE_SIZE);
}
}
static inline int do_one_ahash_op(struct ahash_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
return crypto_wait_req(ret, wait);
}
static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
char *out, int secs)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
return ret;
}
pr_cont("%6u opers/sec, %9lu bytes/sec\n",
bcount / secs, ((long)bcount * blen) / secs);
return 0;
}
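/*
 * When the update size equals the total length a single ->digest()
 * per operation suffices; otherwise drive init/update/final with
 * blen/plen updates per operation.
 */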
static int test_ahash_jiffies(struct ahash_request *req, int blen,
int plen, char *out, int secs)
{
unsigned long start, end;
int bcount, pcount;
int ret;
if (plen == blen)
return test_ahash_jiffies_digest(req, blen, out, secs);
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
return ret;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
return ret;
}
/* we assume there is enough space in 'out' for the result */
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
return ret;
}
pr_cont("%6u opers/sec, %9lu bytes/sec\n",
bcount / secs, ((long)bcount * blen) / secs);
return 0;
}
static int test_ahash_cycles_digest(struct ahash_request *req, int blen,
char *out)
{
unsigned long cycles = 0;
int ret, i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_one_ahash_op(req, crypto_ahash_digest(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static int test_ahash_cycles(struct ahash_request *req, int blen,
int plen, char *out)
{
unsigned long cycles = 0;
int i, pcount, ret;
if (plen == blen)
return test_ahash_cycles_digest(req, blen, out);
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_one_ahash_op(req, crypto_ahash_init(req));
if (ret)
goto out;
for (pcount = 0; pcount < blen; pcount += plen) {
ret = do_one_ahash_op(req, crypto_ahash_update(req));
if (ret)
goto out;
}
ret = do_one_ahash_op(req, crypto_ahash_final(req));
if (ret)
goto out;
end = get_cycles();
cycles += end - start;
}
out:
if (ret)
return ret;
pr_cont("%6lu cycles/operation, %4lu cycles/byte\n",
cycles / 8, cycles / (8 * blen));
return 0;
}
static void test_ahash_speed_common(const char *algo, unsigned int secs,
struct hash_speed *speed, unsigned mask)
{
struct scatterlist sg[TVMEMSIZE];
struct crypto_wait wait;
struct ahash_request *req;
struct crypto_ahash *tfm;
char *output;
int i, ret;
tfm = crypto_alloc_ahash(algo, 0, mask);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
return;
}
pr_info("testing speed of async %s (%s)\n", algo,
get_driver_name(crypto_ahash, tfm));
if (crypto_ahash_digestsize(tfm) > MAX_DIGEST_SIZE) {
pr_err("digestsize(%u) > %d\n", crypto_ahash_digestsize(tfm),
MAX_DIGEST_SIZE);
goto out;
}
test_hash_sg_init(sg);
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("ahash request allocation failure\n");
goto out;
}
crypto_init_wait(&wait);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
output = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
if (!output)
goto out_nomem;
for (i = 0; speed[i].blen != 0; i++) {
if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for tvmem (%lu)\n",
speed[i].blen, TVMEMSIZE * PAGE_SIZE);
break;
}
if (klen)
crypto_ahash_setkey(tfm, tvmem[0], klen);
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
ahash_request_set_crypt(req, sg, output, speed[i].plen);
if (secs) {
ret = test_ahash_jiffies(req, speed[i].blen,
speed[i].plen, output, secs);
cond_resched();
} else {
ret = test_ahash_cycles(req, speed[i].blen,
speed[i].plen, output);
}
if (ret) {
pr_err("hashing failed ret=%d\n", ret);
break;
}
}
kfree(output);
out_nomem:
ahash_request_free(req);
out:
crypto_free_ahash(tfm);
}
static void test_ahash_speed(const char *algo, unsigned int secs,
struct hash_speed *speed)
{
return test_ahash_speed_common(algo, secs, speed, 0);
}
static void test_hash_speed(const char *algo, unsigned int secs,
struct hash_speed *speed)
{
return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
}
struct test_mb_skcipher_data {
struct scatterlist sg[XBUFSIZE];
struct skcipher_request *req;
struct crypto_wait wait;
char *xbuf[XBUFSIZE];
};
static int do_mult_acipher_op(struct test_mb_skcipher_data *data, int enc,
u32 num_mb, int *rc)
{
int i, err = 0;
/* Fire up a bunch of concurrent requests */
for (i = 0; i < num_mb; i++) {
if (enc == ENCRYPT)
rc[i] = crypto_skcipher_encrypt(data[i].req);
else
rc[i] = crypto_skcipher_decrypt(data[i].req);
}
/* Wait for all requests to finish */
for (i = 0; i < num_mb; i++) {
rc[i] = crypto_wait_req(rc[i], &data[i].wait);
if (rc[i]) {
pr_info("concurrent request %d error %d\n", i, rc[i]);
err = rc[i];
}
}
return err;
}
static int test_mb_acipher_jiffies(struct test_mb_skcipher_data *data, int enc,
int blen, int secs, u32 num_mb)
{
unsigned long start, end;
int bcount;
int ret = 0;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
ret = do_mult_acipher_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount * num_mb, secs, (u64)bcount * blen * num_mb);
out:
kfree(rc);
return ret;
}
static int test_mb_acipher_cycles(struct test_mb_skcipher_data *data, int enc,
int blen, u32 num_mb)
{
unsigned long cycles = 0;
int ret = 0;
int i;
int *rc;
rc = kcalloc(num_mb, sizeof(*rc), GFP_KERNEL);
if (!rc)
return -ENOMEM;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
ret = do_mult_acipher_op(data, enc, num_mb, rc);
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
ret = do_mult_acipher_op(data, enc, num_mb, rc);
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / (8 * num_mb), blen);
out:
kfree(rc);
return ret;
}
static void test_mb_skcipher_speed(const char *algo, int enc, int secs,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize, u32 num_mb)
{
struct test_mb_skcipher_data *data;
struct crypto_skcipher *tfm;
unsigned int i, j, iv_len;
const int *b_size;
const char *key;
const char *e;
char iv[128];
int ret;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
data = kcalloc(num_mb, sizeof(*data), GFP_KERNEL);
if (!data)
return;
tfm = crypto_alloc_skcipher(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n",
algo, PTR_ERR(tfm));
goto out_free_data;
}
for (i = 0; i < num_mb; ++i)
if (testmgr_alloc_buf(data[i].xbuf)) {
while (i--)
testmgr_free_buf(data[i].xbuf);
goto out_free_tfm;
}
for (i = 0; i < num_mb; ++i) {
data[i].req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
pr_err("alg: skcipher: Failed to allocate request for %s\n",
algo);
while (i--)
skcipher_request_free(data[i].req);
goto out_free_xbuf;
}
}
for (i = 0; i < num_mb; ++i) {
skcipher_request_set_callback(data[i].req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &data[i].wait);
crypto_init_wait(&data[i].wait);
}
pr_info("testing speed of multibuffer %s (%s) %s\n", algo,
get_driver_name(crypto_skcipher, tfm), e);
i = 0;
do {
b_size = block_sizes;
do {
u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
if (bs > XBUFSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for buffer (%lu)\n",
bs, XBUFSIZE * PAGE_SIZE);
goto out;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, bs);
/* Set up tfm global state, i.e. the key */
memset(tvmem[0], 0xff, PAGE_SIZE);
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
crypto_skcipher_clear_flags(tfm, ~0);
ret = crypto_skcipher_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_skcipher_get_flags(tfm));
goto out;
}
iv_len = crypto_skcipher_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
/* Now set up the per-request state, i.e. the buffers */
for (j = 0; j < num_mb; ++j) {
struct test_mb_skcipher_data *cur = &data[j];
unsigned int k = bs;
unsigned int pages = DIV_ROUND_UP(k, PAGE_SIZE);
unsigned int p = 0;
sg_init_table(cur->sg, pages);
while (k > PAGE_SIZE) {
sg_set_buf(cur->sg + p, cur->xbuf[p],
PAGE_SIZE);
memset(cur->xbuf[p], 0xff, PAGE_SIZE);
p++;
k -= PAGE_SIZE;
}
sg_set_buf(cur->sg + p, cur->xbuf[p], k);
memset(cur->xbuf[p], 0xff, k);
skcipher_request_set_crypt(cur->req, cur->sg,
cur->sg, bs, iv);
}
if (secs) {
ret = test_mb_acipher_jiffies(data, enc,
bs, secs,
num_mb);
cond_resched();
} else {
ret = test_mb_acipher_cycles(data, enc,
bs, num_mb);
}
if (ret) {
pr_err("%s() failed flags=%x\n", e,
crypto_skcipher_get_flags(tfm));
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out:
for (i = 0; i < num_mb; ++i)
skcipher_request_free(data[i].req);
out_free_xbuf:
for (i = 0; i < num_mb; ++i)
testmgr_free_buf(data[i].xbuf);
out_free_tfm:
crypto_free_skcipher(tfm);
out_free_data:
kfree(data);
}
static inline int do_one_acipher_op(struct skcipher_request *req, int ret)
{
struct crypto_wait *wait = req->base.data;
return crypto_wait_req(ret, wait);
}
static int test_acipher_jiffies(struct skcipher_request *req, int enc,
int blen, int secs)
{
unsigned long start, end;
int bcount;
int ret;
for (start = jiffies, end = start + secs * HZ, bcount = 0;
time_before(jiffies, end); bcount++) {
if (enc)
ret = do_one_acipher_op(req,
crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_skcipher_decrypt(req));
if (ret)
return ret;
}
pr_cont("%d operations in %d seconds (%llu bytes)\n",
bcount, secs, (u64)bcount * blen);
return 0;
}
static int test_acipher_cycles(struct skcipher_request *req, int enc,
int blen)
{
unsigned long cycles = 0;
int ret = 0;
int i;
/* Warm-up run. */
for (i = 0; i < 4; i++) {
if (enc)
ret = do_one_acipher_op(req,
crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_skcipher_decrypt(req));
if (ret)
goto out;
}
/* The real thing. */
for (i = 0; i < 8; i++) {
cycles_t start, end;
start = get_cycles();
if (enc)
ret = do_one_acipher_op(req,
crypto_skcipher_encrypt(req));
else
ret = do_one_acipher_op(req,
crypto_skcipher_decrypt(req));
end = get_cycles();
if (ret)
goto out;
cycles += end - start;
}
out:
if (ret == 0)
pr_cont("1 operation in %lu cycles (%d bytes)\n",
(cycles + 4) / 8, blen);
return ret;
}
static void test_skcipher_speed(const char *algo, int enc, unsigned int secs,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize, bool async)
{
unsigned int ret, i, j, k, iv_len;
struct crypto_wait wait;
const char *key;
char iv[128];
struct skcipher_request *req;
struct crypto_skcipher *tfm;
const int *b_size;
const char *e;
if (enc == ENCRYPT)
e = "encryption";
else
e = "decryption";
crypto_init_wait(&wait);
tfm = crypto_alloc_skcipher(algo, 0, async ? 0 : CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm)) {
pr_err("failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
return;
}
pr_info("testing speed of %s %s (%s) %s\n", async ? "async" : "sync",
algo, get_driver_name(crypto_skcipher, tfm), e);
req = skcipher_request_alloc(tfm, GFP_KERNEL);
if (!req) {
pr_err("skcipher: Failed to allocate request for %s\n", algo);
goto out;
}
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done, &wait);
i = 0;
do {
b_size = block_sizes;
do {
u32 bs = round_up(*b_size, crypto_skcipher_blocksize(tfm));
struct scatterlist sg[TVMEMSIZE];
if ((*keysize + bs) > TVMEMSIZE * PAGE_SIZE) {
pr_err("template (%u) too big for "
"tvmem (%lu)\n", *keysize + bs,
TVMEMSIZE * PAGE_SIZE);
goto out_free_req;
}
pr_info("test %u (%d bit key, %d byte blocks): ", i,
*keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
/* set key, plain text and IV */
key = tvmem[0];
for (j = 0; j < tcount; j++) {
if (template[j].klen == *keysize) {
key = template[j].key;
break;
}
}
crypto_skcipher_clear_flags(tfm, ~0);
ret = crypto_skcipher_setkey(tfm, key, *keysize);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_skcipher_get_flags(tfm));
goto out_free_req;
}
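/*
 * The key sits at the start of tvmem[0]; the data scatterlist
 * begins immediately after it and spills into the remaining
 * tvmem pages as needed.
 */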
k = *keysize + bs;
sg_init_table(sg, DIV_ROUND_UP(k, PAGE_SIZE));
if (k > PAGE_SIZE) {
sg_set_buf(sg, tvmem[0] + *keysize,
PAGE_SIZE - *keysize);
k -= PAGE_SIZE;
j = 1;
while (k > PAGE_SIZE) {
sg_set_buf(sg + j, tvmem[j], PAGE_SIZE);
memset(tvmem[j], 0xff, PAGE_SIZE);
j++;
k -= PAGE_SIZE;
}
sg_set_buf(sg + j, tvmem[j], k);
memset(tvmem[j], 0xff, k);
} else {
sg_set_buf(sg, tvmem[0] + *keysize, bs);
}
iv_len = crypto_skcipher_ivsize(tfm);
if (iv_len)
memset(&iv, 0xff, iv_len);
skcipher_request_set_crypt(req, sg, sg, bs, iv);
if (secs) {
ret = test_acipher_jiffies(req, enc,
bs, secs);
cond_resched();
} else {
ret = test_acipher_cycles(req, enc,
bs);
}
if (ret) {
pr_err("%s() failed flags=%x\n", e,
crypto_skcipher_get_flags(tfm));
break;
}
b_size++;
i++;
} while (*b_size);
keysize++;
} while (*keysize);
out_free_req:
skcipher_request_free(req);
out:
crypto_free_skcipher(tfm);
}
static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize)
{
return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
true);
}
static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
struct cipher_speed_template *template,
unsigned int tcount, u8 *keysize)
{
return test_skcipher_speed(algo, enc, secs, template, tcount, keysize,
false);
}
static inline int tcrypt_test(const char *alg)
{
int ret;
pr_debug("testing %s\n", alg);
ret = alg_test(alg, alg, 0, 0);
/* non-fips algs return -EINVAL or -ECANCELED in fips mode */
if (fips_enabled && (ret == -EINVAL || ret == -ECANCELED))
ret = 0;
return ret;
}
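/*
 * Usage note (a sketch, not original to this file): tcrypt is driven
 * by module parameters, e.g.
 *
 *	modprobe tcrypt mode=200 sec=1
 *
 * runs the AES cipher speed tests for one second per block size. The
 * module intentionally never stays loaded, so each run is a fresh
 * insertion.
 */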
static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
{
int i;
int ret = 0;
switch (m) {
case 0:
if (alg) {
if (!crypto_has_alg(alg, type,
mask ?: CRYPTO_ALG_TYPE_MASK))
ret = -ENOENT;
break;
}
for (i = 1; i < 200; i++)
ret = min(ret, do_test(NULL, 0, 0, i, num_mb));
break;
case 1:
ret = min(ret, tcrypt_test("md5"));
break;
case 2:
ret = min(ret, tcrypt_test("sha1"));
break;
case 3:
ret = min(ret, tcrypt_test("ecb(des)"));
ret = min(ret, tcrypt_test("cbc(des)"));
ret = min(ret, tcrypt_test("ctr(des)"));
break;
case 4:
ret = min(ret, tcrypt_test("ecb(des3_ede)"));
ret = min(ret, tcrypt_test("cbc(des3_ede)"));
ret = min(ret, tcrypt_test("ctr(des3_ede)"));
break;
case 5:
ret = min(ret, tcrypt_test("md4"));
break;
case 6:
ret = min(ret, tcrypt_test("sha256"));
break;
case 7:
ret = min(ret, tcrypt_test("ecb(blowfish)"));
ret = min(ret, tcrypt_test("cbc(blowfish)"));
ret = min(ret, tcrypt_test("ctr(blowfish)"));
break;
case 8:
ret = min(ret, tcrypt_test("ecb(twofish)"));
ret = min(ret, tcrypt_test("cbc(twofish)"));
ret = min(ret, tcrypt_test("ctr(twofish)"));
ret = min(ret, tcrypt_test("lrw(twofish)"));
ret = min(ret, tcrypt_test("xts(twofish)"));
break;
case 9:
ret = min(ret, tcrypt_test("ecb(serpent)"));
ret = min(ret, tcrypt_test("cbc(serpent)"));
ret = min(ret, tcrypt_test("ctr(serpent)"));
ret = min(ret, tcrypt_test("lrw(serpent)"));
ret = min(ret, tcrypt_test("xts(serpent)"));
break;
case 10:
ret = min(ret, tcrypt_test("ecb(aes)"));
ret = min(ret, tcrypt_test("cbc(aes)"));
ret = min(ret, tcrypt_test("lrw(aes)"));
ret = min(ret, tcrypt_test("xts(aes)"));
ret = min(ret, tcrypt_test("ctr(aes)"));
ret = min(ret, tcrypt_test("rfc3686(ctr(aes))"));
ret = min(ret, tcrypt_test("ofb(aes)"));
ret = min(ret, tcrypt_test("cfb(aes)"));
ret = min(ret, tcrypt_test("xctr(aes)"));
break;
case 11:
ret = min(ret, tcrypt_test("sha384"));
break;
case 12:
ret = min(ret, tcrypt_test("sha512"));
break;
case 13:
ret = min(ret, tcrypt_test("deflate"));
break;
case 14:
ret = min(ret, tcrypt_test("ecb(cast5)"));
ret = min(ret, tcrypt_test("cbc(cast5)"));
ret = min(ret, tcrypt_test("ctr(cast5)"));
break;
case 15:
ret = min(ret, tcrypt_test("ecb(cast6)"));
ret = min(ret, tcrypt_test("cbc(cast6)"));
ret = min(ret, tcrypt_test("ctr(cast6)"));
ret = min(ret, tcrypt_test("lrw(cast6)"));
ret = min(ret, tcrypt_test("xts(cast6)"));
break;
case 16:
ret = min(ret, tcrypt_test("ecb(arc4)"));
break;
case 17:
ret = min(ret, tcrypt_test("michael_mic"));
break;
case 18:
ret = min(ret, tcrypt_test("crc32c"));
break;
case 19:
ret = min(ret, tcrypt_test("ecb(tea)"));
break;
case 20:
ret = min(ret, tcrypt_test("ecb(xtea)"));
break;
case 21:
ret = min(ret, tcrypt_test("ecb(khazad)"));
break;
case 22:
ret = min(ret, tcrypt_test("wp512"));
break;
case 23:
ret = min(ret, tcrypt_test("wp384"));
break;
case 24:
ret = min(ret, tcrypt_test("wp256"));
break;
case 26:
ret = min(ret, tcrypt_test("ecb(anubis)"));
ret = min(ret, tcrypt_test("cbc(anubis)"));
break;
case 30:
ret = min(ret, tcrypt_test("ecb(xeta)"));
break;
case 31:
ret = min(ret, tcrypt_test("pcbc(fcrypt)"));
break;
case 32:
ret = min(ret, tcrypt_test("ecb(camellia)"));
ret = min(ret, tcrypt_test("cbc(camellia)"));
ret = min(ret, tcrypt_test("ctr(camellia)"));
ret = min(ret, tcrypt_test("lrw(camellia)"));
ret = min(ret, tcrypt_test("xts(camellia)"));
break;
case 33:
ret = min(ret, tcrypt_test("sha224"));
break;
case 35:
ret = min(ret, tcrypt_test("gcm(aes)"));
break;
case 36:
ret = min(ret, tcrypt_test("lzo"));
break;
case 37:
ret = min(ret, tcrypt_test("ccm(aes)"));
break;
case 38:
ret = min(ret, tcrypt_test("cts(cbc(aes))"));
break;
case 39:
ret = min(ret, tcrypt_test("xxhash64"));
break;
case 40:
ret = min(ret, tcrypt_test("rmd160"));
break;
case 42:
ret = min(ret, tcrypt_test("blake2b-512"));
break;
case 43:
ret = min(ret, tcrypt_test("ecb(seed)"));
break;
case 45:
ret = min(ret, tcrypt_test("rfc4309(ccm(aes))"));
break;
case 46:
ret = min(ret, tcrypt_test("ghash"));
break;
case 47:
ret = min(ret, tcrypt_test("crct10dif"));
break;
case 48:
ret = min(ret, tcrypt_test("sha3-224"));
break;
case 49:
ret = min(ret, tcrypt_test("sha3-256"));
break;
case 50:
ret = min(ret, tcrypt_test("sha3-384"));
break;
case 51:
ret = min(ret, tcrypt_test("sha3-512"));
break;
case 52:
ret = min(ret, tcrypt_test("sm3"));
break;
case 53:
ret = min(ret, tcrypt_test("streebog256"));
break;
case 54:
ret = min(ret, tcrypt_test("streebog512"));
break;
case 55:
ret = min(ret, tcrypt_test("gcm(sm4)"));
break;
case 56:
ret = min(ret, tcrypt_test("ccm(sm4)"));
break;
case 57:
ret = min(ret, tcrypt_test("polyval"));
break;
case 58:
ret = min(ret, tcrypt_test("gcm(aria)"));
break;
case 59:
ret = min(ret, tcrypt_test("cts(cbc(sm4))"));
break;
case 100:
ret = min(ret, tcrypt_test("hmac(md5)"));
break;
case 101:
ret = min(ret, tcrypt_test("hmac(sha1)"));
break;
case 102:
ret = min(ret, tcrypt_test("hmac(sha256)"));
break;
case 103:
ret = min(ret, tcrypt_test("hmac(sha384)"));
break;
case 104:
ret = min(ret, tcrypt_test("hmac(sha512)"));
break;
case 105:
ret = min(ret, tcrypt_test("hmac(sha224)"));
break;
case 106:
ret = min(ret, tcrypt_test("xcbc(aes)"));
break;
case 108:
ret = min(ret, tcrypt_test("hmac(rmd160)"));
break;
case 109:
ret = min(ret, tcrypt_test("vmac64(aes)"));
break;
case 111:
ret = min(ret, tcrypt_test("hmac(sha3-224)"));
break;
case 112:
ret = min(ret, tcrypt_test("hmac(sha3-256)"));
break;
case 113:
ret = min(ret, tcrypt_test("hmac(sha3-384)"));
break;
case 114:
ret = min(ret, tcrypt_test("hmac(sha3-512)"));
break;
case 115:
ret = min(ret, tcrypt_test("hmac(streebog256)"));
break;
case 116:
ret = min(ret, tcrypt_test("hmac(streebog512)"));
break;
case 150:
ret = min(ret, tcrypt_test("ansi_cprng"));
break;
case 151:
ret = min(ret, tcrypt_test("rfc4106(gcm(aes))"));
break;
case 152:
ret = min(ret, tcrypt_test("rfc4543(gcm(aes))"));
break;
case 153:
ret = min(ret, tcrypt_test("cmac(aes)"));
break;
case 154:
ret = min(ret, tcrypt_test("cmac(des3_ede)"));
break;
case 155:
ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(aes))"));
break;
case 156:
ret = min(ret, tcrypt_test("authenc(hmac(md5),ecb(cipher_null))"));
break;
case 157:
ret = min(ret, tcrypt_test("authenc(hmac(sha1),ecb(cipher_null))"));
break;
case 158:
ret = min(ret, tcrypt_test("cbcmac(sm4)"));
break;
case 159:
ret = min(ret, tcrypt_test("cmac(sm4)"));
break;
case 160:
ret = min(ret, tcrypt_test("xcbc(sm4)"));
break;
case 181:
ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des))"));
break;
case 182:
ret = min(ret, tcrypt_test("authenc(hmac(sha1),cbc(des3_ede))"));
break;
case 183:
ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des))"));
break;
case 184:
ret = min(ret, tcrypt_test("authenc(hmac(sha224),cbc(des3_ede))"));
break;
case 185:
ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des))"));
break;
case 186:
ret = min(ret, tcrypt_test("authenc(hmac(sha256),cbc(des3_ede))"));
break;
case 187:
ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des))"));
break;
case 188:
ret = min(ret, tcrypt_test("authenc(hmac(sha384),cbc(des3_ede))"));
break;
case 189:
ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des))"));
break;
case 190:
ret = min(ret, tcrypt_test("authenc(hmac(sha512),cbc(des3_ede))"));
break;
case 191:
ret = min(ret, tcrypt_test("ecb(sm4)"));
ret = min(ret, tcrypt_test("cbc(sm4)"));
ret = min(ret, tcrypt_test("cfb(sm4)"));
ret = min(ret, tcrypt_test("ctr(sm4)"));
ret = min(ret, tcrypt_test("xts(sm4)"));
break;
case 192:
ret = min(ret, tcrypt_test("ecb(aria)"));
ret = min(ret, tcrypt_test("cbc(aria)"));
ret = min(ret, tcrypt_test("cfb(aria)"));
ret = min(ret, tcrypt_test("ctr(aria)"));
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 201:
test_cipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 202:
test_cipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 203:
test_cipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_cipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 204:
test_cipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_cipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 205:
test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_cipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_cipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 207:
test_cipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 208:
test_cipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 209:
test_cipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_cipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 210:
test_cipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_cipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_cipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_cipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 211:
test_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_20_28_36);
test_aead_speed("gcm(aes)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 212:
test_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
test_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
break;
case 213:
test_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
test_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT, sec,
NULL, 0, 16, 8, aead_speed_template_36);
break;
case 214:
test_cipher_speed("chacha20", ENCRYPT, sec, NULL, 0,
speed_template_32);
break;
case 215:
test_mb_aead_speed("rfc4106(gcm(aes))", ENCRYPT, sec, NULL,
0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
test_mb_aead_speed("rfc4106(gcm(aes))", DECRYPT, sec, NULL,
0, 16, 16, aead_speed_template_20_28_36, num_mb);
test_mb_aead_speed("gcm(aes)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16_24_32, num_mb);
break;
case 216:
test_mb_aead_speed("rfc4309(ccm(aes))", ENCRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
test_mb_aead_speed("rfc4309(ccm(aes))", DECRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
break;
case 217:
test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", ENCRYPT,
sec, NULL, 0, 16, 8, aead_speed_template_36,
num_mb);
test_mb_aead_speed("rfc7539esp(chacha20,poly1305)", DECRYPT,
sec, NULL, 0, 16, 8, aead_speed_template_36,
num_mb);
break;
case 218:
test_cipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cts(cbc(sm4))", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cts(cbc(sm4))", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_cipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_32);
test_cipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0,
speed_template_32);
break;
case 219:
test_cipher_speed("adiantum(xchacha12,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha12,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
test_cipher_speed("adiantum(xchacha20,aes)", DECRYPT, sec, NULL,
0, speed_template_32);
break;
case 220:
test_acipher_speed("essiv(cbc(aes),sha256)",
ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("essiv(cbc(aes),sha256)",
DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 221:
test_aead_speed("aegis128", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16);
test_aead_speed("aegis128", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16);
break;
case 222:
test_aead_speed("gcm(sm4)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16);
test_aead_speed("gcm(sm4)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16);
break;
case 223:
test_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
test_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec,
NULL, 0, 16, 16, aead_speed_template_19);
break;
case 224:
test_mb_aead_speed("gcm(sm4)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
test_mb_aead_speed("gcm(sm4)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
break;
case 225:
test_mb_aead_speed("rfc4309(ccm(sm4))", ENCRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
test_mb_aead_speed("rfc4309(ccm(sm4))", DECRYPT, sec, NULL, 0,
16, 16, aead_speed_template_19, num_mb);
break;
case 226:
test_cipher_speed("hctr2(aes)", ENCRYPT, sec, NULL,
0, speed_template_32);
break;
case 227:
test_cipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cbc(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("cfb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_cipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 228:
test_aead_speed("gcm(aria)", ENCRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
test_aead_speed("gcm(aria)", DECRYPT, sec,
NULL, 0, 16, 8, speed_template_16_24_32);
break;
case 229:
test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
break;
}
fallthrough;
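/* A note on the dispatch convention below (added commentary): mode 300
 * with no alg= parameter falls through every shash speed test in this
 * block, while an exact mode in the 301..399 range runs only its own
 * case and then exits via the "mode > 300 && mode < 400" guard. The
 * 400-series ahash cases follow the same pattern.
 */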
case 301:
test_hash_speed("md4", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 302:
test_hash_speed("md5", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 303:
test_hash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 304:
test_hash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 305:
test_hash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 306:
test_hash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 307:
test_hash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 308:
test_hash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 309:
test_hash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 313:
test_hash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 314:
test_hash_speed("xxhash64", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 315:
test_hash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 317:
test_hash_speed("blake2b-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 318:
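/* ghash takes a fixed 16-byte key, so force klen before the test. */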
klen = 16;
test_hash_speed("ghash", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 319:
test_hash_speed("crc32c", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 320:
test_hash_speed("crct10dif", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 321:
test_hash_speed("poly1305", sec, poly1305_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 322:
test_hash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 323:
test_hash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 324:
test_hash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 325:
test_hash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 326:
test_hash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 327:
test_hash_speed("streebog256", sec,
generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 328:
test_hash_speed("streebog512", sec,
generic_hash_speed_template);
if (mode > 300 && mode < 400) break;
fallthrough;
case 399:
break;
case 400:
if (alg) {
test_ahash_speed(alg, sec, generic_hash_speed_template);
break;
}
fallthrough;
case 401:
test_ahash_speed("md4", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 402:
test_ahash_speed("md5", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 403:
test_ahash_speed("sha1", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 404:
test_ahash_speed("sha256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 405:
test_ahash_speed("sha384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 406:
test_ahash_speed("sha512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 407:
test_ahash_speed("wp256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 408:
test_ahash_speed("wp384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 409:
test_ahash_speed("wp512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 413:
test_ahash_speed("sha224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 414:
test_ahash_speed("xxhash64", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 415:
test_ahash_speed("rmd160", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 417:
test_ahash_speed("blake2b-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 418:
test_ahash_speed("sha3-224", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 419:
test_ahash_speed("sha3-256", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 420:
test_ahash_speed("sha3-384", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 421:
test_ahash_speed("sha3-512", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 422:
test_ahash_speed("sm3", sec, generic_hash_speed_template);
if (mode > 400 && mode < 500) break;
fallthrough;
case 499:
break;
case 500:
test_acipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL, 0,
speed_template_20_28_36);
test_acipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL, 0,
speed_template_20_28_36);
break;
case 501:
test_acipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("cfb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
test_acipher_speed("ofb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24);
break;
case 502:
test_acipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8);
test_acipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
speed_template_8);
break;
case 503:
test_acipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 504:
test_acipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48);
test_acipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64);
test_acipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64);
break;
case 505:
test_acipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8);
break;
case 506:
test_acipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16);
test_acipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16);
break;
case 507:
test_acipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 508:
test_acipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32);
test_acipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48);
test_acipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_64);
test_acipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_64);
break;
case 509:
test_acipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32);
test_acipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32);
break;
case 518:
test_acipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("cfb(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("cfb(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
speed_template_16);
test_acipher_speed("xts(sm4)", ENCRYPT, sec, NULL, 0,
speed_template_32);
test_acipher_speed("xts(sm4)", DECRYPT, sec, NULL, 0,
speed_template_32);
break;
case 519:
test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 600:
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("lrw(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("lrw(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("xts(aes)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(aes)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("cts(cbc(aes))", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cts(cbc(aes))", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ofb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ofb(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", ENCRYPT, sec, NULL,
0, speed_template_20_28_36, num_mb);
test_mb_skcipher_speed("rfc3686(ctr(aes))", DECRYPT, sec, NULL,
0, speed_template_20_28_36, num_mb);
break;
case 601:
test_mb_skcipher_speed("ecb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ecb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cbc(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cbc(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cfb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("cfb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ofb(des3_ede)", ENCRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
test_mb_skcipher_speed("ofb(des3_ede)", DECRYPT, sec,
des3_speed_template, DES3_SPEED_VECTORS,
speed_template_24, num_mb);
break;
case 602:
test_mb_skcipher_speed("ecb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ecb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cbc(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cbc(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cfb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("cfb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ofb(des)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
test_mb_skcipher_speed("ofb(des)", DECRYPT, sec, NULL, 0,
speed_template_8, num_mb);
break;
case 603:
test_mb_skcipher_speed("ecb(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(serpent)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(serpent)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(serpent)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 604:
test_mb_skcipher_speed("ecb(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ecb(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("cbc(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("ctr(twofish)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb);
test_mb_skcipher_speed("lrw(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("lrw(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_40_48, num_mb);
test_mb_skcipher_speed("xts(twofish)", ENCRYPT, sec, NULL, 0,
speed_template_32_48_64, num_mb);
test_mb_skcipher_speed("xts(twofish)", DECRYPT, sec, NULL, 0,
speed_template_32_48_64, num_mb);
break;
case 605:
test_mb_skcipher_speed("ecb(arc4)", ENCRYPT, sec, NULL, 0,
speed_template_8, num_mb);
break;
case 606:
test_mb_skcipher_speed("ecb(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ecb(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("cbc(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("cbc(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ctr(cast5)", ENCRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
test_mb_skcipher_speed("ctr(cast5)", DECRYPT, sec, NULL, 0,
speed_template_8_16, num_mb);
break;
case 607:
test_mb_skcipher_speed("ecb(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(cast6)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(cast6)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(cast6)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 608:
test_mb_skcipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(camellia)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("lrw(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("lrw(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_48, num_mb);
test_mb_skcipher_speed("xts(camellia)", ENCRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
test_mb_skcipher_speed("xts(camellia)", DECRYPT, sec, NULL, 0,
speed_template_32_64, num_mb);
break;
case 609:
test_mb_skcipher_speed("ecb(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ecb(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("cbc(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("cbc(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ctr(blowfish)", ENCRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
test_mb_skcipher_speed("ctr(blowfish)", DECRYPT, sec, NULL, 0,
speed_template_8_32, num_mb);
break;
case 610:
test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
break;
}
return ret;
}
static int __init tcrypt_mod_init(void)
{
int err = -ENOMEM;
int i;
for (i = 0; i < TVMEMSIZE; i++) {
tvmem[i] = (void *)__get_free_page(GFP_KERNEL);
if (!tvmem[i])
goto err_free_tv;
}
err = do_test(alg, type, mask, mode, num_mb);
if (err) {
pr_err("one or more tests failed!\n");
goto err_free_tv;
} else {
pr_debug("all tests passed\n");
}
/* We intentionally return -EAGAIN to prevent keeping the module,
* unless we're running in fips mode. It does all its work from
* init() and doesn't offer any runtime functionality, but in
* the fips case, checking for a successful load is helpful.
* => we don't need to keep it in memory, do we?
* -- mludvig
*/
if (!fips_enabled)
err = -EAGAIN;
err_free_tv:
for (i = 0; i < TVMEMSIZE && tvmem[i]; i++)
free_page((unsigned long)tvmem[i]);
return err;
}
/*
* If an init function is provided, an exit function must also be provided
* to allow module unload.
*/
static void __exit tcrypt_mod_fini(void) { }
late_initcall(tcrypt_mod_init);
module_exit(tcrypt_mod_fini);
module_param(alg, charp, 0);
module_param(type, uint, 0);
module_param(mask, uint, 0);
module_param(mode, int, 0);
module_param(sec, uint, 0);
MODULE_PARM_DESC(sec, "Length in seconds of speed tests "
"(defaults to zero which uses CPU cycles instead)");
module_param(num_mb, uint, 0000);
MODULE_PARM_DESC(num_mb, "Number of concurrent requests to be used in mb speed tests (defaults to 8)");
module_param(klen, uint, 0);
MODULE_PARM_DESC(klen, "Key length (defaults to 0)");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Quick & dirty crypto testing module");
MODULE_AUTHOR("James Morris <[email protected]>");
| linux-master | crypto/tcrypt.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RSA padding templates.
*
* Copyright (c) 2015 Intel Corporation
*/
#include <crypto/algapi.h>
#include <crypto/akcipher.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/rsa.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
/*
* Hash algorithm OIDs plus ASN.1 DER wrappings [RFC4880 sec 5.2.2].
*/
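/* Each blob below is the DER-encoded DigestInfo prefix that gets
 * prepended to the raw hash value. Decoding the sha256 entry as an
 * example:
 *   30 31            SEQUENCE, 0x31 bytes follow
 *     30 0d          SEQUENCE, 0x0d bytes (AlgorithmIdentifier)
 *       06 09 60 86 48 01 65 03 04 02 01   OID 2.16.840.1.101.3.4.2.1
 *       05 00        NULL parameters
 *     04 20          OCTET STRING, 0x20 (32) hash bytes follow
 */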
static const u8 rsa_digest_info_md5[] = {
0x30, 0x20, 0x30, 0x0c, 0x06, 0x08,
0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x02, 0x05, /* OID */
0x05, 0x00, 0x04, 0x10
};
static const u8 rsa_digest_info_sha1[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x0e, 0x03, 0x02, 0x1a,
0x05, 0x00, 0x04, 0x14
};
static const u8 rsa_digest_info_rmd160[] = {
0x30, 0x21, 0x30, 0x09, 0x06, 0x05,
0x2b, 0x24, 0x03, 0x02, 0x01,
0x05, 0x00, 0x04, 0x14
};
static const u8 rsa_digest_info_sha224[] = {
0x30, 0x2d, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x04,
0x05, 0x00, 0x04, 0x1c
};
static const u8 rsa_digest_info_sha256[] = {
0x30, 0x31, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01,
0x05, 0x00, 0x04, 0x20
};
static const u8 rsa_digest_info_sha384[] = {
0x30, 0x41, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02,
0x05, 0x00, 0x04, 0x30
};
static const u8 rsa_digest_info_sha512[] = {
0x30, 0x51, 0x30, 0x0d, 0x06, 0x09,
0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03,
0x05, 0x00, 0x04, 0x40
};
static const struct rsa_asn1_template {
const char *name;
const u8 *data;
size_t size;
} rsa_asn1_templates[] = {
#define _(X) { #X, rsa_digest_info_##X, sizeof(rsa_digest_info_##X) }
_(md5),
_(sha1),
_(rmd160),
_(sha256),
_(sha384),
_(sha512),
_(sha224),
{ NULL }
#undef _
};
static const struct rsa_asn1_template *rsa_lookup_asn1(const char *name)
{
const struct rsa_asn1_template *p;
for (p = rsa_asn1_templates; p->name; p++)
if (strcmp(name, p->name) == 0)
return p;
return NULL;
}
struct pkcs1pad_ctx {
struct crypto_akcipher *child;
unsigned int key_size;
};
struct pkcs1pad_inst_ctx {
struct crypto_akcipher_spawn spawn;
const struct rsa_asn1_template *digest_info;
};
struct pkcs1pad_request {
struct scatterlist in_sg[2], out_sg[1];
uint8_t *in_buf, *out_buf;
struct akcipher_request child_req;
};
static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err;
ctx->key_size = 0;
err = crypto_akcipher_set_pub_key(ctx->child, key, keylen);
if (err)
return err;
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err > PAGE_SIZE)
return -ENOTSUPP;
ctx->key_size = err;
return 0;
}
static int pkcs1pad_set_priv_key(struct crypto_akcipher *tfm, const void *key,
unsigned int keylen)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
int err;
ctx->key_size = 0;
err = crypto_akcipher_set_priv_key(ctx->child, key, keylen);
if (err)
return err;
/* Find out new modulus size from rsa implementation */
err = crypto_akcipher_maxsize(ctx->child);
if (err > PAGE_SIZE)
return -ENOTSUPP;
ctx->key_size = err;
return 0;
}
static unsigned int pkcs1pad_get_max_size(struct crypto_akcipher *tfm)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
/*
* The maximum destination buffer size for the encrypt/sign operations
* will be the same as for RSA, even though it's smaller for
* decrypt/verify.
*/
return ctx->key_size;
}
static void pkcs1pad_sg_set_buf(struct scatterlist *sg, void *buf, size_t len,
struct scatterlist *next)
{
int nsegs = next ? 2 : 1;
sg_init_table(sg, nsegs);
sg_set_buf(sg, buf, len);
if (next)
sg_chain(sg, nsegs, next);
}
static int pkcs1pad_encrypt_sign_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
unsigned int pad_len;
unsigned int len;
u8 *out_buf;
if (err)
goto out;
len = req_ctx->child_req.dst_len;
pad_len = ctx->key_size - len;
/* Four billion to one: the child RSA op almost always returns a
* full key_size-byte result, so no leading zero bytes need to be
* restored. */
if (likely(!pad_len))
goto out;
out_buf = kzalloc(ctx->key_size, GFP_ATOMIC);
err = -ENOMEM;
if (!out_buf)
goto out;
sg_copy_to_buffer(req->dst, sg_nents_for_len(req->dst, len),
out_buf + pad_len, len);
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, ctx->key_size),
out_buf, ctx->key_size);
kfree_sensitive(out_buf);
out:
req->dst_len = ctx->key_size;
kfree(req_ctx->in_buf);
return err;
}
static void pkcs1pad_encrypt_sign_complete_cb(void *data, int err)
{
struct akcipher_request *req = data;
if (err == -EINPROGRESS)
goto out;
err = pkcs1pad_encrypt_sign_complete(req, err);
out:
akcipher_request_complete(req, err);
}
static int pkcs1pad_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
unsigned int i, ps_end;
if (!ctx->key_size)
return -EINVAL;
if (req->src_len > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
req->dst_len = ctx->key_size;
return -EOVERFLOW;
}
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
GFP_KERNEL);
if (!req_ctx->in_buf)
return -ENOMEM;
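/* Build EM = 0x00 || 0x02 || PS || 0x00 || M (EME-PKCS1-v1_5).
 * Only key_size - 1 bytes are handed to RSA, so the leading zero
 * octet is implicit; PS is at least 8 nonzero random bytes, and M
 * follows via the chained req->src scatterlist.
 */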
ps_end = ctx->key_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x02;
for (i = 1; i < ps_end; i++)
req_ctx->in_buf[i] = get_random_u32_inclusive(1, 255);
req_ctx->in_buf[ps_end] = 0x00;
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
/* Reuse output buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
}
static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
if (err)
goto done;
err = -EINVAL;
dst_len = req_ctx->child_req.dst_len;
if (dst_len < ctx->key_size - 1)
goto done;
out_buf = req_ctx->out_buf;
if (dst_len == ctx->key_size) {
if (out_buf[0] != 0x00)
/* Decrypted value had no leading 0 byte */
goto done;
dst_len--;
out_buf++;
}
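/* Undo EME-PKCS1-v1_5: expect block type 0x02, skip the nonzero
 * padding string PS (which must be at least 8 bytes, hence pos >= 9),
 * and treat everything after the 0x00 separator as the message.
 */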
if (out_buf[0] != 0x02)
goto done;
for (pos = 1; pos < dst_len; pos++)
if (out_buf[pos] == 0x00)
break;
if (pos < 9 || pos == dst_len)
goto done;
pos++;
err = 0;
if (req->dst_len < dst_len - pos)
err = -EOVERFLOW;
req->dst_len = dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
out_buf + pos, req->dst_len);
done:
kfree_sensitive(req_ctx->out_buf);
return err;
}
static void pkcs1pad_decrypt_complete_cb(void *data, int err)
{
struct akcipher_request *req = data;
if (err == -EINPROGRESS)
goto out;
err = pkcs1pad_decrypt_complete(req, err);
out:
akcipher_request_complete(req, err);
}
static int pkcs1pad_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
int err;
if (!ctx->key_size || req->src_len != ctx->key_size)
return -EINVAL;
req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_decrypt_complete_cb, req);
/* Reuse input buffer, output to a new buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req->src,
req_ctx->out_sg, req->src_len,
ctx->key_size);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_decrypt_complete(req, err);
return err;
}
static int pkcs1pad_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
int err;
unsigned int ps_end, digest_info_size = 0;
if (!ctx->key_size)
return -EINVAL;
if (digest_info)
digest_info_size = digest_info->size;
if (req->src_len + digest_info_size > ctx->key_size - 11)
return -EOVERFLOW;
if (req->dst_len < ctx->key_size) {
req->dst_len = ctx->key_size;
return -EOVERFLOW;
}
req_ctx->in_buf = kmalloc(ctx->key_size - 1 - req->src_len,
GFP_KERNEL);
if (!req_ctx->in_buf)
return -ENOMEM;
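/* Build EM = 0x00 || 0x01 || PS(0xff..ff) || 0x00 || DigestInfo || H
 * (EMSA-PKCS1-v1_5); as in encrypt, the leading zero octet is implicit
 * in the key_size - 1 byte RSA input, and the digest H arrives via the
 * chained req->src scatterlist.
 */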
ps_end = ctx->key_size - digest_info_size - req->src_len - 2;
req_ctx->in_buf[0] = 0x01;
memset(req_ctx->in_buf + 1, 0xff, ps_end - 1);
req_ctx->in_buf[ps_end] = 0x00;
if (digest_info)
memcpy(req_ctx->in_buf + ps_end + 1, digest_info->data,
digest_info->size);
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
/* Reuse output buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req_ctx->in_sg,
req->dst, ctx->key_size - 1, req->dst_len);
err = crypto_akcipher_decrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_encrypt_sign_complete(req, err);
return err;
}
static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
const struct rsa_asn1_template *digest_info = ictx->digest_info;
const unsigned int sig_size = req->src_len;
const unsigned int digest_size = req->dst_len;
unsigned int dst_len;
unsigned int pos;
u8 *out_buf;
if (err)
goto done;
err = -EINVAL;
dst_len = req_ctx->child_req.dst_len;
if (dst_len < ctx->key_size - 1)
goto done;
out_buf = req_ctx->out_buf;
if (dst_len == ctx->key_size) {
if (out_buf[0] != 0x00)
/* Decrypted value had no leading 0 byte */
goto done;
dst_len--;
out_buf++;
}
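/* Undo EMSA-PKCS1-v1_5: expect block type 0x01, skip the 0xff padding
 * string, check the 0x00 separator and the DigestInfo prefix, then
 * compare the recovered digest against the one supplied by the caller.
 */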
err = -EBADMSG;
if (out_buf[0] != 0x01)
goto done;
for (pos = 1; pos < dst_len; pos++)
if (out_buf[pos] != 0xff)
break;
if (pos < 9 || pos == dst_len || out_buf[pos] != 0x00)
goto done;
pos++;
if (digest_info) {
if (digest_info->size > dst_len - pos)
goto done;
if (crypto_memneq(out_buf + pos, digest_info->data,
digest_info->size))
goto done;
pos += digest_info->size;
}
err = 0;
if (digest_size != dst_len - pos) {
err = -EKEYREJECTED;
req->dst_len = dst_len - pos;
goto done;
}
/* Extract appended digest. */
sg_pcopy_to_buffer(req->src,
sg_nents_for_len(req->src, sig_size + digest_size),
req_ctx->out_buf + ctx->key_size,
digest_size, sig_size);
/* Do the actual verification step. */
if (memcmp(req_ctx->out_buf + ctx->key_size, out_buf + pos,
digest_size) != 0)
err = -EKEYREJECTED;
done:
kfree_sensitive(req_ctx->out_buf);
return err;
}
static void pkcs1pad_verify_complete_cb(void *data, int err)
{
struct akcipher_request *req = data;
if (err == -EINPROGRESS)
goto out;
err = pkcs1pad_verify_complete(req, err);
out:
akcipher_request_complete(req, err);
}
/*
 * The verify operation is here for completeness, similar to the
 * verification defined in RFC 2313 section 10.2, except that block
 * type 0 is not accepted, as in RFC 2437. RFC 2437 section 9.2 doesn't
 * define any operation to retrieve the DigestInfo from a signature;
 * instead, the user is expected to call the sign operation to generate
 * the expected signature and compare signatures rather than
 * message digests.
 */
static int pkcs1pad_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
const unsigned int sig_size = req->src_len;
const unsigned int digest_size = req->dst_len;
int err;
if (WARN_ON(req->dst) || WARN_ON(!digest_size) ||
!ctx->key_size || sig_size != ctx->key_size)
return -EINVAL;
req_ctx->out_buf = kmalloc(ctx->key_size + digest_size, GFP_KERNEL);
if (!req_ctx->out_buf)
return -ENOMEM;
pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
ctx->key_size, NULL);
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_verify_complete_cb, req);
/* Reuse input buffer, output to a new buffer */
akcipher_request_set_crypt(&req_ctx->child_req, req->src,
req_ctx->out_sg, sig_size, ctx->key_size);
err = crypto_akcipher_encrypt(&req_ctx->child_req);
if (err != -EINPROGRESS && err != -EBUSY)
return pkcs1pad_verify_complete(req, err);
return err;
}
static int pkcs1pad_init_tfm(struct crypto_akcipher *tfm)
{
struct akcipher_instance *inst = akcipher_alg_instance(tfm);
struct pkcs1pad_inst_ctx *ictx = akcipher_instance_ctx(inst);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct crypto_akcipher *child_tfm;
child_tfm = crypto_spawn_akcipher(&ictx->spawn);
if (IS_ERR(child_tfm))
return PTR_ERR(child_tfm);
ctx->child = child_tfm;
akcipher_set_reqsize(tfm, sizeof(struct pkcs1pad_request) +
crypto_akcipher_reqsize(child_tfm));
return 0;
}
static void pkcs1pad_exit_tfm(struct crypto_akcipher *tfm)
{
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
crypto_free_akcipher(ctx->child);
}
static void pkcs1pad_free(struct akcipher_instance *inst)
{
struct pkcs1pad_inst_ctx *ctx = akcipher_instance_ctx(inst);
struct crypto_akcipher_spawn *spawn = &ctx->spawn;
crypto_drop_akcipher(spawn);
kfree(inst);
}
static int pkcs1pad_create(struct crypto_template *tmpl, struct rtattr **tb)
{
u32 mask;
struct akcipher_instance *inst;
struct pkcs1pad_inst_ctx *ctx;
struct akcipher_alg *rsa_alg;
const char *hash_name;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AKCIPHER, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = akcipher_instance_ctx(inst);
err = crypto_grab_akcipher(&ctx->spawn, akcipher_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
rsa_alg = crypto_spawn_akcipher_alg(&ctx->spawn);
if (strcmp(rsa_alg->base.cra_name, "rsa") != 0) {
err = -EINVAL;
goto err_free_inst;
}
err = -ENAMETOOLONG;
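/* The hash parameter is optional: "pkcs1pad(rsa)" leaves digest_info
 * NULL (no DigestInfo prefix on sign/verify), while e.g.
 * "pkcs1pad(rsa,sha256)" binds the matching ASN.1 template.
 */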
hash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(hash_name)) {
if (snprintf(inst->alg.base.cra_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s)",
rsa_alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
} else {
ctx->digest_info = rsa_lookup_asn1(hash_name);
if (!ctx->digest_info) {
err = -EINVAL;
goto err_free_inst;
}
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"pkcs1pad(%s,%s)", rsa_alg->base.cra_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name,
CRYPTO_MAX_ALG_NAME, "pkcs1pad(%s,%s)",
rsa_alg->base.cra_driver_name,
hash_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
}
inst->alg.base.cra_priority = rsa_alg->base.cra_priority;
inst->alg.base.cra_ctxsize = sizeof(struct pkcs1pad_ctx);
inst->alg.init = pkcs1pad_init_tfm;
inst->alg.exit = pkcs1pad_exit_tfm;
inst->alg.encrypt = pkcs1pad_encrypt;
inst->alg.decrypt = pkcs1pad_decrypt;
inst->alg.sign = pkcs1pad_sign;
inst->alg.verify = pkcs1pad_verify;
inst->alg.set_pub_key = pkcs1pad_set_pub_key;
inst->alg.set_priv_key = pkcs1pad_set_priv_key;
inst->alg.max_size = pkcs1pad_get_max_size;
inst->free = pkcs1pad_free;
err = akcipher_register_instance(tmpl, inst);
if (err) {
err_free_inst:
pkcs1pad_free(inst);
}
return err;
}
struct crypto_template rsa_pkcs1pad_tmpl = {
.name = "pkcs1pad",
.create = pkcs1pad_create,
.module = THIS_MODULE,
};
| linux-master | crypto/rsa-pkcs1pad.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GCM: Galois/Counter Mode.
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <[email protected]>
*/
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <crypto/gcm.h>
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
struct gcm_instance_ctx {
struct crypto_skcipher_spawn ctr;
struct crypto_ahash_spawn ghash;
};
struct crypto_gcm_ctx {
struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
};
struct crypto_rfc4106_ctx {
struct crypto_aead *child;
u8 nonce[4];
};
struct crypto_rfc4106_req_ctx {
struct scatterlist src[3];
struct scatterlist dst[3];
struct aead_request subreq;
};
struct crypto_rfc4543_instance_ctx {
struct crypto_aead_spawn aead;
};
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
struct crypto_sync_skcipher *null;
u8 nonce[4];
};
struct crypto_rfc4543_req_ctx {
struct aead_request subreq;
};
struct crypto_gcm_ghash_ctx {
unsigned int cryptlen;
struct scatterlist *src;
int (*complete)(struct aead_request *req, u32 flags);
};
struct crypto_gcm_req_priv_ctx {
u8 iv[16];
u8 auth_tag[16];
u8 iauth_tag[16];
struct scatterlist src[3];
struct scatterlist dst[3];
struct scatterlist sg;
struct crypto_gcm_ghash_ctx ghash_ctx;
union {
struct ahash_request ahreq;
struct skcipher_request skreq;
} u;
};
static struct {
u8 buf[16];
struct scatterlist sg;
} *gcm_zeroes;
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
static inline struct crypto_gcm_req_priv_ctx *crypto_gcm_reqctx(
struct aead_request *req)
{
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int crypto_gcm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ahash *ghash = ctx->ghash;
struct crypto_skcipher *ctr = ctx->ctr;
struct {
be128 hash;
u8 iv[16];
struct crypto_wait wait;
struct scatterlist sg[1];
struct skcipher_request req;
} *data;
int err;
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
if (err)
return err;
data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(ctr),
GFP_KERNEL);
if (!data)
return -ENOMEM;
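/* Derive the GHASH subkey H = E_K(0^128): data is zero-initialized,
 * so CTR-encrypting the all-zero hash block with an all-zero IV
 * amounts to a single raw block encryption of zero.
 */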
crypto_init_wait(&data->wait);
sg_init_one(data->sg, &data->hash, sizeof(data->hash));
skcipher_request_set_tfm(&data->req, ctr);
skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
crypto_req_done,
&data->wait);
skcipher_request_set_crypt(&data->req, data->sg, data->sg,
sizeof(data->hash), data->iv);
err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
&data->wait);
if (err)
goto out;
crypto_ahash_clear_flags(ghash, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(ghash, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_ahash_setkey(ghash, (u8 *)&data->hash, sizeof(be128));
out:
kfree_sensitive(data);
return err;
}
static int crypto_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
return crypto_gcm_check_authsize(authsize);
}
static void crypto_gcm_init_common(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
__be32 counter = cpu_to_be32(1);
struct scatterlist *sg;
memset(pctx->auth_tag, 0, sizeof(pctx->auth_tag));
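/* Build the initial counter block J0 = IV || 0^31 || 1, as GCM
 * defines for 96-bit IVs.
 */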
memcpy(pctx->iv, req->iv, GCM_AES_IV_SIZE);
memcpy(pctx->iv + GCM_AES_IV_SIZE, &counter, 4);
sg_init_table(pctx->src, 3);
sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
if (sg != pctx->src + 1)
sg_chain(pctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(pctx->dst, 3);
sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
if (sg != pctx->dst + 1)
sg_chain(pctx->dst, 2, sg);
}
}
static void crypto_gcm_init_crypt(struct aead_request *req,
unsigned int cryptlen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
struct scatterlist *dst;
dst = req->src == req->dst ? pctx->src : pctx->dst;
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_crypt(skreq, pctx->src, dst,
cryptlen + sizeof(pctx->auth_tag),
pctx->iv);
}
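/* Number of zero bytes needed to pad len up to a full 16-byte GHASH
 * block (0 if already aligned).
 */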
static inline unsigned int gcm_remain(unsigned int len)
{
len &= 0xfU;
return len ? 16 - len : 0;
}
static void gcm_hash_len_done(void *data, int err);
static int gcm_hash_update(struct aead_request *req,
crypto_completion_t compl,
struct scatterlist *src,
unsigned int len, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
ahash_request_set_callback(ahreq, flags, compl, req);
ahash_request_set_crypt(ahreq, src, NULL, len);
return crypto_ahash_update(ahreq);
}
static int gcm_hash_remain(struct aead_request *req,
unsigned int remain,
crypto_completion_t compl, u32 flags)
{
return gcm_hash_update(req, compl, &gcm_zeroes->sg, remain, flags);
}
static int gcm_hash_len(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
be128 lengths;
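/* Final GHASH block: the 64-bit big-endian bit lengths of the
 * associated data and the ciphertext, len(A) || len(C).
 */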
lengths.a = cpu_to_be64(req->assoclen * 8);
lengths.b = cpu_to_be64(gctx->cryptlen * 8);
memcpy(pctx->iauth_tag, &lengths, 16);
sg_init_one(&pctx->sg, pctx->iauth_tag, 16);
ahash_request_set_callback(ahreq, flags, gcm_hash_len_done, req);
ahash_request_set_crypt(ahreq, &pctx->sg,
pctx->iauth_tag, sizeof(lengths));
return crypto_ahash_finup(ahreq);
}
static int gcm_hash_len_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
return gctx->complete(req, flags);
}
static void gcm_hash_len_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_len_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_crypt_remain_continue(struct aead_request *req, u32 flags)
{
return gcm_hash_len(req, flags) ?:
gcm_hash_len_continue(req, flags);
}
static void gcm_hash_crypt_remain_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_crypt_remain_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_crypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int remain;
remain = gcm_remain(gctx->cryptlen);
if (remain)
return gcm_hash_remain(req, remain,
gcm_hash_crypt_remain_done, flags) ?:
gcm_hash_crypt_remain_continue(req, flags);
return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_crypt_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_crypt_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_assoc_remain_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
if (gctx->cryptlen)
return gcm_hash_update(req, gcm_hash_crypt_done,
gctx->src, gctx->cryptlen, flags) ?:
gcm_hash_crypt_continue(req, flags);
return gcm_hash_crypt_remain_continue(req, flags);
}
static void gcm_hash_assoc_remain_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_assoc_remain_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_assoc_continue(struct aead_request *req, u32 flags)
{
unsigned int remain;
remain = gcm_remain(req->assoclen);
if (remain)
return gcm_hash_remain(req, remain,
gcm_hash_assoc_remain_done, flags) ?:
gcm_hash_assoc_remain_continue(req, flags);
return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_assoc_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_assoc_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash_init_continue(struct aead_request *req, u32 flags)
{
if (req->assoclen)
return gcm_hash_update(req, gcm_hash_assoc_done,
req->src, req->assoclen, flags) ?:
gcm_hash_assoc_continue(req, flags);
return gcm_hash_assoc_remain_continue(req, flags);
}
static void gcm_hash_init_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_hash_init_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int gcm_hash(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct ahash_request *ahreq = &pctx->u.ahreq;
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
ahash_request_set_tfm(ahreq, ctx->ghash);
ahash_request_set_callback(ahreq, flags, gcm_hash_init_done, req);
return crypto_ahash_init(ahreq) ?:
gcm_hash_init_continue(req, flags);
}
static int gcm_enc_copy_hash(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
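/* The zeroed auth_tag buffer was the first scatterlist entry, so CTR
 * encryption left it holding E_K(J0); XOR with the GHASH value to form
 * the final GCM tag and append it to the destination.
 */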
crypto_xor(auth_tag, pctx->iauth_tag, 16);
scatterwalk_map_and_copy(auth_tag, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
}
static int gcm_encrypt_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
gctx->src = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
gctx->cryptlen = req->cryptlen;
gctx->complete = gcm_enc_copy_hash;
return gcm_hash(req, flags);
}
static void gcm_encrypt_done(void *data, int err)
{
struct aead_request *req = data;
if (err)
goto out;
err = gcm_encrypt_continue(req, 0);
if (err == -EINPROGRESS)
return;
out:
aead_request_complete(req, err);
}
static int crypto_gcm_encrypt(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
u32 flags = aead_request_flags(req);
crypto_gcm_init_common(req);
crypto_gcm_init_crypt(req, req->cryptlen);
skcipher_request_set_callback(skreq, flags, gcm_encrypt_done, req);
return crypto_skcipher_encrypt(skreq) ?:
gcm_encrypt_continue(req, flags);
}
static int crypto_gcm_verify(struct aead_request *req)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
u8 *auth_tag = pctx->auth_tag;
u8 *iauth_tag = pctx->iauth_tag;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
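/* Recompute the expected tag (GHASH XOR E_K(J0)) and compare it to the
 * tag appended to the ciphertext; crypto_memneq() keeps the comparison
 * constant-time.
 */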
crypto_xor(auth_tag, iauth_tag, 16);
scatterwalk_map_and_copy(iauth_tag, req->src,
req->assoclen + cryptlen, authsize, 0);
return crypto_memneq(iauth_tag, auth_tag, authsize) ? -EBADMSG : 0;
}
static void gcm_decrypt_done(void *data, int err)
{
struct aead_request *req = data;
if (!err)
err = crypto_gcm_verify(req);
aead_request_complete(req, err);
}
static int gcm_dec_hash_continue(struct aead_request *req, u32 flags)
{
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct skcipher_request *skreq = &pctx->u.skreq;
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
crypto_gcm_init_crypt(req, gctx->cryptlen);
skcipher_request_set_callback(skreq, flags, gcm_decrypt_done, req);
return crypto_skcipher_decrypt(skreq) ?: crypto_gcm_verify(req);
}
static int crypto_gcm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
u32 flags = aead_request_flags(req);
cryptlen -= authsize;
crypto_gcm_init_common(req);
gctx->src = sg_next(pctx->src);
gctx->cryptlen = cryptlen;
gctx->complete = gcm_dec_hash_continue;
return gcm_hash(req, flags);
}
static int crypto_gcm_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct gcm_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_skcipher *ctr;
struct crypto_ahash *ghash;
unsigned long align;
int err;
ghash = crypto_spawn_ahash(&ictx->ghash);
if (IS_ERR(ghash))
return PTR_ERR(ghash);
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_hash;
ctx->ctr = ctr;
ctx->ghash = ghash;
align = crypto_aead_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(tfm,
align + offsetof(struct crypto_gcm_req_priv_ctx, u) +
max(sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(ctr),
sizeof(struct ahash_request) +
crypto_ahash_reqsize(ghash)));
return 0;
err_free_hash:
crypto_free_ahash(ghash);
return err;
}
static void crypto_gcm_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_gcm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->ghash);
crypto_free_skcipher(ctx->ctr);
}
static void crypto_gcm_free(struct aead_instance *inst)
{
struct gcm_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_skcipher(&ctx->ctr);
crypto_drop_ahash(&ctx->ghash);
kfree(inst);
}
static int crypto_gcm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *ctr_name,
const char *ghash_name)
{
u32 mask;
struct aead_instance *inst;
struct gcm_instance_ctx *ctx;
struct skcipher_alg *ctr;
struct hash_alg_common *ghash;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ctx->ghash, aead_crypto_instance(inst),
ghash_name, 0, mask);
if (err)
goto err_free_inst;
ghash = crypto_spawn_ahash_alg(&ctx->ghash);
err = -EINVAL;
if (strcmp(ghash->base.cra_name, "ghash") != 0 ||
ghash->digestsize != 16)
goto err_free_inst;
err = crypto_grab_skcipher(&ctx->ctr, aead_crypto_instance(inst),
ctr_name, 0, mask);
if (err)
goto err_free_inst;
ctr = crypto_spawn_skcipher_alg(&ctx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"gcm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"gcm_base(%s,%s)", ctr->base.cra_driver_name,
ghash->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = (ghash->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = ghash->base.cra_alignmask |
ctr->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_gcm_ctx);
inst->alg.ivsize = GCM_AES_IV_SIZE;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.init = crypto_gcm_init_tfm;
inst->alg.exit = crypto_gcm_exit_tfm;
inst->alg.setkey = crypto_gcm_setkey;
inst->alg.setauthsize = crypto_gcm_setauthsize;
inst->alg.encrypt = crypto_gcm_encrypt;
inst->alg.decrypt = crypto_gcm_decrypt;
inst->free = crypto_gcm_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_gcm_free(inst);
}
return err;
}
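/*
 * Naming example (illustrative): with ctr_name "ctr(aes)" and
 * ghash_name "ghash", the "gcm(%s" format above reuses the trailing
 * ")" of "ctr(aes)" to produce cra_name "gcm(aes)", while the driver
 * name becomes "gcm_base(<ctr driver>,<ghash driver>)".
 */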
static int crypto_gcm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)", cipher_name) >=
CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_gcm_create_common(tmpl, tb, ctr_name, "ghash");
}
static int crypto_gcm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
const char *ctr_name;
const char *ghash_name;
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return PTR_ERR(ctr_name);
ghash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(ghash_name))
return PTR_ERR(ghash_name);
return crypto_gcm_create_common(tmpl, tb, ctr_name, ghash_name);
}
static int crypto_rfc4106_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
if (keylen < 4)
return -EINVAL;
keylen -= 4;
memcpy(ctx->nonce, key + keylen, 4);
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(child, key, keylen);
}
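/*
 * RFC 4106 key layout: the supplied key is the cipher key followed by
 * a 4-byte salt, e.g. a 20-byte input splits into a 16-byte AES key
 * plus the 4 nonce bytes copied into ctx->nonce above.
 */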
static int crypto_rfc4106_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(parent);
int err;
err = crypto_rfc4106_check_authsize(authsize);
if (err)
return err;
return crypto_aead_setauthsize(ctx->child, authsize);
}
static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req)
{
struct crypto_rfc4106_req_ctx *rctx = aead_request_ctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(aead);
struct aead_request *subreq = &rctx->subreq;
struct crypto_aead *child = ctx->child;
struct scatterlist *sg;
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
crypto_aead_alignmask(child) + 1);
scatterwalk_map_and_copy(iv + GCM_AES_IV_SIZE, req->src, 0, req->assoclen - 8, 0);
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
sg_init_table(rctx->src, 3);
sg_set_buf(rctx->src, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
if (sg != rctx->src + 1)
sg_chain(rctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(rctx->dst, 3);
sg_set_buf(rctx->dst, iv + GCM_AES_IV_SIZE, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
if (sg != rctx->dst + 1)
sg_chain(rctx->dst, 2, sg);
}
aead_request_set_tfm(subreq, child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, rctx->src,
req->src == req->dst ? rctx->src : rctx->dst,
req->cryptlen, iv);
aead_request_set_ad(subreq, req->assoclen - 8);
return subreq;
}
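/*
 * crypto_rfc4106_crypt() above assembles the child's 12-byte GCM IV
 * as the 4-byte salt from setkey followed by the 8-byte per-request
 * IV, stages the remaining assoclen - 8 bytes of AAD in an aligned
 * buffer, and hands the whole request to the inner "gcm" transform.
 */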
static int crypto_rfc4106_encrypt(struct aead_request *req)
{
int err;
err = crypto_ipsec_check_assoclen(req->assoclen);
if (err)
return err;
req = crypto_rfc4106_crypt(req);
return crypto_aead_encrypt(req);
}
static int crypto_rfc4106_decrypt(struct aead_request *req)
{
int err;
err = crypto_ipsec_check_assoclen(req->assoclen);
if (err)
return err;
req = crypto_rfc4106_crypt(req);
return crypto_aead_decrypt(req);
}
static int crypto_rfc4106_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
unsigned long align;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
ctx->child = aead;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(
tfm,
sizeof(struct crypto_rfc4106_req_ctx) +
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
align + 24);
return 0;
}
static void crypto_rfc4106_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_rfc4106_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
static void crypto_rfc4106_free(struct aead_instance *inst)
{
crypto_drop_aead(aead_instance_ctx(inst));
kfree(inst);
}
static int crypto_rfc4106_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = aead_instance_ctx(inst);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
/* Underlying IV size must be 12. */
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto err_free_inst;
/* Must present a stream-cipher interface (blocksize 1). */
if (alg->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc4106(%s)", alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4106(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4106_ctx);
inst->alg.ivsize = GCM_RFC4106_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.init = crypto_rfc4106_init_tfm;
inst->alg.exit = crypto_rfc4106_exit_tfm;
inst->alg.setkey = crypto_rfc4106_setkey;
inst->alg.setauthsize = crypto_rfc4106_setauthsize;
inst->alg.encrypt = crypto_rfc4106_encrypt;
inst->alg.decrypt = crypto_rfc4106_decrypt;
inst->free = crypto_rfc4106_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_rfc4106_free(inst);
}
return err;
}
static int crypto_rfc4543_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
if (keylen < 4)
return -EINVAL;
keylen -= 4;
memcpy(ctx->nonce, key + keylen, 4);
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(child, key, keylen);
}
static int crypto_rfc4543_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(parent);
if (authsize != 16)
return -EINVAL;
return crypto_aead_setauthsize(ctx->child, authsize);
}
static int crypto_rfc4543_crypt(struct aead_request *req, bool enc)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_rfc4543_req_ctx *rctx = aead_request_ctx(req);
struct aead_request *subreq = &rctx->subreq;
unsigned int authsize = crypto_aead_authsize(aead);
u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
crypto_aead_alignmask(ctx->child) + 1);
int err;
if (req->src != req->dst) {
err = crypto_rfc4543_copy_src_to_dst(req, enc);
if (err)
return err;
}
memcpy(iv, ctx->nonce, 4);
memcpy(iv + 4, req->iv, 8);
aead_request_set_tfm(subreq, ctx->child);
aead_request_set_callback(subreq, req->base.flags,
req->base.complete, req->base.data);
aead_request_set_crypt(subreq, req->src, req->dst,
enc ? 0 : authsize, iv);
aead_request_set_ad(subreq, req->assoclen + req->cryptlen -
subreq->cryptlen);
return enc ? crypto_aead_encrypt(subreq) : crypto_aead_decrypt(subreq);
}
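/*
 * RFC 4543 is GCM used purely for authentication (GMAC): the child
 * request above encrypts zero bytes (or only the authsize-byte tag on
 * decrypt) and treats the whole assoclen + cryptlen span as AAD, so
 * the payload is authenticated but transmitted in the clear.
 */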
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int nbytes = req->assoclen + req->cryptlen -
(enc ? 0 : authsize);
SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
skcipher_request_set_sync_tfm(nreq, ctx->null);
skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
return crypto_skcipher_encrypt(nreq);
}
static int crypto_rfc4543_encrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
crypto_rfc4543_crypt(req, true);
}
static int crypto_rfc4543_decrypt(struct aead_request *req)
{
return crypto_ipsec_check_assoclen(req->assoclen) ?:
crypto_rfc4543_crypt(req, false);
}
static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct crypto_rfc4543_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
struct crypto_sync_skcipher *null;
unsigned long align;
int err = 0;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_aead;
ctx->child = aead;
ctx->null = null;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(
tfm,
sizeof(struct crypto_rfc4543_req_ctx) +
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
align + GCM_AES_IV_SIZE);
return 0;
err_free_aead:
crypto_free_aead(aead);
return err;
}
static void crypto_rfc4543_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
crypto_put_default_null_skcipher();
}
static void crypto_rfc4543_free(struct aead_instance *inst)
{
struct crypto_rfc4543_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_aead(&ctx->aead);
kfree(inst);
}
static int crypto_rfc4543_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
u32 mask;
struct aead_instance *inst;
struct aead_alg *alg;
struct crypto_rfc4543_instance_ctx *ctx;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ctx = aead_instance_ctx(inst);
err = crypto_grab_aead(&ctx->aead, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(&ctx->aead);
err = -EINVAL;
/* Underlying IV size must be 12. */
if (crypto_aead_alg_ivsize(alg) != GCM_AES_IV_SIZE)
goto err_free_inst;
/* Must present a stream-cipher interface (blocksize 1). */
if (alg->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc4543(%s)", alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4543(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4543_ctx);
inst->alg.ivsize = GCM_RFC4543_IV_SIZE;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
inst->alg.init = crypto_rfc4543_init_tfm;
inst->alg.exit = crypto_rfc4543_exit_tfm;
inst->alg.setkey = crypto_rfc4543_setkey;
inst->alg.setauthsize = crypto_rfc4543_setauthsize;
inst->alg.encrypt = crypto_rfc4543_encrypt;
inst->alg.decrypt = crypto_rfc4543_decrypt;
inst->free = crypto_rfc4543_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_rfc4543_free(inst);
}
return err;
}
static struct crypto_template crypto_gcm_tmpls[] = {
{
.name = "gcm_base",
.create = crypto_gcm_base_create,
.module = THIS_MODULE,
}, {
.name = "gcm",
.create = crypto_gcm_create,
.module = THIS_MODULE,
}, {
.name = "rfc4106",
.create = crypto_rfc4106_create,
.module = THIS_MODULE,
}, {
.name = "rfc4543",
.create = crypto_rfc4543_create,
.module = THIS_MODULE,
},
};
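/*
 * Instantiation examples (illustrative): "gcm(aes)",
 * "gcm_base(ctr(aes),ghash)", "rfc4106(gcm(aes))" and
 * "rfc4543(gcm(aes))" all resolve through the create callbacks above.
 */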
static int __init crypto_gcm_module_init(void)
{
int err;
gcm_zeroes = kzalloc(sizeof(*gcm_zeroes), GFP_KERNEL);
if (!gcm_zeroes)
return -ENOMEM;
sg_init_one(&gcm_zeroes->sg, gcm_zeroes->buf, sizeof(gcm_zeroes->buf));
err = crypto_register_templates(crypto_gcm_tmpls,
ARRAY_SIZE(crypto_gcm_tmpls));
if (err)
kfree(gcm_zeroes);
return err;
}
static void __exit crypto_gcm_module_exit(void)
{
kfree(gcm_zeroes);
crypto_unregister_templates(crypto_gcm_tmpls,
ARRAY_SIZE(crypto_gcm_tmpls));
}
subsys_initcall(crypto_gcm_module_init);
module_exit(crypto_gcm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Galois/Counter Mode");
MODULE_AUTHOR("Mikko Herranen <[email protected]>");
MODULE_ALIAS_CRYPTO("gcm_base");
MODULE_ALIAS_CRYPTO("rfc4106");
MODULE_ALIAS_CRYPTO("rfc4543");
MODULE_ALIAS_CRYPTO("gcm");
| linux-master | crypto/gcm.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Crypto API wrapper for the generic SHA256 code from lib/crypto/sha256.c
*
* Copyright (c) Jean-Luc Cooke <[email protected]>
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) 2002 James Morris <[email protected]>
* SHA224 Support Copyright 2007 Intel Corporation <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha2.h>
#include <crypto/sha256_base.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE] = {
0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
0x2f
};
EXPORT_SYMBOL_GPL(sha224_zero_message_hash);
const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE] = {
0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};
EXPORT_SYMBOL_GPL(sha256_zero_message_hash);
int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
sha256_update(shash_desc_ctx(desc), data, len);
return 0;
}
EXPORT_SYMBOL(crypto_sha256_update);
static int crypto_sha256_final(struct shash_desc *desc, u8 *out)
{
if (crypto_shash_digestsize(desc->tfm) == SHA224_DIGEST_SIZE)
sha224_final(shash_desc_ctx(desc), out);
else
sha256_final(shash_desc_ctx(desc), out);
return 0;
}
int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *hash)
{
sha256_update(shash_desc_ctx(desc), data, len);
return crypto_sha256_final(desc, hash);
}
EXPORT_SYMBOL(crypto_sha256_finup);
static struct shash_alg sha256_algs[2] = { {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_base_init,
.update = crypto_sha256_update,
.final = crypto_sha256_final,
.finup = crypto_sha256_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha256",
.cra_driver_name= "sha256-generic",
.cra_priority = 100,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
}, {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_base_init,
.update = crypto_sha256_update,
.final = crypto_sha256_final,
.finup = crypto_sha256_finup,
.descsize = sizeof(struct sha256_state),
.base = {
.cra_name = "sha224",
.cra_driver_name= "sha224-generic",
.cra_priority = 100,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
} };
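/*
 * Usage sketch (illustrative; error handling elided): a typical
 * in-kernel caller would do
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	crypto_shash_tfm_digest(tfm, data, len, digest);
 *	crypto_free_shash(tfm);
 *
 * relying on crypto_shash_tfm_digest() to wrap an on-stack descriptor
 * around a one-shot digest.
 */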
static int __init sha256_generic_mod_init(void)
{
return crypto_register_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
static void __exit sha256_generic_mod_fini(void)
{
crypto_unregister_shashes(sha256_algs, ARRAY_SIZE(sha256_algs));
}
subsys_initcall(sha256_generic_mod_init);
module_exit(sha256_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha224");
MODULE_ALIAS_CRYPTO("sha224-generic");
MODULE_ALIAS_CRYPTO("sha256");
MODULE_ALIAS_CRYPTO("sha256-generic");
| linux-master | crypto/sha256_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Synchronous Cryptographic Hash operations.
*
* Copyright (c) 2008 Herbert Xu <[email protected]>
*/
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "hash.h"
#define MAX_SHASH_ALIGNMASK 63
static const struct crypto_type crypto_shash_type;
static inline struct crypto_istat_hash *shash_get_stat(struct shash_alg *alg)
{
return hash_get_stat(&alg->halg);
}
static inline int crypto_shash_errstat(struct shash_alg *alg, int err)
{
return crypto_hash_errstat(&alg->halg, err);
}
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
return -ENOSYS;
}
EXPORT_SYMBOL_GPL(shash_no_setkey);
static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned long absize;
u8 *buffer, *alignbuffer;
int err;
absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
buffer = kmalloc(absize, GFP_ATOMIC);
if (!buffer)
return -ENOMEM;
alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
memcpy(alignbuffer, key, keylen);
err = shash->setkey(tfm, alignbuffer, keylen);
kfree_sensitive(buffer);
return err;
}
static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
{
if (crypto_shash_alg_needs_key(alg))
crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}
int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if ((unsigned long)key & alignmask)
err = shash_setkey_unaligned(tfm, key, keylen);
else
err = shash->setkey(tfm, key, keylen);
if (unlikely(err)) {
shash_set_needkey(tfm, shash);
return err;
}
crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
/*
* We cannot count on __aligned() working for large values:
* https://patchwork.kernel.org/patch/9507697/
*/
u8 ubuf[MAX_SHASH_ALIGNMASK * 2];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
return -EINVAL;
if (unaligned_len > len)
unaligned_len = len;
memcpy(buf, data, unaligned_len);
err = shash->update(desc, buf, unaligned_len);
memset(buf, 0, unaligned_len);
return err ?:
shash->update(desc, data + unaligned_len, len - unaligned_len);
}
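/*
 * The unaligned path above hashes just enough leading bytes through an
 * aligned on-stack bounce buffer to restore alignment, zeroizes the
 * copy, and then processes the remainder of the data in place.
 */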
int crypto_shash_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_add(len, &shash_get_stat(shash)->hash_tlen);
if ((unsigned long)data & alignmask)
err = shash_update_unaligned(desc, data, len);
else
err = shash->update(desc, data, len);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_update);
static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
/*
* We cannot count on __aligned() working for large values:
* https://patchwork.kernel.org/patch/9507697/
*/
u8 ubuf[MAX_SHASH_ALIGNMASK + HASH_MAX_DIGESTSIZE];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
return -EINVAL;
err = shash->final(desc, buf);
if (err)
goto out;
memcpy(out, buf, ds);
out:
memset(buf, 0, ds);
return err;
}
int crypto_shash_final(struct shash_desc *desc, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
atomic64_inc(&shash_get_stat(shash)->hash_cnt);
if ((unsigned long)out & alignmask)
err = shash_final_unaligned(desc, out);
else
err = shash->final(desc, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_final);
static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return shash_update_unaligned(desc, data, len) ?:
shash_final_unaligned(desc, out);
}
int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = shash_get_stat(shash);
atomic64_inc(&istat->hash_cnt);
atomic64_add(len, &istat->hash_tlen);
}
if (((unsigned long)data | (unsigned long)out) & alignmask)
err = shash_finup_unaligned(desc, data, len, out);
else
err = shash->finup(desc, data, len, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_finup);
static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return crypto_shash_init(desc) ?:
shash_update_unaligned(desc, data, len) ?:
shash_final_unaligned(desc, out);
}
int crypto_shash_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
struct crypto_shash *tfm = desc->tfm;
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned long alignmask = crypto_shash_alignmask(tfm);
int err;
if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
struct crypto_istat_hash *istat = shash_get_stat(shash);
atomic64_inc(&istat->hash_cnt);
atomic64_add(len, &istat->hash_tlen);
}
if (crypto_shash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
err = -ENOKEY;
else if (((unsigned long)data | (unsigned long)out) & alignmask)
err = shash_digest_unaligned(desc, data, len, out);
else
err = shash->digest(desc, data, len, out);
return crypto_shash_errstat(shash, err);
}
EXPORT_SYMBOL_GPL(crypto_shash_digest);
int crypto_shash_tfm_digest(struct crypto_shash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
SHASH_DESC_ON_STACK(desc, tfm);
int err;
desc->tfm = tfm;
err = crypto_shash_digest(desc, data, len, out);
shash_desc_zero(desc);
return err;
}
EXPORT_SYMBOL_GPL(crypto_shash_tfm_digest);
static int shash_default_export(struct shash_desc *desc, void *out)
{
memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm));
return 0;
}
static int shash_default_import(struct shash_desc *desc, const void *in)
{
memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm));
return 0;
}
static int shash_async_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
struct crypto_shash **ctx = crypto_ahash_ctx(tfm);
return crypto_shash_setkey(*ctx, key, keylen);
}
static int shash_async_init(struct ahash_request *req)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
return crypto_shash_init(desc);
}
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc)
{
struct crypto_hash_walk walk;
int nbytes;
for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
nbytes = crypto_hash_walk_done(&walk, nbytes))
nbytes = crypto_shash_update(desc, walk.data, nbytes);
return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_update);
static int shash_async_update(struct ahash_request *req)
{
return shash_ahash_update(req, ahash_request_ctx(req));
}
static int shash_async_final(struct ahash_request *req)
{
return crypto_shash_final(ahash_request_ctx(req), req->result);
}
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc)
{
struct crypto_hash_walk walk;
int nbytes;
nbytes = crypto_hash_walk_first(req, &walk);
if (!nbytes)
return crypto_shash_final(desc, req->result);
do {
nbytes = crypto_hash_walk_last(&walk) ?
crypto_shash_finup(desc, walk.data, nbytes,
req->result) :
crypto_shash_update(desc, walk.data, nbytes);
nbytes = crypto_hash_walk_done(&walk, nbytes);
} while (nbytes > 0);
return nbytes;
}
EXPORT_SYMBOL_GPL(shash_ahash_finup);
static int shash_async_finup(struct ahash_request *req)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
return shash_ahash_finup(req, desc);
}
int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
{
unsigned int nbytes = req->nbytes;
struct scatterlist *sg;
unsigned int offset;
int err;
if (nbytes &&
(sg = req->src, offset = sg->offset,
nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
void *data;
data = kmap_local_page(sg_page(sg));
err = crypto_shash_digest(desc, data + offset, nbytes,
req->result);
kunmap_local(data);
} else
err = crypto_shash_init(desc) ?:
shash_ahash_finup(req, desc);
return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_digest);
static int shash_async_digest(struct ahash_request *req)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
return shash_ahash_digest(req, desc);
}
static int shash_async_export(struct ahash_request *req, void *out)
{
return crypto_shash_export(ahash_request_ctx(req), out);
}
static int shash_async_import(struct ahash_request *req, const void *in)
{
struct crypto_shash **ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct shash_desc *desc = ahash_request_ctx(req);
desc->tfm = *ctx;
return crypto_shash_import(desc, in);
}
static void crypto_exit_shash_ops_async(struct crypto_tfm *tfm)
{
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
crypto_free_shash(*ctx);
}
int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
{
struct crypto_alg *calg = tfm->__crt_alg;
struct shash_alg *alg = __crypto_shash_alg(calg);
struct crypto_ahash *crt = __crypto_ahash_cast(tfm);
struct crypto_shash **ctx = crypto_tfm_ctx(tfm);
struct crypto_shash *shash;
if (!crypto_mod_get(calg))
return -EAGAIN;
shash = crypto_create_tfm(calg, &crypto_shash_type);
if (IS_ERR(shash)) {
crypto_mod_put(calg);
return PTR_ERR(shash);
}
*ctx = shash;
tfm->exit = crypto_exit_shash_ops_async;
crt->init = shash_async_init;
crt->update = shash_async_update;
crt->final = shash_async_final;
crt->finup = shash_async_finup;
crt->digest = shash_async_digest;
if (crypto_shash_alg_has_setkey(alg))
crt->setkey = shash_async_setkey;
crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
CRYPTO_TFM_NEED_KEY);
crt->export = shash_async_export;
crt->import = shash_async_import;
crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash);
return 0;
}
struct crypto_ahash *crypto_clone_shash_ops_async(struct crypto_ahash *nhash,
struct crypto_ahash *hash)
{
struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
struct crypto_shash **ctx = crypto_ahash_ctx(hash);
struct crypto_shash *shash;
shash = crypto_clone_shash(*ctx);
if (IS_ERR(shash)) {
crypto_free_ahash(nhash);
return ERR_CAST(shash);
}
*nctx = shash;
return nhash;
}
static void crypto_shash_exit_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
alg->exit_tfm(hash);
}
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_shash *hash = __crypto_shash_cast(tfm);
struct shash_alg *alg = crypto_shash_alg(hash);
int err;
hash->descsize = alg->descsize;
shash_set_needkey(hash, alg);
if (alg->exit_tfm)
tfm->exit = crypto_shash_exit_tfm;
if (!alg->init_tfm)
return 0;
err = alg->init_tfm(hash);
if (err)
return err;
/* ->init_tfm() may have increased the descsize. */
if (WARN_ON_ONCE(hash->descsize > HASH_MAX_DESCSIZE)) {
if (alg->exit_tfm)
alg->exit_tfm(hash);
return -EINVAL;
}
return 0;
}
static void crypto_shash_free_instance(struct crypto_instance *inst)
{
struct shash_instance *shash = shash_instance(inst);
shash->free(shash);
}
static int __maybe_unused crypto_shash_report(
struct sk_buff *skb, struct crypto_alg *alg)
{
struct crypto_report_hash rhash;
struct shash_alg *salg = __crypto_shash_alg(alg);
memset(&rhash, 0, sizeof(rhash));
strscpy(rhash.type, "shash", sizeof(rhash.type));
rhash.blocksize = alg->cra_blocksize;
rhash.digestsize = salg->digestsize;
return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
__maybe_unused;
static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
{
struct shash_alg *salg = __crypto_shash_alg(alg);
seq_printf(m, "type : shash\n");
seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
seq_printf(m, "digestsize : %u\n", salg->digestsize);
}
static int __maybe_unused crypto_shash_report_stat(
struct sk_buff *skb, struct crypto_alg *alg)
{
return crypto_hash_report_stat(skb, alg, "shash");
}
static const struct crypto_type crypto_shash_type = {
.extsize = crypto_alg_extsize,
.init_tfm = crypto_shash_init_tfm,
.free = crypto_shash_free_instance,
#ifdef CONFIG_PROC_FS
.show = crypto_shash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
.report = crypto_shash_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
.report_stat = crypto_shash_report_stat,
#endif
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
.maskset = CRYPTO_ALG_TYPE_MASK,
.type = CRYPTO_ALG_TYPE_SHASH,
.tfmsize = offsetof(struct crypto_shash, base),
};
int crypto_grab_shash(struct crypto_shash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
spawn->base.frontend = &crypto_shash_type;
return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_shash);
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
u32 mask)
{
return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_shash);
int crypto_has_shash(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_shash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_shash);
struct crypto_shash *crypto_clone_shash(struct crypto_shash *hash)
{
struct crypto_tfm *tfm = crypto_shash_tfm(hash);
struct shash_alg *alg = crypto_shash_alg(hash);
struct crypto_shash *nhash;
int err;
if (!crypto_shash_alg_has_setkey(alg)) {
tfm = crypto_tfm_get(tfm);
if (IS_ERR(tfm))
return ERR_CAST(tfm);
return hash;
}
if (!alg->clone_tfm && (alg->init_tfm || alg->base.cra_init))
return ERR_PTR(-ENOSYS);
nhash = crypto_clone_tfm(&crypto_shash_type, tfm);
if (IS_ERR(nhash))
return nhash;
nhash->descsize = hash->descsize;
if (alg->clone_tfm) {
err = alg->clone_tfm(nhash, hash);
if (err) {
crypto_free_shash(nhash);
return ERR_PTR(err);
}
}
return nhash;
}
EXPORT_SYMBOL_GPL(crypto_clone_shash);
int hash_prepare_alg(struct hash_alg_common *alg)
{
struct crypto_istat_hash *istat = hash_get_stat(alg);
struct crypto_alg *base = &alg->base;
if (alg->digestsize > HASH_MAX_DIGESTSIZE)
return -EINVAL;
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
if (IS_ENABLED(CONFIG_CRYPTO_STATS))
memset(istat, 0, sizeof(*istat));
return 0;
}
static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
int err;
if (alg->descsize > HASH_MAX_DESCSIZE)
return -EINVAL;
if (base->cra_alignmask > MAX_SHASH_ALIGNMASK)
return -EINVAL;
if ((alg->export && !alg->import) || (alg->import && !alg->export))
return -EINVAL;
err = hash_prepare_alg(&alg->halg);
if (err)
return err;
base->cra_type = &crypto_shash_type;
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
if (!alg->finup)
alg->finup = shash_finup_unaligned;
if (!alg->digest)
alg->digest = shash_digest_unaligned;
if (!alg->export) {
alg->export = shash_default_export;
alg->import = shash_default_import;
alg->halg.statesize = alg->descsize;
}
if (!alg->setkey)
alg->setkey = shash_no_setkey;
return 0;
}
int crypto_register_shash(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
int err;
err = shash_prepare_alg(alg);
if (err)
return err;
return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_shash);
void crypto_unregister_shash(struct shash_alg *alg)
{
crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
int crypto_register_shashes(struct shash_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_shash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_shash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);
void crypto_unregister_shashes(struct shash_alg *algs, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_shash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst)
{
int err;
if (WARN_ON(!inst->free))
return -EINVAL;
err = shash_prepare_alg(&inst->alg);
if (err)
return err;
return crypto_register_instance(tmpl, shash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(shash_register_instance);
void shash_free_singlespawn_instance(struct shash_instance *inst)
{
crypto_drop_spawn(shash_instance_ctx(inst));
kfree(inst);
}
EXPORT_SYMBOL_GPL(shash_free_singlespawn_instance);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous cryptographic hash type");
| linux-master | crypto/shash.c |
/*
* Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
* Copyright (c) 2019 Vitaly Chikunov <[email protected]>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <crypto/ecc_curve.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/ecc.h>
#include <asm/unaligned.h>
#include <linux/ratelimit.h>
#include "ecc_curve_defs.h"
typedef struct {
u64 m_low;
u64 m_high;
} uint128_t;
/* Returns the curve25519 curve parameters. */
const struct ecc_curve *ecc_get_curve25519(void)
{
return &ecc_25519;
}
EXPORT_SYMBOL(ecc_get_curve25519);
const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
switch (curve_id) {
/* In FIPS mode only allow P256 and higher */
case ECC_CURVE_NIST_P192:
return fips_enabled ? NULL : &nist_p192;
case ECC_CURVE_NIST_P256:
return &nist_p256;
case ECC_CURVE_NIST_P384:
return &nist_p384;
default:
return NULL;
}
}
EXPORT_SYMBOL(ecc_get_curve);
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
size_t len = ndigits * sizeof(u64);
if (!len)
return NULL;
return kmalloc(len, GFP_KERNEL);
}
static void ecc_free_digits_space(u64 *space)
{
kfree_sensitive(space);
}
struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return NULL;
p->x = ecc_alloc_digits_space(ndigits);
if (!p->x)
goto err_alloc_x;
p->y = ecc_alloc_digits_space(ndigits);
if (!p->y)
goto err_alloc_y;
p->ndigits = ndigits;
return p;
err_alloc_y:
ecc_free_digits_space(p->x);
err_alloc_x:
kfree(p);
return NULL;
}
EXPORT_SYMBOL(ecc_alloc_point);
void ecc_free_point(struct ecc_point *p)
{
if (!p)
return;
kfree_sensitive(p->x);
kfree_sensitive(p->y);
kfree_sensitive(p);
}
EXPORT_SYMBOL(ecc_free_point);
static void vli_clear(u64 *vli, unsigned int ndigits)
{
int i;
for (i = 0; i < ndigits; i++)
vli[i] = 0;
}
/* Returns true if vli == 0, false otherwise. */
bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
int i;
for (i = 0; i < ndigits; i++) {
if (vli[i])
return false;
}
return true;
}
EXPORT_SYMBOL(vli_is_zero);
/* Returns nonzero if bit of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}
static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
{
return vli_test_bit(vli, ndigits * 64 - 1);
}
/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
int i;
/* Search from the end until we find a non-zero digit.
* We do it in reverse because we expect that most digits will
* be nonzero.
*/
for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);
return (i + 1);
}
/* Counts the number of bits required for vli. */
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
unsigned int i, num_digits;
u64 digit;
num_digits = vli_num_digits(vli, ndigits);
if (num_digits == 0)
return 0;
digit = vli[num_digits - 1];
for (i = 0; digit; i++)
digit >>= 1;
return ((num_digits - 1) * 64 + i);
}
EXPORT_SYMBOL(vli_num_bits);
/* Set dest from unaligned bit string src. */
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
{
int i;
const u64 *from = src;
for (i = 0; i < ndigits; i++)
dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
}
EXPORT_SYMBOL(vli_from_be64);
void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
{
int i;
const u64 *from = src;
for (i = 0; i < ndigits; i++)
dest[i] = get_unaligned_le64(&from[i]);
}
EXPORT_SYMBOL(vli_from_le64);
/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
int i;
for (i = 0; i < ndigits; i++)
dest[i] = src[i];
}
/* Returns sign of left - right. */
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
int i;
for (i = ndigits - 1; i >= 0; i--) {
if (left[i] > right[i])
return 1;
else if (left[i] < right[i])
return -1;
}
return 0;
}
EXPORT_SYMBOL(vli_cmp);
/* Computes result = in << shift, returning carry. Can modify in place
* (if result == in). 0 < shift < 64.
*/
static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
unsigned int ndigits)
{
u64 carry = 0;
int i;
for (i = 0; i < ndigits; i++) {
u64 temp = in[i];
result[i] = (temp << shift) | carry;
carry = temp >> (64 - shift);
}
return carry;
}
/* Computes vli = vli >> 1. */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
u64 *end = vli;
u64 carry = 0;
vli += ndigits;
while (vli-- > end) {
u64 temp = *vli;
*vli = (temp >> 1) | carry;
carry = temp << 63;
}
}
/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
unsigned int ndigits)
{
u64 carry = 0;
int i;
for (i = 0; i < ndigits; i++) {
u64 sum;
sum = left[i] + right[i] + carry;
if (sum != left[i])
carry = (sum < left[i]);
result[i] = sum;
}
return carry;
}
/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
unsigned int ndigits)
{
u64 carry = right;
int i;
for (i = 0; i < ndigits; i++) {
u64 sum;
sum = left[i] + carry;
if (sum != left[i])
carry = (sum < left[i]);
else
carry = !!carry;
result[i] = sum;
}
return carry;
}
/* Computes result = left - right, returning borrow. Can modify in place. */
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
unsigned int ndigits)
{
u64 borrow = 0;
int i;
for (i = 0; i < ndigits; i++) {
u64 diff;
diff = left[i] - right[i] - borrow;
if (diff != left[i])
borrow = (diff > left[i]);
result[i] = diff;
}
return borrow;
}
EXPORT_SYMBOL(vli_sub);
/* Computes result = left - right, returning borrow. Can modify in place. */
static u64 vli_usub(u64 *result, const u64 *left, u64 right,
unsigned int ndigits)
{
u64 borrow = right;
int i;
for (i = 0; i < ndigits; i++) {
u64 diff;
diff = left[i] - borrow;
if (diff != left[i])
borrow = (diff > left[i]);
result[i] = diff;
}
return borrow;
}
static uint128_t mul_64_64(u64 left, u64 right)
{
uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
unsigned __int128 m = (unsigned __int128)left * right;
result.m_low = m;
result.m_high = m >> 64;
#else
u64 a0 = left & 0xffffffffull;
u64 a1 = left >> 32;
u64 b0 = right & 0xffffffffull;
u64 b1 = right >> 32;
u64 m0 = a0 * b0;
u64 m1 = a0 * b1;
u64 m2 = a1 * b0;
u64 m3 = a1 * b1;
m2 += (m0 >> 32);
m2 += m1;
/* Overflow */
if (m2 < m1)
m3 += 0x100000000ull;
result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
result.m_high = m3 + (m2 >> 32);
#endif
return result;
}
static uint128_t add_128_128(uint128_t a, uint128_t b)
{
uint128_t result;
result.m_low = a.m_low + b.m_low;
result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);
return result;
}
static void vli_mult(u64 *result, const u64 *left, const u64 *right,
unsigned int ndigits)
{
uint128_t r01 = { 0, 0 };
u64 r2 = 0;
unsigned int i, k;
/* Compute each digit of result in sequence, maintaining the
* carries.
*/
for (k = 0; k < ndigits * 2 - 1; k++) {
unsigned int min;
if (k < ndigits)
min = 0;
else
min = (k + 1) - ndigits;
for (i = min; i <= k && i < ndigits; i++) {
uint128_t product;
product = mul_64_64(left[i], right[k - i]);
r01 = add_128_128(r01, product);
r2 += (r01.m_high < product.m_high);
}
result[k] = r01.m_low;
r01.m_low = r01.m_high;
r01.m_high = r2;
r2 = 0;
}
result[ndigits * 2 - 1] = r01.m_low;
}
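/*
 * vli_mult() is column-wise schoolbook multiplication: for each output
 * digit k it accumulates the 128-bit partial products
 * left[i] * right[k - i] in (r2, r01), emits the low 64 bits, and
 * shifts the accumulator down one digit.
 */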
/* Compute product = left * right, for a small right value. */
static void vli_umult(u64 *result, const u64 *left, u32 right,
unsigned int ndigits)
{
uint128_t r01 = { 0 };
unsigned int k;
for (k = 0; k < ndigits; k++) {
uint128_t product;
product = mul_64_64(left[k], right);
r01 = add_128_128(r01, product);
/* no carry */
result[k] = r01.m_low;
r01.m_low = r01.m_high;
r01.m_high = 0;
}
result[k] = r01.m_low;
for (++k; k < ndigits * 2; k++)
result[k] = 0;
}
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
uint128_t r01 = { 0, 0 };
u64 r2 = 0;
int i, k;
for (k = 0; k < ndigits * 2 - 1; k++) {
unsigned int min;
if (k < ndigits)
min = 0;
else
min = (k + 1) - ndigits;
for (i = min; i <= k && i <= k - i; i++) {
uint128_t product;
product = mul_64_64(left[i], left[k - i]);
if (i < k - i) {
r2 += product.m_high >> 63;
product.m_high = (product.m_high << 1) |
(product.m_low >> 63);
product.m_low <<= 1;
}
r01 = add_128_128(r01, product);
r2 += (r01.m_high < product.m_high);
}
result[k] = r01.m_low;
r01.m_low = r01.m_high;
r01.m_high = r2;
r2 = 0;
}
result[ndigits * 2 - 1] = r01.m_low;
}
/* Computes result = (left + right) % mod.
* Assumes that left < mod and right < mod, result != mod.
*/
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
const u64 *mod, unsigned int ndigits)
{
u64 carry;
carry = vli_add(result, left, right, ndigits);
/* result > mod (result = mod + remainder), so subtract mod to
* get remainder.
*/
if (carry || vli_cmp(result, mod, ndigits) >= 0)
vli_sub(result, result, mod, ndigits);
}
/* Computes result = (left - right) % mod.
* Assumes that left < mod and right < mod, result != mod.
*/
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
const u64 *mod, unsigned int ndigits)
{
u64 borrow = vli_sub(result, left, right, ndigits);
/* In this case, p_result == -diff == (max int) - diff.
* Since -x % d == d - x, we can get the correct result from
* result + mod (with overflow).
*/
if (borrow)
vli_add(result, result, mod, ndigits);
}
/*
* Computes result = product % mod
* for special form moduli: p = 2^k-c, for small c (note the minus sign)
*
* References:
* R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
* 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
* Algorithm 9.2.13 (Fast mod operation for special-form moduli).
*/
static void vli_mmod_special(u64 *result, const u64 *product,
const u64 *mod, unsigned int ndigits)
{
u64 c = -mod[0];
u64 t[ECC_MAX_DIGITS * 2];
u64 r[ECC_MAX_DIGITS * 2];
vli_set(r, product, ndigits * 2);
while (!vli_is_zero(r + ndigits, ndigits)) {
vli_umult(t, r + ndigits, c, ndigits);
vli_clear(r + ndigits, ndigits);
vli_add(r, r, t, ndigits * 2);
}
vli_set(t, mod, ndigits);
vli_clear(t + ndigits, ndigits);
while (vli_cmp(r, t, ndigits * 2) >= 0)
vli_sub(r, r, t, ndigits * 2);
vli_set(result, r, ndigits);
}
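/*
 * Folding rationale: for p = 2^k - c we have 2^k = c (mod p), so the
 * double-width value hi * 2^k + lo reduces to hi * c + lo; the first
 * loop above repeats this until the high half is zero, and the second
 * subtracts p until the result is fully reduced.
 */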
/*
* Computes result = product % mod
* for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
 * where k-1 falls one bit short of a qword boundary (such as k-1 = 255).
* References (loosely based on):
* A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
* 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
* URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
*
* H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
* Handbook of Elliptic and Hyperelliptic Curve Cryptography.
* Algorithm 10.25 Fast reduction for special form moduli
*/
static void vli_mmod_special2(u64 *result, const u64 *product,
const u64 *mod, unsigned int ndigits)
{
u64 c2 = mod[0] * 2;
u64 q[ECC_MAX_DIGITS];
u64 r[ECC_MAX_DIGITS * 2];
u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
int carry; /* last bit that doesn't fit into q */
int i;
vli_set(m, mod, ndigits);
vli_clear(m + ndigits, ndigits);
vli_set(r, product, ndigits);
/* q and carry are top bits */
vli_set(q, product + ndigits, ndigits);
vli_clear(r + ndigits, ndigits);
carry = vli_is_negative(r, ndigits);
if (carry)
r[ndigits - 1] &= (1ull << 63) - 1;
for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
u64 qc[ECC_MAX_DIGITS * 2];
vli_umult(qc, q, c2, ndigits);
if (carry)
vli_uadd(qc, qc, mod[0], ndigits * 2);
vli_set(q, qc + ndigits, ndigits);
vli_clear(qc + ndigits, ndigits);
carry = vli_is_negative(qc, ndigits);
if (carry)
qc[ndigits - 1] &= (1ull << 63) - 1;
if (i & 1)
vli_sub(r, r, qc, ndigits * 2);
else
vli_add(r, r, qc, ndigits * 2);
}
while (vli_is_negative(r, ndigits * 2))
vli_add(r, r, m, ndigits * 2);
while (vli_cmp(r, m, ndigits * 2) >= 0)
vli_sub(r, r, m, ndigits * 2);
vli_set(result, r, ndigits);
}
/*
* Computes result = product % mod, where product is 2N words long.
* Reference: Ken MacKay's micro-ecc.
* Currently only designed to work for curve_p or curve_n.
*/
static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
unsigned int ndigits)
{
u64 mod_m[2 * ECC_MAX_DIGITS];
u64 tmp[2 * ECC_MAX_DIGITS];
u64 *v[2] = { tmp, product };
u64 carry = 0;
unsigned int i;
/* Shift mod so its highest set bit is at the maximum position. */
int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
int word_shift = shift / 64;
int bit_shift = shift % 64;
vli_clear(mod_m, word_shift);
if (bit_shift > 0) {
for (i = 0; i < ndigits; ++i) {
mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
carry = mod[i] >> (64 - bit_shift);
}
} else
vli_set(mod_m + word_shift, mod, ndigits);
for (i = 1; shift >= 0; --shift) {
u64 borrow = 0;
unsigned int j;
for (j = 0; j < ndigits * 2; ++j) {
u64 diff = v[i][j] - mod_m[j] - borrow;
if (diff != v[i][j])
borrow = (diff > v[i][j]);
v[1 - i][j] = diff;
}
i = !(i ^ borrow); /* Swap the index if there was no borrow */
vli_rshift1(mod_m, ndigits);
mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
vli_rshift1(mod_m + ndigits, ndigits);
}
vli_set(result, v[i], ndigits);
}
/* Computes result = product % mod using Barrett's reduction with a
 * precomputed value mu appended to mod after ndigits, where
 * mu = (2^{2w} / mod) has length ndigits + 1 and mu * (2^w - 1) must
 * not overflow the ndigits boundary.
*
* Reference:
* R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
* 2.4.1 Barrett's algorithm. Algorithm 2.5.
*/
static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
unsigned int ndigits)
{
u64 q[ECC_MAX_DIGITS * 2];
u64 r[ECC_MAX_DIGITS * 2];
const u64 *mu = mod + ndigits;
vli_mult(q, product + ndigits, mu, ndigits);
if (mu[ndigits])
vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
vli_mult(r, mod, q + ndigits, ndigits);
vli_sub(r, product, r, ndigits * 2);
while (!vli_is_zero(r + ndigits, ndigits) ||
vli_cmp(r, mod, ndigits) != -1) {
u64 carry;
carry = vli_sub(r, r, mod, ndigits);
vli_usub(r + ndigits, r + ndigits, carry, ndigits);
}
vli_set(result, r, ndigits);
}
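/*
 * Barrett sketch: q = floor(product / 2^w) * mu / 2^w approximates
 * floor(product / mod) from below, so r = product - q * mod exceeds
 * the true remainder by only a small multiple of mod, and the loop
 * above finishes with a few conditional subtractions.
 */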
/* Computes result = product % curve_prime.
* See algorithm 5 and 6 from
* http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
*/
static void vli_mmod_fast_192(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
const unsigned int ndigits = 3;
int carry;
vli_set(result, product, ndigits);
vli_set(tmp, &product[3], ndigits);
carry = vli_add(result, result, tmp, ndigits);
tmp[0] = 0;
tmp[1] = product[3];
tmp[2] = product[4];
carry += vli_add(result, result, tmp, ndigits);
tmp[0] = tmp[1] = product[5];
tmp[2] = 0;
carry += vli_add(result, result, tmp, ndigits);
while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
carry -= vli_sub(result, result, curve_prime, ndigits);
}
/* Computes result = product % curve_prime
* from http://www.nsa.gov/ia/_files/nist-routines.pdf
*/
static void vli_mmod_fast_256(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
const unsigned int ndigits = 4;
/* t */
vli_set(result, product, ndigits);
/* s1 */
tmp[0] = 0;
tmp[1] = product[5] & 0xffffffff00000000ull;
tmp[2] = product[6];
tmp[3] = product[7];
carry = vli_lshift(tmp, tmp, 1, ndigits);
carry += vli_add(result, result, tmp, ndigits);
/* s2 */
tmp[1] = product[6] << 32;
tmp[2] = (product[6] >> 32) | (product[7] << 32);
tmp[3] = product[7] >> 32;
carry += vli_lshift(tmp, tmp, 1, ndigits);
carry += vli_add(result, result, tmp, ndigits);
/* s3 */
tmp[0] = product[4];
tmp[1] = product[5] & 0xffffffff;
tmp[2] = 0;
tmp[3] = product[7];
carry += vli_add(result, result, tmp, ndigits);
/* s4 */
tmp[0] = (product[4] >> 32) | (product[5] << 32);
tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
tmp[2] = product[7];
tmp[3] = (product[6] >> 32) | (product[4] << 32);
carry += vli_add(result, result, tmp, ndigits);
/* d1 */
tmp[0] = (product[5] >> 32) | (product[6] << 32);
tmp[1] = (product[6] >> 32);
tmp[2] = 0;
tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
carry -= vli_sub(result, result, tmp, ndigits);
/* d2 */
tmp[0] = product[6];
tmp[1] = product[7];
tmp[2] = 0;
tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
carry -= vli_sub(result, result, tmp, ndigits);
/* d3 */
tmp[0] = (product[6] >> 32) | (product[7] << 32);
tmp[1] = (product[7] >> 32) | (product[4] << 32);
tmp[2] = (product[4] >> 32) | (product[5] << 32);
tmp[3] = (product[6] << 32);
carry -= vli_sub(result, result, tmp, ndigits);
/* d4 */
tmp[0] = product[7];
tmp[1] = product[4] & 0xffffffff00000000ull;
tmp[2] = product[5];
tmp[3] = product[6] & 0xffffffff00000000ull;
carry -= vli_sub(result, result, tmp, ndigits);
if (carry < 0) {
do {
carry += vli_add(result, result, curve_prime, ndigits);
} while (carry < 0);
} else {
while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
carry -= vli_sub(result, result, curve_prime, ndigits);
}
}
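/*
 * The s1..s4 and d1..d4 terms above follow the NIST fast-reduction
 * recipe for P-256, result = t + 2*s1 + 2*s2 + s3 + s4 - d1 - d2 - d3
 * - d4 (mod p), with the final carry loops folding any accumulated
 * over- or underflow back into range.
 */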
#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32)
#define AND64H(x64) (x64 & 0xffFFffFF00000000ull)
#define AND64L(x64) (x64 & 0x00000000ffFFffFFull)
/* Computes result = product % curve_prime
* from "Mathematical routines for the NIST prime elliptic curves"
*/
static void vli_mmod_fast_384(u64 *result, const u64 *product,
const u64 *curve_prime, u64 *tmp)
{
int carry;
const unsigned int ndigits = 6;
/* t */
vli_set(result, product, ndigits);
/* s1 */
tmp[0] = 0; // 0 || 0
tmp[1] = 0; // 0 || 0
tmp[2] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
tmp[3] = product[11]>>32; // 0 ||a23
tmp[4] = 0; // 0 || 0
tmp[5] = 0; // 0 || 0
carry = vli_lshift(tmp, tmp, 1, ndigits);
carry += vli_add(result, result, tmp, ndigits);
/* s2 */
tmp[0] = product[6]; //a13||a12
tmp[1] = product[7]; //a15||a14
tmp[2] = product[8]; //a17||a16
tmp[3] = product[9]; //a19||a18
tmp[4] = product[10]; //a21||a20
tmp[5] = product[11]; //a23||a22
carry += vli_add(result, result, tmp, ndigits);
/* s3 */
tmp[0] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
tmp[1] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
tmp[2] = SL32OR32(product[7], (product[6])>>32); //a14||a13
tmp[3] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
tmp[4] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
tmp[5] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
carry += vli_add(result, result, tmp, ndigits);
/* s4 */
tmp[0] = AND64H(product[11]); //a23|| 0
tmp[1] = (product[10]<<32); //a20|| 0
tmp[2] = product[6]; //a13||a12
tmp[3] = product[7]; //a15||a14
tmp[4] = product[8]; //a17||a16
tmp[5] = product[9]; //a19||a18
carry += vli_add(result, result, tmp, ndigits);
/* s5 */
tmp[0] = 0; // 0|| 0
tmp[1] = 0; // 0|| 0
tmp[2] = product[10]; //a21||a20
tmp[3] = product[11]; //a23||a22
tmp[4] = 0; // 0|| 0
tmp[5] = 0; // 0|| 0
carry += vli_add(result, result, tmp, ndigits);
/* s6 */
tmp[0] = AND64L(product[10]); // 0 ||a20
tmp[1] = AND64H(product[10]); //a21|| 0
tmp[2] = product[11]; //a23||a22
tmp[3] = 0; // 0 || 0
tmp[4] = 0; // 0 || 0
tmp[5] = 0; // 0 || 0
carry += vli_add(result, result, tmp, ndigits);
/* d1 */
tmp[0] = SL32OR32(product[6], (product[11]>>32)); //a12||a23
tmp[1] = SL32OR32(product[7], (product[6]>>32)); //a14||a13
tmp[2] = SL32OR32(product[8], (product[7]>>32)); //a16||a15
tmp[3] = SL32OR32(product[9], (product[8]>>32)); //a18||a17
tmp[4] = SL32OR32(product[10], (product[9]>>32)); //a20||a19
tmp[5] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
carry -= vli_sub(result, result, tmp, ndigits);
/* d2 */
tmp[0] = (product[10]<<32); //a20|| 0
tmp[1] = SL32OR32(product[11], (product[10]>>32)); //a22||a21
tmp[2] = (product[11]>>32); // 0 ||a23
tmp[3] = 0; // 0 || 0
tmp[4] = 0; // 0 || 0
tmp[5] = 0; // 0 || 0
carry -= vli_sub(result, result, tmp, ndigits);
/* d3 */
tmp[0] = 0; // 0 || 0
tmp[1] = AND64H(product[11]); //a23|| 0
tmp[2] = product[11]>>32; // 0 ||a23
tmp[3] = 0; // 0 || 0
tmp[4] = 0; // 0 || 0
tmp[5] = 0; // 0 || 0
carry -= vli_sub(result, result, tmp, ndigits);
if (carry < 0) {
do {
carry += vli_add(result, result, curve_prime, ndigits);
} while (carry < 0);
} else {
while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
carry -= vli_sub(result, result, curve_prime, ndigits);
}
}
#undef SL32OR32
#undef AND64H
#undef AND64L
/* Computes result = product % curve_prime for different curve_primes.
*
 * Note that the curve primes are distinguished only by a heuristic
 * check, not by a complete conformance check.
*/
static bool vli_mmod_fast(u64 *result, u64 *product,
const struct ecc_curve *curve)
{
u64 tmp[2 * ECC_MAX_DIGITS];
const u64 *curve_prime = curve->p;
const unsigned int ndigits = curve->g.ndigits;
/* All NIST curves have name prefix 'nist_' */
if (strncmp(curve->name, "nist_", 5) != 0) {
/* Try to handle pseudo-Mersenne primes. */
if (curve_prime[ndigits - 1] == -1ull) {
vli_mmod_special(result, product, curve_prime,
ndigits);
return true;
} else if (curve_prime[ndigits - 1] == 1ull << 63 &&
curve_prime[ndigits - 2] == 0) {
vli_mmod_special2(result, product, curve_prime,
ndigits);
return true;
}
vli_mmod_barrett(result, product, curve_prime, ndigits);
return true;
}
switch (ndigits) {
case 3:
vli_mmod_fast_192(result, product, curve_prime, tmp);
break;
case 4:
vli_mmod_fast_256(result, product, curve_prime, tmp);
break;
case 6:
vli_mmod_fast_384(result, product, curve_prime, tmp);
break;
default:
pr_err_ratelimited("ecc: unsupported digits size!\n");
return false;
}
return true;
}
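/*
 * Dispatch example (illustrative): a NIST curve with ndigits == 4 is
 * reduced by vli_mmod_fast_256() above, while a non-NIST curve whose
 * prime has an all-ones top digit (a pseudo-Mersenne prime of the form
 * 2^k - c with small c, e.g. the secp256k1 prime 2^256 - 2^32 - 977)
 * would route to vli_mmod_special().
 */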
/* Computes result = (left * right) % mod.
* Assumes that mod is large enough, e.g. a curve order.
*/
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
const u64 *mod, unsigned int ndigits)
{
u64 product[ECC_MAX_DIGITS * 2];
vli_mult(product, left, right, ndigits);
vli_mmod_slow(result, product, mod, ndigits);
}
EXPORT_SYMBOL(vli_mod_mult_slow);
/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
const struct ecc_curve *curve)
{
u64 product[2 * ECC_MAX_DIGITS];
vli_mult(product, left, right, curve->g.ndigits);
vli_mmod_fast(result, product, curve);
}
/* Computes result = left^2 % curve_prime. */
static void vli_mod_square_fast(u64 *result, const u64 *left,
const struct ecc_curve *curve)
{
u64 product[2 * ECC_MAX_DIGITS];
vli_square(product, left, curve->g.ndigits);
vli_mmod_fast(result, product, curve);
}
#define EVEN(vli) (!(vli[0] & 1))
/* Computes result = (1 / input) % mod. All VLIs are the same size.
* See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
* https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
*/
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
unsigned int ndigits)
{
u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
u64 u[ECC_MAX_DIGITS], v[ECC_MAX_DIGITS];
u64 carry;
int cmp_result;
if (vli_is_zero(input, ndigits)) {
vli_clear(result, ndigits);
return;
}
vli_set(a, input, ndigits);
vli_set(b, mod, ndigits);
vli_clear(u, ndigits);
u[0] = 1;
vli_clear(v, ndigits);
while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
carry = 0;
if (EVEN(a)) {
vli_rshift1(a, ndigits);
if (!EVEN(u))
carry = vli_add(u, u, mod, ndigits);
vli_rshift1(u, ndigits);
if (carry)
u[ndigits - 1] |= 0x8000000000000000ull;
} else if (EVEN(b)) {
vli_rshift1(b, ndigits);
if (!EVEN(v))
carry = vli_add(v, v, mod, ndigits);
vli_rshift1(v, ndigits);
if (carry)
v[ndigits - 1] |= 0x8000000000000000ull;
} else if (cmp_result > 0) {
vli_sub(a, a, b, ndigits);
vli_rshift1(a, ndigits);
if (vli_cmp(u, v, ndigits) < 0)
vli_add(u, u, mod, ndigits);
vli_sub(u, u, v, ndigits);
if (!EVEN(u))
carry = vli_add(u, u, mod, ndigits);
vli_rshift1(u, ndigits);
if (carry)
u[ndigits - 1] |= 0x8000000000000000ull;
} else {
vli_sub(b, b, a, ndigits);
vli_rshift1(b, ndigits);
if (vli_cmp(v, u, ndigits) < 0)
vli_add(v, v, mod, ndigits);
vli_sub(v, v, u, ndigits);
if (!EVEN(v))
carry = vli_add(v, v, mod, ndigits);
vli_rshift1(v, ndigits);
if (carry)
v[ndigits - 1] |= 0x8000000000000000ull;
}
}
vli_set(result, u, ndigits);
}
EXPORT_SYMBOL(vli_mod_inv);
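/*
 * Worked example (illustrative; values chosen by hand): with ndigits == 1,
 * input == 3 and mod == 7, the loop above converges to result == 5,
 * since 3 * 5 = 15 = 2 * 7 + 1, i.e. 3 * 5 == 1 (mod 7):
 *
 *	u64 inv[1], three[1] = { 3 }, seven[1] = { 7 };
 *
 *	vli_mod_inv(inv, three, seven, 1);	// inv[0] == 5
 */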
/* ------ Point operations ------ */
/* Returns true if p_point is the point at infinity, false otherwise. */
bool ecc_point_is_zero(const struct ecc_point *point)
{
return (vli_is_zero(point->x, point->ndigits) &&
vli_is_zero(point->y, point->ndigits));
}
EXPORT_SYMBOL(ecc_point_is_zero);
/* Point multiplication algorithm using Montgomery's ladder with co-Z
* coordinates. From https://eprint.iacr.org/2011/338.pdf
*/
/* Double in place */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
const struct ecc_curve *curve)
{
/* t1 = x, t2 = y, t3 = z */
u64 t4[ECC_MAX_DIGITS];
u64 t5[ECC_MAX_DIGITS];
const u64 *curve_prime = curve->p;
const unsigned int ndigits = curve->g.ndigits;
if (vli_is_zero(z1, ndigits))
return;
/* t4 = y1^2 */
vli_mod_square_fast(t4, y1, curve);
/* t5 = x1*y1^2 = A */
vli_mod_mult_fast(t5, x1, t4, curve);
/* t4 = y1^4 */
vli_mod_square_fast(t4, t4, curve);
/* t2 = y1*z1 = z3 */
vli_mod_mult_fast(y1, y1, z1, curve);
/* t3 = z1^2 */
vli_mod_square_fast(z1, z1, curve);
/* t1 = x1 + z1^2 */
vli_mod_add(x1, x1, z1, curve_prime, ndigits);
/* t3 = 2*z1^2 */
vli_mod_add(z1, z1, z1, curve_prime, ndigits);
/* t3 = x1 - z1^2 */
vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
/* t1 = x1^2 - z1^4 */
vli_mod_mult_fast(x1, x1, z1, curve);
/* t3 = 2*(x1^2 - z1^4) */
vli_mod_add(z1, x1, x1, curve_prime, ndigits);
/* t1 = 3*(x1^2 - z1^4) */
vli_mod_add(x1, x1, z1, curve_prime, ndigits);
if (vli_test_bit(x1, 0)) {
u64 carry = vli_add(x1, x1, curve_prime, ndigits);
vli_rshift1(x1, ndigits);
x1[ndigits - 1] |= carry << 63;
} else {
vli_rshift1(x1, ndigits);
}
/* t1 = 3/2*(x1^2 - z1^4) = B */
/* t3 = B^2 */
vli_mod_square_fast(z1, x1, curve);
/* t3 = B^2 - A */
vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
/* t3 = B^2 - 2A = x3 */
vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
/* t5 = A - x3 */
vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
/* t1 = B * (A - x3) */
vli_mod_mult_fast(x1, x1, t5, curve);
/* t4 = B * (A - x3) - y1^4 = y3 */
vli_mod_sub(t4, x1, t4, curve_prime, ndigits);
vli_set(x1, z1, ndigits);
vli_set(z1, y1, ndigits);
vli_set(y1, t4, ndigits);
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve)
{
u64 t1[ECC_MAX_DIGITS];
vli_mod_square_fast(t1, z, curve); /* z^2 */
vli_mod_mult_fast(x1, x1, t1, curve); /* x1 * z^2 */
vli_mod_mult_fast(t1, t1, z, curve); /* z^3 */
vli_mod_mult_fast(y1, y1, t1, curve); /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
u64 *p_initial_z, const struct ecc_curve *curve)
{
u64 z[ECC_MAX_DIGITS];
const unsigned int ndigits = curve->g.ndigits;
vli_set(x2, x1, ndigits);
vli_set(y2, y1, ndigits);
vli_clear(z, ndigits);
z[0] = 1;
if (p_initial_z)
vli_set(z, p_initial_z, ndigits);
apply_z(x1, y1, z, curve);
ecc_point_double_jacobian(x1, y1, z, curve);
apply_z(x2, y2, z, curve);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
* Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
* or P => P', Q => P + Q
*/
static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
const struct ecc_curve *curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ECC_MAX_DIGITS];
const u64 *curve_prime = curve->p;
const unsigned int ndigits = curve->g.ndigits;
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
/* t5 = (x2 - x1)^2 = A */
vli_mod_square_fast(t5, t5, curve);
/* t1 = x1*A = B */
vli_mod_mult_fast(x1, x1, t5, curve);
/* t3 = x2*A = C */
vli_mod_mult_fast(x2, x2, t5, curve);
/* t4 = y2 - y1 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
/* t5 = (y2 - y1)^2 = D */
vli_mod_square_fast(t5, y2, curve);
/* t5 = D - B */
vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
/* t5 = D - B - C = x3 */
vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
/* t3 = C - B */
vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
/* t2 = y1*(C - B) */
vli_mod_mult_fast(y1, y1, x2, curve);
/* t3 = B - x3 */
vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
/* t4 = (y2 - y1)*(B - x3) */
vli_mod_mult_fast(y2, y2, x2, curve);
/* t4 = y3 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
vli_set(x2, t5, ndigits);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
* Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
* or P => P - Q, Q => P + Q
*/
static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
const struct ecc_curve *curve)
{
/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
u64 t5[ECC_MAX_DIGITS];
u64 t6[ECC_MAX_DIGITS];
u64 t7[ECC_MAX_DIGITS];
const u64 *curve_prime = curve->p;
const unsigned int ndigits = curve->g.ndigits;
/* t5 = x2 - x1 */
vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
/* t5 = (x2 - x1)^2 = A */
vli_mod_square_fast(t5, t5, curve);
/* t1 = x1*A = B */
vli_mod_mult_fast(x1, x1, t5, curve);
/* t3 = x2*A = C */
vli_mod_mult_fast(x2, x2, t5, curve);
/* t4 = y2 + y1 */
vli_mod_add(t5, y2, y1, curve_prime, ndigits);
/* t4 = y2 - y1 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
/* t6 = C - B */
vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
/* t2 = y1 * (C - B) */
vli_mod_mult_fast(y1, y1, t6, curve);
/* t6 = B + C */
vli_mod_add(t6, x1, x2, curve_prime, ndigits);
/* t3 = (y2 - y1)^2 */
vli_mod_square_fast(x2, y2, curve);
/* t3 = x3 */
vli_mod_sub(x2, x2, t6, curve_prime, ndigits);
/* t7 = B - x3 */
vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
/* t4 = (y2 - y1)*(B - x3) */
vli_mod_mult_fast(y2, y2, t7, curve);
/* t4 = y3 */
vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
/* t7 = (y2 + y1)^2 = F */
vli_mod_square_fast(t7, t5, curve);
/* t7 = x3' */
vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
/* t6 = x3' - B */
vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
/* t6 = (y2 + y1)*(x3' - B) */
vli_mod_mult_fast(t6, t6, t5, curve);
/* t2 = y3' */
vli_mod_sub(y1, t6, y1, curve_prime, ndigits);
vli_set(x1, t7, ndigits);
}
static void ecc_point_mult(struct ecc_point *result,
const struct ecc_point *point, const u64 *scalar,
u64 *initial_z, const struct ecc_curve *curve,
unsigned int ndigits)
{
/* R0 and R1 */
u64 rx[2][ECC_MAX_DIGITS];
u64 ry[2][ECC_MAX_DIGITS];
u64 z[ECC_MAX_DIGITS];
u64 sk[2][ECC_MAX_DIGITS];
u64 *curve_prime = curve->p;
int i, nb;
int num_bits;
int carry;
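/*
 * Regularize the scalar so the ladder below always walks a fixed number
 * of bits: for the supported curves, exactly one of scalar + n and
 * scalar + 2n has bit length 64 * ndigits + 1, and that one is selected.
 * The loop length is thus independent of the scalar value.
 */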
carry = vli_add(sk[0], scalar, curve->n, ndigits);
vli_add(sk[1], sk[0], curve->n, ndigits);
scalar = sk[!carry];
num_bits = sizeof(u64) * ndigits * 8 + 1;
vli_set(rx[1], point->x, ndigits);
vli_set(ry[1], point->y, ndigits);
xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve);
for (i = num_bits - 2; i > 0; i--) {
nb = !vli_test_bit(scalar, i);
xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
}
nb = !vli_test_bit(scalar, 0);
xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
/* Find final 1/Z value. */
/* X1 - X0 */
vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
/* Yb * (X1 - X0) */
vli_mod_mult_fast(z, z, ry[1 - nb], curve);
/* xP * Yb * (X1 - X0) */
vli_mod_mult_fast(z, z, point->x, curve);
/* 1 / (xP * Yb * (X1 - X0)) */
vli_mod_inv(z, z, curve_prime, point->ndigits);
/* yP / (xP * Yb * (X1 - X0)) */
vli_mod_mult_fast(z, z, point->y, curve);
/* Xb * yP / (xP * Yb * (X1 - X0)) */
vli_mod_mult_fast(z, z, rx[1 - nb], curve);
/* End 1/Z calculation */
xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
apply_z(rx[0], ry[0], z, curve);
vli_set(result->x, rx[0], ndigits);
vli_set(result->y, ry[0], ndigits);
}
/* Computes R = P + Q mod p */
static void ecc_point_add(const struct ecc_point *result,
const struct ecc_point *p, const struct ecc_point *q,
const struct ecc_curve *curve)
{
u64 z[ECC_MAX_DIGITS];
u64 px[ECC_MAX_DIGITS];
u64 py[ECC_MAX_DIGITS];
unsigned int ndigits = curve->g.ndigits;
vli_set(result->x, q->x, ndigits);
vli_set(result->y, q->y, ndigits);
vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
vli_set(px, p->x, ndigits);
vli_set(py, p->y, ndigits);
xycz_add(px, py, result->x, result->y, curve);
vli_mod_inv(z, z, curve->p, ndigits);
apply_z(result->x, result->y, z, curve);
}
/* Computes R = u1P + u2Q mod p using Shamir's trick.
* Based on: Kenneth MacKay's micro-ecc (2014).
*/
void ecc_point_mult_shamir(const struct ecc_point *result,
const u64 *u1, const struct ecc_point *p,
const u64 *u2, const struct ecc_point *q,
const struct ecc_curve *curve)
{
u64 z[ECC_MAX_DIGITS];
u64 sump[2][ECC_MAX_DIGITS];
u64 *rx = result->x;
u64 *ry = result->y;
unsigned int ndigits = curve->g.ndigits;
unsigned int num_bits;
struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
const struct ecc_point *points[4];
const struct ecc_point *point;
unsigned int idx;
int i;
ecc_point_add(&sum, p, q, curve);
points[0] = NULL;
points[1] = p;
points[2] = q;
points[3] = ∑
num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
i = num_bits - 1;
idx = !!vli_test_bit(u1, i);
idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
vli_set(rx, point->x, ndigits);
vli_set(ry, point->y, ndigits);
vli_clear(z + 1, ndigits - 1);
z[0] = 1;
for (--i; i >= 0; i--) {
ecc_point_double_jacobian(rx, ry, z, curve);
idx = !!vli_test_bit(u1, i);
idx |= (!!vli_test_bit(u2, i)) << 1;
point = points[idx];
if (point) {
u64 tx[ECC_MAX_DIGITS];
u64 ty[ECC_MAX_DIGITS];
u64 tz[ECC_MAX_DIGITS];
vli_set(tx, point->x, ndigits);
vli_set(ty, point->y, ndigits);
apply_z(tx, ty, z, curve);
vli_mod_sub(tz, rx, tx, curve->p, ndigits);
xycz_add(tx, ty, rx, ry, curve);
vli_mod_mult_fast(z, z, tz, curve);
}
}
vli_mod_inv(z, z, curve->p, ndigits);
apply_z(rx, ry, z, curve);
}
EXPORT_SYMBOL(ecc_point_mult_shamir);
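/*
 * Illustrative use (sketch; u1, u2 and pub are placeholder names): an
 * ECDSA-style verifier computes u1*G + u2*Q in one pass instead of two
 * full point multiplications:
 *
 *	u64 rx[ECC_MAX_DIGITS], ry[ECC_MAX_DIGITS];
 *	struct ecc_point res = ECC_POINT_INIT(rx, ry, curve->g.ndigits);
 *
 *	ecc_point_mult_shamir(&res, u1, &curve->g, u2, &pub, curve);
 */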
static int __ecc_is_key_valid(const struct ecc_curve *curve,
const u64 *private_key, unsigned int ndigits)
{
u64 one[ECC_MAX_DIGITS] = { 1, };
u64 res[ECC_MAX_DIGITS];
if (!private_key)
return -EINVAL;
if (curve->g.ndigits != ndigits)
return -EINVAL;
/* Make sure the private key is in the range [2, n-3]. */
if (vli_cmp(one, private_key, ndigits) != -1)
return -EINVAL;
vli_sub(res, curve->n, one, ndigits);
vli_sub(res, res, one, ndigits);
if (vli_cmp(res, private_key, ndigits) != 1)
return -EINVAL;
return 0;
}
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, unsigned int private_key_len)
{
int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
if (private_key_len != nbytes)
return -EINVAL;
return __ecc_is_key_valid(curve, private_key, ndigits);
}
EXPORT_SYMBOL(ecc_is_key_valid);
/*
* ECC private keys are generated using the method of extra random bits,
* equivalent to that described in FIPS 186-4, Appendix B.4.1.
*
* d = (c mod (n-1)) + 1 where c is a string of random bits, 64 bits longer
* than requested
* 0 <= c mod (n-1) <= n-2, which implies that
* 1 <= d <= n-1
*
* This method generates a private key uniformly distributed in the range
* [1, n-1].
*/
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
const struct ecc_curve *curve = ecc_get_curve(curve_id);
u64 priv[ECC_MAX_DIGITS];
unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
unsigned int nbits = vli_num_bits(curve->n, ndigits);
int err;
/* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
return -EINVAL;
/*
* FIPS 186-4 recommends that the private key should be obtained from a
* RBG with a security strength equal to or greater than the security
* strength associated with N.
*
* The maximum security strength identified by NIST SP800-57pt1r4 for
* ECC is 256 (N >= 512).
*
* This condition is met by the default RNG because it selects a favored
* DRBG with a security strength of 256.
*/
if (crypto_get_default_rng())
return -EFAULT;
err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
crypto_put_default_rng();
if (err)
return err;
/* Make sure the private key is in the valid range. */
if (__ecc_is_key_valid(curve, priv, ndigits))
return -EINVAL;
ecc_swap_digits(priv, privkey, ndigits);
return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, u64 *public_key)
{
int ret = 0;
struct ecc_point *pk;
u64 priv[ECC_MAX_DIGITS];
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) {
ret = -EINVAL;
goto out;
}
ecc_swap_digits(private_key, priv, ndigits);
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
goto out;
}
ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
/* SP800-56A rev 3 5.6.2.1.3 key check */
if (ecc_is_pubkey_valid_full(curve, pk)) {
ret = -EAGAIN;
goto err_free_point;
}
ecc_swap_digits(pk->x, public_key, ndigits);
ecc_swap_digits(pk->y, &public_key[ndigits], ndigits);
err_free_point:
ecc_free_point(pk);
out:
return ret;
}
EXPORT_SYMBOL(ecc_make_pub_key);
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
struct ecc_point *pk)
{
u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];
if (WARN_ON(pk->ndigits != curve->g.ndigits))
return -EINVAL;
/* Check 1: Verify key is not the zero point. */
if (ecc_point_is_zero(pk))
return -EINVAL;
/* Check 2: Verify key is in the range [1, p-1]. */
if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
return -EINVAL;
if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
return -EINVAL;
/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
vli_mod_square_fast(yy, pk->y, curve); /* y^2 */
vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */
vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */
vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */
vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
return -EINVAL;
return 0;
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
/* SP800-56A section 5.6.2.3.3 full verification */
int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
struct ecc_point *pk)
{
struct ecc_point *nQ;
/* Checks 1 through 3 */
int ret = ecc_is_pubkey_valid_partial(curve, pk);
if (ret)
return ret;
/* Check 4: Verify that nQ is the zero point. */
nQ = ecc_alloc_point(pk->ndigits);
if (!nQ)
return -ENOMEM;
ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits);
if (!ecc_point_is_zero(nQ))
ret = -EINVAL;
ecc_free_point(nQ);
return ret;
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_full);
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
const u64 *private_key, const u64 *public_key,
u64 *secret)
{
int ret = 0;
struct ecc_point *product, *pk;
u64 priv[ECC_MAX_DIGITS];
u64 rand_z[ECC_MAX_DIGITS];
unsigned int nbytes;
const struct ecc_curve *curve = ecc_get_curve(curve_id);
if (!private_key || !public_key || !curve ||
ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) {
ret = -EINVAL;
goto out;
}
nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
get_random_bytes(rand_z, nbytes);
pk = ecc_alloc_point(ndigits);
if (!pk) {
ret = -ENOMEM;
goto out;
}
ecc_swap_digits(public_key, pk->x, ndigits);
ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
ret = ecc_is_pubkey_valid_partial(curve, pk);
if (ret)
goto err_alloc_product;
ecc_swap_digits(private_key, priv, ndigits);
product = ecc_alloc_point(ndigits);
if (!product) {
ret = -ENOMEM;
goto err_alloc_product;
}
ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);
if (ecc_point_is_zero(product)) {
ret = -EFAULT;
goto err_validity;
}
ecc_swap_digits(product->x, secret, ndigits);
err_validity:
memzero_explicit(priv, sizeof(priv));
memzero_explicit(rand_z, sizeof(rand_z));
ecc_free_point(product);
err_alloc_product:
ecc_free_point(pk);
out:
return ret;
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
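/*
 * Putting the pieces together (illustrative sketch; error handling is
 * omitted and the NIST P-256 curve id and digit count are assumed):
 *
 *	u64 priv[4], pub[2 * 4], secret[4];
 *	u64 peer_pub[2 * 4];	// received from the peer
 *
 *	ecc_gen_privkey(ECC_CURVE_NIST_P256, 4, priv);
 *	ecc_make_pub_key(ECC_CURVE_NIST_P256, 4, priv, pub);
 *	// ...exchange public keys with the peer...
 *	crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, 4, priv, peer_pub,
 *				  secret);
 */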
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | crypto/ecc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Kernel cryptographic api.
* cast5.c - Cast5 cipher algorithm (rfc2144).
*
* Derived from GnuPG implementation of cast5.
*
* Major Changes.
* Complete conformance to rfc2144.
* Supports key sizes from 40 to 128 bits.
*
* Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
* Copyright (C) 2003 Kartikey Mahendra Bhatt <[email protected]>.
*/
#include <asm/unaligned.h>
#include <crypto/algapi.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <crypto/cast5.h>
static const u32 s5[256] = {
0x7ec90c04, 0x2c6e74b9, 0x9b0e66df, 0xa6337911, 0xb86a7fff,
0x1dd358f5, 0x44dd9d44, 0x1731167f,
0x08fbf1fa, 0xe7f511cc, 0xd2051b00, 0x735aba00, 0x2ab722d8,
0x386381cb, 0xacf6243a, 0x69befd7a,
0xe6a2e77f, 0xf0c720cd, 0xc4494816, 0xccf5c180, 0x38851640,
0x15b0a848, 0xe68b18cb, 0x4caadeff,
0x5f480a01, 0x0412b2aa, 0x259814fc, 0x41d0efe2, 0x4e40b48d,
0x248eb6fb, 0x8dba1cfe, 0x41a99b02,
0x1a550a04, 0xba8f65cb, 0x7251f4e7, 0x95a51725, 0xc106ecd7,
0x97a5980a, 0xc539b9aa, 0x4d79fe6a,
0xf2f3f763, 0x68af8040, 0xed0c9e56, 0x11b4958b, 0xe1eb5a88,
0x8709e6b0, 0xd7e07156, 0x4e29fea7,
0x6366e52d, 0x02d1c000, 0xc4ac8e05, 0x9377f571, 0x0c05372a,
0x578535f2, 0x2261be02, 0xd642a0c9,
0xdf13a280, 0x74b55bd2, 0x682199c0, 0xd421e5ec, 0x53fb3ce8,
0xc8adedb3, 0x28a87fc9, 0x3d959981,
0x5c1ff900, 0xfe38d399, 0x0c4eff0b, 0x062407ea, 0xaa2f4fb1,
0x4fb96976, 0x90c79505, 0xb0a8a774,
0xef55a1ff, 0xe59ca2c2, 0xa6b62d27, 0xe66a4263, 0xdf65001f,
0x0ec50966, 0xdfdd55bc, 0x29de0655,
0x911e739a, 0x17af8975, 0x32c7911c, 0x89f89468, 0x0d01e980,
0x524755f4, 0x03b63cc9, 0x0cc844b2,
0xbcf3f0aa, 0x87ac36e9, 0xe53a7426, 0x01b3d82b, 0x1a9e7449,
0x64ee2d7e, 0xcddbb1da, 0x01c94910,
0xb868bf80, 0x0d26f3fd, 0x9342ede7, 0x04a5c284, 0x636737b6,
0x50f5b616, 0xf24766e3, 0x8eca36c1,
0x136e05db, 0xfef18391, 0xfb887a37, 0xd6e7f7d4, 0xc7fb7dc9,
0x3063fcdf, 0xb6f589de, 0xec2941da,
0x26e46695, 0xb7566419, 0xf654efc5, 0xd08d58b7, 0x48925401,
0xc1bacb7f, 0xe5ff550f, 0xb6083049,
0x5bb5d0e8, 0x87d72e5a, 0xab6a6ee1, 0x223a66ce, 0xc62bf3cd,
0x9e0885f9, 0x68cb3e47, 0x086c010f,
0xa21de820, 0xd18b69de, 0xf3f65777, 0xfa02c3f6, 0x407edac3,
0xcbb3d550, 0x1793084d, 0xb0d70eba,
0x0ab378d5, 0xd951fb0c, 0xded7da56, 0x4124bbe4, 0x94ca0b56,
0x0f5755d1, 0xe0e1e56e, 0x6184b5be,
0x580a249f, 0x94f74bc0, 0xe327888e, 0x9f7b5561, 0xc3dc0280,
0x05687715, 0x646c6bd7, 0x44904db3,
0x66b4f0a3, 0xc0f1648a, 0x697ed5af, 0x49e92ff6, 0x309e374f,
0x2cb6356a, 0x85808573, 0x4991f840,
0x76f0ae02, 0x083be84d, 0x28421c9a, 0x44489406, 0x736e4cb8,
0xc1092910, 0x8bc95fc6, 0x7d869cf4,
0x134f616f, 0x2e77118d, 0xb31b2be1, 0xaa90b472, 0x3ca5d717,
0x7d161bba, 0x9cad9010, 0xaf462ba2,
0x9fe459d2, 0x45d34559, 0xd9f2da13, 0xdbc65487, 0xf3e4f94e,
0x176d486f, 0x097c13ea, 0x631da5c7,
0x445f7382, 0x175683f4, 0xcdc66a97, 0x70be0288, 0xb3cdcf72,
0x6e5dd2f3, 0x20936079, 0x459b80a5,
0xbe60e2db, 0xa9c23101, 0xeba5315c, 0x224e42f2, 0x1c5c1572,
0xf6721b2c, 0x1ad2fff3, 0x8c25404e,
0x324ed72f, 0x4067b7fd, 0x0523138e, 0x5ca3bc78, 0xdc0fd66e,
0x75922283, 0x784d6b17, 0x58ebb16e,
0x44094f85, 0x3f481d87, 0xfcfeae7b, 0x77b5ff76, 0x8c2302bf,
0xaaf47556, 0x5f46b02a, 0x2b092801,
0x3d38f5f7, 0x0ca81f36, 0x52af4a8a, 0x66d5e7c0, 0xdf3b0874,
0x95055110, 0x1b5ad7a8, 0xf61ed5ad,
0x6cf6e479, 0x20758184, 0xd0cefa65, 0x88f7be58, 0x4a046826,
0x0ff6f8f3, 0xa09c7f70, 0x5346aba0,
0x5ce96c28, 0xe176eda3, 0x6bac307f, 0x376829d2, 0x85360fa9,
0x17e3fe2a, 0x24b79767, 0xf5a96b20,
0xd6cd2595, 0x68ff1ebf, 0x7555442c, 0xf19f06be, 0xf9e0659a,
0xeeb9491d, 0x34010718, 0xbb30cab8,
0xe822fe15, 0x88570983, 0x750e6249, 0xda627e55, 0x5e76ffa8,
0xb1534546, 0x6d47de08, 0xefe9e7d4
};
static const u32 s6[256] = {
0xf6fa8f9d, 0x2cac6ce1, 0x4ca34867, 0xe2337f7c, 0x95db08e7,
0x016843b4, 0xeced5cbc, 0x325553ac,
0xbf9f0960, 0xdfa1e2ed, 0x83f0579d, 0x63ed86b9, 0x1ab6a6b8,
0xde5ebe39, 0xf38ff732, 0x8989b138,
0x33f14961, 0xc01937bd, 0xf506c6da, 0xe4625e7e, 0xa308ea99,
0x4e23e33c, 0x79cbd7cc, 0x48a14367,
0xa3149619, 0xfec94bd5, 0xa114174a, 0xeaa01866, 0xa084db2d,
0x09a8486f, 0xa888614a, 0x2900af98,
0x01665991, 0xe1992863, 0xc8f30c60, 0x2e78ef3c, 0xd0d51932,
0xcf0fec14, 0xf7ca07d2, 0xd0a82072,
0xfd41197e, 0x9305a6b0, 0xe86be3da, 0x74bed3cd, 0x372da53c,
0x4c7f4448, 0xdab5d440, 0x6dba0ec3,
0x083919a7, 0x9fbaeed9, 0x49dbcfb0, 0x4e670c53, 0x5c3d9c01,
0x64bdb941, 0x2c0e636a, 0xba7dd9cd,
0xea6f7388, 0xe70bc762, 0x35f29adb, 0x5c4cdd8d, 0xf0d48d8c,
0xb88153e2, 0x08a19866, 0x1ae2eac8,
0x284caf89, 0xaa928223, 0x9334be53, 0x3b3a21bf, 0x16434be3,
0x9aea3906, 0xefe8c36e, 0xf890cdd9,
0x80226dae, 0xc340a4a3, 0xdf7e9c09, 0xa694a807, 0x5b7c5ecc,
0x221db3a6, 0x9a69a02f, 0x68818a54,
0xceb2296f, 0x53c0843a, 0xfe893655, 0x25bfe68a, 0xb4628abc,
0xcf222ebf, 0x25ac6f48, 0xa9a99387,
0x53bddb65, 0xe76ffbe7, 0xe967fd78, 0x0ba93563, 0x8e342bc1,
0xe8a11be9, 0x4980740d, 0xc8087dfc,
0x8de4bf99, 0xa11101a0, 0x7fd37975, 0xda5a26c0, 0xe81f994f,
0x9528cd89, 0xfd339fed, 0xb87834bf,
0x5f04456d, 0x22258698, 0xc9c4c83b, 0x2dc156be, 0x4f628daa,
0x57f55ec5, 0xe2220abe, 0xd2916ebf,
0x4ec75b95, 0x24f2c3c0, 0x42d15d99, 0xcd0d7fa0, 0x7b6e27ff,
0xa8dc8af0, 0x7345c106, 0xf41e232f,
0x35162386, 0xe6ea8926, 0x3333b094, 0x157ec6f2, 0x372b74af,
0x692573e4, 0xe9a9d848, 0xf3160289,
0x3a62ef1d, 0xa787e238, 0xf3a5f676, 0x74364853, 0x20951063,
0x4576698d, 0xb6fad407, 0x592af950,
0x36f73523, 0x4cfb6e87, 0x7da4cec0, 0x6c152daa, 0xcb0396a8,
0xc50dfe5d, 0xfcd707ab, 0x0921c42f,
0x89dff0bb, 0x5fe2be78, 0x448f4f33, 0x754613c9, 0x2b05d08d,
0x48b9d585, 0xdc049441, 0xc8098f9b,
0x7dede786, 0xc39a3373, 0x42410005, 0x6a091751, 0x0ef3c8a6,
0x890072d6, 0x28207682, 0xa9a9f7be,
0xbf32679d, 0xd45b5b75, 0xb353fd00, 0xcbb0e358, 0x830f220a,
0x1f8fb214, 0xd372cf08, 0xcc3c4a13,
0x8cf63166, 0x061c87be, 0x88c98f88, 0x6062e397, 0x47cf8e7a,
0xb6c85283, 0x3cc2acfb, 0x3fc06976,
0x4e8f0252, 0x64d8314d, 0xda3870e3, 0x1e665459, 0xc10908f0,
0x513021a5, 0x6c5b68b7, 0x822f8aa0,
0x3007cd3e, 0x74719eef, 0xdc872681, 0x073340d4, 0x7e432fd9,
0x0c5ec241, 0x8809286c, 0xf592d891,
0x08a930f6, 0x957ef305, 0xb7fbffbd, 0xc266e96f, 0x6fe4ac98,
0xb173ecc0, 0xbc60b42a, 0x953498da,
0xfba1ae12, 0x2d4bd736, 0x0f25faab, 0xa4f3fceb, 0xe2969123,
0x257f0c3d, 0x9348af49, 0x361400bc,
0xe8816f4a, 0x3814f200, 0xa3f94043, 0x9c7a54c2, 0xbc704f57,
0xda41e7f9, 0xc25ad33a, 0x54f4a084,
0xb17f5505, 0x59357cbe, 0xedbd15c8, 0x7f97c5ab, 0xba5ac7b5,
0xb6f6deaf, 0x3a479c3a, 0x5302da25,
0x653d7e6a, 0x54268d49, 0x51a477ea, 0x5017d55b, 0xd7d25d88,
0x44136c76, 0x0404a8c8, 0xb8e5a121,
0xb81a928a, 0x60ed5869, 0x97c55b96, 0xeaec991b, 0x29935913,
0x01fdb7f1, 0x088e8dfa, 0x9ab6f6f5,
0x3b4cbf9f, 0x4a5de3ab, 0xe6051d35, 0xa0e1d855, 0xd36b4cf1,
0xf544edeb, 0xb0e93524, 0xbebb8fbd,
0xa2d762cf, 0x49c92f54, 0x38b5f331, 0x7128a454, 0x48392905,
0xa65b1db8, 0x851c97bd, 0xd675cf2f
};
static const u32 s7[256] = {
0x85e04019, 0x332bf567, 0x662dbfff, 0xcfc65693, 0x2a8d7f6f,
0xab9bc912, 0xde6008a1, 0x2028da1f,
0x0227bce7, 0x4d642916, 0x18fac300, 0x50f18b82, 0x2cb2cb11,
0xb232e75c, 0x4b3695f2, 0xb28707de,
0xa05fbcf6, 0xcd4181e9, 0xe150210c, 0xe24ef1bd, 0xb168c381,
0xfde4e789, 0x5c79b0d8, 0x1e8bfd43,
0x4d495001, 0x38be4341, 0x913cee1d, 0x92a79c3f, 0x089766be,
0xbaeeadf4, 0x1286becf, 0xb6eacb19,
0x2660c200, 0x7565bde4, 0x64241f7a, 0x8248dca9, 0xc3b3ad66,
0x28136086, 0x0bd8dfa8, 0x356d1cf2,
0x107789be, 0xb3b2e9ce, 0x0502aa8f, 0x0bc0351e, 0x166bf52a,
0xeb12ff82, 0xe3486911, 0xd34d7516,
0x4e7b3aff, 0x5f43671b, 0x9cf6e037, 0x4981ac83, 0x334266ce,
0x8c9341b7, 0xd0d854c0, 0xcb3a6c88,
0x47bc2829, 0x4725ba37, 0xa66ad22b, 0x7ad61f1e, 0x0c5cbafa,
0x4437f107, 0xb6e79962, 0x42d2d816,
0x0a961288, 0xe1a5c06e, 0x13749e67, 0x72fc081a, 0xb1d139f7,
0xf9583745, 0xcf19df58, 0xbec3f756,
0xc06eba30, 0x07211b24, 0x45c28829, 0xc95e317f, 0xbc8ec511,
0x38bc46e9, 0xc6e6fa14, 0xbae8584a,
0xad4ebc46, 0x468f508b, 0x7829435f, 0xf124183b, 0x821dba9f,
0xaff60ff4, 0xea2c4e6d, 0x16e39264,
0x92544a8b, 0x009b4fc3, 0xaba68ced, 0x9ac96f78, 0x06a5b79a,
0xb2856e6e, 0x1aec3ca9, 0xbe838688,
0x0e0804e9, 0x55f1be56, 0xe7e5363b, 0xb3a1f25d, 0xf7debb85,
0x61fe033c, 0x16746233, 0x3c034c28,
0xda6d0c74, 0x79aac56c, 0x3ce4e1ad, 0x51f0c802, 0x98f8f35a,
0x1626a49f, 0xeed82b29, 0x1d382fe3,
0x0c4fb99a, 0xbb325778, 0x3ec6d97b, 0x6e77a6a9, 0xcb658b5c,
0xd45230c7, 0x2bd1408b, 0x60c03eb7,
0xb9068d78, 0xa33754f4, 0xf430c87d, 0xc8a71302, 0xb96d8c32,
0xebd4e7be, 0xbe8b9d2d, 0x7979fb06,
0xe7225308, 0x8b75cf77, 0x11ef8da4, 0xe083c858, 0x8d6b786f,
0x5a6317a6, 0xfa5cf7a0, 0x5dda0033,
0xf28ebfb0, 0xf5b9c310, 0xa0eac280, 0x08b9767a, 0xa3d9d2b0,
0x79d34217, 0x021a718d, 0x9ac6336a,
0x2711fd60, 0x438050e3, 0x069908a8, 0x3d7fedc4, 0x826d2bef,
0x4eeb8476, 0x488dcf25, 0x36c9d566,
0x28e74e41, 0xc2610aca, 0x3d49a9cf, 0xbae3b9df, 0xb65f8de6,
0x92aeaf64, 0x3ac7d5e6, 0x9ea80509,
0xf22b017d, 0xa4173f70, 0xdd1e16c3, 0x15e0d7f9, 0x50b1b887,
0x2b9f4fd5, 0x625aba82, 0x6a017962,
0x2ec01b9c, 0x15488aa9, 0xd716e740, 0x40055a2c, 0x93d29a22,
0xe32dbf9a, 0x058745b9, 0x3453dc1e,
0xd699296e, 0x496cff6f, 0x1c9f4986, 0xdfe2ed07, 0xb87242d1,
0x19de7eae, 0x053e561a, 0x15ad6f8c,
0x66626c1c, 0x7154c24c, 0xea082b2a, 0x93eb2939, 0x17dcb0f0,
0x58d4f2ae, 0x9ea294fb, 0x52cf564c,
0x9883fe66, 0x2ec40581, 0x763953c3, 0x01d6692e, 0xd3a0c108,
0xa1e7160e, 0xe4f2dfa6, 0x693ed285,
0x74904698, 0x4c2b0edd, 0x4f757656, 0x5d393378, 0xa132234f,
0x3d321c5d, 0xc3f5e194, 0x4b269301,
0xc79f022f, 0x3c997e7e, 0x5e4f9504, 0x3ffafbbd, 0x76f7ad0e,
0x296693f4, 0x3d1fce6f, 0xc61e45be,
0xd3b5ab34, 0xf72bf9b7, 0x1b0434c0, 0x4e72b567, 0x5592a33d,
0xb5229301, 0xcfd2a87f, 0x60aeb767,
0x1814386b, 0x30bcc33d, 0x38a0c07d, 0xfd1606f2, 0xc363519b,
0x589dd390, 0x5479f8e6, 0x1cb8d647,
0x97fd61a9, 0xea7759f4, 0x2d57539d, 0x569a58cf, 0xe84e63ad,
0x462e1b78, 0x6580f87e, 0xf3817914,
0x91da55f4, 0x40a230f3, 0xd1988f35, 0xb6e318d2, 0x3ffa50bc,
0x3d40f021, 0xc3c0bdae, 0x4958c24c,
0x518f36b2, 0x84b1d370, 0x0fedce83, 0x878ddada, 0xf2a279c7,
0x94e01be8, 0x90716f4b, 0x954b8aa3
};
static const u32 sb8[256] = {
0xe216300d, 0xbbddfffc, 0xa7ebdabd, 0x35648095, 0x7789f8b7,
0xe6c1121b, 0x0e241600, 0x052ce8b5,
0x11a9cfb0, 0xe5952f11, 0xece7990a, 0x9386d174, 0x2a42931c,
0x76e38111, 0xb12def3a, 0x37ddddfc,
0xde9adeb1, 0x0a0cc32c, 0xbe197029, 0x84a00940, 0xbb243a0f,
0xb4d137cf, 0xb44e79f0, 0x049eedfd,
0x0b15a15d, 0x480d3168, 0x8bbbde5a, 0x669ded42, 0xc7ece831,
0x3f8f95e7, 0x72df191b, 0x7580330d,
0x94074251, 0x5c7dcdfa, 0xabbe6d63, 0xaa402164, 0xb301d40a,
0x02e7d1ca, 0x53571dae, 0x7a3182a2,
0x12a8ddec, 0xfdaa335d, 0x176f43e8, 0x71fb46d4, 0x38129022,
0xce949ad4, 0xb84769ad, 0x965bd862,
0x82f3d055, 0x66fb9767, 0x15b80b4e, 0x1d5b47a0, 0x4cfde06f,
0xc28ec4b8, 0x57e8726e, 0x647a78fc,
0x99865d44, 0x608bd593, 0x6c200e03, 0x39dc5ff6, 0x5d0b00a3,
0xae63aff2, 0x7e8bd632, 0x70108c0c,
0xbbd35049, 0x2998df04, 0x980cf42a, 0x9b6df491, 0x9e7edd53,
0x06918548, 0x58cb7e07, 0x3b74ef2e,
0x522fffb1, 0xd24708cc, 0x1c7e27cd, 0xa4eb215b, 0x3cf1d2e2,
0x19b47a38, 0x424f7618, 0x35856039,
0x9d17dee7, 0x27eb35e6, 0xc9aff67b, 0x36baf5b8, 0x09c467cd,
0xc18910b1, 0xe11dbf7b, 0x06cd1af8,
0x7170c608, 0x2d5e3354, 0xd4de495a, 0x64c6d006, 0xbcc0c62c,
0x3dd00db3, 0x708f8f34, 0x77d51b42,
0x264f620f, 0x24b8d2bf, 0x15c1b79e, 0x46a52564, 0xf8d7e54e,
0x3e378160, 0x7895cda5, 0x859c15a5,
0xe6459788, 0xc37bc75f, 0xdb07ba0c, 0x0676a3ab, 0x7f229b1e,
0x31842e7b, 0x24259fd7, 0xf8bef472,
0x835ffcb8, 0x6df4c1f2, 0x96f5b195, 0xfd0af0fc, 0xb0fe134c,
0xe2506d3d, 0x4f9b12ea, 0xf215f225,
0xa223736f, 0x9fb4c428, 0x25d04979, 0x34c713f8, 0xc4618187,
0xea7a6e98, 0x7cd16efc, 0x1436876c,
0xf1544107, 0xbedeee14, 0x56e9af27, 0xa04aa441, 0x3cf7c899,
0x92ecbae6, 0xdd67016d, 0x151682eb,
0xa842eedf, 0xfdba60b4, 0xf1907b75, 0x20e3030f, 0x24d8c29e,
0xe139673b, 0xefa63fb8, 0x71873054,
0xb6f2cf3b, 0x9f326442, 0xcb15a4cc, 0xb01a4504, 0xf1e47d8d,
0x844a1be5, 0xbae7dfdc, 0x42cbda70,
0xcd7dae0a, 0x57e85b7a, 0xd53f5af6, 0x20cf4d8c, 0xcea4d428,
0x79d130a4, 0x3486ebfb, 0x33d3cddc,
0x77853b53, 0x37effcb5, 0xc5068778, 0xe580b3e6, 0x4e68b8f4,
0xc5c8b37e, 0x0d809ea2, 0x398feb7c,
0x132a4f94, 0x43b7950e, 0x2fee7d1c, 0x223613bd, 0xdd06caa2,
0x37df932b, 0xc4248289, 0xacf3ebc3,
0x5715f6b7, 0xef3478dd, 0xf267616f, 0xc148cbe4, 0x9052815e,
0x5e410fab, 0xb48a2465, 0x2eda7fa4,
0xe87b40e4, 0xe98ea084, 0x5889e9e1, 0xefd390fc, 0xdd07d35b,
0xdb485694, 0x38d7e5b2, 0x57720101,
0x730edebc, 0x5b643113, 0x94917e4f, 0x503c2fba, 0x646f1282,
0x7523d24a, 0xe0779695, 0xf9c17a8f,
0x7a5b2121, 0xd187b896, 0x29263a4d, 0xba510cdf, 0x81f47c9f,
0xad1163ed, 0xea7b5965, 0x1a00726e,
0x11403092, 0x00da6d77, 0x4a0cdd61, 0xad1f4603, 0x605bdfb0,
0x9eedc364, 0x22ebe6a8, 0xcee7d28a,
0xa0e736a0, 0x5564a6b9, 0x10853209, 0xc7eb8f37, 0x2de705ca,
0x8951570f, 0xdf09822b, 0xbd691a6c,
0xaa12e4f2, 0x87451c0f, 0xe0f6a27a, 0x3ada4819, 0x4cf1764f,
0x0d771c2b, 0x67cdb156, 0x350d8384,
0x5938fa0f, 0x42399ef3, 0x36997b07, 0x0e84093d, 0x4aa93e61,
0x8360d87b, 0x1fa98b0c, 0x1149382c,
0xe97625a5, 0x0614d1b7, 0x0e25244b, 0x0c768347, 0x589e8d82,
0x0d2059d1, 0xa466bb1e, 0xf8da0a82,
0x04f19130, 0xba6e4ec0, 0x99265164, 0x1ee7230d, 0x50b2ad80,
0xeaee6801, 0x8db2a283, 0xea8bf59e
};
#define s1 cast_s1
#define s2 cast_s2
#define s3 cast_s3
#define s4 cast_s4
#define F1(D, m, r) ((I = ((m) + (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] ^ s2[(I>>16)&0xff]) - s3[(I>>8)&0xff]) + s4[I&0xff]))
#define F2(D, m, r) ((I = ((m) ^ (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] - s2[(I>>16)&0xff]) + s3[(I>>8)&0xff]) ^ s4[I&0xff]))
#define F3(D, m, r) ((I = ((m) - (D))), (I = rol32(I, (r))), \
(((s1[I >> 24] + s2[(I>>16)&0xff]) ^ s3[(I>>8)&0xff]) - s4[I&0xff]))
void __cast5_encrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
u32 l, r, t;
u32 I; /* used by the Fx macros */
u32 *Km;
u8 *Kr;
Km = c->Km;
Kr = c->Kr;
/* (L0,R0) <-- (m1...m64). (Split the plaintext into left and
* right 32-bit halves L0 = m1...m32 and R0 = m33...m64.)
*/
l = get_unaligned_be32(inbuf);
r = get_unaligned_be32(inbuf + 4);
/* (16 rounds) for i from 1 to 16, compute Li and Ri as follows:
* Li = Ri-1;
* Ri = Li-1 ^ f(Ri-1,Kmi,Kri), where f is defined in Section 2.2
* Rounds 1, 4, 7, 10, 13, and 16 use f function Type 1.
* Rounds 2, 5, 8, 11, and 14 use f function Type 2.
* Rounds 3, 6, 9, 12, and 15 use f function Type 3.
*/
t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
if (!(c->rr)) {
t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
}
/* c1...c64 <-- (R16,L16). (Exchange final blocks L16, R16 and
* concatenate to form the ciphertext.) */
put_unaligned_be32(r, outbuf);
put_unaligned_be32(l, outbuf + 4);
}
EXPORT_SYMBOL_GPL(__cast5_encrypt);
static void cast5_encrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast5_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
void __cast5_decrypt(struct cast5_ctx *c, u8 *outbuf, const u8 *inbuf)
{
u32 l, r, t;
u32 I;
u32 *Km;
u8 *Kr;
Km = c->Km;
Kr = c->Kr;
l = get_unaligned_be32(inbuf);
r = get_unaligned_be32(inbuf + 4);
if (!(c->rr)) {
t = l; l = r; r = t ^ F1(r, Km[15], Kr[15]);
t = l; l = r; r = t ^ F3(r, Km[14], Kr[14]);
t = l; l = r; r = t ^ F2(r, Km[13], Kr[13]);
t = l; l = r; r = t ^ F1(r, Km[12], Kr[12]);
}
t = l; l = r; r = t ^ F3(r, Km[11], Kr[11]);
t = l; l = r; r = t ^ F2(r, Km[10], Kr[10]);
t = l; l = r; r = t ^ F1(r, Km[9], Kr[9]);
t = l; l = r; r = t ^ F3(r, Km[8], Kr[8]);
t = l; l = r; r = t ^ F2(r, Km[7], Kr[7]);
t = l; l = r; r = t ^ F1(r, Km[6], Kr[6]);
t = l; l = r; r = t ^ F3(r, Km[5], Kr[5]);
t = l; l = r; r = t ^ F2(r, Km[4], Kr[4]);
t = l; l = r; r = t ^ F1(r, Km[3], Kr[3]);
t = l; l = r; r = t ^ F3(r, Km[2], Kr[2]);
t = l; l = r; r = t ^ F2(r, Km[1], Kr[1]);
t = l; l = r; r = t ^ F1(r, Km[0], Kr[0]);
put_unaligned_be32(r, outbuf);
put_unaligned_be32(l, outbuf + 4);
}
EXPORT_SYMBOL_GPL(__cast5_decrypt);
static void cast5_decrypt(struct crypto_tfm *tfm, u8 *outbuf, const u8 *inbuf)
{
__cast5_decrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
}
static void key_schedule(u32 *x, u32 *z, u32 *k)
{
#define xi(i) ((x[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
#define zi(i) ((z[(i)/4] >> (8*(3-((i)%4)))) & 0xff)
z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^
s7[xi(8)];
z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^
sb8[xi(10)];
z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^
s5[xi(9)];
z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^
s6[xi(11)];
k[0] = s5[zi(8)] ^ s6[zi(9)] ^ s7[zi(7)] ^ sb8[zi(6)] ^ s5[zi(2)];
k[1] = s5[zi(10)] ^ s6[zi(11)] ^ s7[zi(5)] ^ sb8[zi(4)] ^
s6[zi(6)];
k[2] = s5[zi(12)] ^ s6[zi(13)] ^ s7[zi(3)] ^ sb8[zi(2)] ^
s7[zi(9)];
k[3] = s5[zi(14)] ^ s6[zi(15)] ^ s7[zi(1)] ^ sb8[zi(0)] ^
sb8[zi(12)];
x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^
s7[zi(0)];
x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^
sb8[zi(2)];
x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^
s5[zi(1)];
x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^
s6[zi(3)];
k[4] = s5[xi(3)] ^ s6[xi(2)] ^ s7[xi(12)] ^ sb8[xi(13)] ^
s5[xi(8)];
k[5] = s5[xi(1)] ^ s6[xi(0)] ^ s7[xi(14)] ^ sb8[xi(15)] ^
s6[xi(13)];
k[6] = s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(8)] ^ sb8[xi(9)] ^ s7[xi(3)];
k[7] = s5[xi(5)] ^ s6[xi(4)] ^ s7[xi(10)] ^ sb8[xi(11)] ^
sb8[xi(7)];
z[0] = x[0] ^ s5[xi(13)] ^ s6[xi(15)] ^ s7[xi(12)] ^ sb8[xi(14)] ^
s7[xi(8)];
z[1] = x[2] ^ s5[zi(0)] ^ s6[zi(2)] ^ s7[zi(1)] ^ sb8[zi(3)] ^
sb8[xi(10)];
z[2] = x[3] ^ s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(5)] ^ sb8[zi(4)] ^
s5[xi(9)];
z[3] = x[1] ^ s5[zi(10)] ^ s6[zi(9)] ^ s7[zi(11)] ^ sb8[zi(8)] ^
s6[xi(11)];
k[8] = s5[zi(3)] ^ s6[zi(2)] ^ s7[zi(12)] ^ sb8[zi(13)] ^
s5[zi(9)];
k[9] = s5[zi(1)] ^ s6[zi(0)] ^ s7[zi(14)] ^ sb8[zi(15)] ^
s6[zi(12)];
k[10] = s5[zi(7)] ^ s6[zi(6)] ^ s7[zi(8)] ^ sb8[zi(9)] ^ s7[zi(2)];
k[11] = s5[zi(5)] ^ s6[zi(4)] ^ s7[zi(10)] ^ sb8[zi(11)] ^
sb8[zi(6)];
x[0] = z[2] ^ s5[zi(5)] ^ s6[zi(7)] ^ s7[zi(4)] ^ sb8[zi(6)] ^
s7[zi(0)];
x[1] = z[0] ^ s5[xi(0)] ^ s6[xi(2)] ^ s7[xi(1)] ^ sb8[xi(3)] ^
sb8[zi(2)];
x[2] = z[1] ^ s5[xi(7)] ^ s6[xi(6)] ^ s7[xi(5)] ^ sb8[xi(4)] ^
s5[zi(1)];
x[3] = z[3] ^ s5[xi(10)] ^ s6[xi(9)] ^ s7[xi(11)] ^ sb8[xi(8)] ^
s6[zi(3)];
k[12] = s5[xi(8)] ^ s6[xi(9)] ^ s7[xi(7)] ^ sb8[xi(6)] ^ s5[xi(3)];
k[13] = s5[xi(10)] ^ s6[xi(11)] ^ s7[xi(5)] ^ sb8[xi(4)] ^
s6[xi(7)];
k[14] = s5[xi(12)] ^ s6[xi(13)] ^ s7[xi(3)] ^ sb8[xi(2)] ^
s7[xi(8)];
k[15] = s5[xi(14)] ^ s6[xi(15)] ^ s7[xi(1)] ^ sb8[xi(0)] ^
sb8[xi(13)];
#undef xi
#undef zi
}
int cast5_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len)
{
struct cast5_ctx *c = crypto_tfm_ctx(tfm);
int i;
u32 x[4];
u32 z[4];
u32 k[16];
__be32 p_key[4];
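/* RFC 2144: keys of 80 bits (10 bytes) or less use only 12 rounds */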
c->rr = key_len <= 10 ? 1 : 0;
memset(p_key, 0, 16);
memcpy(p_key, key, key_len);
x[0] = be32_to_cpu(p_key[0]);
x[1] = be32_to_cpu(p_key[1]);
x[2] = be32_to_cpu(p_key[2]);
x[3] = be32_to_cpu(p_key[3]);
key_schedule(x, z, k);
for (i = 0; i < 16; i++)
c->Km[i] = k[i];
key_schedule(x, z, k);
for (i = 0; i < 16; i++)
c->Kr[i] = k[i] & 0x1f;
return 0;
}
EXPORT_SYMBOL_GPL(cast5_setkey);
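/*
 * Illustrative use through the single-block cipher API (sketch; error
 * handling is omitted and key/in are placeholder buffers):
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("cast5", 0, 0);
 *	u8 out[CAST5_BLOCK_SIZE];
 *
 *	crypto_cipher_setkey(tfm, key, 16);	// 128-bit key: 16 rounds
 *	crypto_cipher_encrypt_one(tfm, out, in);
 *	crypto_free_cipher(tfm);
 */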
static struct crypto_alg alg = {
.cra_name = "cast5",
.cra_driver_name = "cast5-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = CAST5_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct cast5_ctx),
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = CAST5_MIN_KEY_SIZE,
.cia_max_keysize = CAST5_MAX_KEY_SIZE,
.cia_setkey = cast5_setkey,
.cia_encrypt = cast5_encrypt,
.cia_decrypt = cast5_decrypt
}
}
};
static int __init cast5_mod_init(void)
{
return crypto_register_alg(&alg);
}
static void __exit cast5_mod_fini(void)
{
crypto_unregister_alg(&alg);
}
subsys_initcall(cast5_mod_init);
module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast5");
MODULE_ALIAS_CRYPTO("cast5-generic");
| linux-master | crypto/cast5_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* Null algorithms, aka Much Ado About Nothing.
*
* These are needed for IPsec, and may be useful in general for
* testing & debugging.
*
* The null cipher is compliant with RFC2410.
*
* Copyright (c) 2002 James Morris <[email protected]>
*/
#include <crypto/null.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
if (slen > *dlen)
return -EINVAL;
memcpy(dst, src, slen);
*dlen = slen;
return 0;
}
static int null_init(struct shash_desc *desc)
{
return 0;
}
static int null_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return 0;
}
static int null_final(struct shash_desc *desc, u8 *out)
{
return 0;
}
static int null_digest(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
return 0;
}
static int null_hash_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static int null_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static int null_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int keylen)
{ return 0; }
static void null_crypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
memcpy(dst, src, NULL_BLOCK_SIZE);
}
static int null_skcipher_crypt(struct skcipher_request *req)
{
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes) {
if (walk.src.virt.addr != walk.dst.virt.addr)
memcpy(walk.dst.virt.addr, walk.src.virt.addr,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
static struct shash_alg digest_null = {
.digestsize = NULL_DIGEST_SIZE,
.setkey = null_hash_setkey,
.init = null_init,
.update = null_update,
.finup = null_digest,
.digest = null_digest,
.final = null_final,
.base = {
.cra_name = "digest_null",
.cra_driver_name = "digest_null-generic",
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static struct skcipher_alg skcipher_null = {
.base.cra_name = "ecb(cipher_null)",
.base.cra_driver_name = "ecb-cipher_null",
.base.cra_priority = 100,
.base.cra_blocksize = NULL_BLOCK_SIZE,
.base.cra_ctxsize = 0,
.base.cra_module = THIS_MODULE,
.min_keysize = NULL_KEY_SIZE,
.max_keysize = NULL_KEY_SIZE,
.ivsize = NULL_IV_SIZE,
.setkey = null_skcipher_setkey,
.encrypt = null_skcipher_crypt,
.decrypt = null_skcipher_crypt,
};
static struct crypto_alg null_algs[] = { {
.cra_name = "cipher_null",
.cra_driver_name = "cipher_null-generic",
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .cipher = {
.cia_min_keysize = NULL_KEY_SIZE,
.cia_max_keysize = NULL_KEY_SIZE,
.cia_setkey = null_setkey,
.cia_encrypt = null_crypt,
.cia_decrypt = null_crypt } }
}, {
.cra_name = "compress_null",
.cra_driver_name = "compress_null-generic",
.cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
.cra_blocksize = NULL_BLOCK_SIZE,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
.cra_u = { .compress = {
.coa_compress = null_compress,
.coa_decompress = null_compress } }
} };
MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
struct crypto_sync_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(tfm))
goto unlock;
crypto_default_null_skcipher = tfm;
}
crypto_default_null_skcipher_refcnt++;
unlock:
mutex_unlock(&crypto_default_null_skcipher_lock);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_get_default_null_skcipher);
void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
crypto_free_sync_skcipher(crypto_default_null_skcipher);
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
}
EXPORT_SYMBOL_GPL(crypto_put_default_null_skcipher);
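/*
 * Sketch of the intended get/put pairing (illustrative only):
 *
 *	struct crypto_sync_skcipher *null_tfm;
 *
 *	null_tfm = crypto_get_default_null_skcipher();
 *	if (IS_ERR(null_tfm))
 *		return PTR_ERR(null_tfm);
 *	// ...use null_tfm, e.g. in an IPsec data path...
 *	crypto_put_default_null_skcipher();
 */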
static int __init crypto_null_mod_init(void)
{
int ret = 0;
ret = crypto_register_algs(null_algs, ARRAY_SIZE(null_algs));
if (ret < 0)
goto out;
ret = crypto_register_shash(&digest_null);
if (ret < 0)
goto out_unregister_algs;
ret = crypto_register_skcipher(&skcipher_null);
if (ret < 0)
goto out_unregister_shash;
return 0;
out_unregister_shash:
crypto_unregister_shash(&digest_null);
out_unregister_algs:
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
out:
return ret;
}
static void __exit crypto_null_mod_fini(void)
{
crypto_unregister_algs(null_algs, ARRAY_SIZE(null_algs));
crypto_unregister_shash(&digest_null);
crypto_unregister_skcipher(&skcipher_null);
}
subsys_initcall(crypto_null_mod_init);
module_exit(crypto_null_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Null Cryptographic Algorithms");
| linux-master | crypto/crypto_null.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* GHASH: hash function for GCM (Galois/Counter Mode).
*
* Copyright (c) 2007 Nokia Siemens Networks - Mikko Herranen <[email protected]>
* Copyright (c) 2009 Intel Corp.
* Author: Huang Ying <[email protected]>
*/
/*
* GHASH is a keyed hash function used in GCM authentication tag generation.
*
* The original GCM paper [1] presents GHASH as a function GHASH(H, A, C) which
* takes a 16-byte hash key H, additional authenticated data A, and a ciphertext
* C. It formats A and C into a single byte string X, interprets X as a
* polynomial over GF(2^128), and evaluates this polynomial at the point H.
*
* However, the NIST standard for GCM [2] presents GHASH as GHASH(H, X) where X
* is the already-formatted byte string containing both A and C.
*
* "ghash" in the Linux crypto API uses the 'X' (pre-formatted) convention,
* since the API supports only a single data stream per hash. Thus, the
* formatting of 'A' and 'C' is done in the "gcm" template, not in "ghash".
*
* The reason "ghash" is separate from "gcm" is to allow "gcm" to use an
* accelerated "ghash" when a standalone accelerated "gcm(aes)" is unavailable.
* It is generally inappropriate to use "ghash" for other purposes, since it is
* an "ε-almost-XOR-universal hash function", not a cryptographic hash function.
* It can only be used securely in crypto modes specially designed to use it.
*
* [1] The Galois/Counter Mode of Operation (GCM)
* (http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.694.695&rep=rep1&type=pdf)
* [2] Recommendation for Block Cipher Modes of Operation: Galois/Counter Mode (GCM) and GMAC
* (https://csrc.nist.gov/publications/detail/sp/800-38d/final)
*/
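/*
 * Illustrative use through the shash API (sketch; error handling is
 * omitted and h/x/xlen are placeholder names for the hash key and the
 * pre-formatted input):
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("ghash", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *	u8 out[GHASH_DIGEST_SIZE];
 *
 *	crypto_shash_setkey(tfm, h, GHASH_BLOCK_SIZE);	// 16-byte key H
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, x, xlen, out);
 *	crypto_free_shash(tfm);
 */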
#include <crypto/algapi.h>
#include <crypto/gf128mul.h>
#include <crypto/ghash.h>
#include <crypto/internal/hash.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
static int ghash_init(struct shash_desc *desc)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
memset(dctx, 0, sizeof(*dctx));
return 0;
}
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *key, unsigned int keylen)
{
struct ghash_ctx *ctx = crypto_shash_ctx(tfm);
be128 k;
if (keylen != GHASH_BLOCK_SIZE)
return -EINVAL;
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
BUILD_BUG_ON(sizeof(k) != GHASH_BLOCK_SIZE);
memcpy(&k, key, GHASH_BLOCK_SIZE); /* avoid violating alignment rules */
ctx->gf128 = gf128mul_init_4k_lle(&k);
memzero_explicit(&k, GHASH_BLOCK_SIZE);
if (!ctx->gf128)
return -ENOMEM;
return 0;
}
static int ghash_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *dst = dctx->buffer;
if (dctx->bytes) {
int n = min(srclen, dctx->bytes);
u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
dctx->bytes -= n;
srclen -= n;
while (n--)
*pos++ ^= *src++;
if (!dctx->bytes)
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
while (srclen >= GHASH_BLOCK_SIZE) {
crypto_xor(dst, src, GHASH_BLOCK_SIZE);
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
src += GHASH_BLOCK_SIZE;
srclen -= GHASH_BLOCK_SIZE;
}
if (srclen) {
dctx->bytes = GHASH_BLOCK_SIZE - srclen;
while (srclen--)
*dst++ ^= *src++;
}
return 0;
}
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
u8 *dst = dctx->buffer;
if (dctx->bytes) {
u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
while (dctx->bytes--)
*tmp++ ^= 0;
gf128mul_4k_lle((be128 *)dst, ctx->gf128);
}
dctx->bytes = 0;
}
static int ghash_final(struct shash_desc *desc, u8 *dst)
{
struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
u8 *buf = dctx->buffer;
ghash_flush(ctx, dctx);
memcpy(dst, buf, GHASH_BLOCK_SIZE);
return 0;
}
static void ghash_exit_tfm(struct crypto_tfm *tfm)
{
struct ghash_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->gf128)
gf128mul_free_4k(ctx->gf128);
}
static struct shash_alg ghash_alg = {
.digestsize = GHASH_DIGEST_SIZE,
.init = ghash_init,
.update = ghash_update,
.final = ghash_final,
.setkey = ghash_setkey,
.descsize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-generic",
.cra_priority = 100,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct ghash_ctx),
.cra_module = THIS_MODULE,
.cra_exit = ghash_exit_tfm,
},
};
static int __init ghash_mod_init(void)
{
return crypto_register_shash(&ghash_alg);
}
static void __exit ghash_mod_exit(void)
{
crypto_unregister_shash(&ghash_alg);
}
subsys_initcall(ghash_mod_init);
module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH hash function");
MODULE_ALIAS_CRYPTO("ghash");
MODULE_ALIAS_CRYPTO("ghash-generic");
| linux-master | crypto/ghash-generic.c |
/*
* Non-physical true random number generator based on timing jitter --
* Jitter RNG standalone code.
*
* Copyright Stephan Mueller <[email protected]>, 2015 - 2023
*
* Design
* ======
*
* See https://www.chronox.de/jent.html
*
* License
* =======
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, in which case the provisions of the GPL2 are
* required INSTEAD OF the above restrictions. (This clause is
* necessary due to a potential bad interaction between the GPL and
* the restrictions contained in a BSD-style copyright.)
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
/*
* This Jitterentropy RNG is based on the jitterentropy library
* version 3.4.0 provided at https://www.chronox.de/jent.html
*/
#ifdef __OPTIMIZE__
#error "The CPU Jitter random number generator must not be compiled with optimizations. See documentation. Use the compiler switch -O0 for compiling jitterentropy.c."
#endif
typedef unsigned long long __u64;
typedef long long __s64;
typedef unsigned int __u32;
typedef unsigned char u8;
#define NULL ((void *) 0)
/* The entropy pool */
struct rand_data {
/* SHA3-256 is used as conditioner */
#define DATA_SIZE_BITS 256
/* all data values that are vital to maintain the security
* of the RNG are marked as SENSITIVE. A user must not
* access that information while the RNG executes its loops to
* calculate the next random value. */
void *hash_state; /* SENSITIVE hash state entropy pool */
__u64 prev_time; /* SENSITIVE Previous time stamp */
__u64 last_delta; /* SENSITIVE stuck test */
__s64 last_delta2; /* SENSITIVE stuck test */
unsigned int osr; /* Oversample rate */
#define JENT_MEMORY_BLOCKS 64
#define JENT_MEMORY_BLOCKSIZE 32
#define JENT_MEMORY_ACCESSLOOPS 128
#define JENT_MEMORY_SIZE (JENT_MEMORY_BLOCKS*JENT_MEMORY_BLOCKSIZE)
unsigned char *mem; /* Memory access location with size of
* memblocks * memblocksize */
unsigned int memlocation; /* Pointer to byte in *mem */
unsigned int memblocks; /* Number of memory blocks in *mem */
unsigned int memblocksize; /* Size of one memory block in bytes */
unsigned int memaccessloops; /* Number of memory accesses per random
* bit generation */
/* Repetition Count Test */
unsigned int rct_count; /* Number of stuck values */
/* Intermittent health test failure threshold of 2^-30 */
/* From an SP800-90B perspective, this RCT cutoff value is equal to 31. */
/* However, our RCT implementation starts at 1, so we subtract 1 here. */
#define JENT_RCT_CUTOFF (31 - 1) /* Taken from SP800-90B sec 4.4.1 */
#define JENT_APT_CUTOFF 325 /* Taken from SP800-90B sec 4.4.2 */
/* Permanent health test failure threshold of 2^-60 */
/* From an SP800-90B perspective, this RCT cutoff value is equal to 61. */
/* However, our RCT implementation starts at 1, so we subtract 1 here. */
#define JENT_RCT_CUTOFF_PERMANENT (61 - 1)
#define JENT_APT_CUTOFF_PERMANENT 355
#define JENT_APT_WINDOW_SIZE 512 /* Data window size */
/* LSB of time stamp to process */
#define JENT_APT_LSB 16
#define JENT_APT_WORD_MASK (JENT_APT_LSB - 1)
unsigned int apt_observations; /* Number of collected observations */
unsigned int apt_count; /* APT counter */
unsigned int apt_base; /* APT base reference */
unsigned int apt_base_set:1; /* APT base reference set? */
};
/* Flags that can be used to initialize the RNG */
#define JENT_DISABLE_MEMORY_ACCESS (1<<2) /* Disable memory access for more
* entropy, saves MEMORY_SIZE RAM for
* entropy collector */
/* -- error codes for init function -- */
#define JENT_ENOTIME 1 /* Timer service not available */
#define JENT_ECOARSETIME 2 /* Timer too coarse for RNG */
#define JENT_ENOMONOTONIC 3 /* Timer is not monotonic increasing */
#define JENT_EVARVAR 5 /* Timer does not produce variations of
* variations (2nd derivation of time is
* zero). */
#define JENT_ESTUCK 8 /* Too many stuck results during init. */
#define JENT_EHEALTH 9 /* Health test failed during initialization */
/*
* The output n bits can receive more than n bits of min entropy, of course,
* but the fixed output of the conditioning function can only asymptotically
* approach the output size bits of min entropy, not attain that bound. Random
* maps will tend to have output collisions, which reduces the creditable
* output entropy (that is what SP 800-90B Section 3.1.5.1.2 attempts to bound).
*
* The value "64" is justified in Appendix A.4 of the current 90C draft,
* and aligns with NIST's "epsilon" definition in this document, which is
* that a string can be considered "full entropy" if you can bound the min
* entropy in each bit of output to at least 1-epsilon, where epsilon is
* required to be <= 2^(-32).
*/
#define JENT_ENTROPY_SAFETY_FACTOR 64
#include <linux/fips.h>
#include "jitterentropy.h"
/***************************************************************************
* Adaptive Proportion Test
*
* This test complies with SP800-90B section 4.4.2.
***************************************************************************/
/*
* Reset the APT counter
*
* @ec [in] Reference to entropy collector
*/
static void jent_apt_reset(struct rand_data *ec, unsigned int delta_masked)
{
/* Reset APT counter */
ec->apt_count = 0;
ec->apt_base = delta_masked;
ec->apt_observations = 0;
}
/*
* Insert a new entropy event into APT
*
* @ec [in] Reference to entropy collector
* @delta_masked [in] Masked time delta to process
*/
static void jent_apt_insert(struct rand_data *ec, unsigned int delta_masked)
{
/* Initialize the base reference */
if (!ec->apt_base_set) {
ec->apt_base = delta_masked;
ec->apt_base_set = 1;
return;
}
if (delta_masked == ec->apt_base)
ec->apt_count++;
ec->apt_observations++;
if (ec->apt_observations >= JENT_APT_WINDOW_SIZE)
jent_apt_reset(ec, delta_masked);
}
/* APT health test failure detection */
static int jent_apt_permanent_failure(struct rand_data *ec)
{
return (ec->apt_count >= JENT_APT_CUTOFF_PERMANENT) ? 1 : 0;
}
static int jent_apt_failure(struct rand_data *ec)
{
return (ec->apt_count >= JENT_APT_CUTOFF) ? 1 : 0;
}
/***************************************************************************
* Stuck Test and its use as Repetition Count Test
*
* The Jitter RNG uses an enhanced version of the Repetition Count Test
* (RCT) specified in SP800-90B section 4.4.1. Instead of counting identical
* back-to-back values, the RCT counts the stuck values encountered during
* the generation of one Jitter RNG output block.
*
* The RCT is applied with an alpha of 2^{-30}, compliant with FIPS 140-2 IG 9.8.
*
* During the counting operation, the Jitter RNG always compares the
* repetition count against the cut-off value C. If the count exceeds the
* allowed cut-off value, the Jitter RNG output block will be calculated
* completely but discarded at the end. The caller of the Jitter RNG is
* informed of this with an error code.
***************************************************************************/
/*
* Repetition Count Test as defined in SP800-90B section 4.4.1
*
* @ec [in] Reference to entropy collector
* @stuck [in] Indicator whether the value is stuck
*/
static void jent_rct_insert(struct rand_data *ec, int stuck)
{
if (stuck) {
ec->rct_count++;
} else {
/* Reset RCT */
ec->rct_count = 0;
}
}
static inline __u64 jent_delta(__u64 prev, __u64 next)
{
#define JENT_UINT64_MAX (__u64)(~((__u64) 0))
return (prev < next) ? (next - prev) :
(JENT_UINT64_MAX - prev + 1 + next);
}
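/*
 * Illustrative example (not part of the original source): jent_delta()
 * stays correct across a 64-bit timer wrap. With
 * prev = 0xFFFFFFFFFFFFFFFE and next = 1, the timer advanced by three
 * ticks (...FE -> ...FF -> 0 -> 1), and the wrap branch yields
 * JENT_UINT64_MAX - prev + 1 + next = 1 + 1 + 1 = 3.
 */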
/*
* Stuck test by checking the:
* 1st derivative of the jitter measurement (time delta)
* 2nd derivative of the jitter measurement (delta of time deltas)
* 3rd derivative of the jitter measurement (delta of delta of time deltas)
*
* All values must always be non-zero.
*
* @ec [in] Reference to entropy collector
* @current_delta [in] Jitter time delta
*
* @return
* 0 jitter measurement not stuck (good bit)
* 1 jitter measurement stuck (reject bit)
*/
static int jent_stuck(struct rand_data *ec, __u64 current_delta)
{
__u64 delta2 = jent_delta(ec->last_delta, current_delta);
__u64 delta3 = jent_delta(ec->last_delta2, delta2);
ec->last_delta = current_delta;
ec->last_delta2 = delta2;
/*
* Insert the result of the comparison of two back-to-back time
* deltas.
*/
jent_apt_insert(ec, current_delta);
if (!current_delta || !delta2 || !delta3) {
/* RCT with a stuck bit */
jent_rct_insert(ec, 1);
return 1;
}
/* RCT with a non-stuck bit */
jent_rct_insert(ec, 0);
return 0;
}
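/*
 * Illustrative example (not part of the original source): a perfectly
 * regular timer is rejected by the 2nd derivative check. Time stamps
 * 1000, 1002 and 1004 give back-to-back deltas of 2 and 2, so
 * delta2 = jent_delta(2, 2) = 0 and the measurement is flagged stuck
 * even though the 1st derivative (the delta itself) is non-zero.
 */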
/* RCT health test failure detection */
static int jent_rct_permanent_failure(struct rand_data *ec)
{
return (ec->rct_count >= JENT_RCT_CUTOFF_PERMANENT) ? 1 : 0;
}
static int jent_rct_failure(struct rand_data *ec)
{
return (ec->rct_count >= JENT_RCT_CUTOFF) ? 1 : 0;
}
/* Report of health test failures */
static int jent_health_failure(struct rand_data *ec)
{
return jent_rct_failure(ec) | jent_apt_failure(ec);
}
static int jent_permanent_health_failure(struct rand_data *ec)
{
return jent_rct_permanent_failure(ec) | jent_apt_permanent_failure(ec);
}
/***************************************************************************
* Noise sources
***************************************************************************/
/*
* Update of the loop count used for the next round of
* an entropy collection.
*
* Input:
* @bits is the number of low bits of the timer to consider
* @min is the log2 of the lower boundary: (1 << min) is added to the
* folded value at the end to guarantee a minimum loop count
*
* @return Newly calculated loop counter
*/
static __u64 jent_loop_shuffle(unsigned int bits, unsigned int min)
{
__u64 time = 0;
__u64 shuffle = 0;
unsigned int i = 0;
unsigned int mask = (1<<bits) - 1;
jent_get_nstime(&time);
/*
* We fold the time value as much as possible to ensure that as many
* bits of the time stamp are included as possible.
*/
for (i = 0; ((DATA_SIZE_BITS + bits - 1) / bits) > i; i++) {
shuffle ^= time & mask;
time = time >> bits;
}
/*
* We add a lower boundary value to ensure we have a minimum
* RNG loop count.
*/
return (shuffle + (1<<min));
}
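/*
 * Illustrative example (not part of the original source, assuming
 * DATA_SIZE_BITS == 64): with bits = 7 and min = 0 (the values
 * jent_memaccess() passes below), the loop folds ceil(64 / 7) = 10
 * seven-bit chunks of the time stamp into shuffle, so shuffle is in
 * [0, 127] and the returned loop count is in [1, 128] -- never zero
 * thanks to the (1 << min) lower boundary.
 */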
/*
* CPU Jitter noise source -- this is the noise source based on the CPU
* execution time jitter
*
* This function injects the individual bits of the time value into the
* entropy pool using a hash.
*
* ec [in] entropy collector
* time [in] time stamp to be injected
* stuck [in] Is the time stamp identified as stuck?
*
* Output:
* updated hash context in the entropy collector or error code
*/
static int jent_condition_data(struct rand_data *ec, __u64 time, int stuck)
{
#define SHA3_HASH_LOOP (1<<3)
struct {
int rct_count;
unsigned int apt_observations;
unsigned int apt_count;
unsigned int apt_base;
} addtl = {
ec->rct_count,
ec->apt_observations,
ec->apt_count,
ec->apt_base
};
return jent_hash_time(ec->hash_state, time, (u8 *)&addtl, sizeof(addtl),
SHA3_HASH_LOOP, stuck);
}
/*
* Memory Access noise source -- this is a noise source based on variations in
* memory access times
*
* This function performs memory accesses which will add to the timing
* variations due to an unknown amount of CPU wait states that need to be
* added when accessing memory. The memory size should be larger than the L1
* caches as outlined in the documentation and the associated testing.
*
* The L1 cache has very high bandwidth, although accessing it is usually
* slower than accessing CPU registers. Therefore, L1 accesses add only
* minimal variations as the CPU hardly has to wait. Starting with L2,
* significant variations are added because L2 is typically no longer private
* to the CPU core, so a wider range of CPU wait states occurs on access.
* L3 and real memory accesses have an even wider range of wait states.
* However, to reliably reach L3 or main memory, the ec->mem buffer must be
* quite large, which is usually not desirable.
*
* @ec [in] Reference to the entropy collector with the memory access data -- if
* the reference to the memory block to be accessed is NULL, this noise
* source is disabled
* @loop_cnt [in] if a value not equal to 0 is set, use the given value
* number of loops to perform the LFSR
*/
static void jent_memaccess(struct rand_data *ec, __u64 loop_cnt)
{
unsigned int wrap = 0;
__u64 i = 0;
#define MAX_ACC_LOOP_BIT 7
#define MIN_ACC_LOOP_BIT 0
__u64 acc_loop_cnt =
jent_loop_shuffle(MAX_ACC_LOOP_BIT, MIN_ACC_LOOP_BIT);
if (NULL == ec || NULL == ec->mem)
return;
wrap = ec->memblocksize * ec->memblocks;
/*
* testing purposes -- allow test app to set the counter, not
* needed during runtime
*/
if (loop_cnt)
acc_loop_cnt = loop_cnt;
for (i = 0; i < (ec->memaccessloops + acc_loop_cnt); i++) {
unsigned char *tmpval = ec->mem + ec->memlocation;
/*
* memory access: just add 1 to one byte,
* wrap at 255 -- memory access implies read
* from and write to memory location
*/
*tmpval = (*tmpval + 1) & 0xff;
/*
* Addition of memblocksize - 1 to pointer
* with wrap around logic to ensure that every
* memory location is hit evenly
*/
ec->memlocation = ec->memlocation + ec->memblocksize - 1;
ec->memlocation = ec->memlocation % wrap;
}
}
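/*
 * Illustrative example (not part of the original source, using
 * hypothetical sizes): with memblocksize = 64 and memblocks = 512,
 * wrap = 32768 and the pointer advances by 63 each iteration. Since
 * gcd(63, 32768) == 1, the walk visits every byte of the buffer before
 * repeating, which is the "hit evenly" property the stride aims for.
 */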
/***************************************************************************
* Start of entropy processing logic
***************************************************************************/
/*
* This is the heart of the entropy generation: calculate time deltas and
* use the CPU jitter in the time deltas. The jitter is injected into the
* entropy pool.
*
* WARNING: ensure that ->prev_time is primed before using the output
* of this function! This can be done by calling this function
* and not using its result.
*
* @ec [in] Reference to entropy collector
*
* @return result of stuck test
*/
static int jent_measure_jitter(struct rand_data *ec)
{
__u64 time = 0;
__u64 current_delta = 0;
int stuck;
/* Invoke one noise source before time measurement to add variations */
jent_memaccess(ec, 0);
/*
* Get time stamp and calculate time delta to previous
* invocation to measure the timing variations
*/
jent_get_nstime(&time);
current_delta = jent_delta(ec->prev_time, time);
ec->prev_time = time;
/* Check whether we have a stuck measurement. */
stuck = jent_stuck(ec, current_delta);
/* Now call the next noise source, which also injects the data */
if (jent_condition_data(ec, current_delta, stuck))
stuck = 1;
return stuck;
}
/*
* Generator of one 64 bit random number
* Function fills rand_data->hash_state
*
* @ec [in] Reference to entropy collector
*/
static void jent_gen_entropy(struct rand_data *ec)
{
unsigned int k = 0, safety_factor = 0;
if (fips_enabled)
safety_factor = JENT_ENTROPY_SAFETY_FACTOR;
/* priming of the ->prev_time value */
jent_measure_jitter(ec);
while (!jent_health_failure(ec)) {
/* If a stuck measurement is received, repeat measurement */
if (jent_measure_jitter(ec))
continue;
/*
* We multiply the loop value with ->osr to obtain the
* oversampling rate requested by the caller
*/
if (++k >= ((DATA_SIZE_BITS + safety_factor) * ec->osr))
break;
}
}
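/*
 * Illustrative count (not part of the original source, assuming
 * DATA_SIZE_BITS == 64): without FIPS and with osr == 1 the loop above
 * stops after 64 non-stuck measurements; with fips_enabled it needs
 * (64 + 64) * osr, e.g. 384 non-stuck measurements at osr == 3. The
 * loop also terminates early if a health test failure is flagged.
 */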
/*
* Entry function: Obtain entropy for the caller.
*
* This function invokes the entropy gathering logic as often as needed to
* generate as many bytes as requested by the caller. The entropy gathering
* logic creates 64 bits per invocation.
*
* This function truncates the last 64 bit entropy value output to the exact
* size specified by the caller.
*
* @ec [in] Reference to entropy collector
* @data [in] pointer to buffer for storing random data -- buffer must already
* exist
* @len [in] size of the buffer, which also specifies the requested number of
* random bytes
*
* @return 0 when request is fulfilled or an error
*
* The following error codes can occur:
* -1 entropy_collector is NULL or the generation failed
* -2 Intermittent health failure
* -3 Permanent health failure
*/
int jent_read_entropy(struct rand_data *ec, unsigned char *data,
unsigned int len)
{
unsigned char *p = data;
if (!ec)
return -1;
while (len > 0) {
unsigned int tocopy;
jent_gen_entropy(ec);
if (jent_permanent_health_failure(ec)) {
/*
* At this point, the Jitter RNG instance is considered
* as a failed instance. There is no rerun of the
* startup test any more, because the caller
* is assumed to not further use this instance.
*/
return -3;
} else if (jent_health_failure(ec)) {
/*
* Perform startup health tests and return permanent
* error if it fails.
*/
if (jent_entropy_init(ec->hash_state))
return -3;
return -2;
}
if ((DATA_SIZE_BITS / 8) < len)
tocopy = (DATA_SIZE_BITS / 8);
else
tocopy = len;
if (jent_read_random_block(ec->hash_state, p, tocopy))
return -1;
len -= tocopy;
p += tocopy;
}
return 0;
}
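/*
 * Illustrative usage (not part of the original source, assuming
 * DATA_SIZE_BITS == 64): a request for len == 20 bytes runs
 * jent_gen_entropy() three times, copying 8 + 8 + 4 bytes; the final
 * block is truncated to the 4 remaining bytes as documented above.
 */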
/***************************************************************************
* Initialization logic
***************************************************************************/
struct rand_data *jent_entropy_collector_alloc(unsigned int osr,
unsigned int flags,
void *hash_state)
{
struct rand_data *entropy_collector;
entropy_collector = jent_zalloc(sizeof(struct rand_data));
if (!entropy_collector)
return NULL;
if (!(flags & JENT_DISABLE_MEMORY_ACCESS)) {
/* Allocate memory for adding variations based on memory
* access
*/
entropy_collector->mem = jent_zalloc(JENT_MEMORY_SIZE);
if (!entropy_collector->mem) {
jent_zfree(entropy_collector);
return NULL;
}
entropy_collector->memblocksize = JENT_MEMORY_BLOCKSIZE;
entropy_collector->memblocks = JENT_MEMORY_BLOCKS;
entropy_collector->memaccessloops = JENT_MEMORY_ACCESSLOOPS;
}
/* verify and set the oversampling rate */
if (osr == 0)
osr = 1; /* minimum oversampling rate is 1 */
entropy_collector->osr = osr;
entropy_collector->hash_state = hash_state;
/* fill the data pad with non-zero values */
jent_gen_entropy(entropy_collector);
return entropy_collector;
}
void jent_entropy_collector_free(struct rand_data *entropy_collector)
{
jent_zfree(entropy_collector->mem);
entropy_collector->mem = NULL;
jent_zfree(entropy_collector);
}
int jent_entropy_init(void *hash_state)
{
int i;
__u64 delta_sum = 0;
__u64 old_delta = 0;
unsigned int nonstuck = 0;
int time_backwards = 0;
int count_mod = 0;
int count_stuck = 0;
struct rand_data ec = { 0 };
/* Required for RCT */
ec.osr = 1;
ec.hash_state = hash_state;
/* We could perform statistical tests here, but the problem is
* that we only have a few loop counts to do testing. These
* loop counts may show some slight skew and we produce
* false positives.
*
* Moreover, only old systems show potentially problematic
* jitter entropy that could be caught here. The RNG is
* intended for hardware that is currently available or widely
* used, not for old systems that are long out of favor. Thus,
* no statistical tests.
*/
/*
* We could add a check for system capabilities such as clock_getres or
* check for CONFIG_X86_TSC, but it does not make much sense as the
* following sanity checks verify that we have a high-resolution
* timer.
*/
/*
* TESTLOOPCOUNT needs some loops to identify edge systems. 100 is
* definitely too little.
*
* SP800-90B requires at least 1024 initial test cycles.
*/
#define TESTLOOPCOUNT 1024
#define CLEARCACHE 100
for (i = 0; (TESTLOOPCOUNT + CLEARCACHE) > i; i++) {
__u64 time = 0;
__u64 time2 = 0;
__u64 delta = 0;
unsigned int lowdelta = 0;
int stuck;
/* Invoke core entropy collection logic */
jent_get_nstime(&time);
ec.prev_time = time;
jent_condition_data(&ec, time, 0);
jent_get_nstime(&time2);
/* test whether timer works */
if (!time || !time2)
return JENT_ENOTIME;
delta = jent_delta(time, time2);
/*
* test whether timer is fine grained enough to provide
* delta even when called shortly after each other -- this
* implies that we also have a high resolution timer
*/
if (!delta)
return JENT_ECOARSETIME;
stuck = jent_stuck(&ec, delta);
/*
* up to here we did not modify any variable that will be
* evaluated later, but we already performed some work. Thus we
* already have had an impact on the caches, branch prediction,
* etc. with the goal to clear it to get the worst case
* measurements.
*/
if (i < CLEARCACHE)
continue;
if (stuck)
count_stuck++;
else {
nonstuck++;
/*
* Ensure that the APT succeeded.
*
* With the check below that count_stuck must be less
* than 10% of the overall generated raw entropy values,
* it is guaranteed that at least TESTLOOPCOUNT * 0.9
* non-stuck deltas feed the APT during this loop.
*/
if ((nonstuck % JENT_APT_WINDOW_SIZE) == 0) {
jent_apt_reset(&ec,
delta & JENT_APT_WORD_MASK);
}
}
/* Validate health test result */
if (jent_health_failure(&ec))
return JENT_EHEALTH;
/* test whether we have an increasing timer */
if (!(time2 > time))
time_backwards++;
/* use 32 bit value to ensure compilation on 32 bit arches */
lowdelta = time2 - time;
if (!(lowdelta % 100))
count_mod++;
/*
* ensure that we have a varying delta timer which is necessary
* for the calculation of entropy -- perform this check
* only after the first loop is executed as we need to prime
* the old_data value
*/
if (delta > old_delta)
delta_sum += (delta - old_delta);
else
delta_sum += (old_delta - delta);
old_delta = delta;
}
/*
* We allow the time to run backwards up to three times.
* CLOCK_REALTIME is affected by adjtime and NTP operations. Thus,
* if such an operation just happens to interfere with our test, it
* should not fail. The value of 3 should cover the NTP case being
* performed during our test run.
*/
if (time_backwards > 3)
return JENT_ENOMONOTONIC;
/*
* The variations of the time deltas must on average be larger
* than 1 to ensure that the entropy estimation implied by a
* value of 1 is preserved
*/
if ((delta_sum) <= 1)
return JENT_EVARVAR;
/*
* Ensure that at least 10% of all deltas are not multiples of 100 --
* on some platforms, the counter increments in multiples of 100,
* but not always
*/
if ((TESTLOOPCOUNT/10 * 9) < count_mod)
return JENT_ECOARSETIME;
/*
* If we have more than 90% stuck results, then this Jitter RNG is
* likely to not work well.
*/
if ((TESTLOOPCOUNT/10 * 9) < count_stuck)
return JENT_ESTUCK;
return 0;
}
| linux-master | crypto/jitterentropy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* CCM: Counter with CBC-MAC
*
* (C) Copyright IBM Corp. 2007 - Joy Latten <[email protected]>
*/
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
struct ccm_instance_ctx {
struct crypto_skcipher_spawn ctr;
struct crypto_ahash_spawn mac;
};
struct crypto_ccm_ctx {
struct crypto_ahash *mac;
struct crypto_skcipher *ctr;
};
struct crypto_rfc4309_ctx {
struct crypto_aead *child;
u8 nonce[3];
};
struct crypto_rfc4309_req_ctx {
struct scatterlist src[3];
struct scatterlist dst[3];
struct aead_request subreq;
};
struct crypto_ccm_req_priv_ctx {
u8 odata[16];
u8 idata[16];
u8 auth_tag[16];
u32 flags;
struct scatterlist src[3];
struct scatterlist dst[3];
union {
struct ahash_request ahreq;
struct skcipher_request skreq;
};
};
struct cbcmac_tfm_ctx {
struct crypto_cipher *child;
};
struct cbcmac_desc_ctx {
unsigned int len;
};
static inline struct crypto_ccm_req_priv_ctx *crypto_ccm_reqctx(
struct aead_request *req)
{
unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
}
static int set_msg_len(u8 *block, unsigned int msglen, int csize)
{
__be32 data;
memset(block, 0, csize);
block += csize;
if (csize >= 4)
csize = 4;
else if (msglen > (1 << (8 * csize)))
return -EOVERFLOW;
data = cpu_to_be32(msglen);
memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
return 0;
}
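/*
 * Illustrative example (not part of the original source): with
 * csize == 3 and msglen == 0x012345, the big-endian low three bytes
 * 0x01 0x23 0x45 fill the three-byte length field. A csize of 2
 * rejects msglen values above (1 << 16) with -EOVERFLOW, since they
 * cannot be encoded in two bytes.
 */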
static int crypto_ccm_setkey(struct crypto_aead *aead, const u8 *key,
unsigned int keylen)
{
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_skcipher *ctr = ctx->ctr;
struct crypto_ahash *mac = ctx->mac;
int err;
crypto_skcipher_clear_flags(ctr, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(ctr, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(ctr, key, keylen);
if (err)
return err;
crypto_ahash_clear_flags(mac, CRYPTO_TFM_REQ_MASK);
crypto_ahash_set_flags(mac, crypto_aead_get_flags(aead) &
CRYPTO_TFM_REQ_MASK);
return crypto_ahash_setkey(mac, key, keylen);
}
static int crypto_ccm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
switch (authsize) {
case 4:
case 6:
case 8:
case 10:
case 12:
case 14:
case 16:
break;
default:
return -EINVAL;
}
return 0;
}
static int format_input(u8 *info, struct aead_request *req,
unsigned int cryptlen)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int lp = req->iv[0];
unsigned int l = lp + 1;
unsigned int m;
m = crypto_aead_authsize(aead);
memcpy(info, req->iv, 16);
/* format control info per RFC 3610 and
* NIST Special Publication 800-38C
*/
*info |= (8 * ((m - 2) / 2));
if (req->assoclen)
*info |= 64;
return set_msg_len(info + 16 - l, cryptlen, l);
}
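/*
 * Illustrative example (not part of the original source): with
 * iv[0] == 3 (so L == 4), an authsize m of 8 and a non-zero assoclen,
 * the B0 flags byte becomes 3 | (8 * ((8 - 2) / 2)) | 64 =
 * 3 | 24 | 64 = 0x5b, and set_msg_len() then stores cryptlen
 * big-endian in the final four bytes of the block.
 */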
static int format_adata(u8 *adata, unsigned int a)
{
int len = 0;
/* add control info for associated data
* RFC 3610 and NIST Special Publication 800-38C
*/
if (a < 65280) {
*(__be16 *)adata = cpu_to_be16(a);
len = 2;
} else {
*(__be16 *)adata = cpu_to_be16(0xfffe);
*(__be32 *)&adata[2] = cpu_to_be32(a);
len = 6;
}
return len;
}
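/*
 * Illustrative example (not part of the original source): an assoclen
 * of 24 is encoded as the two bytes 0x00 0x18 (len == 2), while an
 * assoclen of 70000 is encoded as 0xff 0xfe followed by the big-endian
 * 32-bit value 0x00 0x01 0x11 0x70 (len == 6).
 */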
static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
unsigned int cryptlen)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct ahash_request *ahreq = &pctx->ahreq;
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 *odata = pctx->odata;
u8 *idata = pctx->idata;
int ilen, err;
/* format control data for input */
err = format_input(odata, req, cryptlen);
if (err)
goto out;
sg_init_table(sg, 3);
sg_set_buf(&sg[0], odata, 16);
/* format associated data and compute into mac */
if (assoclen) {
ilen = format_adata(idata, assoclen);
sg_set_buf(&sg[1], idata, ilen);
sg_chain(sg, 3, req->src);
} else {
ilen = 0;
sg_chain(sg, 2, req->src);
}
ahash_request_set_tfm(ahreq, ctx->mac);
ahash_request_set_callback(ahreq, pctx->flags, NULL, NULL);
ahash_request_set_crypt(ahreq, sg, NULL, assoclen + ilen + 16);
err = crypto_ahash_init(ahreq);
if (err)
goto out;
err = crypto_ahash_update(ahreq);
if (err)
goto out;
/* we need to pad the MAC input to a whole multiple of the block size */
ilen = 16 - (assoclen + ilen) % 16;
if (ilen < 16) {
memset(idata, 0, ilen);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], idata, ilen);
if (plain)
sg_chain(sg, 2, plain);
plain = sg;
cryptlen += ilen;
}
ahash_request_set_crypt(ahreq, plain, odata, cryptlen);
err = crypto_ahash_finup(ahreq);
out:
return err;
}
static void crypto_ccm_encrypt_done(void *data, int err)
{
struct aead_request *req = data;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
u8 *odata = pctx->odata;
if (!err)
scatterwalk_map_and_copy(odata, req->dst,
req->assoclen + req->cryptlen,
crypto_aead_authsize(aead), 1);
aead_request_complete(req, err);
}
static inline int crypto_ccm_check_iv(const u8 *iv)
{
/* 2 <= L <= 8, so 1 <= L' <= 7. */
if (1 > iv[0] || iv[0] > 7)
return -EINVAL;
return 0;
}
static int crypto_ccm_init_crypt(struct aead_request *req, u8 *tag)
{
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct scatterlist *sg;
u8 *iv = req->iv;
int err;
err = crypto_ccm_check_iv(iv);
if (err)
return err;
pctx->flags = aead_request_flags(req);
/* Note: rfc 3610 and NIST 800-38C require counter of
* zero to encrypt auth tag.
*/
memset(iv + 15 - iv[0], 0, iv[0] + 1);
sg_init_table(pctx->src, 3);
sg_set_buf(pctx->src, tag, 16);
sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
if (sg != pctx->src + 1)
sg_chain(pctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(pctx->dst, 3);
sg_set_buf(pctx->dst, tag, 16);
sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
if (sg != pctx->dst + 1)
sg_chain(pctx->dst, 2, sg);
}
return 0;
}
static int crypto_ccm_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int cryptlen = req->cryptlen;
u8 *odata = pctx->odata;
u8 *iv = req->iv;
int err;
err = crypto_ccm_init_crypt(req, odata);
if (err)
return err;
err = crypto_ccm_auth(req, sg_next(pctx->src), cryptlen);
if (err)
return err;
dst = pctx->src;
if (req->src != req->dst)
dst = pctx->dst;
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_encrypt_done, req);
skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_skcipher_encrypt(skreq);
if (err)
return err;
/* copy authtag to end of dst */
scatterwalk_map_and_copy(odata, sg_next(dst), cryptlen,
crypto_aead_authsize(aead), 1);
return err;
}
static void crypto_ccm_decrypt_done(void *data, int err)
{
struct aead_request *req = data;
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen - authsize;
struct scatterlist *dst;
pctx->flags = 0;
dst = sg_next(req->src == req->dst ? pctx->src : pctx->dst);
if (!err) {
err = crypto_ccm_auth(req, dst, cryptlen);
if (!err && crypto_memneq(pctx->auth_tag, pctx->odata, authsize))
err = -EBADMSG;
}
aead_request_complete(req, err);
}
static int crypto_ccm_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct skcipher_request *skreq = &pctx->skreq;
struct scatterlist *dst;
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
err = crypto_ccm_init_crypt(req, authtag);
if (err)
return err;
scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
authsize, 0);
dst = pctx->src;
if (req->src != req->dst)
dst = pctx->dst;
memcpy(iv, req->iv, 16);
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
skcipher_request_set_crypt(skreq, pctx->src, dst, cryptlen + 16, iv);
err = crypto_skcipher_decrypt(skreq);
if (err)
return err;
err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
if (err)
return err;
/* verify */
if (crypto_memneq(authtag, odata, authsize))
return -EBADMSG;
return err;
}
static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct ccm_instance_ctx *ictx = aead_instance_ctx(inst);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *mac;
struct crypto_skcipher *ctr;
unsigned long align;
int err;
mac = crypto_spawn_ahash(&ictx->mac);
if (IS_ERR(mac))
return PTR_ERR(mac);
ctr = crypto_spawn_skcipher(&ictx->ctr);
err = PTR_ERR(ctr);
if (IS_ERR(ctr))
goto err_free_mac;
ctx->mac = mac;
ctx->ctr = ctr;
align = crypto_aead_alignmask(tfm);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(
tfm,
align + sizeof(struct crypto_ccm_req_priv_ctx) +
max(crypto_ahash_reqsize(mac), crypto_skcipher_reqsize(ctr)));
return 0;
err_free_mac:
crypto_free_ahash(mac);
return err;
}
static void crypto_ccm_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_ahash(ctx->mac);
crypto_free_skcipher(ctx->ctr);
}
static void crypto_ccm_free(struct aead_instance *inst)
{
struct ccm_instance_ctx *ctx = aead_instance_ctx(inst);
crypto_drop_ahash(&ctx->mac);
crypto_drop_skcipher(&ctx->ctr);
kfree(inst);
}
static int crypto_ccm_create_common(struct crypto_template *tmpl,
struct rtattr **tb,
const char *ctr_name,
const char *mac_name)
{
u32 mask;
struct aead_instance *inst;
struct ccm_instance_ctx *ictx;
struct skcipher_alg *ctr;
struct hash_alg_common *mac;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*ictx), GFP_KERNEL);
if (!inst)
return -ENOMEM;
ictx = aead_instance_ctx(inst);
err = crypto_grab_ahash(&ictx->mac, aead_crypto_instance(inst),
mac_name, 0, mask | CRYPTO_ALG_ASYNC);
if (err)
goto err_free_inst;
mac = crypto_spawn_ahash_alg(&ictx->mac);
err = -EINVAL;
if (strncmp(mac->base.cra_name, "cbcmac(", 7) != 0 ||
mac->digestsize != 16)
goto err_free_inst;
err = crypto_grab_skcipher(&ictx->ctr, aead_crypto_instance(inst),
ctr_name, 0, mask);
if (err)
goto err_free_inst;
ctr = crypto_spawn_skcipher_alg(&ictx->ctr);
/* The skcipher algorithm must be CTR mode, using 16-byte blocks. */
err = -EINVAL;
if (strncmp(ctr->base.cra_name, "ctr(", 4) != 0 ||
crypto_skcipher_alg_ivsize(ctr) != 16 ||
ctr->base.cra_blocksize != 1)
goto err_free_inst;
/* ctr and cbcmac must use the same underlying block cipher. */
if (strcmp(ctr->base.cra_name + 4, mac->base.cra_name + 7) != 0)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"ccm(%s", ctr->base.cra_name + 4) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
if (snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"ccm_base(%s,%s)", ctr->base.cra_driver_name,
mac->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = (mac->base.cra_priority +
ctr->base.cra_priority) / 2;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = mac->base.cra_alignmask |
ctr->base.cra_alignmask;
inst->alg.ivsize = 16;
inst->alg.chunksize = crypto_skcipher_alg_chunksize(ctr);
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_ccm_ctx);
inst->alg.init = crypto_ccm_init_tfm;
inst->alg.exit = crypto_ccm_exit_tfm;
inst->alg.setkey = crypto_ccm_setkey;
inst->alg.setauthsize = crypto_ccm_setauthsize;
inst->alg.encrypt = crypto_ccm_encrypt;
inst->alg.decrypt = crypto_ccm_decrypt;
inst->free = crypto_ccm_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_ccm_free(inst);
}
return err;
}
static int crypto_ccm_create(struct crypto_template *tmpl, struct rtattr **tb)
{
const char *cipher_name;
char ctr_name[CRYPTO_MAX_ALG_NAME];
char mac_name[CRYPTO_MAX_ALG_NAME];
cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(cipher_name))
return PTR_ERR(cipher_name);
if (snprintf(ctr_name, CRYPTO_MAX_ALG_NAME, "ctr(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(mac_name, CRYPTO_MAX_ALG_NAME, "cbcmac(%s)",
cipher_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_ccm_base_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
const char *ctr_name;
const char *mac_name;
ctr_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(ctr_name))
return PTR_ERR(ctr_name);
mac_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(mac_name))
return PTR_ERR(mac_name);
return crypto_ccm_create_common(tmpl, tb, ctr_name, mac_name);
}
static int crypto_rfc4309_setkey(struct crypto_aead *parent, const u8 *key,
unsigned int keylen)
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
struct crypto_aead *child = ctx->child;
if (keylen < 3)
return -EINVAL;
keylen -= 3;
memcpy(ctx->nonce, key + keylen, 3);
crypto_aead_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(child, crypto_aead_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
return crypto_aead_setkey(child, key, keylen);
}
static int crypto_rfc4309_setauthsize(struct crypto_aead *parent,
unsigned int authsize)
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(parent);
switch (authsize) {
case 8:
case 12:
case 16:
break;
default:
return -EINVAL;
}
return crypto_aead_setauthsize(ctx->child, authsize);
}
static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req)
{
struct crypto_rfc4309_req_ctx *rctx = aead_request_ctx(req);
struct aead_request *subreq = &rctx->subreq;
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(aead);
struct crypto_aead *child = ctx->child;
struct scatterlist *sg;
u8 *iv = PTR_ALIGN((u8 *)(subreq + 1) + crypto_aead_reqsize(child),
crypto_aead_alignmask(child) + 1);
/* L' */
iv[0] = 3;
memcpy(iv + 1, ctx->nonce, 3);
memcpy(iv + 4, req->iv, 8);
scatterwalk_map_and_copy(iv + 16, req->src, 0, req->assoclen - 8, 0);
sg_init_table(rctx->src, 3);
sg_set_buf(rctx->src, iv + 16, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->src + 1, req->src, req->assoclen);
if (sg != rctx->src + 1)
sg_chain(rctx->src, 2, sg);
if (req->src != req->dst) {
sg_init_table(rctx->dst, 3);
sg_set_buf(rctx->dst, iv + 16, req->assoclen - 8);
sg = scatterwalk_ffwd(rctx->dst + 1, req->dst, req->assoclen);
if (sg != rctx->dst + 1)
sg_chain(rctx->dst, 2, sg);
}
aead_request_set_tfm(subreq, child);
aead_request_set_callback(subreq, req->base.flags, req->base.complete,
req->base.data);
aead_request_set_crypt(subreq, rctx->src,
req->src == req->dst ? rctx->src : rctx->dst,
req->cryptlen, iv);
aead_request_set_ad(subreq, req->assoclen - 8);
return subreq;
}
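/*
 * Illustrative note (not part of the original source): the 16-byte CCM
 * IV built above is laid out as L' == 3 in iv[0] (so the length field
 * is four bytes), the 3-byte salt from the key in iv[1..3] and the
 * 8-byte per-request IV in iv[4..11] -- together the 11-byte nonce
 * that RFC 4309 mandates.
 */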
static int crypto_rfc4309_encrypt(struct aead_request *req)
{
if (req->assoclen != 16 && req->assoclen != 20)
return -EINVAL;
req = crypto_rfc4309_crypt(req);
return crypto_aead_encrypt(req);
}
static int crypto_rfc4309_decrypt(struct aead_request *req)
{
if (req->assoclen != 16 && req->assoclen != 20)
return -EINVAL;
req = crypto_rfc4309_crypt(req);
return crypto_aead_decrypt(req);
}
static int crypto_rfc4309_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct crypto_aead_spawn *spawn = aead_instance_ctx(inst);
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
unsigned long align;
aead = crypto_spawn_aead(spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
ctx->child = aead;
align = crypto_aead_alignmask(aead);
align &= ~(crypto_tfm_ctx_alignment() - 1);
crypto_aead_set_reqsize(
tfm,
sizeof(struct crypto_rfc4309_req_ctx) +
ALIGN(crypto_aead_reqsize(aead), crypto_tfm_ctx_alignment()) +
align + 32);
return 0;
}
static void crypto_rfc4309_exit_tfm(struct crypto_aead *tfm)
{
struct crypto_rfc4309_ctx *ctx = crypto_aead_ctx(tfm);
crypto_free_aead(ctx->child);
}
static void crypto_rfc4309_free(struct aead_instance *inst)
{
crypto_drop_aead(aead_instance_ctx(inst));
kfree(inst);
}
static int crypto_rfc4309_create(struct crypto_template *tmpl,
struct rtattr **tb)
{
u32 mask;
struct aead_instance *inst;
struct crypto_aead_spawn *spawn;
struct aead_alg *alg;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_AEAD, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = aead_instance_ctx(inst);
err = crypto_grab_aead(spawn, aead_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_aead_alg(spawn);
err = -EINVAL;
/* We only support 16-byte blocks. */
if (crypto_aead_alg_ivsize(alg) != 16)
goto err_free_inst;
/* Not a stream cipher? */
if (alg->base.cra_blocksize != 1)
goto err_free_inst;
err = -ENAMETOOLONG;
if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
"rfc4309(%s)", alg->base.cra_name) >=
CRYPTO_MAX_ALG_NAME ||
snprintf(inst->alg.base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
"rfc4309(%s)", alg->base.cra_driver_name) >=
CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
inst->alg.ivsize = 8;
inst->alg.chunksize = crypto_aead_alg_chunksize(alg);
inst->alg.maxauthsize = 16;
inst->alg.base.cra_ctxsize = sizeof(struct crypto_rfc4309_ctx);
inst->alg.init = crypto_rfc4309_init_tfm;
inst->alg.exit = crypto_rfc4309_exit_tfm;
inst->alg.setkey = crypto_rfc4309_setkey;
inst->alg.setauthsize = crypto_rfc4309_setauthsize;
inst->alg.encrypt = crypto_rfc4309_encrypt;
inst->alg.decrypt = crypto_rfc4309_decrypt;
inst->free = crypto_rfc4309_free;
err = aead_register_instance(tmpl, inst);
if (err) {
err_free_inst:
crypto_rfc4309_free(inst);
}
return err;
}
static int crypto_cbcmac_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
struct cbcmac_tfm_ctx *ctx = crypto_shash_ctx(parent);
return crypto_cipher_setkey(ctx->child, inkey, keylen);
}
static int crypto_cbcmac_digest_init(struct shash_desc *pdesc)
{
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
int bs = crypto_shash_digestsize(pdesc->tfm);
u8 *dg = (u8 *)ctx + crypto_shash_descsize(pdesc->tfm) - bs;
ctx->len = 0;
memset(dg, 0, bs);
return 0;
}
static int crypto_cbcmac_digest_update(struct shash_desc *pdesc, const u8 *p,
unsigned int len)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
while (len > 0) {
unsigned int l = min(len, bs - ctx->len);
crypto_xor(dg + ctx->len, p, l);
ctx->len += l;
len -= l;
p += l;
if (ctx->len == bs) {
crypto_cipher_encrypt_one(tfm, dg, dg);
ctx->len = 0;
}
}
return 0;
}
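/*
 * Illustrative note (not part of the original source): the update step
 * above implements the classic CBC-MAC recurrence dg_0 = 0,
 * dg_i = E_K(dg_{i-1} XOR m_i). Partial input is XORed into dg and the
 * block cipher is only invoked once a full block of bs bytes has
 * accumulated.
 */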
static int crypto_cbcmac_digest_final(struct shash_desc *pdesc, u8 *out)
{
struct crypto_shash *parent = pdesc->tfm;
struct cbcmac_tfm_ctx *tctx = crypto_shash_ctx(parent);
struct cbcmac_desc_ctx *ctx = shash_desc_ctx(pdesc);
struct crypto_cipher *tfm = tctx->child;
int bs = crypto_shash_digestsize(parent);
u8 *dg = (u8 *)ctx + crypto_shash_descsize(parent) - bs;
if (ctx->len)
crypto_cipher_encrypt_one(tfm, dg, dg);
memcpy(out, dg, bs);
return 0;
}
static int cbcmac_init_tfm(struct crypto_tfm *tfm)
{
struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
cipher = crypto_spawn_cipher(spawn);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
ctx->child = cipher;
return 0;
}
static void cbcmac_exit_tfm(struct crypto_tfm *tfm)
{
struct cbcmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
crypto_free_cipher(ctx->child);
}
static int cbcmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct shash_instance *inst;
struct crypto_cipher_spawn *spawn;
struct crypto_alg *alg;
u32 mask;
int err;
err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
if (err)
return err;
inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
if (!inst)
return -ENOMEM;
spawn = shash_instance_ctx(inst);
err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
crypto_attr_alg_name(tb[1]), 0, mask);
if (err)
goto err_free_inst;
alg = crypto_spawn_cipher_alg(spawn);
err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
if (err)
goto err_free_inst;
inst->alg.base.cra_priority = alg->cra_priority;
inst->alg.base.cra_blocksize = 1;
inst->alg.digestsize = alg->cra_blocksize;
inst->alg.descsize = ALIGN(sizeof(struct cbcmac_desc_ctx),
alg->cra_alignmask + 1) +
alg->cra_blocksize;
inst->alg.base.cra_ctxsize = sizeof(struct cbcmac_tfm_ctx);
inst->alg.base.cra_init = cbcmac_init_tfm;
inst->alg.base.cra_exit = cbcmac_exit_tfm;
inst->alg.init = crypto_cbcmac_digest_init;
inst->alg.update = crypto_cbcmac_digest_update;
inst->alg.final = crypto_cbcmac_digest_final;
inst->alg.setkey = crypto_cbcmac_digest_setkey;
inst->free = shash_free_singlespawn_instance;
err = shash_register_instance(tmpl, inst);
if (err) {
err_free_inst:
shash_free_singlespawn_instance(inst);
}
return err;
}
static struct crypto_template crypto_ccm_tmpls[] = {
{
.name = "cbcmac",
.create = cbcmac_create,
.module = THIS_MODULE,
}, {
.name = "ccm_base",
.create = crypto_ccm_base_create,
.module = THIS_MODULE,
}, {
.name = "ccm",
.create = crypto_ccm_create,
.module = THIS_MODULE,
}, {
.name = "rfc4309",
.create = crypto_rfc4309_create,
.module = THIS_MODULE,
},
};
static int __init crypto_ccm_module_init(void)
{
return crypto_register_templates(crypto_ccm_tmpls,
ARRAY_SIZE(crypto_ccm_tmpls));
}
static void __exit crypto_ccm_module_exit(void)
{
crypto_unregister_templates(crypto_ccm_tmpls,
ARRAY_SIZE(crypto_ccm_tmpls));
}
subsys_initcall(crypto_ccm_module_init);
module_exit(crypto_ccm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Counter with CBC MAC");
MODULE_ALIAS_CRYPTO("ccm_base");
MODULE_ALIAS_CRYPTO("rfc4309");
MODULE_ALIAS_CRYPTO("ccm");
MODULE_ALIAS_CRYPTO("cbcmac");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/ccm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* OFB: Output FeedBack mode
*
* Copyright (C) 2018 ARM Limited or its affiliates.
* All rights reserved.
*/
#include <crypto/algapi.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
static int crypto_ofb_crypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);
const unsigned int bsize = crypto_cipher_blocksize(cipher);
struct skcipher_walk walk;
int err;
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes >= bsize) {
const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
u8 * const iv = walk.iv;
unsigned int nbytes = walk.nbytes;
do {
crypto_cipher_encrypt_one(cipher, iv, iv);
crypto_xor_cpy(dst, src, iv, bsize);
dst += bsize;
src += bsize;
} while ((nbytes -= bsize) >= bsize);
err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
crypto_cipher_encrypt_one(cipher, walk.iv, walk.iv);
crypto_xor_cpy(walk.dst.virt.addr, walk.src.virt.addr, walk.iv,
walk.nbytes);
err = skcipher_walk_done(&walk, 0);
}
return err;
}
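/*
 * Illustrative note (not part of the original source): the loop above
 * implements the OFB keystream recurrence S_0 = IV, S_i = E_K(S_{i-1}),
 * C_i = P_i XOR S_i. Decryption is the identical operation, which is
 * why encrypt and decrypt share crypto_ofb_crypt(); a trailing partial
 * block simply uses the leading bytes of the final keystream block.
 */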
static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct skcipher_instance *inst;
struct crypto_alg *alg;
int err;
inst = skcipher_alloc_instance_simple(tmpl, tb);
if (IS_ERR(inst))
return PTR_ERR(inst);
alg = skcipher_ialg_simple(inst);
/* OFB mode is a stream cipher. */
inst->alg.base.cra_blocksize = 1;
/*
* To simplify the implementation, configure the skcipher walk to only
* give a partial block at the very end, never earlier.
*/
inst->alg.chunksize = alg->cra_blocksize;
inst->alg.encrypt = crypto_ofb_crypt;
inst->alg.decrypt = crypto_ofb_crypt;
err = skcipher_register_instance(tmpl, inst);
if (err)
inst->free(inst);
return err;
}
static struct crypto_template crypto_ofb_tmpl = {
.name = "ofb",
.create = crypto_ofb_create,
.module = THIS_MODULE,
};
static int __init crypto_ofb_module_init(void)
{
return crypto_register_template(&crypto_ofb_tmpl);
}
static void __exit crypto_ofb_module_exit(void)
{
crypto_unregister_template(&crypto_ofb_tmpl);
}
subsys_initcall(crypto_ofb_module_init);
module_exit(crypto_ofb_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OFB block cipher mode of operation");
MODULE_ALIAS_CRYPTO("ofb");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/ofb.c |
/*
* Poly1305 authenticator algorithm, RFC7539
*
* Copyright (C) 2015 Martin Willi
*
* Based on public domain code by Andrew Moon and Daniel J. Bernstein.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/poly1305.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>
static int crypto_poly1305_init(struct shash_desc *desc)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
poly1305_core_init(&dctx->h);
dctx->buflen = 0;
dctx->rset = 0;
dctx->sset = false;
return 0;
}
static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
const u8 *src, unsigned int srclen)
{
if (!dctx->sset) {
if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) {
poly1305_core_setkey(&dctx->core_r, src);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->rset = 2;
}
if (srclen >= POLY1305_BLOCK_SIZE) {
dctx->s[0] = get_unaligned_le32(src + 0);
dctx->s[1] = get_unaligned_le32(src + 4);
dctx->s[2] = get_unaligned_le32(src + 8);
dctx->s[3] = get_unaligned_le32(src + 12);
src += POLY1305_BLOCK_SIZE;
srclen -= POLY1305_BLOCK_SIZE;
dctx->sset = true;
}
}
return srclen;
}
static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src,
unsigned int srclen)
{
unsigned int datalen;
if (unlikely(!dctx->sset)) {
datalen = crypto_poly1305_setdesckey(dctx, src, srclen);
src += srclen - datalen;
srclen = datalen;
}
poly1305_core_blocks(&dctx->h, &dctx->core_r, src,
srclen / POLY1305_BLOCK_SIZE, 1);
}
static int crypto_poly1305_update(struct shash_desc *desc,
const u8 *src, unsigned int srclen)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
unsigned int bytes;
if (unlikely(dctx->buflen)) {
bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen);
memcpy(dctx->buf + dctx->buflen, src, bytes);
src += bytes;
srclen -= bytes;
dctx->buflen += bytes;
if (dctx->buflen == POLY1305_BLOCK_SIZE) {
poly1305_blocks(dctx, dctx->buf,
POLY1305_BLOCK_SIZE);
dctx->buflen = 0;
}
}
if (likely(srclen >= POLY1305_BLOCK_SIZE)) {
poly1305_blocks(dctx, src, srclen);
src += srclen - (srclen % POLY1305_BLOCK_SIZE);
srclen %= POLY1305_BLOCK_SIZE;
}
if (unlikely(srclen)) {
dctx->buflen = srclen;
memcpy(dctx->buf, src, srclen);
}
return 0;
}
static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst)
{
struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc);
if (unlikely(!dctx->sset))
return -ENOKEY;
poly1305_final_generic(dctx, dst);
return 0;
}
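/*
 * Illustrative note (not part of the original source): with the
 * one-time key split into r (first 16 bytes, clamped) and s (next 16
 * bytes) by crypto_poly1305_setdesckey(), the tag computed here is
 * ((c_1 * r^n + ... + c_n * r) mod (2^130 - 5)) + s mod 2^128, where
 * the c_i are the padded 16-byte message blocks (RFC 7539).
 */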
static struct shash_alg poly1305_alg = {
.digestsize = POLY1305_DIGEST_SIZE,
.init = crypto_poly1305_init,
.update = crypto_poly1305_update,
.final = crypto_poly1305_final,
.descsize = sizeof(struct poly1305_desc_ctx),
.base = {
.cra_name = "poly1305",
.cra_driver_name = "poly1305-generic",
.cra_priority = 100,
.cra_blocksize = POLY1305_BLOCK_SIZE,
.cra_module = THIS_MODULE,
},
};
static int __init poly1305_mod_init(void)
{
return crypto_register_shash(&poly1305_alg);
}
static void __exit poly1305_mod_exit(void)
{
crypto_unregister_shash(&poly1305_alg);
}
subsys_initcall(poly1305_mod_init);
module_exit(poly1305_mod_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Willi <[email protected]>");
MODULE_DESCRIPTION("Poly1305 authenticator");
MODULE_ALIAS_CRYPTO("poly1305");
MODULE_ALIAS_CRYPTO("poly1305-generic");
| linux-master | crypto/poly1305_generic.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API for algorithms (i.e., low-level API).
*
* Copyright (c) 2006 Herbert Xu <[email protected]>
*/
#include <crypto/algapi.h>
#include <crypto/internal/simd.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include "internal.h"
static LIST_HEAD(crypto_template_list);
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
DEFINE_PER_CPU(bool, crypto_simd_disabled_for_test);
EXPORT_PER_CPU_SYMBOL_GPL(crypto_simd_disabled_for_test);
#endif
static inline void crypto_check_module_sig(struct module *mod)
{
if (fips_enabled && mod && !module_sig_ok(mod))
panic("Module %s signature verification failed in FIPS mode\n",
module_name(mod));
}
static int crypto_check_alg(struct crypto_alg *alg)
{
crypto_check_module_sig(alg->cra_module);
if (!alg->cra_name[0] || !alg->cra_driver_name[0])
return -EINVAL;
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
/* General maximums for all algs. */
if (alg->cra_alignmask > MAX_ALGAPI_ALIGNMASK)
return -EINVAL;
if (alg->cra_blocksize > MAX_ALGAPI_BLOCKSIZE)
return -EINVAL;
/* Lower maximums for specific alg types. */
if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_CIPHER) {
if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
return -EINVAL;
if (alg->cra_blocksize > MAX_CIPHER_BLOCKSIZE)
return -EINVAL;
}
if (alg->cra_priority < 0)
return -EINVAL;
refcount_set(&alg->cra_refcnt, 1);
return 0;
}
static void crypto_free_instance(struct crypto_instance *inst)
{
inst->alg.cra_type->free(inst);
}
static void crypto_destroy_instance_workfn(struct work_struct *w)
{
struct crypto_instance *inst = container_of(w, struct crypto_instance,
free_work);
struct crypto_template *tmpl = inst->tmpl;
crypto_free_instance(inst);
crypto_tmpl_put(tmpl);
}
static void crypto_destroy_instance(struct crypto_alg *alg)
{
struct crypto_instance *inst = container_of(alg,
struct crypto_instance,
alg);
INIT_WORK(&inst->free_work, crypto_destroy_instance_workfn);
schedule_work(&inst->free_work);
}
/*
* This function adds a spawn to the list secondary_spawns which
* will be used at the end of crypto_remove_spawns to unregister
* instances, unless the spawn happens to be one that is depended
* on by the new algorithm (nalg in crypto_remove_spawns).
*
* This function is also responsible for resurrecting any algorithms
* in the dependency chain of nalg by unsetting n->dead.
*/
static struct list_head *crypto_more_spawns(struct crypto_alg *alg,
struct list_head *stack,
struct list_head *top,
struct list_head *secondary_spawns)
{
struct crypto_spawn *spawn, *n;
spawn = list_first_entry_or_null(stack, struct crypto_spawn, list);
if (!spawn)
return NULL;
n = list_prev_entry(spawn, list);
list_move(&spawn->list, secondary_spawns);
if (list_is_last(&n->list, stack))
return top;
n = list_next_entry(n, list);
if (!spawn->dead)
n->dead = false;
return &n->inst->alg.cra_users;
}
static void crypto_remove_instance(struct crypto_instance *inst,
struct list_head *list)
{
struct crypto_template *tmpl = inst->tmpl;
if (crypto_is_dead(&inst->alg))
return;
inst->alg.cra_flags |= CRYPTO_ALG_DEAD;
if (!tmpl || !crypto_tmpl_get(tmpl))
return;
list_move(&inst->alg.cra_list, list);
hlist_del(&inst->list);
inst->alg.cra_destroy = crypto_destroy_instance;
BUG_ON(!list_empty(&inst->alg.cra_users));
}
/*
* Given an algorithm alg, remove all algorithms that depend on it
* through spawns. If nalg is not null, then exempt any algorithms
* that are depended on by nalg. This is useful when nalg itself
* depends on alg.
*/
void crypto_remove_spawns(struct crypto_alg *alg, struct list_head *list,
struct crypto_alg *nalg)
{
u32 new_type = (nalg ?: alg)->cra_flags;
struct crypto_spawn *spawn, *n;
LIST_HEAD(secondary_spawns);
struct list_head *spawns;
LIST_HEAD(stack);
LIST_HEAD(top);
spawns = &alg->cra_users;
list_for_each_entry_safe(spawn, n, spawns, list) {
if ((spawn->alg->cra_flags ^ new_type) & spawn->mask)
continue;
list_move(&spawn->list, &top);
}
/*
* Perform a depth-first walk starting from alg through
* the cra_users tree. The list stack records the path
* from alg to the current spawn.
*/
spawns = ⊤
do {
while (!list_empty(spawns)) {
struct crypto_instance *inst;
spawn = list_first_entry(spawns, struct crypto_spawn,
list);
inst = spawn->inst;
list_move(&spawn->list, &stack);
spawn->dead = !spawn->registered || &inst->alg != nalg;
if (!spawn->registered)
break;
BUG_ON(&inst->alg == alg);
if (&inst->alg == nalg)
break;
spawns = &inst->alg.cra_users;
/*
* We may encounter an unregistered instance here, since an
* instance's spawns are set up prior to the instance being
* registered, and registration itself may also have failed.
* An unregistered instance will have NULL ->cra_users.next,
* since ->cra_users isn't properly initialized until
* registration. But an unregistered instance cannot have any
* users, so treat it the same as ->cra_users being empty.
*/
if (spawns->next == NULL)
break;
}
} while ((spawns = crypto_more_spawns(alg, &stack, &top,
&secondary_spawns)));
/*
* Remove all instances that are marked as dead. Also
* complete the resurrection of the others by moving them
* back to the cra_users list.
*/
list_for_each_entry_safe(spawn, n, &secondary_spawns, list) {
if (!spawn->dead)
list_move(&spawn->list, &spawn->alg->cra_users);
else if (spawn->registered)
crypto_remove_instance(spawn->inst, list);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_spawns);
static void crypto_alg_finish_registration(struct crypto_alg *alg,
bool fulfill_requests,
struct list_head *algs_to_put)
{
struct crypto_alg *q;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
continue;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
struct crypto_larval *larval = (void *)q;
/*
* Check to see if either our generic name or
* specific name can satisfy the name requested
* by the larval entry q.
*/
if (strcmp(alg->cra_name, q->cra_name) &&
strcmp(alg->cra_driver_name, q->cra_name))
continue;
if (larval->adult)
continue;
if ((q->cra_flags ^ alg->cra_flags) & larval->mask)
continue;
if (fulfill_requests && crypto_mod_get(alg))
larval->adult = alg;
else
larval->adult = ERR_PTR(-EAGAIN);
continue;
}
if (strcmp(alg->cra_name, q->cra_name))
continue;
if (strcmp(alg->cra_driver_name, q->cra_driver_name) &&
q->cra_priority > alg->cra_priority)
continue;
crypto_remove_spawns(q, algs_to_put, alg);
}
crypto_notify(CRYPTO_MSG_ALG_LOADED, alg);
}
static struct crypto_larval *crypto_alloc_test_larval(struct crypto_alg *alg)
{
struct crypto_larval *larval;
if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER) ||
IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) ||
(alg->cra_flags & CRYPTO_ALG_INTERNAL))
return NULL; /* No self-test needed */
larval = crypto_larval_alloc(alg->cra_name,
alg->cra_flags | CRYPTO_ALG_TESTED, 0);
if (IS_ERR(larval))
return larval;
larval->adult = crypto_mod_get(alg);
if (!larval->adult) {
kfree(larval);
return ERR_PTR(-ENOENT);
}
refcount_set(&larval->alg.cra_refcnt, 1);
memcpy(larval->alg.cra_driver_name, alg->cra_driver_name,
CRYPTO_MAX_ALG_NAME);
larval->alg.cra_priority = alg->cra_priority;
return larval;
}
static struct crypto_larval *
__crypto_register_alg(struct crypto_alg *alg, struct list_head *algs_to_put)
{
struct crypto_alg *q;
struct crypto_larval *larval;
int ret = -EAGAIN;
if (crypto_is_dead(alg))
goto err;
INIT_LIST_HEAD(&alg->cra_users);
ret = -EEXIST;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (q == alg)
goto err;
if (crypto_is_moribund(q))
continue;
if (crypto_is_larval(q)) {
if (!strcmp(alg->cra_driver_name, q->cra_driver_name))
goto err;
continue;
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}
larval = crypto_alloc_test_larval(alg);
if (IS_ERR(larval))
goto out;
list_add(&alg->cra_list, &crypto_alg_list);
if (larval) {
/* No cheating! */
alg->cra_flags &= ~CRYPTO_ALG_TESTED;
list_add(&larval->alg.cra_list, &crypto_alg_list);
} else {
alg->cra_flags |= CRYPTO_ALG_TESTED;
crypto_alg_finish_registration(alg, true, algs_to_put);
}
out:
return larval;
err:
larval = ERR_PTR(ret);
goto out;
}
void crypto_alg_tested(const char *name, int err)
{
struct crypto_larval *test;
struct crypto_alg *alg;
struct crypto_alg *q;
LIST_HEAD(list);
bool best;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
test = (struct crypto_larval *)q;
if (!strcmp(q->cra_driver_name, name))
goto found;
}
pr_err("alg: Unexpected test result for %s: %d\n", name, err);
goto unlock;
found:
q->cra_flags |= CRYPTO_ALG_DEAD;
alg = test->adult;
if (list_empty(&alg->cra_list))
goto complete;
if (err == -ECANCELED)
alg->cra_flags |= CRYPTO_ALG_FIPS_INTERNAL;
else if (err)
goto complete;
else
alg->cra_flags &= ~CRYPTO_ALG_FIPS_INTERNAL;
alg->cra_flags |= CRYPTO_ALG_TESTED;
/*
* If a higher-priority implementation of the same algorithm is
* currently being tested, then don't fulfill request larvals.
*/
best = true;
list_for_each_entry(q, &crypto_alg_list, cra_list) {
if (crypto_is_moribund(q) || !crypto_is_larval(q))
continue;
if (strcmp(alg->cra_name, q->cra_name))
continue;
if (q->cra_priority > alg->cra_priority) {
best = false;
break;
}
}
crypto_alg_finish_registration(alg, best, &list);
complete:
complete_all(&test->completion);
unlock:
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_alg_tested);
void crypto_remove_final(struct list_head *list)
{
struct crypto_alg *alg;
struct crypto_alg *n;
list_for_each_entry_safe(alg, n, list, cra_list) {
list_del_init(&alg->cra_list);
crypto_alg_put(alg);
}
}
EXPORT_SYMBOL_GPL(crypto_remove_final);
int crypto_register_alg(struct crypto_alg *alg)
{
struct crypto_larval *larval;
LIST_HEAD(algs_to_put);
bool test_started = false;
int err;
alg->cra_flags &= ~CRYPTO_ALG_DEAD;
err = crypto_check_alg(alg);
if (err)
return err;
down_write(&crypto_alg_sem);
larval = __crypto_register_alg(alg, &algs_to_put);
if (!IS_ERR_OR_NULL(larval)) {
test_started = crypto_boot_test_finished();
larval->test_started = test_started;
}
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
if (test_started)
crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_alg);
static int crypto_remove_alg(struct crypto_alg *alg, struct list_head *list)
{
if (unlikely(list_empty(&alg->cra_list)))
return -ENOENT;
alg->cra_flags |= CRYPTO_ALG_DEAD;
list_del_init(&alg->cra_list);
crypto_remove_spawns(alg, list, NULL);
return 0;
}
void crypto_unregister_alg(struct crypto_alg *alg)
{
int ret;
LIST_HEAD(list);
down_write(&crypto_alg_sem);
ret = crypto_remove_alg(alg, &list);
up_write(&crypto_alg_sem);
if (WARN(ret, "Algorithm %s is not registered", alg->cra_driver_name))
return;
if (WARN_ON(refcount_read(&alg->cra_refcnt) != 1))
return;
if (alg->cra_destroy)
alg->cra_destroy(alg);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_alg);
int crypto_register_algs(struct crypto_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_alg(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_alg(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_algs);
void crypto_unregister_algs(struct crypto_alg *algs, int count)
{
int i;
for (i = 0; i < count; i++)
crypto_unregister_alg(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_algs);
int crypto_register_template(struct crypto_template *tmpl)
{
struct crypto_template *q;
int err = -EEXIST;
down_write(&crypto_alg_sem);
crypto_check_module_sig(tmpl->module);
list_for_each_entry(q, &crypto_template_list, list) {
if (q == tmpl)
goto out;
}
list_add(&tmpl->list, &crypto_template_list);
err = 0;
out:
up_write(&crypto_alg_sem);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_template);
int crypto_register_templates(struct crypto_template *tmpls, int count)
{
int i, err;
for (i = 0; i < count; i++) {
err = crypto_register_template(&tmpls[i]);
if (err)
goto out;
}
return 0;
out:
for (--i; i >= 0; --i)
crypto_unregister_template(&tmpls[i]);
return err;
}
EXPORT_SYMBOL_GPL(crypto_register_templates);
void crypto_unregister_template(struct crypto_template *tmpl)
{
struct crypto_instance *inst;
struct hlist_node *n;
struct hlist_head *list;
LIST_HEAD(users);
down_write(&crypto_alg_sem);
BUG_ON(list_empty(&tmpl->list));
list_del_init(&tmpl->list);
list = &tmpl->instances;
hlist_for_each_entry(inst, list, list) {
int err = crypto_remove_alg(&inst->alg, &users);
BUG_ON(err);
}
up_write(&crypto_alg_sem);
hlist_for_each_entry_safe(inst, n, list, list) {
BUG_ON(refcount_read(&inst->alg.cra_refcnt) != 1);
crypto_free_instance(inst);
}
crypto_remove_final(&users);
}
EXPORT_SYMBOL_GPL(crypto_unregister_template);
void crypto_unregister_templates(struct crypto_template *tmpls, int count)
{
int i;
for (i = count - 1; i >= 0; --i)
crypto_unregister_template(&tmpls[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_templates);
static struct crypto_template *__crypto_lookup_template(const char *name)
{
struct crypto_template *q, *tmpl = NULL;
down_read(&crypto_alg_sem);
list_for_each_entry(q, &crypto_template_list, list) {
if (strcmp(q->name, name))
continue;
if (unlikely(!crypto_tmpl_get(q)))
continue;
tmpl = q;
break;
}
up_read(&crypto_alg_sem);
return tmpl;
}
struct crypto_template *crypto_lookup_template(const char *name)
{
return try_then_request_module(__crypto_lookup_template(name),
"crypto-%s", name);
}
EXPORT_SYMBOL_GPL(crypto_lookup_template);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst)
{
struct crypto_larval *larval;
struct crypto_spawn *spawn;
u32 fips_internal = 0;
LIST_HEAD(algs_to_put);
int err;
err = crypto_check_alg(&inst->alg);
if (err)
return err;
inst->alg.cra_module = tmpl->module;
inst->alg.cra_flags |= CRYPTO_ALG_INSTANCE;
down_write(&crypto_alg_sem);
larval = ERR_PTR(-EAGAIN);
for (spawn = inst->spawns; spawn;) {
struct crypto_spawn *next;
if (spawn->dead)
goto unlock;
next = spawn->next;
spawn->inst = inst;
spawn->registered = true;
fips_internal |= spawn->alg->cra_flags;
crypto_mod_put(spawn->alg);
spawn = next;
}
inst->alg.cra_flags |= (fips_internal & CRYPTO_ALG_FIPS_INTERNAL);
larval = __crypto_register_alg(&inst->alg, &algs_to_put);
if (IS_ERR(larval))
goto unlock;
else if (larval)
larval->test_started = true;
hlist_add_head(&inst->list, &tmpl->instances);
inst->tmpl = tmpl;
unlock:
up_write(&crypto_alg_sem);
if (IS_ERR(larval))
return PTR_ERR(larval);
if (larval)
crypto_wait_for_test(larval);
crypto_remove_final(&algs_to_put);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_register_instance);
void crypto_unregister_instance(struct crypto_instance *inst)
{
LIST_HEAD(list);
down_write(&crypto_alg_sem);
crypto_remove_spawns(&inst->alg, &list, NULL);
crypto_remove_instance(inst, &list);
up_write(&crypto_alg_sem);
crypto_remove_final(&list);
}
EXPORT_SYMBOL_GPL(crypto_unregister_instance);
int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
const char *name, u32 type, u32 mask)
{
struct crypto_alg *alg;
int err = -EAGAIN;
if (WARN_ON_ONCE(inst == NULL))
return -EINVAL;
/* Allow the result of crypto_attr_alg_name() to be passed directly */
if (IS_ERR(name))
return PTR_ERR(name);
alg = crypto_find_alg(name, spawn->frontend,
type | CRYPTO_ALG_FIPS_INTERNAL, mask);
if (IS_ERR(alg))
return PTR_ERR(alg);
down_write(&crypto_alg_sem);
if (!crypto_is_moribund(alg)) {
list_add(&spawn->list, &alg->cra_users);
spawn->alg = alg;
spawn->mask = mask;
spawn->next = inst->spawns;
inst->spawns = spawn;
inst->alg.cra_flags |=
(alg->cra_flags & CRYPTO_ALG_INHERITED_FLAGS);
err = 0;
}
up_write(&crypto_alg_sem);
if (err)
crypto_mod_put(alg);
return err;
}
EXPORT_SYMBOL_GPL(crypto_grab_spawn);
void crypto_drop_spawn(struct crypto_spawn *spawn)
{
if (!spawn->alg) /* not yet initialized? */
return;
down_write(&crypto_alg_sem);
if (!spawn->dead)
list_del(&spawn->list);
up_write(&crypto_alg_sem);
if (!spawn->registered)
crypto_mod_put(spawn->alg);
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
/*
 * Grab a module+algorithm reference for the spawn. If the owning module
 * is unloading so crypto_mod_get() fails, flag the algorithm as dying
 * via crypto_shoot_alg() and report -EAGAIN to the caller.
 */
static struct crypto_alg *crypto_spawn_alg(struct crypto_spawn *spawn)
{
struct crypto_alg *alg = ERR_PTR(-EAGAIN);
struct crypto_alg *target;
bool shoot = false;
down_read(&crypto_alg_sem);
if (!spawn->dead) {
alg = spawn->alg;
if (!crypto_mod_get(alg)) {
target = crypto_alg_get(alg);
shoot = true;
alg = ERR_PTR(-EAGAIN);
}
}
up_read(&crypto_alg_sem);
if (shoot) {
crypto_shoot_alg(target);
crypto_alg_put(target);
}
return alg;
}
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
tfm = ERR_PTR(-EINVAL);
if (unlikely((alg->cra_flags ^ type) & mask))
goto out_put_alg;
tfm = __crypto_alloc_tfm(alg, type, mask);
if (IS_ERR(tfm))
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn)
{
struct crypto_alg *alg;
struct crypto_tfm *tfm;
alg = crypto_spawn_alg(spawn);
if (IS_ERR(alg))
return ERR_CAST(alg);
tfm = crypto_create_tfm(alg, spawn->frontend);
if (IS_ERR(tfm))
goto out_put_alg;
return tfm;
out_put_alg:
crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
int crypto_register_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_register_notifier);
int crypto_unregister_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&crypto_chain, nb);
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb)
{
struct rtattr *rta = tb[0];
struct crypto_attr_type *algt;
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*algt))
return ERR_PTR(-EINVAL);
if (rta->rta_type != CRYPTOA_TYPE)
return ERR_PTR(-EINVAL);
algt = RTA_DATA(rta);
return algt;
}
EXPORT_SYMBOL_GPL(crypto_get_attr_type);
/**
* crypto_check_attr_type() - check algorithm type and compute inherited mask
* @tb: the template parameters
* @type: the algorithm type the template would be instantiated as
* @mask_ret: (output) the mask that should be passed to crypto_grab_*()
* to restrict the flags of any inner algorithms
*
* Validate that the algorithm type the user requested is compatible with the
* one the template would actually be instantiated as. E.g., if the user is
* doing crypto_alloc_shash("cbc(aes)", ...), this would return an error because
* the "cbc" template creates an "skcipher" algorithm, not an "shash" algorithm.
*
* Also compute the mask to use to restrict the flags of any inner algorithms.
*
* Return: 0 on success; -errno on failure
*/
int crypto_check_attr_type(struct rtattr **tb, u32 type, u32 *mask_ret)
{
struct crypto_attr_type *algt;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
if ((algt->type ^ type) & algt->mask)
return -EINVAL;
*mask_ret = crypto_algt_inherited_mask(algt);
return 0;
}
EXPORT_SYMBOL_GPL(crypto_check_attr_type);
const char *crypto_attr_alg_name(struct rtattr *rta)
{
struct crypto_attr_alg *alga;
if (!rta)
return ERR_PTR(-ENOENT);
if (RTA_PAYLOAD(rta) < sizeof(*alga))
return ERR_PTR(-EINVAL);
if (rta->rta_type != CRYPTOA_ALG)
return ERR_PTR(-EINVAL);
alga = RTA_DATA(rta);
alga->name[CRYPTO_MAX_ALG_NAME - 1] = 0;
return alga->name;
}
EXPORT_SYMBOL_GPL(crypto_attr_alg_name);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg)
{
if (snprintf(inst->alg.cra_name, CRYPTO_MAX_ALG_NAME, "%s(%s)", name,
alg->cra_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
name, alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
return -ENAMETOOLONG;
return 0;
}
EXPORT_SYMBOL_GPL(crypto_inst_setname);
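/*
 * Illustrative only: given the template name "cbc" and an underlying
 * algorithm with cra_name "aes" / cra_driver_name "aes-generic", the
 * helper above produces cra_name "cbc(aes)" and cra_driver_name
 * "cbc(aes-generic)", or -ENAMETOOLONG if either composite name would
 * reach CRYPTO_MAX_ALG_NAME.
 */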
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen)
{
INIT_LIST_HEAD(&queue->list);
queue->backlog = &queue->list;
queue->qlen = 0;
queue->max_qlen = max_qlen;
}
EXPORT_SYMBOL_GPL(crypto_init_queue);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request)
{
int err = -EINPROGRESS;
if (unlikely(queue->qlen >= queue->max_qlen)) {
if (!(request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
err = -ENOSPC;
goto out;
}
err = -EBUSY;
if (queue->backlog == &queue->list)
queue->backlog = &request->list;
}
queue->qlen++;
list_add_tail(&request->list, &queue->list);
out:
return err;
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request);
void crypto_enqueue_request_head(struct crypto_queue *queue,
struct crypto_async_request *request)
{
if (unlikely(queue->qlen >= queue->max_qlen))
queue->backlog = queue->backlog->prev;
queue->qlen++;
list_add(&request->list, &queue->list);
}
EXPORT_SYMBOL_GPL(crypto_enqueue_request_head);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
{
struct list_head *request;
if (unlikely(!queue->qlen))
return NULL;
queue->qlen--;
if (queue->backlog != &queue->list)
queue->backlog = queue->backlog->next;
request = queue->list.next;
list_del(request);
return list_entry(request, struct crypto_async_request, list);
}
EXPORT_SYMBOL_GPL(crypto_dequeue_request);
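/*
 * Illustrative sketch, not part of this file: how a driver typically
 * wraps the queue helpers above. The lock and the queue depth of 50 are
 * hypothetical. As implemented above, crypto_enqueue_request() returns
 * -EINPROGRESS when queued normally, -EBUSY when backlogged, or -ENOSPC
 * when the queue is full and the request does not allow backlogging.
 */
#if 0
static struct crypto_queue example_queue;
static DEFINE_SPINLOCK(example_lock);

static void example_queue_setup(void)
{
	crypto_init_queue(&example_queue, 50);
}

static int example_handle_request(struct crypto_async_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&example_lock, flags);
	err = crypto_enqueue_request(&example_queue, req);
	spin_unlock_irqrestore(&example_lock, flags);
	return err;
}
#endif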
/* Byte-at-a-time fallback used when the word-wise path below cannot be taken. */
static inline void crypto_inc_byte(u8 *a, unsigned int size)
{
u8 *b = (a + size);
u8 c;
for (; size; size--) {
c = *--b + 1;
*b = c;
if (c)
break;
}
}
void crypto_inc(u8 *a, unsigned int size)
{
__be32 *b = (__be32 *)(a + size);
u32 c;
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
IS_ALIGNED((unsigned long)b, __alignof__(*b)))
for (; size >= 4; size -= 4) {
c = be32_to_cpu(*--b) + 1;
*b = cpu_to_be32(c);
if (likely(c))
return;
}
crypto_inc_byte(a, size);
}
EXPORT_SYMBOL_GPL(crypto_inc);
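/*
 * Illustrative only: crypto_inc() treats the buffer as one big-endian
 * counter, the usual representation of a CTR-mode IV. The 16-byte
 * buffer below is a hypothetical example.
 */
#if 0
static void example_next_ctr_iv(u8 iv[16])
{
	crypto_inc(iv, 16);	/* ...00ff -> ...0100; all-ff wraps to all-00 */
}
#endif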
unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize +
(alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1));
}
EXPORT_SYMBOL_GPL(crypto_alg_extsize);
int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
u32 type, u32 mask)
{
int ret = 0;
struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
if (!IS_ERR(alg)) {
crypto_mod_put(alg);
ret = 1;
}
return ret;
}
EXPORT_SYMBOL_GPL(crypto_type_has_alg);
static void __init crypto_start_tests(void)
{
if (IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS))
return;
for (;;) {
struct crypto_larval *larval = NULL;
struct crypto_alg *q;
down_write(&crypto_alg_sem);
list_for_each_entry(q, &crypto_alg_list, cra_list) {
struct crypto_larval *l;
if (!crypto_is_larval(q))
continue;
l = (void *)q;
if (!crypto_is_test_larval(l))
continue;
if (l->test_started)
continue;
l->test_started = true;
larval = l;
break;
}
up_write(&crypto_alg_sem);
if (!larval)
break;
crypto_wait_for_test(larval);
}
set_crypto_boot_test_finished();
}
static int __init crypto_algapi_init(void)
{
crypto_init_proc();
crypto_start_tests();
return 0;
}
static void __exit crypto_algapi_exit(void)
{
crypto_exit_proc();
}
/*
* We run this at late_initcall so that all the built-in algorithms
* have had a chance to register themselves first.
*/
late_initcall(crypto_algapi_init);
module_exit(crypto_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
MODULE_SOFTDEP("pre: cryptomgr");
| linux-master | crypto/algapi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* xor.c : Multiple Devices driver for Linux
*
* Copyright (C) 1996, 1997, 1998, 1999, 2000,
* Ingo Molnar, Matti Aarnio, Jakub Jelinek, Richard Henderson.
*
* Dispatch optimized RAID-5 checksumming functions.
*/
#define BH_TRACE 0
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/raid/xor.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <asm/xor.h>
#ifndef XOR_SELECT_TEMPLATE
#define XOR_SELECT_TEMPLATE(x) (x)
#endif
/* The xor routines to use. */
static struct xor_block_template *active_template;
void
xor_blocks(unsigned int src_count, unsigned int bytes, void *dest, void **srcs)
{
unsigned long *p1, *p2, *p3, *p4;
p1 = (unsigned long *) srcs[0];
if (src_count == 1) {
active_template->do_2(bytes, dest, p1);
return;
}
p2 = (unsigned long *) srcs[1];
if (src_count == 2) {
active_template->do_3(bytes, dest, p1, p2);
return;
}
p3 = (unsigned long *) srcs[2];
if (src_count == 3) {
active_template->do_4(bytes, dest, p1, p2, p3);
return;
}
p4 = (unsigned long *) srcs[3];
active_template->do_5(bytes, dest, p1, p2, p3, p4);
}
EXPORT_SYMBOL(xor_blocks);
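/*
 * Illustrative sketch, not part of this file: XOR two source blocks
 * into an existing destination block, RAID-5 parity style. The buffers
 * and the PAGE_SIZE length are assumptions. Note that src_count counts
 * the entries of srcs[], and dest is both input and output, so this
 * computes dest ^= s0 ^ s1.
 */
#if 0
static void example_xor_parity(void *dest, void *s0, void *s1)
{
	void *srcs[2] = { s0, s1 };

	xor_blocks(2, PAGE_SIZE, dest, srcs);
}
#endif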
/* Set of all registered templates. */
static struct xor_block_template *__initdata template_list;
#ifndef MODULE
static void __init do_xor_register(struct xor_block_template *tmpl)
{
tmpl->next = template_list;
template_list = tmpl;
}
static int __init register_xor_blocks(void)
{
active_template = XOR_SELECT_TEMPLATE(NULL);
if (!active_template) {
#define xor_speed do_xor_register
// register all the templates and pick the first as the default
XOR_TRY_TEMPLATES;
#undef xor_speed
active_template = template_list;
}
return 0;
}
#endif
#define BENCH_SIZE 4096
#define REPS 800U
static void __init
do_xor_speed(struct xor_block_template *tmpl, void *b1, void *b2)
{
int speed;
int i, j;
ktime_t min, start, diff;
tmpl->next = template_list;
template_list = tmpl;
preempt_disable();
min = (ktime_t)S64_MAX;
for (i = 0; i < 3; i++) {
start = ktime_get();
for (j = 0; j < REPS; j++) {
mb(); /* prevent loop optimization */
tmpl->do_2(BENCH_SIZE, b1, b2);
mb();
}
diff = ktime_sub(ktime_get(), start);
if (diff < min)
min = diff;
}
preempt_enable();
// bytes/ns == GB/s, multiply by 1000 to get MB/s [not MiB/s]
if (!min)
min = 1;
speed = (1000 * REPS * BENCH_SIZE) / (unsigned int)ktime_to_ns(min);
tmpl->speed = speed;
pr_info(" %-16s: %5d MB/sec\n", tmpl->name, speed);
}
static int __init
calibrate_xor_blocks(void)
{
void *b1, *b2;
struct xor_block_template *f, *fastest;
fastest = XOR_SELECT_TEMPLATE(NULL);
if (fastest) {
printk(KERN_INFO "xor: automatically using best "
"checksumming function %-10s\n",
fastest->name);
goto out;
}
b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
if (!b1) {
printk(KERN_WARNING "xor: Yikes! No memory available.\n");
return -ENOMEM;
}
b2 = b1 + 2*PAGE_SIZE + BENCH_SIZE;
/*
* If this arch/cpu has a short-circuited selection, don't loop through
* all the possible functions, just test the best one
*/
#define xor_speed(templ) do_xor_speed((templ), b1, b2)
printk(KERN_INFO "xor: measuring software checksum speed\n");
template_list = NULL;
XOR_TRY_TEMPLATES;
fastest = template_list;
for (f = fastest; f; f = f->next)
if (f->speed > fastest->speed)
fastest = f;
pr_info("xor: using function: %s (%d MB/sec)\n",
fastest->name, fastest->speed);
#undef xor_speed
free_pages((unsigned long)b1, 2);
out:
active_template = fastest;
return 0;
}
static __exit void xor_exit(void) { }
MODULE_LICENSE("GPL");
#ifndef MODULE
/* when built-in xor.o must initialize before drivers/md/md.o */
core_initcall(register_xor_blocks);
#endif
module_init(calibrate_xor_blocks);
module_exit(xor_exit);
| linux-master | crypto/xor.c |
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* SM4, as specified in
* https://tools.ietf.org/id/draft-ribose-cfrg-sm4-10.html
*
* Copyright (C) 2018 ARM Limited or its affiliates.
* Copyright (c) 2021 Tianjia Zhang <[email protected]>
*/
#include <linux/module.h>
#include <asm/unaligned.h>
#include <crypto/sm4.h>
static const u32 ____cacheline_aligned fk[4] = {
0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc
};
static const u32 ____cacheline_aligned ck[32] = {
0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269,
0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9,
0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9,
0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229,
0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209,
0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279
};
static const u8 ____cacheline_aligned sbox[256] = {
0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7,
0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3,
0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a,
0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95,
0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba,
0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b,
0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2,
0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52,
0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5,
0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55,
0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60,
0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f,
0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f,
0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd,
0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e,
0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20,
0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48
};
extern const u32 crypto_sm4_fk[4] __alias(fk);
extern const u32 crypto_sm4_ck[32] __alias(ck);
extern const u8 crypto_sm4_sbox[256] __alias(sbox);
EXPORT_SYMBOL(crypto_sm4_fk);
EXPORT_SYMBOL(crypto_sm4_ck);
EXPORT_SYMBOL(crypto_sm4_sbox);
static inline u32 sm4_t_non_lin_sub(u32 x)
{
u32 out;
out = (u32)sbox[x & 0xff];
out |= (u32)sbox[(x >> 8) & 0xff] << 8;
out |= (u32)sbox[(x >> 16) & 0xff] << 16;
out |= (u32)sbox[(x >> 24) & 0xff] << 24;
return out;
}
static inline u32 sm4_key_lin_sub(u32 x)
{
return x ^ rol32(x, 13) ^ rol32(x, 23);
}
static inline u32 sm4_enc_lin_sub(u32 x)
{
return x ^ rol32(x, 2) ^ rol32(x, 10) ^ rol32(x, 18) ^ rol32(x, 24);
}
static inline u32 sm4_key_sub(u32 x)
{
return sm4_key_lin_sub(sm4_t_non_lin_sub(x));
}
static inline u32 sm4_enc_sub(u32 x)
{
return sm4_enc_lin_sub(sm4_t_non_lin_sub(x));
}
static inline u32 sm4_round(u32 x0, u32 x1, u32 x2, u32 x3, u32 rk)
{
return x0 ^ sm4_enc_sub(x1 ^ x2 ^ x3 ^ rk);
}
/**
* sm4_expandkey - Expands the SM4 key as described in GB/T 32907-2016
* @ctx: The location where the computed key will be stored.
* @in_key: The supplied key.
* @key_len: The length of the supplied key.
*
* Returns 0 on success. The function fails only if an invalid key size (or
* pointer) is supplied.
*/
int sm4_expandkey(struct sm4_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
u32 rk[4];
const u32 *key = (u32 *)in_key;
int i;
if (key_len != SM4_KEY_SIZE)
return -EINVAL;
rk[0] = get_unaligned_be32(&key[0]) ^ fk[0];
rk[1] = get_unaligned_be32(&key[1]) ^ fk[1];
rk[2] = get_unaligned_be32(&key[2]) ^ fk[2];
rk[3] = get_unaligned_be32(&key[3]) ^ fk[3];
for (i = 0; i < 32; i += 4) {
rk[0] ^= sm4_key_sub(rk[1] ^ rk[2] ^ rk[3] ^ ck[i + 0]);
rk[1] ^= sm4_key_sub(rk[2] ^ rk[3] ^ rk[0] ^ ck[i + 1]);
rk[2] ^= sm4_key_sub(rk[3] ^ rk[0] ^ rk[1] ^ ck[i + 2]);
rk[3] ^= sm4_key_sub(rk[0] ^ rk[1] ^ rk[2] ^ ck[i + 3]);
ctx->rkey_enc[i + 0] = rk[0];
ctx->rkey_enc[i + 1] = rk[1];
ctx->rkey_enc[i + 2] = rk[2];
ctx->rkey_enc[i + 3] = rk[3];
ctx->rkey_dec[31 - 0 - i] = rk[0];
ctx->rkey_dec[31 - 1 - i] = rk[1];
ctx->rkey_dec[31 - 2 - i] = rk[2];
ctx->rkey_dec[31 - 3 - i] = rk[3];
}
return 0;
}
EXPORT_SYMBOL_GPL(sm4_expandkey);
/**
* sm4_crypt_block - Encrypt or decrypt a single SM4 block
* @rk: The rkey_enc for encrypt or rkey_dec for decrypt
* @out: Buffer to store output data
* @in: Buffer containing the input data
*/
void sm4_crypt_block(const u32 *rk, u8 *out, const u8 *in)
{
u32 x[4], i;
x[0] = get_unaligned_be32(in + 0 * 4);
x[1] = get_unaligned_be32(in + 1 * 4);
x[2] = get_unaligned_be32(in + 2 * 4);
x[3] = get_unaligned_be32(in + 3 * 4);
for (i = 0; i < 32; i += 4) {
x[0] = sm4_round(x[0], x[1], x[2], x[3], rk[i + 0]);
x[1] = sm4_round(x[1], x[2], x[3], x[0], rk[i + 1]);
x[2] = sm4_round(x[2], x[3], x[0], x[1], rk[i + 2]);
x[3] = sm4_round(x[3], x[0], x[1], x[2], rk[i + 3]);
}
put_unaligned_be32(x[3 - 0], out + 0 * 4);
put_unaligned_be32(x[3 - 1], out + 1 * 4);
put_unaligned_be32(x[3 - 2], out + 2 * 4);
put_unaligned_be32(x[3 - 3], out + 3 * 4);
}
EXPORT_SYMBOL_GPL(sm4_crypt_block);
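/*
 * Illustrative sketch, not part of this file: one-shot encryption of a
 * single 16-byte block with this library, mirroring how the generic SM4
 * cipher drives it. Passing ctx.rkey_dec instead would decrypt.
 */
#if 0
static int example_sm4_encrypt_one(const u8 key[SM4_KEY_SIZE],
				   u8 out[SM4_BLOCK_SIZE],
				   const u8 in[SM4_BLOCK_SIZE])
{
	struct sm4_ctx ctx;
	int err;

	err = sm4_expandkey(&ctx, key, SM4_KEY_SIZE);
	if (err)
		return err;
	sm4_crypt_block(ctx.rkey_enc, out, in);
	return 0;
}
#endif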
MODULE_DESCRIPTION("Generic SM4 library");
MODULE_LICENSE("GPL v2");
| linux-master | crypto/sm4.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* SHA1 Secure Hash Algorithm.
*
* Derived from cryptoapi implementation, adapted for in-place
* scatterlist interface.
*
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <[email protected]>
* Copyright (c) Jean-Francois Dive <[email protected]>
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <crypto/sha1.h>
#include <crypto/sha1_base.h>
#include <asm/byteorder.h>
const u8 sha1_zero_message_hash[SHA1_DIGEST_SIZE] = {
0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
0xaf, 0xd8, 0x07, 0x09
};
EXPORT_SYMBOL_GPL(sha1_zero_message_hash);
static void sha1_generic_block_fn(struct sha1_state *sst, u8 const *src,
int blocks)
{
u32 temp[SHA1_WORKSPACE_WORDS];
while (blocks--) {
sha1_transform(sst->state, src, temp);
src += SHA1_BLOCK_SIZE;
}
memzero_explicit(temp, sizeof(temp));
}
int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
return sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
}
EXPORT_SYMBOL(crypto_sha1_update);
static int sha1_final(struct shash_desc *desc, u8 *out)
{
sha1_base_do_finalize(desc, sha1_generic_block_fn);
return sha1_base_finish(desc, out);
}
int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
unsigned int len, u8 *out)
{
sha1_base_do_update(desc, data, len, sha1_generic_block_fn);
return sha1_final(desc, out);
}
EXPORT_SYMBOL(crypto_sha1_finup);
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_base_init,
.update = crypto_sha1_update,
.final = sha1_final,
.finup = crypto_sha1_finup,
.descsize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-generic",
.cra_priority = 100,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_module = THIS_MODULE,
}
};
static int __init sha1_generic_mod_init(void)
{
return crypto_register_shash(&alg);
}
static void __exit sha1_generic_mod_fini(void)
{
crypto_unregister_shash(&alg);
}
subsys_initcall(sha1_generic_mod_init);
module_exit(sha1_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-generic");
| linux-master | crypto/sha1_generic.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ESSIV skcipher and aead template for block encryption
*
* This template encapsulates the ESSIV IV generation algorithm used by
* dm-crypt and fscrypt, which converts the initial vector for the skcipher
* used for block encryption, by encrypting it using the hash of the
* skcipher key as encryption key. Usually, the input IV is a 64-bit sector
* number in LE representation zero-padded to the size of the IV, but this
* is not assumed by this driver.
*
* The typical use of this template is to instantiate the skcipher
* 'essiv(cbc(aes),sha256)', which is the only instantiation used by
* fscrypt, and the most relevant one for dm-crypt. However, dm-crypt
* also permits ESSIV to be used in combination with the authenc template,
* e.g., 'essiv(authenc(hmac(sha256),cbc(aes)),sha256)', in which case
* we need to instantiate an aead that accepts the same special key format
* as the authenc template, and deals with the way the encrypted IV is
* embedded into the AAD area of the aead request. This means the AEAD
* flavor produced by this template is tightly coupled to the way dm-crypt
* happens to use it.
*
* Copyright (c) 2019 Linaro, Ltd. <[email protected]>
*
* Heavily based on:
* adiantum length-preserving encryption mode
*
* Copyright 2018 Google LLC
*/
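/*
 * Conceptual sketch of the derivation performed by this template:
 *
 *	salt = Hash(skcipher_key)
 *	IV'  = E_salt(IV)	(one single-block cipher call, keyed with salt)
 *
 * essiv_skcipher_setkey() derives 'salt' and keys the ESSIV cipher;
 * essiv_skcipher_crypt() then applies E_salt() in place on req->iv
 * before handing the request to the inner skcipher.
 */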
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include "internal.h"
struct essiv_instance_ctx {
union {
struct crypto_skcipher_spawn skcipher_spawn;
struct crypto_aead_spawn aead_spawn;
} u;
char essiv_cipher_name[CRYPTO_MAX_ALG_NAME];
char shash_driver_name[CRYPTO_MAX_ALG_NAME];
};
struct essiv_tfm_ctx {
union {
struct crypto_skcipher *skcipher;
struct crypto_aead *aead;
} u;
struct crypto_cipher *essiv_cipher;
struct crypto_shash *hash;
int ivoffset;
};
struct essiv_aead_request_ctx {
struct scatterlist sg[4];
u8 *assoc;
struct aead_request aead_req;
};
static int essiv_skcipher_setkey(struct crypto_skcipher *tfm,
const u8 *key, unsigned int keylen)
{
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
u8 salt[HASH_MAX_DIGESTSIZE];
int err;
crypto_skcipher_clear_flags(tctx->u.skcipher, CRYPTO_TFM_REQ_MASK);
crypto_skcipher_set_flags(tctx->u.skcipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_skcipher_setkey(tctx->u.skcipher, key, keylen);
if (err)
return err;
err = crypto_shash_tfm_digest(tctx->hash, key, keylen, salt);
if (err)
return err;
crypto_cipher_clear_flags(tctx->essiv_cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->essiv_cipher,
crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_cipher_setkey(tctx->essiv_cipher, salt,
crypto_shash_digestsize(tctx->hash));
}
static int essiv_aead_setkey(struct crypto_aead *tfm, const u8 *key,
unsigned int keylen)
{
struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
SHASH_DESC_ON_STACK(desc, tctx->hash);
struct crypto_authenc_keys keys;
u8 salt[HASH_MAX_DIGESTSIZE];
int err;
crypto_aead_clear_flags(tctx->u.aead, CRYPTO_TFM_REQ_MASK);
crypto_aead_set_flags(tctx->u.aead, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
err = crypto_aead_setkey(tctx->u.aead, key, keylen);
if (err)
return err;
if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
return -EINVAL;
desc->tfm = tctx->hash;
err = crypto_shash_init(desc) ?:
crypto_shash_update(desc, keys.enckey, keys.enckeylen) ?:
crypto_shash_finup(desc, keys.authkey, keys.authkeylen, salt);
if (err)
return err;
crypto_cipher_clear_flags(tctx->essiv_cipher, CRYPTO_TFM_REQ_MASK);
crypto_cipher_set_flags(tctx->essiv_cipher, crypto_aead_get_flags(tfm) &
CRYPTO_TFM_REQ_MASK);
return crypto_cipher_setkey(tctx->essiv_cipher, salt,
crypto_shash_digestsize(tctx->hash));
}
static int essiv_aead_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
return crypto_aead_setauthsize(tctx->u.aead, authsize);
}
static void essiv_skcipher_done(void *data, int err)
{
struct skcipher_request *req = data;
skcipher_request_complete(req, err);
}
static int essiv_skcipher_crypt(struct skcipher_request *req, bool enc)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct skcipher_request *subreq = skcipher_request_ctx(req);
crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
skcipher_request_set_tfm(subreq, tctx->u.skcipher);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
req->iv);
skcipher_request_set_callback(subreq, skcipher_request_flags(req),
essiv_skcipher_done, req);
return enc ? crypto_skcipher_encrypt(subreq) :
crypto_skcipher_decrypt(subreq);
}
static int essiv_skcipher_encrypt(struct skcipher_request *req)
{
return essiv_skcipher_crypt(req, true);
}
static int essiv_skcipher_decrypt(struct skcipher_request *req)
{
return essiv_skcipher_crypt(req, false);
}
static void essiv_aead_done(void *data, int err)
{
struct aead_request *req = data;
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
if (err == -EINPROGRESS)
goto out;
kfree(rctx->assoc);
out:
aead_request_complete(req, err);
}
static int essiv_aead_crypt(struct aead_request *req, bool enc)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
const struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
struct essiv_aead_request_ctx *rctx = aead_request_ctx(req);
struct aead_request *subreq = &rctx->aead_req;
struct scatterlist *src = req->src;
int err;
crypto_cipher_encrypt_one(tctx->essiv_cipher, req->iv, req->iv);
/*
* dm-crypt embeds the sector number and the IV in the AAD region, so
* we have to copy the converted IV into the right scatterlist before
* we pass it on.
*/
rctx->assoc = NULL;
if (req->src == req->dst || !enc) {
scatterwalk_map_and_copy(req->iv, req->dst,
req->assoclen - crypto_aead_ivsize(tfm),
crypto_aead_ivsize(tfm), 1);
} else {
u8 *iv = (u8 *)aead_request_ctx(req) + tctx->ivoffset;
int ivsize = crypto_aead_ivsize(tfm);
int ssize = req->assoclen - ivsize;
struct scatterlist *sg;
int nents;
if (ssize < 0)
return -EINVAL;
nents = sg_nents_for_len(req->src, ssize);
if (nents < 0)
return -EINVAL;
memcpy(iv, req->iv, ivsize);
sg_init_table(rctx->sg, 4);
if (unlikely(nents > 1)) {
/*
* This is a case that rarely occurs in practice, but
* for correctness, we have to deal with it nonetheless.
*/
rctx->assoc = kmalloc(ssize, GFP_ATOMIC);
if (!rctx->assoc)
return -ENOMEM;
scatterwalk_map_and_copy(rctx->assoc, req->src, 0,
ssize, 0);
sg_set_buf(rctx->sg, rctx->assoc, ssize);
} else {
sg_set_page(rctx->sg, sg_page(req->src), ssize,
req->src->offset);
}
sg_set_buf(rctx->sg + 1, iv, ivsize);
sg = scatterwalk_ffwd(rctx->sg + 2, req->src, req->assoclen);
if (sg != rctx->sg + 2)
sg_chain(rctx->sg, 3, sg);
src = rctx->sg;
}
aead_request_set_tfm(subreq, tctx->u.aead);
aead_request_set_ad(subreq, req->assoclen);
aead_request_set_callback(subreq, aead_request_flags(req),
essiv_aead_done, req);
aead_request_set_crypt(subreq, src, req->dst, req->cryptlen, req->iv);
err = enc ? crypto_aead_encrypt(subreq) :
crypto_aead_decrypt(subreq);
if (rctx->assoc && err != -EINPROGRESS && err != -EBUSY)
kfree(rctx->assoc);
return err;
}
static int essiv_aead_encrypt(struct aead_request *req)
{
return essiv_aead_crypt(req, true);
}
static int essiv_aead_decrypt(struct aead_request *req)
{
return essiv_aead_crypt(req, false);
}
static int essiv_init_tfm(struct essiv_instance_ctx *ictx,
struct essiv_tfm_ctx *tctx)
{
struct crypto_cipher *essiv_cipher;
struct crypto_shash *hash;
int err;
essiv_cipher = crypto_alloc_cipher(ictx->essiv_cipher_name, 0, 0);
if (IS_ERR(essiv_cipher))
return PTR_ERR(essiv_cipher);
hash = crypto_alloc_shash(ictx->shash_driver_name, 0, 0);
if (IS_ERR(hash)) {
err = PTR_ERR(hash);
goto err_free_essiv_cipher;
}
tctx->essiv_cipher = essiv_cipher;
tctx->hash = hash;
return 0;
err_free_essiv_cipher:
crypto_free_cipher(essiv_cipher);
return err;
}
static int essiv_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
struct skcipher_instance *inst = skcipher_alg_instance(tfm);
struct essiv_instance_ctx *ictx = skcipher_instance_ctx(inst);
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
struct crypto_skcipher *skcipher;
int err;
skcipher = crypto_spawn_skcipher(&ictx->u.skcipher_spawn);
if (IS_ERR(skcipher))
return PTR_ERR(skcipher);
crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
crypto_skcipher_reqsize(skcipher));
err = essiv_init_tfm(ictx, tctx);
if (err) {
crypto_free_skcipher(skcipher);
return err;
}
tctx->u.skcipher = skcipher;
return 0;
}
static int essiv_aead_init_tfm(struct crypto_aead *tfm)
{
struct aead_instance *inst = aead_alg_instance(tfm);
struct essiv_instance_ctx *ictx = aead_instance_ctx(inst);
struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
unsigned int subreq_size;
int err;
BUILD_BUG_ON(offsetofend(struct essiv_aead_request_ctx, aead_req) !=
sizeof(struct essiv_aead_request_ctx));
aead = crypto_spawn_aead(&ictx->u.aead_spawn);
if (IS_ERR(aead))
return PTR_ERR(aead);
subreq_size = sizeof_field(struct essiv_aead_request_ctx, aead_req) +
crypto_aead_reqsize(aead);
tctx->ivoffset = offsetof(struct essiv_aead_request_ctx, aead_req) +
subreq_size;
crypto_aead_set_reqsize(tfm, tctx->ivoffset + crypto_aead_ivsize(aead));
err = essiv_init_tfm(ictx, tctx);
if (err) {
crypto_free_aead(aead);
return err;
}
tctx->u.aead = aead;
return 0;
}
static void essiv_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct essiv_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
crypto_free_skcipher(tctx->u.skcipher);
crypto_free_cipher(tctx->essiv_cipher);
crypto_free_shash(tctx->hash);
}
static void essiv_aead_exit_tfm(struct crypto_aead *tfm)
{
struct essiv_tfm_ctx *tctx = crypto_aead_ctx(tfm);
crypto_free_aead(tctx->u.aead);
crypto_free_cipher(tctx->essiv_cipher);
crypto_free_shash(tctx->hash);
}
static void essiv_skcipher_free_instance(struct skcipher_instance *inst)
{
struct essiv_instance_ctx *ictx = skcipher_instance_ctx(inst);
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
kfree(inst);
}
static void essiv_aead_free_instance(struct aead_instance *inst)
{
struct essiv_instance_ctx *ictx = aead_instance_ctx(inst);
crypto_drop_aead(&ictx->u.aead_spawn);
kfree(inst);
}
static bool parse_cipher_name(char *essiv_cipher_name, const char *cra_name)
{
const char *p, *q;
int len;
/* find the last opening parens */
p = strrchr(cra_name, '(');
if (!p++)
return false;
/* find the first closing parens in the tail of the string */
q = strchr(p, ')');
if (!q)
return false;
len = q - p;
if (len >= CRYPTO_MAX_ALG_NAME)
return false;
memcpy(essiv_cipher_name, p, len);
essiv_cipher_name[len] = '\0';
return true;
}
static bool essiv_supported_algorithms(const char *essiv_cipher_name,
struct shash_alg *hash_alg,
int ivsize)
{
struct crypto_alg *alg;
bool ret = false;
alg = crypto_alg_mod_lookup(essiv_cipher_name,
CRYPTO_ALG_TYPE_CIPHER,
CRYPTO_ALG_TYPE_MASK);
if (IS_ERR(alg))
return false;
if (hash_alg->digestsize < alg->cra_cipher.cia_min_keysize ||
hash_alg->digestsize > alg->cra_cipher.cia_max_keysize)
goto out;
if (ivsize != alg->cra_blocksize)
goto out;
if (crypto_shash_alg_needs_key(hash_alg))
goto out;
ret = true;
out:
crypto_mod_put(alg);
return ret;
}
static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
struct crypto_attr_type *algt;
const char *inner_cipher_name;
const char *shash_name;
struct skcipher_instance *skcipher_inst = NULL;
struct aead_instance *aead_inst = NULL;
struct crypto_instance *inst;
struct crypto_alg *base, *block_base;
struct essiv_instance_ctx *ictx;
struct skcipher_alg *skcipher_alg = NULL;
struct aead_alg *aead_alg = NULL;
struct crypto_alg *_hash_alg;
struct shash_alg *hash_alg;
int ivsize;
u32 type;
u32 mask;
int err;
algt = crypto_get_attr_type(tb);
if (IS_ERR(algt))
return PTR_ERR(algt);
inner_cipher_name = crypto_attr_alg_name(tb[1]);
if (IS_ERR(inner_cipher_name))
return PTR_ERR(inner_cipher_name);
shash_name = crypto_attr_alg_name(tb[2]);
if (IS_ERR(shash_name))
return PTR_ERR(shash_name);
type = algt->type & algt->mask;
mask = crypto_algt_inherited_mask(algt);
switch (type) {
case CRYPTO_ALG_TYPE_SKCIPHER:
skcipher_inst = kzalloc(sizeof(*skcipher_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!skcipher_inst)
return -ENOMEM;
inst = skcipher_crypto_instance(skcipher_inst);
base = &skcipher_inst->alg.base;
ictx = crypto_instance_ctx(inst);
/* Symmetric cipher, e.g., "cbc(aes)" */
err = crypto_grab_skcipher(&ictx->u.skcipher_spawn, inst,
inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
skcipher_alg = crypto_spawn_skcipher_alg(&ictx->u.skcipher_spawn);
block_base = &skcipher_alg->base;
ivsize = crypto_skcipher_alg_ivsize(skcipher_alg);
break;
case CRYPTO_ALG_TYPE_AEAD:
aead_inst = kzalloc(sizeof(*aead_inst) +
sizeof(*ictx), GFP_KERNEL);
if (!aead_inst)
return -ENOMEM;
inst = aead_crypto_instance(aead_inst);
base = &aead_inst->alg.base;
ictx = crypto_instance_ctx(inst);
/* AEAD cipher, e.g., "authenc(hmac(sha256),cbc(aes))" */
err = crypto_grab_aead(&ictx->u.aead_spawn, inst,
inner_cipher_name, 0, mask);
if (err)
goto out_free_inst;
aead_alg = crypto_spawn_aead_alg(&ictx->u.aead_spawn);
block_base = &aead_alg->base;
if (!strstarts(block_base->cra_name, "authenc(")) {
pr_warn("Only authenc() type AEADs are supported by ESSIV\n");
err = -EINVAL;
goto out_drop_skcipher;
}
ivsize = aead_alg->ivsize;
break;
default:
return -EINVAL;
}
if (!parse_cipher_name(ictx->essiv_cipher_name, block_base->cra_name)) {
pr_warn("Failed to parse ESSIV cipher name from skcipher cra_name\n");
err = -EINVAL;
goto out_drop_skcipher;
}
/* Synchronous hash, e.g., "sha256" */
_hash_alg = crypto_alg_mod_lookup(shash_name,
CRYPTO_ALG_TYPE_SHASH,
CRYPTO_ALG_TYPE_MASK | mask);
if (IS_ERR(_hash_alg)) {
err = PTR_ERR(_hash_alg);
goto out_drop_skcipher;
}
hash_alg = __crypto_shash_alg(_hash_alg);
/* Check the set of algorithms */
if (!essiv_supported_algorithms(ictx->essiv_cipher_name, hash_alg,
ivsize)) {
pr_warn("Unsupported essiv instantiation: essiv(%s,%s)\n",
block_base->cra_name, hash_alg->base.cra_name);
err = -EINVAL;
goto out_free_hash;
}
/* record the driver name so we can instantiate this exact algo later */
strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
CRYPTO_MAX_ALG_NAME);
/* Instance fields */
err = -ENAMETOOLONG;
if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME,
"essiv(%s,%s)", block_base->cra_name,
hash_alg->base.cra_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_hash;
if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME,
"essiv(%s,%s)", block_base->cra_driver_name,
hash_alg->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto out_free_hash;
/*
* hash_alg wasn't gotten via crypto_grab*(), so we need to inherit its
* flags manually.
*/
base->cra_flags |= (hash_alg->base.cra_flags &
CRYPTO_ALG_INHERITED_FLAGS);
base->cra_blocksize = block_base->cra_blocksize;
base->cra_ctxsize = sizeof(struct essiv_tfm_ctx);
base->cra_alignmask = block_base->cra_alignmask;
base->cra_priority = block_base->cra_priority;
if (type == CRYPTO_ALG_TYPE_SKCIPHER) {
skcipher_inst->alg.setkey = essiv_skcipher_setkey;
skcipher_inst->alg.encrypt = essiv_skcipher_encrypt;
skcipher_inst->alg.decrypt = essiv_skcipher_decrypt;
skcipher_inst->alg.init = essiv_skcipher_init_tfm;
skcipher_inst->alg.exit = essiv_skcipher_exit_tfm;
skcipher_inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(skcipher_alg);
skcipher_inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(skcipher_alg);
skcipher_inst->alg.ivsize = ivsize;
skcipher_inst->alg.chunksize = crypto_skcipher_alg_chunksize(skcipher_alg);
skcipher_inst->alg.walksize = crypto_skcipher_alg_walksize(skcipher_alg);
skcipher_inst->free = essiv_skcipher_free_instance;
err = skcipher_register_instance(tmpl, skcipher_inst);
} else {
aead_inst->alg.setkey = essiv_aead_setkey;
aead_inst->alg.setauthsize = essiv_aead_setauthsize;
aead_inst->alg.encrypt = essiv_aead_encrypt;
aead_inst->alg.decrypt = essiv_aead_decrypt;
aead_inst->alg.init = essiv_aead_init_tfm;
aead_inst->alg.exit = essiv_aead_exit_tfm;
aead_inst->alg.ivsize = ivsize;
aead_inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(aead_alg);
aead_inst->alg.chunksize = crypto_aead_alg_chunksize(aead_alg);
aead_inst->free = essiv_aead_free_instance;
err = aead_register_instance(tmpl, aead_inst);
}
if (err)
goto out_free_hash;
crypto_mod_put(_hash_alg);
return 0;
out_free_hash:
crypto_mod_put(_hash_alg);
out_drop_skcipher:
if (type == CRYPTO_ALG_TYPE_SKCIPHER)
crypto_drop_skcipher(&ictx->u.skcipher_spawn);
else
crypto_drop_aead(&ictx->u.aead_spawn);
out_free_inst:
kfree(skcipher_inst);
kfree(aead_inst);
return err;
}
/* essiv(cipher_name, shash_name) */
static struct crypto_template essiv_tmpl = {
.name = "essiv",
.create = essiv_create,
.module = THIS_MODULE,
};
static int __init essiv_module_init(void)
{
return crypto_register_template(&essiv_tmpl);
}
static void __exit essiv_module_exit(void)
{
crypto_unregister_template(&essiv_tmpl);
}
subsys_initcall(essiv_module_init);
module_exit(essiv_module_exit);
MODULE_DESCRIPTION("ESSIV skcipher/aead wrapper for block encryption");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("essiv");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
| linux-master | crypto/essiv.c |