X-Git-Url: https://www.bearssl.org/gitweb//home/git/?p=BearSSL;a=blobdiff_plain;f=src%2Finner.h;h=07e1d0a478e801a91580658918130fb8b595e407;hp=8ada6125e631e9a8d5bf65e22fd699ec18ad2b29;hb=b715b43e411dc5d5949df6f75ef7bb65952db11c;hpb=2f454aad577ae53798935cc32438a2d3f02ba31f diff --git a/src/inner.h b/src/inner.h index 8ada612..07e1d0a 100644 --- a/src/inner.h +++ b/src/inner.h @@ -31,6 +31,15 @@ #include "config.h" #include "bearssl.h" +/* + * On MSVC, disable the warning about applying unary minus on an + * unsigned type: it is standard, we do it all the time, and for + * good reasons. + */ +#if _MSC_VER +#pragma warning( disable : 4146 ) +#endif + /* * Maximum size for a RSA modulus (in bits). Allocated stack buffers * depend on that size, so this value should be kept small. Currently, @@ -39,10 +48,22 @@ * already set their root keys to RSA-4096, so we should be able to * process such keys. * - * This value MUST be a multiple of 64. + * This value MUST be a multiple of 64. This value MUST NOT exceed 47666 + * (some computations in RSA key generation rely on the factor size being + * no more than 23833 bits). RSA key sizes beyond 3072 bits don't make a + * lot of sense anyway. */ #define BR_MAX_RSA_SIZE 4096 +/* + * Minimum size for a RSA modulus (in bits); this value is used only to + * filter out invalid parameters for key pair generation. Normally, + * applications should not use RSA keys smaller than 2048 bits; but some + * specific cases might need shorter keys, for legacy or research + * purposes. + */ +#define BR_MIN_RSA_SIZE 512 + /* * Maximum size for a RSA factor (in bits). This is for RSA private-key * operations. Default is to support factors up to a bit more than half @@ -76,7 +97,7 @@ * * The test on 'unsigned long' should already catch most cases, the one * notable exception being Windows code where 'unsigned long' is kept to - * 32-bit for compatbility with all the legacy code that liberally uses + * 32-bit for compatibility with all the legacy code that liberally uses * the 'DWORD' type for 32-bit values. * * Macro names are taken from: http://nadeausoftware.com/articles/2012/02/c_c_tip_how_detect_processor_type_using_compiler_predefined_macros @@ -93,6 +114,339 @@ #define BR_64 1 #elif defined(__x86_64__) || defined(_M_X64) #define BR_64 1 +#elif defined(__aarch64__) || defined(_M_ARM64) +#define BR_64 1 +#elif defined(__mips64) +#define BR_64 1 +#endif +#endif + +/* + * Set BR_LOMUL on platforms where it makes sense. + */ +#ifndef BR_LOMUL +#if BR_ARMEL_CORTEXM_GCC +#define BR_LOMUL 1 +#endif +#endif + +/* + * Architecture detection. + */ +#ifndef BR_i386 +#if __i386__ || _M_IX86 +#define BR_i386 1 +#endif +#endif + +#ifndef BR_amd64 +#if __x86_64__ || _M_X64 +#define BR_amd64 1 +#endif +#endif + +/* + * Compiler brand and version. + * + * Implementations that use intrinsics need to detect the compiler type + * and version because some specific actions may be needed to activate + * the corresponding opcodes, both for header inclusion, and when using + * them in a function. + * + * BR_GCC, BR_CLANG and BR_MSC will be set to 1 for, respectively, GCC, + * Clang and MS Visual C. For each of them, sub-macros will be defined + * for versions; each sub-macro is set whenever the compiler version is + * at least as recent as the one corresponding to the macro. + */ + +/* + * GCC thresholds are on versions 4.4 to 4.9 and 5.0. 
+ */ +#ifndef BR_GCC +#if __GNUC__ && !__clang__ +#define BR_GCC 1 + +#if __GNUC__ > 4 +#define BR_GCC_5_0 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 +#define BR_GCC_4_9 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 8 +#define BR_GCC_4_8 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7 +#define BR_GCC_4_7 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6 +#define BR_GCC_4_6 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 5 +#define BR_GCC_4_5 1 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 4 +#define BR_GCC_4_4 1 +#endif + +#if BR_GCC_5_0 +#define BR_GCC_4_9 1 +#endif +#if BR_GCC_4_9 +#define BR_GCC_4_8 1 +#endif +#if BR_GCC_4_8 +#define BR_GCC_4_7 1 +#endif +#if BR_GCC_4_7 +#define BR_GCC_4_6 1 +#endif +#if BR_GCC_4_6 +#define BR_GCC_4_5 1 +#endif +#if BR_GCC_4_5 +#define BR_GCC_4_4 1 +#endif + +#endif +#endif + +/* + * Clang thresholds are on versions 3.7.0 and 3.8.0. + */ +#ifndef BR_CLANG +#if __clang__ +#define BR_CLANG 1 + +#if __clang_major__ > 3 || (__clang_major__ == 3 && __clang_minor__ >= 8) +#define BR_CLANG_3_8 1 +#elif __clang_major__ == 3 && __clang_minor__ >= 7 +#define BR_CLANG_3_7 1 +#endif + +#if BR_CLANG_3_8 +#define BR_CLANG_3_7 1 +#endif + +#endif +#endif + +/* + * MS Visual C thresholds are on Visual Studio 2005 to 2015. + */ +#ifndef BR_MSC +#if _MSC_VER +#define BR_MSC 1 + +#if _MSC_VER >= 1900 +#define BR_MSC_2015 1 +#elif _MSC_VER >= 1800 +#define BR_MSC_2013 1 +#elif _MSC_VER >= 1700 +#define BR_MSC_2012 1 +#elif _MSC_VER >= 1600 +#define BR_MSC_2010 1 +#elif _MSC_VER >= 1500 +#define BR_MSC_2008 1 +#elif _MSC_VER >= 1400 +#define BR_MSC_2005 1 +#endif + +#if BR_MSC_2015 +#define BR_MSC_2013 1 +#endif +#if BR_MSC_2013 +#define BR_MSC_2012 1 +#endif +#if BR_MSC_2012 +#define BR_MSC_2010 1 +#endif +#if BR_MSC_2010 +#define BR_MSC_2008 1 +#endif +#if BR_MSC_2008 +#define BR_MSC_2005 1 +#endif + +#endif +#endif + +/* + * GCC 4.4+ and Clang 3.7+ allow tagging specific functions with a + * 'target' attribute that activates support for specific opcodes. + */ +#if BR_GCC_4_4 || BR_CLANG_3_7 +#define BR_TARGET(x) __attribute__((target(x))) +#else +#define BR_TARGET(x) +#endif + +/* + * AES-NI intrinsics are available on x86 (32-bit and 64-bit) with + * GCC 4.8+, Clang 3.7+ and MSC 2012+. + */ +#ifndef BR_AES_X86NI +#if (BR_i386 || BR_amd64) && (BR_GCC_4_8 || BR_CLANG_3_7 || BR_MSC_2012) +#define BR_AES_X86NI 1 +#endif +#endif + +/* + * SSE2 intrinsics are available on x86 (32-bit and 64-bit) with + * GCC 4.4+, Clang 3.7+ and MSC 2005+. + */ +#ifndef BR_SSE2 +#if (BR_i386 || BR_amd64) && (BR_GCC_4_4 || BR_CLANG_3_7 || BR_MSC_2005) +#define BR_SSE2 1 +#endif +#endif + +/* + * RDRAND intrinsics are available on x86 (32-bit and 64-bit) with + * GCC 4.6+, Clang 3.7+ and MSC 2012+. + */ +#ifndef BR_RDRAND +#if (BR_i386 || BR_amd64) && (BR_GCC_4_6 || BR_CLANG_3_7 || BR_MSC_2012) +#define BR_RDRAND 1 +#endif +#endif + +/* + * Determine type of OS for random number generation. Macro names and + * values are documented on: + * https://sourceforge.net/p/predef/wiki/OperatingSystems/ + * + * Win32's CryptGenRandom() should be available on Windows systems. + * + * /dev/urandom should work on all Unix-like systems (including macOS X). + * + * getentropy() is present on Linux (Glibc 2.25+), FreeBSD (12.0+) and + * OpenBSD (5.6+). For OpenBSD, there does not seem to be easy to use + * macros to test the minimum version, so we just assume that it is + * recent enough (last version without getentropy() has gone out of + * support in May 2015). 
+ * + * Ideally we should use getentropy() on macOS (10.12+) too, but I don't + * know how to test the exact OS version with preprocessor macros. + * + * TODO: enrich the list of detected system. + */ + +#ifndef BR_USE_URANDOM +#if defined _AIX \ + || defined __ANDROID__ \ + || defined __FreeBSD__ \ + || defined __NetBSD__ \ + || defined __OpenBSD__ \ + || defined __DragonFly__ \ + || defined __linux__ \ + || (defined __sun && (defined __SVR4 || defined __svr4__)) \ + || (defined __APPLE__ && defined __MACH__) +#define BR_USE_URANDOM 1 +#endif +#endif + +#ifndef BR_USE_GETENTROPY +#if (defined __linux__ \ + && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 25))) \ + || (defined __FreeBSD__ && __FreeBSD__ >= 12) \ + || defined __OpenBSD__ +#define BR_USE_GETENTROPY 1 +#endif +#endif + +#ifndef BR_USE_WIN32_RAND +#if defined _WIN32 || defined _WIN64 +#define BR_USE_WIN32_RAND 1 +#endif +#endif + +/* + * POWER8 crypto support. We rely on compiler macros for the + * architecture, since we do not have a reliable, simple way to detect + * the required support at runtime (we could try running an opcode, and + * trapping the exception or signal on illegal instruction, but this + * induces some non-trivial OS dependencies that we would prefer to + * avoid if possible). + */ +#ifndef BR_POWER8 +#if __GNUC__ && ((_ARCH_PWR8 || _ARCH_PPC) && __CRYPTO__) +#define BR_POWER8 1 +#endif +#endif + +/* + * Detect endinanness on POWER8. + */ +#if BR_POWER8 +#if defined BR_POWER8_LE +#undef BR_POWER8_BE +#if BR_POWER8_LE +#define BR_POWER8_BE 0 +#else +#define BR_POWER8_BE 1 +#endif +#elif defined BR_POWER8_BE +#undef BR_POWER8_LE +#if BR_POWER8_BE +#define BR_POWER8_LE 0 +#else +#define BR_POWER8_LE 1 +#endif +#else +#if __LITTLE_ENDIAN__ +#define BR_POWER8_LE 1 +#define BR_POWER8_BE 0 +#else +#define BR_POWER8_LE 0 +#define BR_POWER8_BE 1 +#endif +#endif +#endif + +/* + * Detect support for 128-bit integers. + */ +#if !defined BR_INT128 && !defined BR_UMUL128 +#ifdef __SIZEOF_INT128__ +#define BR_INT128 1 +#elif _M_X64 +#define BR_UMUL128 1 +#endif +#endif + +/* + * Detect support for unaligned accesses with known endianness. + * + * x86 (both 32-bit and 64-bit) is little-endian and allows unaligned + * accesses. + * + * POWER/PowerPC allows unaligned accesses when big-endian. POWER8 and + * later also allow unaligned accesses when little-endian. + */ +#if !defined BR_LE_UNALIGNED && !defined BR_BE_UNALIGNED + +#if __i386 || __i386__ || __x86_64__ || _M_IX86 || _M_X64 +#define BR_LE_UNALIGNED 1 +#elif BR_POWER8_BE +#define BR_BE_UNALIGNED 1 +#elif BR_POWER8_LE +#define BR_LE_UNALIGNED 1 +#elif (__powerpc__ || __powerpc64__ || _M_PPC || _ARCH_PPC || _ARCH_PPC64) \ + && __BIG_ENDIAN__ +#define BR_BE_UNALIGNED 1 +#endif + +#endif + +/* + * Detect support for an OS-provided time source. + */ + +#ifndef BR_USE_UNIX_TIME +#if defined __unix__ || defined __linux__ \ + || defined _POSIX_SOURCE || defined _POSIX_C_SOURCE \ + || (defined __APPLE__ && defined __MACH__) +#define BR_USE_UNIX_TIME 1 +#endif +#endif + +#ifndef BR_USE_WIN32_TIME +#if defined _WIN32 || defined _WIN64 +#define BR_USE_WIN32_TIME 1 #endif #endif @@ -101,53 +455,95 @@ * Encoding/decoding functions. * * 32-bit and 64-bit decoding, both little-endian and big-endian, is - * implemented with the inline functions below. These functions are - * generic: they don't depend on the architecture natural endianness, - * and they can handle unaligned accesses. Optimized versions for some - * specific architectures may be implemented at a later time. 
- */ + * implemented with the inline functions below. + * + * When allowed by some compile-time options (autodetected or provided), + * optimised code is used, to perform direct memory access when the + * underlying architecture supports it, both for endianness and + * alignment. This, however, may trigger strict aliasing issues; the + * code below uses unions to perform (supposedly) safe type punning. + * Since the C aliasing rules are relatively complex and were amended, + * or at least re-explained with different phrasing, in all successive + * versions of the C standard, it is always a bit risky to bet that any + * specific version of a C compiler got it right, for some notion of + * "right". + */ + +typedef union { + uint16_t u; + unsigned char b[sizeof(uint16_t)]; +} br_union_u16; + +typedef union { + uint32_t u; + unsigned char b[sizeof(uint32_t)]; +} br_union_u32; + +typedef union { + uint64_t u; + unsigned char b[sizeof(uint64_t)]; +} br_union_u64; static inline void br_enc16le(void *dst, unsigned x) { +#if BR_LE_UNALIGNED + ((br_union_u16 *)dst)->u = x; +#else unsigned char *buf; buf = dst; buf[0] = (unsigned char)x; buf[1] = (unsigned char)(x >> 8); +#endif } static inline void br_enc16be(void *dst, unsigned x) { +#if BR_BE_UNALIGNED + ((br_union_u16 *)dst)->u = x; +#else unsigned char *buf; buf = dst; buf[0] = (unsigned char)(x >> 8); buf[1] = (unsigned char)x; +#endif } static inline unsigned br_dec16le(const void *src) { +#if BR_LE_UNALIGNED + return ((const br_union_u16 *)src)->u; +#else const unsigned char *buf; buf = src; return (unsigned)buf[0] | ((unsigned)buf[1] << 8); +#endif } static inline unsigned br_dec16be(const void *src) { +#if BR_BE_UNALIGNED + return ((const br_union_u16 *)src)->u; +#else const unsigned char *buf; buf = src; return ((unsigned)buf[0] << 8) | (unsigned)buf[1]; +#endif } static inline void br_enc32le(void *dst, uint32_t x) { +#if BR_LE_UNALIGNED + ((br_union_u32 *)dst)->u = x; +#else unsigned char *buf; buf = dst; @@ -155,11 +551,15 @@ br_enc32le(void *dst, uint32_t x) buf[1] = (unsigned char)(x >> 8); buf[2] = (unsigned char)(x >> 16); buf[3] = (unsigned char)(x >> 24); +#endif } static inline void br_enc32be(void *dst, uint32_t x) { +#if BR_BE_UNALIGNED + ((br_union_u32 *)dst)->u = x; +#else unsigned char *buf; buf = dst; @@ -167,11 +567,15 @@ br_enc32be(void *dst, uint32_t x) buf[1] = (unsigned char)(x >> 16); buf[2] = (unsigned char)(x >> 8); buf[3] = (unsigned char)x; +#endif } static inline uint32_t br_dec32le(const void *src) { +#if BR_LE_UNALIGNED + return ((const br_union_u32 *)src)->u; +#else const unsigned char *buf; buf = src; @@ -179,11 +583,15 @@ br_dec32le(const void *src) | ((uint32_t)buf[1] << 8) | ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24); +#endif } static inline uint32_t br_dec32be(const void *src) { +#if BR_BE_UNALIGNED + return ((const br_union_u32 *)src)->u; +#else const unsigned char *buf; buf = src; @@ -191,46 +599,63 @@ br_dec32be(const void *src) | ((uint32_t)buf[1] << 16) | ((uint32_t)buf[2] << 8) | (uint32_t)buf[3]; +#endif } static inline void br_enc64le(void *dst, uint64_t x) { +#if BR_LE_UNALIGNED + ((br_union_u64 *)dst)->u = x; +#else unsigned char *buf; buf = dst; br_enc32le(buf, (uint32_t)x); br_enc32le(buf + 4, (uint32_t)(x >> 32)); +#endif } static inline void br_enc64be(void *dst, uint64_t x) { +#if BR_BE_UNALIGNED + ((br_union_u64 *)dst)->u = x; +#else unsigned char *buf; buf = dst; br_enc32be(buf, (uint32_t)(x >> 32)); br_enc32be(buf + 4, (uint32_t)x); +#endif } static inline uint64_t 
br_dec64le(const void *src) { +#if BR_LE_UNALIGNED + return ((const br_union_u64 *)src)->u; +#else const unsigned char *buf; buf = src; return (uint64_t)br_dec32le(buf) | ((uint64_t)br_dec32le(buf + 4) << 32); +#endif } static inline uint64_t br_dec64be(const void *src) { +#if BR_BE_UNALIGNED + return ((const br_union_u64 *)src)->u; +#else const unsigned char *buf; buf = src; return ((uint64_t)br_dec32be(buf) << 32) | (uint64_t)br_dec32be(buf + 4); +#endif } /* @@ -289,8 +714,8 @@ void br_sha2small_round(const unsigned char *buf, uint32_t *val); */ void br_tls_phash(void *dst, size_t len, const br_hash_class *dig, - const void *secret, size_t secret_len, - const char *label, const void *seed, size_t seed_len); + const void *secret, size_t secret_len, const char *label, + size_t seed_num, const br_tls_prf_seed_chunk *seed); /* * Copy all configured hash implementations from a multihash context @@ -300,7 +725,7 @@ static inline void br_multihash_copyimpl(br_multihash_context *dst, const br_multihash_context *src) { - memcpy(dst->impl, src->impl, sizeof src->impl); + memcpy((void *)dst->impl, src->impl, sizeof src->impl); } /* ==================================================================== */ @@ -1053,6 +1478,25 @@ void br_i31_from_monty(uint32_t *x, const uint32_t *m, uint32_t m0i); void br_i31_modpow(uint32_t *x, const unsigned char *e, size_t elen, const uint32_t *m, uint32_t m0i, uint32_t *t1, uint32_t *t2); +/* + * Compute a modular exponentiation. x[] MUST be an integer modulo m[] + * (same announced bit length, lower value). m[] MUST be odd. The + * exponent is in big-endian unsigned notation, over 'elen' bytes. The + * "m0i" parameter is equal to -(1/m0) mod 2^31, where m0 is the least + * significant value word of m[] (this works only if m[] is an odd + * integer). The tmp[] array is used for temporaries, and has size + * 'twlen' words; it must be large enough to accommodate at least two + * temporary values with the same size as m[] (including the leading + * "bit length" word). If there is room for more temporaries, then this + * function may use the extra room for window-based optimisation, + * resulting in faster computations. + * + * Returned value is 1 on success, 0 on error. An error is reported if + * the provided tmp[] array is too short. + */ +uint32_t br_i31_modpow_opt(uint32_t *x, const unsigned char *e, size_t elen, + const uint32_t *m, uint32_t m0i, uint32_t *tmp, size_t twlen); + /* * Compute d+a*b, result in d. The initial announced bit length of d[] * MUST match that of a[]. The d[] array MUST be large enough to @@ -1066,8 +1510,29 @@ void br_i31_modpow(uint32_t *x, const unsigned char *e, size_t elen, */ void br_i31_mulacc(uint32_t *d, const uint32_t *a, const uint32_t *b); +/* + * Compute x/y mod m, result in x. Values x and y must be between 0 and + * m-1, and have the same announced bit length as m. Modulus m must be + * odd. The "m0i" parameter is equal to -1/m mod 2^31. The array 't' + * must point to a temporary area that can hold at least three integers + * of the size of m. + * + * m may not overlap x and y. x and y may overlap each other (this can + * be useful to test whether a value is invertible modulo m). t must be + * disjoint from all other arrays. + * + * Returned value is 1 on success, 0 otherwise. Success is attained if + * y is invertible modulo m. 
+ */ +uint32_t br_i31_moddiv(uint32_t *x, const uint32_t *y, + const uint32_t *m, uint32_t m0i, uint32_t *t); + /* ==================================================================== */ +/* + * FIXME: document "i15" functions. + */ + static inline void br_i15_zero(uint16_t *x, uint16_t bit_len) { @@ -1093,6 +1558,9 @@ void br_i15_to_monty(uint16_t *x, const uint16_t *m); void br_i15_modpow(uint16_t *x, const unsigned char *e, size_t elen, const uint16_t *m, uint16_t m0i, uint16_t *t1, uint16_t *t2); +uint32_t br_i15_modpow_opt(uint16_t *x, const unsigned char *e, size_t elen, + const uint16_t *m, uint16_t m0i, uint16_t *tmp, size_t twlen); + void br_i15_encode(void *dst, size_t len, const uint16_t *x); uint32_t br_i15_decode_mod(uint16_t *x, @@ -1113,6 +1581,36 @@ void br_i15_reduce(uint16_t *x, const uint16_t *a, const uint16_t *m); void br_i15_mulacc(uint16_t *d, const uint16_t *a, const uint16_t *b); +uint32_t br_i15_moddiv(uint16_t *x, const uint16_t *y, + const uint16_t *m, uint16_t m0i, uint16_t *t); + +/* + * Variant of br_i31_modpow_opt() that internally uses 64x64->128 + * multiplications. It expects the same parameters as br_i31_modpow_opt(), + * except that the temporaries should be 64-bit integers, not 32-bit + * integers. + */ +uint32_t br_i62_modpow_opt(uint32_t *x31, const unsigned char *e, size_t elen, + const uint32_t *m31, uint32_t m0i31, uint64_t *tmp, size_t twlen); + +/* + * Type for a function with the same API as br_i31_modpow_opt() (some + * implementations of this type may have stricter alignment requirements + * on the temporaries). + */ +typedef uint32_t (*br_i31_modpow_opt_type)(uint32_t *x, + const unsigned char *e, size_t elen, + const uint32_t *m, uint32_t m0i, uint32_t *tmp, size_t twlen); + +/* + * Wrapper for br_i62_modpow_opt() that uses the same type as + * br_i31_modpow_opt(); however, it requires its 'tmp' argument to the + * 64-bit aligned. + */ +uint32_t br_i62_modpow_opt_as_i31(uint32_t *x, + const unsigned char *e, size_t elen, + const uint32_t *m, uint32_t m0i, uint32_t *tmp, size_t twlen); + /* ==================================================================== */ static inline size_t @@ -1412,6 +1910,40 @@ unsigned br_aes_ct64_keysched(uint64_t *comp_skey, void br_aes_ct64_skey_expand(uint64_t *skey, unsigned num_rounds, const uint64_t *comp_skey); +/* + * Test support for AES-NI opcodes. + */ +int br_aes_x86ni_supported(void); + +/* + * AES key schedule, using x86 AES-NI instructions. This yields the + * subkeys in the encryption direction. Number of rounds is returned. + * Key size MUST be 16, 24 or 32 bytes; otherwise, 0 is returned. + */ +unsigned br_aes_x86ni_keysched_enc(unsigned char *skni, + const void *key, size_t len); + +/* + * AES key schedule, using x86 AES-NI instructions. This yields the + * subkeys in the decryption direction. Number of rounds is returned. + * Key size MUST be 16, 24 or 32 bytes; otherwise, 0 is returned. + */ +unsigned br_aes_x86ni_keysched_dec(unsigned char *skni, + const void *key, size_t len); + +/* + * Test support for AES POWER8 opcodes. + */ +int br_aes_pwr8_supported(void); + +/* + * AES key schedule, using POWER8 instructions. This yields the + * subkeys in the encryption direction. Number of rounds is returned. + * Key size MUST be 16, 24 or 32 bytes; otherwise, 0 is returned. + */ +unsigned br_aes_pwr8_keysched(unsigned char *skni, + const void *key, size_t len); + /* ==================================================================== */ /* * RSA. 
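
As an illustration of the br_i31_modpow_opt() contract documented in the hunk above, here is a minimal sketch (not part of the patch): it assumes an i31-encoded modulus m[] (odd, announced bit length in m[0], least significant value word in m[1]), an operand x[] already reduced modulo m, and br_i31_ninv31() declared elsewhere in this header; the demo_modpow() name is hypothetical.

static uint32_t
demo_modpow(uint32_t *x, const unsigned char *e, size_t elen,
	const uint32_t *m)
{
	/*
	 * Room for two temporaries as large as a maximum-size modulus
	 * (header word included); any extra room lets the function use
	 * window-based optimisations.
	 */
	uint32_t tmp[2 * ((BR_MAX_RSA_SIZE + 30) / 31 + 1)];
	uint32_t m0i;

	/* -1/m0 mod 2^31, computed from the least significant word of m[]. */
	m0i = br_i31_ninv31(m[1]);

	/* Returns 0 if tmp[] turns out to be too short. */
	return br_i31_modpow_opt(x, e, elen, m, m0i,
		tmp, sizeof tmp / sizeof tmp[0]);
}
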
@@ -1435,6 +1967,61 @@ uint32_t br_rsa_pkcs1_sig_unpad(const unsigned char *sig, size_t sig_len,
 	const unsigned char *hash_oid, size_t hash_len,
 	unsigned char *hash_out);
 
+/*
+ * Apply proper PSS padding. The 'x' buffer is output only: it
+ * receives the value that is to be exponentiated.
+ */
+uint32_t br_rsa_pss_sig_pad(const br_prng_class **rng,
+	const br_hash_class *hf_data, const br_hash_class *hf_mgf1,
+	const unsigned char *hash, size_t salt_len,
+	uint32_t n_bitlen, unsigned char *x);
+
+/*
+ * Check PSS padding. The provided value is the one _after_
+ * the modular exponentiation; it is modified by this function.
+ * This function infers the signature length from the public key
+ * size, i.e. it assumes that this has already been verified (as
+ * part of the exponentiation).
+ */
+uint32_t br_rsa_pss_sig_unpad(
+	const br_hash_class *hf_data, const br_hash_class *hf_mgf1,
+	const unsigned char *hash, size_t salt_len,
+	const br_rsa_public_key *pk, unsigned char *x);
+
+/*
+ * Apply OAEP padding. Returned value is the actual padded string length,
+ * or zero on error.
+ */
+size_t br_rsa_oaep_pad(const br_prng_class **rnd, const br_hash_class *dig,
+	const void *label, size_t label_len, const br_rsa_public_key *pk,
+	void *dst, size_t dst_max_len, const void *src, size_t src_len);
+
+/*
+ * Unravel and check OAEP padding. If the padding is correct, then 1 is
+ * returned, '*len' is adjusted to the length of the message, and the
+ * data is moved to the start of the 'data' buffer. If the padding is
+ * incorrect, then 0 is returned and '*len' is untouched. Either way,
+ * the complete buffer contents are altered.
+ */
+uint32_t br_rsa_oaep_unpad(const br_hash_class *dig,
+	const void *label, size_t label_len, void *data, size_t *len);
+
+/*
+ * Compute MGF1 for a given seed, and XOR the output into the provided
+ * buffer.
+ */
+void br_mgf1_xor(void *data, size_t len,
+	const br_hash_class *dig, const void *seed, size_t seed_len);
+
+/*
+ * Inner function for RSA key generation; used by the "i31" and "i62"
+ * implementations.
+ */
+uint32_t br_rsa_i31_keygen_inner(const br_prng_class **rng,
+	br_rsa_private_key *sk, void *kbuf_priv,
+	br_rsa_public_key *pk, void *kbuf_pub,
+	unsigned size, uint32_t pubexp, br_i31_modpow_opt_type mp31);
+
 /* ==================================================================== */
 /*
  * Elliptic curves.
@@ -1485,6 +2072,72 @@ void br_ecdsa_i31_bits2int(uint32_t *x,
 void br_ecdsa_i15_bits2int(uint16_t *x,
 	const void *src, size_t len, uint32_t ebitlen);
 
+/* ==================================================================== */
+/*
+ * ASN.1 support functions.
+ */
+
+/*
+ * A br_asn1_uint structure contains encoding information about an
+ * INTEGER nonnegative value: pointer to the integer contents (unsigned
+ * big-endian representation), length of the integer contents,
+ * and length of the encoded value. The data shall have minimal length:
+ * - If the integer value is zero, then 'len' must be zero.
+ * - If the integer value is not zero, then data[0] must be non-zero.
+ *
+ * Under these conditions, 'asn1len' is necessarily equal to either len
+ * or len+1.
+ */
+typedef struct {
+	const unsigned char *data;
+	size_t len;
+	size_t asn1len;
+} br_asn1_uint;
+
+/*
+ * Given an encoded integer (unsigned big-endian, with possible leading
+ * bytes of value 0), return the "prepared INTEGER" structure.
+ */
+br_asn1_uint br_asn1_uint_prepare(const void *xdata, size_t xlen);
+
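
As a sketch of how these declarations combine (editor's illustration, not part of the patch): the full DER size of a nonnegative INTEGER, i.e. tag byte, length field, then contents, can be computed from a prepared value together with the len_of_len() macro declared just below; the demo_der_integer_size() name is hypothetical.

static size_t
demo_der_integer_size(const void *val, size_t val_len)
{
	br_asn1_uint pp;

	/*
	 * 'asn1len' already accounts for the possible extra leading
	 * zero byte; add one byte for the INTEGER tag (0x02) and the
	 * encoded length of the length.
	 */
	pp = br_asn1_uint_prepare(val, val_len);
	return 1 + len_of_len(pp.asn1len) + pp.asn1len;
}
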
+/*
+ * Encode an ASN.1 length. The length of the encoded length is returned.
+ * If 'dest' is NULL, then no encoding is performed, but the length of
+ * the encoded length is still computed and returned.
+ */
+size_t br_asn1_encode_length(void *dest, size_t len);
+
+/*
+ * Convenient macro for computing lengths of lengths.
+ */
+#define len_of_len(len)   br_asn1_encode_length(NULL, len)
+
+/*
+ * Encode a (prepared) ASN.1 INTEGER. The encoded length is returned.
+ * If 'dest' is NULL, then no encoding is performed, but the length of
+ * the encoded integer is still computed and returned.
+ */
+size_t br_asn1_encode_uint(void *dest, br_asn1_uint pp);
+
+/*
+ * Get the OID that identifies an elliptic curve. Returned value is
+ * the DER-encoded OID, with the length (always one byte) but without
+ * the tag. Thus, the first byte of the returned buffer contains the
+ * number of subsequent bytes in the value. If the curve is not
+ * recognised, NULL is returned.
+ */
+const unsigned char *br_get_curve_OID(int curve);
+
+/*
+ * Inner function for EC private key encoding. This is equivalent to
+ * the API function br_encode_ec_raw_der(), except for an extra
+ * parameter: if 'include_curve_oid' is zero, then the curve OID is
+ * _not_ included in the output blob (this is for PKCS#8 support).
+ */
+size_t br_encode_ec_raw_der_inner(void *dest,
+	const br_ec_private_key *sk, const br_ec_public_key *pk,
+	int include_curve_oid);
+
 /* ==================================================================== */
 /*
  * SSL/TLS support functions.
@@ -1670,6 +2323,34 @@ void br_ssl_engine_switch_chapol_in(br_ssl_engine_context *cc,
 void br_ssl_engine_switch_chapol_out(br_ssl_engine_context *cc,
 	int is_client, int prf_id);
 
+/*
+ * Switch to CCM decryption for incoming records.
+ *    cc               the engine context
+ *    is_client        non-zero for a client, zero for a server
+ *    prf_id           id of hash function for PRF
+ *    bc_impl          block cipher implementation (CTR+CBC)
+ *    cipher_key_len   block cipher key length (in bytes)
+ *    tag_len          tag length (in bytes)
+ */
+void br_ssl_engine_switch_ccm_in(br_ssl_engine_context *cc,
+	int is_client, int prf_id,
+	const br_block_ctrcbc_class *bc_impl,
+	size_t cipher_key_len, size_t tag_len);
+
+/*
+ * Switch to CCM encryption for outgoing records.
+ *    cc               the engine context
+ *    is_client        non-zero for a client, zero for a server
+ *    prf_id           id of hash function for PRF
+ *    bc_impl          block cipher implementation (CTR+CBC)
+ *    cipher_key_len   block cipher key length (in bytes)
+ *    tag_len          tag length (in bytes)
+ */
+void br_ssl_engine_switch_ccm_out(br_ssl_engine_context *cc,
+	int is_client, int prf_id,
+	const br_block_ctrcbc_class *bc_impl,
+	size_t cipher_key_len, size_t tag_len);
+
 /*
  * Calls to T0-generated code.
  */
@@ -1689,4 +2370,208 @@ int br_ssl_choose_hash(unsigned bf);
 
 /* ==================================================================== */
 
+/*
+ * PowerPC / POWER assembly stuff. The special BR_POWER_ASM_MACROS macro
+ * must be defined before including this file; this is done by source
+ * files that use some inline assembly for PowerPC / POWER machines.
+ */ + +#if BR_POWER_ASM_MACROS + +#define lxvw4x(xt, ra, rb) lxvw4x_(xt, ra, rb) +#define stxvw4x(xt, ra, rb) stxvw4x_(xt, ra, rb) + +#define bdnz(foo) bdnz_(foo) +#define bdz(foo) bdz_(foo) +#define beq(foo) beq_(foo) + +#define li(rx, value) li_(rx, value) +#define addi(rx, ra, imm) addi_(rx, ra, imm) +#define cmpldi(rx, imm) cmpldi_(rx, imm) +#define mtctr(rx) mtctr_(rx) +#define vspltb(vrt, vrb, uim) vspltb_(vrt, vrb, uim) +#define vspltw(vrt, vrb, uim) vspltw_(vrt, vrb, uim) +#define vspltisb(vrt, imm) vspltisb_(vrt, imm) +#define vspltisw(vrt, imm) vspltisw_(vrt, imm) +#define vrlw(vrt, vra, vrb) vrlw_(vrt, vra, vrb) +#define vsbox(vrt, vra) vsbox_(vrt, vra) +#define vxor(vrt, vra, vrb) vxor_(vrt, vra, vrb) +#define vand(vrt, vra, vrb) vand_(vrt, vra, vrb) +#define vsro(vrt, vra, vrb) vsro_(vrt, vra, vrb) +#define vsl(vrt, vra, vrb) vsl_(vrt, vra, vrb) +#define vsldoi(vt, va, vb, sh) vsldoi_(vt, va, vb, sh) +#define vsr(vrt, vra, vrb) vsr_(vrt, vra, vrb) +#define vaddcuw(vrt, vra, vrb) vaddcuw_(vrt, vra, vrb) +#define vadduwm(vrt, vra, vrb) vadduwm_(vrt, vra, vrb) +#define vsububm(vrt, vra, vrb) vsububm_(vrt, vra, vrb) +#define vsubuwm(vrt, vra, vrb) vsubuwm_(vrt, vra, vrb) +#define vsrw(vrt, vra, vrb) vsrw_(vrt, vra, vrb) +#define vcipher(vt, va, vb) vcipher_(vt, va, vb) +#define vcipherlast(vt, va, vb) vcipherlast_(vt, va, vb) +#define vncipher(vt, va, vb) vncipher_(vt, va, vb) +#define vncipherlast(vt, va, vb) vncipherlast_(vt, va, vb) +#define vperm(vt, va, vb, vc) vperm_(vt, va, vb, vc) +#define vpmsumd(vt, va, vb) vpmsumd_(vt, va, vb) +#define xxpermdi(vt, va, vb, d) xxpermdi_(vt, va, vb, d) + +#define lxvw4x_(xt, ra, rb) "\tlxvw4x\t" #xt "," #ra "," #rb "\n" +#define stxvw4x_(xt, ra, rb) "\tstxvw4x\t" #xt "," #ra "," #rb "\n" + +#define label(foo) #foo "%=:\n" +#define bdnz_(foo) "\tbdnz\t" #foo "%=\n" +#define bdz_(foo) "\tbdz\t" #foo "%=\n" +#define beq_(foo) "\tbeq\t" #foo "%=\n" + +#define li_(rx, value) "\tli\t" #rx "," #value "\n" +#define addi_(rx, ra, imm) "\taddi\t" #rx "," #ra "," #imm "\n" +#define cmpldi_(rx, imm) "\tcmpldi\t" #rx "," #imm "\n" +#define mtctr_(rx) "\tmtctr\t" #rx "\n" +#define vspltb_(vrt, vrb, uim) "\tvspltb\t" #vrt "," #vrb "," #uim "\n" +#define vspltw_(vrt, vrb, uim) "\tvspltw\t" #vrt "," #vrb "," #uim "\n" +#define vspltisb_(vrt, imm) "\tvspltisb\t" #vrt "," #imm "\n" +#define vspltisw_(vrt, imm) "\tvspltisw\t" #vrt "," #imm "\n" +#define vrlw_(vrt, vra, vrb) "\tvrlw\t" #vrt "," #vra "," #vrb "\n" +#define vsbox_(vrt, vra) "\tvsbox\t" #vrt "," #vra "\n" +#define vxor_(vrt, vra, vrb) "\tvxor\t" #vrt "," #vra "," #vrb "\n" +#define vand_(vrt, vra, vrb) "\tvand\t" #vrt "," #vra "," #vrb "\n" +#define vsro_(vrt, vra, vrb) "\tvsro\t" #vrt "," #vra "," #vrb "\n" +#define vsl_(vrt, vra, vrb) "\tvsl\t" #vrt "," #vra "," #vrb "\n" +#define vsldoi_(vt, va, vb, sh) "\tvsldoi\t" #vt "," #va "," #vb "," #sh "\n" +#define vsr_(vrt, vra, vrb) "\tvsr\t" #vrt "," #vra "," #vrb "\n" +#define vaddcuw_(vrt, vra, vrb) "\tvaddcuw\t" #vrt "," #vra "," #vrb "\n" +#define vadduwm_(vrt, vra, vrb) "\tvadduwm\t" #vrt "," #vra "," #vrb "\n" +#define vsububm_(vrt, vra, vrb) "\tvsububm\t" #vrt "," #vra "," #vrb "\n" +#define vsubuwm_(vrt, vra, vrb) "\tvsubuwm\t" #vrt "," #vra "," #vrb "\n" +#define vsrw_(vrt, vra, vrb) "\tvsrw\t" #vrt "," #vra "," #vrb "\n" +#define vcipher_(vt, va, vb) "\tvcipher\t" #vt "," #va "," #vb "\n" +#define vcipherlast_(vt, va, vb) "\tvcipherlast\t" #vt "," #va "," #vb "\n" +#define vncipher_(vt, va, vb) "\tvncipher\t" #vt "," #va "," #vb "\n" 
+#define vncipherlast_(vt, va, vb) "\tvncipherlast\t" #vt "," #va "," #vb "\n"
+#define vperm_(vt, va, vb, vc) "\tvperm\t" #vt "," #va "," #vb "," #vc "\n"
+#define vpmsumd_(vt, va, vb) "\tvpmsumd\t" #vt "," #va "," #vb "\n"
+#define xxpermdi_(vt, va, vb, d) "\txxpermdi\t" #vt "," #va "," #vb "," #d "\n"
+
+#endif
+
+/* ==================================================================== */
+/*
+ * Special "activate intrinsics" code, needed for some compiler versions.
+ * This is defined at the end of this file, so that it won't impact any
+ * of the inline functions defined previously; and it is controlled by
+ * a specific macro defined in the caller code.
+ *
+ * Calling code conventions:
+ *
+ * - Caller must define BR_ENABLE_INTRINSICS before including "inner.h".
+ * - Functions that use intrinsics must be enclosed in an "enabled"
+ *   region (between BR_TARGETS_X86_UP and BR_TARGETS_X86_DOWN).
+ * - Functions that use intrinsics must be tagged with the appropriate
+ *   BR_TARGET().
+ */
+
+#if BR_ENABLE_INTRINSICS && (BR_GCC_4_4 || BR_CLANG_3_7 || BR_MSC_2005)
+
+/*
+ * x86 intrinsics (both 32-bit and 64-bit).
+ */
+#if BR_i386 || BR_amd64
+
+/*
+ * On GCC before version 5.0, we need to use the pragma to enable the
+ * target options globally, because the 'target' function attribute
+ * appears to be unreliable. Before 4.6 we must also avoid the
+ * push_options / pop_options mechanism, because it tends to trigger
+ * some internal compiler errors.
+ */
+#if BR_GCC && !BR_GCC_5_0
+#if BR_GCC_4_6
+#define BR_TARGETS_X86_UP \
+	_Pragma("GCC push_options") \
+	_Pragma("GCC target(\"sse2,ssse3,sse4.1,aes,pclmul,rdrnd\")")
+#define BR_TARGETS_X86_DOWN \
+	_Pragma("GCC pop_options")
+#else
+#define BR_TARGETS_X86_UP \
+	_Pragma("GCC target(\"sse2,ssse3,sse4.1,aes,pclmul\")")
+#define BR_TARGETS_X86_DOWN
+#endif
+#pragma GCC diagnostic ignored "-Wpsabi"
+#endif
+
+#if BR_CLANG && !BR_CLANG_3_8
+#undef __SSE2__
+#undef __SSE3__
+#undef __SSSE3__
+#undef __SSE4_1__
+#undef __AES__
+#undef __PCLMUL__
+#undef __RDRND__
+#define __SSE2__ 1
+#define __SSE3__ 1
+#define __SSSE3__ 1
+#define __SSE4_1__ 1
+#define __AES__ 1
+#define __PCLMUL__ 1
+#define __RDRND__ 1
+#endif
+
+#ifndef BR_TARGETS_X86_UP
+#define BR_TARGETS_X86_UP
+#endif
+#ifndef BR_TARGETS_X86_DOWN
+#define BR_TARGETS_X86_DOWN
+#endif
+
+#if BR_GCC || BR_CLANG
+BR_TARGETS_X86_UP
+#include <x86intrin.h>
+#include <cpuid.h>
+#define br_bswap32 __builtin_bswap32
+BR_TARGETS_X86_DOWN
+#endif
+
+#if BR_MSC
+#include <intrin.h>
+#include <malloc.h>
+#include <stdlib.h>
+#define br_bswap32 _byteswap_ulong
+#endif
+
+static inline int
+br_cpuid(uint32_t mask_eax, uint32_t mask_ebx,
+	uint32_t mask_ecx, uint32_t mask_edx)
+{
+#if BR_GCC || BR_CLANG
+	unsigned eax, ebx, ecx, edx;
+
+	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
+		if ((eax & mask_eax) == mask_eax
+			&& (ebx & mask_ebx) == mask_ebx
+			&& (ecx & mask_ecx) == mask_ecx
+			&& (edx & mask_edx) == mask_edx)
+		{
+			return 1;
+		}
+	}
+#elif BR_MSC
+	int info[4];
+
+	__cpuid(info, 1);
+	if (((uint32_t)info[0] & mask_eax) == mask_eax
+		&& ((uint32_t)info[1] & mask_ebx) == mask_ebx
+		&& ((uint32_t)info[2] & mask_ecx) == mask_ecx
+		&& ((uint32_t)info[3] & mask_edx) == mask_edx)
+	{
+		return 1;
+	}
+#endif
+	return 0;
+}
+
+#endif
+
+#endif
+
+/* ==================================================================== */
+
 #endif
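
To illustrate the calling conventions described in the "activate intrinsics" section above, here is a minimal sketch of a source file using the AES-NI intrinsics (editor's illustration, not part of the patch; the demo_ names are hypothetical, and the 0x02000000 CPUID mask is the AES-NI feature bit in ECX of leaf 1).

#define BR_ENABLE_INTRINSICS   1
#include "inner.h"

#if BR_AES_X86NI

/* Runtime check: CPUID leaf 1, ECX bit 25 signals AES-NI support. */
static int
demo_have_aesni(void)
{
	return br_cpuid(0, 0, 0x02000000, 0);
}

BR_TARGETS_X86_UP

/* One AES encryption round on a 16-byte block with a 16-byte round key. */
BR_TARGET("sse2,aes")
static void
demo_aes_round(unsigned char *block, const unsigned char *rkey)
{
	__m128i b, k;

	b = _mm_loadu_si128((const __m128i *)block);
	k = _mm_loadu_si128((const __m128i *)rkey);
	b = _mm_aesenc_si128(b, k);
	_mm_storeu_si128((__m128i *)block, b);
}

BR_TARGETS_X86_DOWN

#endif

The compile-time check on BR_AES_X86NI only guarantees that the compiler can emit the opcodes; the br_cpuid() test is still needed at runtime before calling the tagged function, which is the same pattern used by br_aes_x86ni_supported().
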