X-Git-Url: https://www.bearssl.org/gitweb//home/git/?p=BearSSL;a=blobdiff_plain;f=src%2Fconfig.h;h=accae3e46d2d940780d95b27752d0895320134ff;hp=259f5bbb162a33503ea2305095fc9b0da4b29c49;hb=8e94ad2fcb11794c559025277e56f3fbeb676f5d;hpb=3210f38e0491b39aec1ef419cb4114e9483089fb;ds=sidebyside

diff --git a/src/config.h b/src/config.h
index 259f5bb..accae3e 100644
--- a/src/config.h
+++ b/src/config.h
@@ -41,6 +41,16 @@
 #define BR_64 1
  */
 
+/*
+ * When BR_LOMUL is enabled, then multiplications of 32-bit values whose
+ * results are truncated to the low 32 bits are assumed to be
+ * substantially more efficient than 32-bit multiplications that yield
+ * 64-bit results. This is typically the case on low-end ARM Cortex M
+ * systems (M0, M0+, M1, and arguably M3 and M4 as well).
+ *
+#define BR_LOMUL 1
+ */
+
 /*
  * When BR_SLOW_MUL is enabled, multiplications are assumed to be
  * substantially slow with regard to other integer operations, thus
@@ -50,6 +60,15 @@
 #define BR_SLOW_MUL 1
  */
 
+/*
+ * When BR_SLOW_MUL15 is enabled, short multiplications (on 15-bit words)
+ * are assumed to be substantially slow with regard to other integer
+ * operations, thus making it worthwhile to perform more integer
+ * operations if that allows using fewer multiplications.
+ *
+#define BR_SLOW_MUL15 1
+ */
+
 /*
  * When BR_CT_MUL31 is enabled, multiplications of 31-bit values (used
  * in the "i31" big integer implementation) use an alternate implementation
@@ -60,6 +79,36 @@
 #define BR_CT_MUL31 1
  */
 
+/*
+ * When BR_CT_MUL15 is enabled, multiplications of 15-bit values (held
+ * in 32-bit words) use an alternate implementation which is slower and
+ * larger than the normal multiplication, but should ensure
+ * constant-time multiplications on most/all architectures where the
+ * basic multiplication is not constant-time.
+#define BR_CT_MUL15 1
+ */
+
+/*
+ * When BR_NO_ARITH_SHIFT is enabled, arithmetic right shifts (with sign
+ * extension) are performed with a sequence of operations which is bigger
+ * and slower than a simple right shift on a signed value. This avoids
+ * relying on implementation-defined behaviour. However, most if not
+ * all C compilers use sign extension for right shifts on signed values,
+ * so this alternate implementation is disabled by default.
+#define BR_NO_ARITH_SHIFT 1
+ */
+
+/*
+ * When BR_RDRAND is enabled, the SSL engine will use the RDRAND opcode
+ * to automatically obtain quality randomness for seeding its internal
+ * PRNG. Since that opcode is present only in recent x86 CPUs, its
+ * support is dynamically tested; if the current CPU does not support
+ * it, then another random source will be used, such as /dev/urandom or
+ * CryptGenRandom().
+ *
+#define BR_RDRAND 1
+ */
+
 /*
  * When BR_USE_URANDOM is enabled, the SSL engine will use /dev/urandom
  * to automatically obtain quality randomness for seeding its internal
@@ -99,4 +148,82 @@
 #define BR_USE_WIN32_TIME 1
  */
 
+/*
+ * When BR_ARMEL_CORTEXM_GCC is enabled, some operations are replaced with
+ * inline assembly which is shorter and/or faster. This should be used
+ * only when all of the following are true:
+ *   - target architecture is ARM in Thumb mode
+ *   - target endianness is little-endian
+ *   - compiler is GCC (or GCC-compatible for inline assembly syntax)
+ *
+ * This is meant for the low-end cores (Cortex M0, M0+, M1, M3).
+ * Note: if BR_LOMUL is not explicitly enabled or disabled, then
+ * enabling BR_ARMEL_CORTEXM_GCC also enables BR_LOMUL.
+ *
+#define BR_ARMEL_CORTEXM_GCC 1
+ */
+
+/*
+ * When BR_AES_X86NI is enabled, the AES implementation using the x86 "NI"
+ * instructions (dedicated AES opcodes) will be compiled. If this is not
+ * enabled explicitly, then that AES implementation will be compiled only
+ * if a compatible compiler is detected. If set explicitly to 0, the
+ * implementation will not be compiled at all.
+ *
+#define BR_AES_X86NI 1
+ */
+
+/*
+ * When BR_SSE2 is enabled, SSE2 intrinsics will be used for some
+ * algorithm implementations that use them (e.g. chacha20_sse2). If this
+ * is not enabled explicitly, then support for SSE2 intrinsics will be
+ * automatically detected. If set explicitly to 0, then SSE2 code will
+ * not be compiled at all.
+ *
+#define BR_SSE2 1
+ */
+
+/*
+ * When BR_POWER8 is enabled, the AES implementation using the POWER ISA
+ * 2.07 opcodes (available on POWER8 processors and later) is compiled.
+ * If this is not enabled explicitly, then that implementation will be
+ * compiled only if a compatible compiler is detected, _and_ the target
+ * architecture is POWER8 or later.
+ *
+#define BR_POWER8 1
+ */
+
+/*
+ * When BR_INT128 is enabled, then code using the 'unsigned long long'
+ * and 'unsigned __int128' types will be used to leverage 64x64->128
+ * unsigned multiplications. This should work with GCC and compatible
+ * compilers on 64-bit architectures.
+ *
+#define BR_INT128 1
+ */
+
+/*
+ * When BR_UMUL128 is enabled, then code using the '_umul128()' and
+ * '_addcarry_u64()' intrinsics will be used to implement 64x64->128
+ * unsigned multiplications. This should work with Visual C on x64 systems.
+ *
+#define BR_UMUL128 1
+ */
+
+/*
+ * When BR_LE_UNALIGNED is enabled, then the current architecture is
+ * assumed to use little-endian encoding for integers, and to tolerate
+ * unaligned accesses with no or minimal time penalty.
+ *
+#define BR_LE_UNALIGNED 1
+ */
+
+/*
+ * When BR_BE_UNALIGNED is enabled, then the current architecture is
+ * assumed to use big-endian encoding for integers, and to tolerate
+ * unaligned accesses with no or minimal time penalty.
+ *
+#define BR_BE_UNALIGNED 1
+ */
+
 #endif
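
A note on two of the options above, with illustrative C sketches. These are not BearSSL's actual implementations (the helper names arith_rsh32 and mul15_ct are hypothetical); they only demonstrate the techniques that the BR_NO_ARITH_SHIFT and BR_CT_MUL15 comments describe.

The "sequence of operations" mentioned for BR_NO_ARITH_SHIFT can be realized by rebuilding sign extension from unsigned operations only: take the low bits from a logical shift, then fill the top n bits with copies of the sign bit.

#include <stdint.h>

/*
 * Arithmetic right shift of x by n bits (1 <= n <= 31) without
 * applying ">>" to a negative signed value. "sign" is all-zeros or
 * all-ones depending on bit 31 of x; shifting it left by (32 - n)
 * replicates the sign bit into the top n positions of the result.
 * (Hypothetical helper, for illustration only.)
 */
static inline uint32_t
arith_rsh32(uint32_t x, unsigned n)
{
	uint32_t sign = -(x >> 31);   /* 0x00000000 or 0xFFFFFFFF */

	return (x >> n) | (sign << (32 - n));
}

For BR_CT_MUL15, one known way to defeat multipliers that take a data-dependent shortcut on small operands is to force both operands to be large before multiplying. For 15-bit inputs x and y, the product x*y fits in 30 bits, and the extra terms introduced by setting bit 31 of each operand (2^31*x, 2^31*y and 2^62) are all multiples of 2^31, so modulo 2^32 they only touch bits 31 and above; masking the low 30 bits recovers the true product.

/*
 * Constant-time-oriented 15x15->30 multiplication sketch
 * (hypothetical helper, for illustration only). Setting bit 31 of
 * both operands makes them uniformly "large" for the hardware
 * multiplier; the final mask keeps bits 0..29, which are unaffected.
 */
static inline uint32_t
mul15_ct(uint32_t x, uint32_t y)
{
	return ((x | 0x80000000u) * (y | 0x80000000u)) & 0x3FFFFFFFu;
}

In all cases, an option is enabled by moving the corresponding #define out of its comment block in src/config.h; defining the macro on the compiler command line (e.g. -DBR_CT_MUL15=1 with a GCC-style driver) should be equivalent, since config.h only provides defaults.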