2 * Copyright (c) 2017 Thomas Pornin <pornin@bolet.org>
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 #if BR_AES_X86NI_GCC_OLD
31 #pragma GCC target("sse2,sse4.1,aes,pclmul")
33 #include <smmintrin.h>
34 #include <wmmintrin.h>
35 #define bswap32 __builtin_bswap32
41 #define bswap32 _byteswap_ulong
44 /* see bearssl_block.h */
46 br_aes_x86ni_ctr_init(br_aes_x86ni_ctr_keys
*ctx
,
47 const void *key
, size_t len
)
49 ctx
->vtable
= &br_aes_x86ni_ctr_vtable
;
50 ctx
->num_rounds
= br_aes_x86ni_keysched_enc(ctx
->skey
.skni
, key
, len
);
/* see bearssl_block.h */
BR_TARGET("sse2,sse4.1,aes")
uint32_t
br_aes_x86ni_ctr_run(const br_aes_x86ni_ctr_keys *ctx,
	const void *iv, uint32_t cc, void *data, size_t len)
{
	unsigned char *buf;
	unsigned char ivbuf[16];
	unsigned num_rounds;
	__m128i sk[15];
	__m128i ivx;
	size_t u;

	buf = data;
	/*
	 * A CTR block is the 12-byte IV followed by a 32-bit counter.
	 * Only the IV part is copied here; the counter word is inserted
	 * into lane 3 (last four bytes) of each block below.
	 * NOTE(review): bytes 12-15 of ivbuf are left uninitialized on
	 * purpose — that lane is overwritten by _mm_insert_epi32 before
	 * any encryption.
	 */
	memcpy(ivbuf, iv, 12);
	num_rounds = ctx->num_rounds;
	/* Preload all round subkeys (num_rounds+1 of them, at most 15). */
	for (u = 0; u <= num_rounds; u ++) {
		sk[u] = _mm_loadu_si128((void *)(ctx->skey.skni + (u << 4)));
	}
	ivx = _mm_loadu_si128((void *)ivbuf);
	while (len > 0) {
		__m128i x0, x1, x2, x3;

		/*
		 * Build four successive counter blocks (cc .. cc+3).
		 * bswap32() produces the big-endian counter encoding
		 * expected in the last four bytes of the block.
		 */
		x0 = _mm_insert_epi32(ivx, bswap32(cc + 0), 3);
		x1 = _mm_insert_epi32(ivx, bswap32(cc + 1), 3);
		x2 = _mm_insert_epi32(ivx, bswap32(cc + 2), 3);
		x3 = _mm_insert_epi32(ivx, bswap32(cc + 3), 3);
		/* Initial AddRoundKey, then rounds 1-9, common to all key sizes. */
		x0 = _mm_xor_si128(x0, sk[0]);
		x1 = _mm_xor_si128(x1, sk[0]);
		x2 = _mm_xor_si128(x2, sk[0]);
		x3 = _mm_xor_si128(x3, sk[0]);
		x0 = _mm_aesenc_si128(x0, sk[1]);
		x1 = _mm_aesenc_si128(x1, sk[1]);
		x2 = _mm_aesenc_si128(x2, sk[1]);
		x3 = _mm_aesenc_si128(x3, sk[1]);
		x0 = _mm_aesenc_si128(x0, sk[2]);
		x1 = _mm_aesenc_si128(x1, sk[2]);
		x2 = _mm_aesenc_si128(x2, sk[2]);
		x3 = _mm_aesenc_si128(x3, sk[2]);
		x0 = _mm_aesenc_si128(x0, sk[3]);
		x1 = _mm_aesenc_si128(x1, sk[3]);
		x2 = _mm_aesenc_si128(x2, sk[3]);
		x3 = _mm_aesenc_si128(x3, sk[3]);
		x0 = _mm_aesenc_si128(x0, sk[4]);
		x1 = _mm_aesenc_si128(x1, sk[4]);
		x2 = _mm_aesenc_si128(x2, sk[4]);
		x3 = _mm_aesenc_si128(x3, sk[4]);
		x0 = _mm_aesenc_si128(x0, sk[5]);
		x1 = _mm_aesenc_si128(x1, sk[5]);
		x2 = _mm_aesenc_si128(x2, sk[5]);
		x3 = _mm_aesenc_si128(x3, sk[5]);
		x0 = _mm_aesenc_si128(x0, sk[6]);
		x1 = _mm_aesenc_si128(x1, sk[6]);
		x2 = _mm_aesenc_si128(x2, sk[6]);
		x3 = _mm_aesenc_si128(x3, sk[6]);
		x0 = _mm_aesenc_si128(x0, sk[7]);
		x1 = _mm_aesenc_si128(x1, sk[7]);
		x2 = _mm_aesenc_si128(x2, sk[7]);
		x3 = _mm_aesenc_si128(x3, sk[7]);
		x0 = _mm_aesenc_si128(x0, sk[8]);
		x1 = _mm_aesenc_si128(x1, sk[8]);
		x2 = _mm_aesenc_si128(x2, sk[8]);
		x3 = _mm_aesenc_si128(x3, sk[8]);
		x0 = _mm_aesenc_si128(x0, sk[9]);
		x1 = _mm_aesenc_si128(x1, sk[9]);
		x2 = _mm_aesenc_si128(x2, sk[9]);
		x3 = _mm_aesenc_si128(x3, sk[9]);
		/*
		 * Tail rounds depend on key size: 10 rounds (AES-128),
		 * 12 rounds (AES-192) or 14 rounds (AES-256, final else).
		 * The last round uses aesenclast (no MixColumns).
		 */
		if (num_rounds == 10) {
			x0 = _mm_aesenclast_si128(x0, sk[10]);
			x1 = _mm_aesenclast_si128(x1, sk[10]);
			x2 = _mm_aesenclast_si128(x2, sk[10]);
			x3 = _mm_aesenclast_si128(x3, sk[10]);
		} else if (num_rounds == 12) {
			x0 = _mm_aesenc_si128(x0, sk[10]);
			x1 = _mm_aesenc_si128(x1, sk[10]);
			x2 = _mm_aesenc_si128(x2, sk[10]);
			x3 = _mm_aesenc_si128(x3, sk[10]);
			x0 = _mm_aesenc_si128(x0, sk[11]);
			x1 = _mm_aesenc_si128(x1, sk[11]);
			x2 = _mm_aesenc_si128(x2, sk[11]);
			x3 = _mm_aesenc_si128(x3, sk[11]);
			x0 = _mm_aesenclast_si128(x0, sk[12]);
			x1 = _mm_aesenclast_si128(x1, sk[12]);
			x2 = _mm_aesenclast_si128(x2, sk[12]);
			x3 = _mm_aesenclast_si128(x3, sk[12]);
		} else {
			x0 = _mm_aesenc_si128(x0, sk[10]);
			x1 = _mm_aesenc_si128(x1, sk[10]);
			x2 = _mm_aesenc_si128(x2, sk[10]);
			x3 = _mm_aesenc_si128(x3, sk[10]);
			x0 = _mm_aesenc_si128(x0, sk[11]);
			x1 = _mm_aesenc_si128(x1, sk[11]);
			x2 = _mm_aesenc_si128(x2, sk[11]);
			x3 = _mm_aesenc_si128(x3, sk[11]);
			x0 = _mm_aesenc_si128(x0, sk[12]);
			x1 = _mm_aesenc_si128(x1, sk[12]);
			x2 = _mm_aesenc_si128(x2, sk[12]);
			x3 = _mm_aesenc_si128(x3, sk[12]);
			x0 = _mm_aesenc_si128(x0, sk[13]);
			x1 = _mm_aesenc_si128(x1, sk[13]);
			x2 = _mm_aesenc_si128(x2, sk[13]);
			x3 = _mm_aesenc_si128(x3, sk[13]);
			x0 = _mm_aesenclast_si128(x0, sk[14]);
			x1 = _mm_aesenclast_si128(x1, sk[14]);
			x2 = _mm_aesenclast_si128(x2, sk[14]);
			x3 = _mm_aesenclast_si128(x3, sk[14]);
		}
		if (len >= 64) {
			/* Full 64-byte chunk: XOR keystream into data in place. */
			x0 = _mm_xor_si128(x0,
				_mm_loadu_si128((void *)(buf + 0)));
			x1 = _mm_xor_si128(x1,
				_mm_loadu_si128((void *)(buf + 16)));
			x2 = _mm_xor_si128(x2,
				_mm_loadu_si128((void *)(buf + 32)));
			x3 = _mm_xor_si128(x3,
				_mm_loadu_si128((void *)(buf + 48)));
			_mm_storeu_si128((void *)(buf + 0), x0);
			_mm_storeu_si128((void *)(buf + 16), x1);
			_mm_storeu_si128((void *)(buf + 32), x2);
			_mm_storeu_si128((void *)(buf + 48), x3);
			buf += 64;
			len -= 64;
			cc += 4;
		} else {
			/*
			 * Final partial chunk: spill the keystream to a
			 * stack buffer and XOR only the remaining bytes.
			 */
			unsigned char tmp[64];

			_mm_storeu_si128((void *)(tmp + 0), x0);
			_mm_storeu_si128((void *)(tmp + 16), x1);
			_mm_storeu_si128((void *)(tmp + 32), x2);
			_mm_storeu_si128((void *)(tmp + 48), x3);
			for (u = 0; u < len; u ++) {
				buf[u] ^= tmp[u];
			}
			/*
			 * Advance the counter by the number of complete
			 * 16-byte blocks consumed in this last chunk.
			 */
			cc += (uint32_t)len >> 4;
			break;
		}
	}
	/* Return the updated counter for a subsequent call. */
	return cc;
}
/* see bearssl_block.h */
const br_block_ctr_class br_aes_x86ni_ctr_vtable = {
	sizeof(br_aes_x86ni_ctr_keys),
	16,	/* block size, in bytes */
	4,	/* log2 of block size */
	/*
	 * The casts adapt the concrete-context functions to the
	 * generic vtable signatures (first argument is the vtable
	 * pointer at the start of the context structure).
	 */
	(void (*)(const br_block_ctr_class **, const void *, size_t))
		&br_aes_x86ni_ctr_init,
	(uint32_t (*)(const br_block_ctr_class *const *,
		const void *, uint32_t, void *, size_t))
		&br_aes_x86ni_ctr_run
};
205 /* see bearssl_block.h */
206 const br_block_ctr_class
*
207 br_aes_x86ni_ctr_get_vtable(void)
209 return br_aes_x86ni_supported() ? &br_aes_x86ni_ctr_vtable
: NULL
;
214 /* see bearssl_block.h */
215 const br_block_ctr_class
*
216 br_aes_x86ni_ctr_get_vtable(void)