/*
 * Copyright (c) 2016 Thomas Pornin <pornin@bolet.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/*
 * Apply the inverse AES S-box to the bitsliced state (8 words, one per
 * bit plane; each 64-bit word carries four parallel 16-bit slices).
 *
 * NOTE(review): the interior of this function was lost in extraction;
 * it is reconstructed here following the ct64 bitslice design — verify
 * against the upstream file. The inverse S-box is obtained by wrapping
 * the forward S-box between two applications of the inverse of the AES
 * affine transform (the bitwise XOR/NOT pattern below), since inversion
 * in GF(2^8) is an involution.
 */
void
br_aes_ct64_bitslice_invSbox(uint64_t *q)
{
	/*
	 * See br_aes_ct_bitslice_invSbox(). This is the natural extension
	 * to 64-bit registers.
	 */
	uint64_t q0, q1, q2, q3, q4, q5, q6, q7;

	/* Inverse affine transform (includes the 0x63 constant via the
	   complemented planes). */
	q0 = ~q[0];
	q1 = ~q[1];
	q2 = q[2];
	q3 = q[3];
	q4 = q[4];
	q5 = ~q[5];
	q6 = ~q[6];
	q7 = q[7];
	q[7] = q1 ^ q4 ^ q6;
	q[6] = q0 ^ q3 ^ q5;
	q[5] = q7 ^ q2 ^ q4;
	q[4] = q6 ^ q1 ^ q3;
	q[3] = q5 ^ q0 ^ q2;
	q[2] = q4 ^ q7 ^ q1;
	q[1] = q3 ^ q6 ^ q0;
	q[0] = q2 ^ q5 ^ q7;

	br_aes_ct64_bitslice_Sbox(q);

	/* Same transform again, completing iS(x) = B(S(B(x ^ 0x63)) ^ 0x63). */
	q0 = ~q[0];
	q1 = ~q[1];
	q2 = q[2];
	q3 = q[3];
	q4 = q[4];
	q5 = ~q[5];
	q6 = ~q[6];
	q7 = q[7];
	q[7] = q1 ^ q4 ^ q6;
	q[6] = q0 ^ q3 ^ q5;
	q[5] = q7 ^ q2 ^ q4;
	q[4] = q6 ^ q1 ^ q3;
	q[3] = q5 ^ q0 ^ q2;
	q[2] = q4 ^ q7 ^ q1;
	q[1] = q3 ^ q6 ^ q0;
	q[0] = q2 ^ q5 ^ q7;
}
/*
 * AddRoundKey: XOR the (expanded, bitsliced) round key words sk[0..7]
 * into the state words q[0..7]. In GF(2), key addition is plain XOR,
 * and it is the same operation for encryption and decryption.
 * (Loop body reconstructed; the extraction dropped it.)
 */
static void
add_round_key(uint64_t *q, const uint64_t *sk)
{
	int i;

	for (i = 0; i < 8; i ++) {
		q[i] ^= sk[i];
	}
}
/*
 * InvShiftRows on the bitsliced state: each 64-bit word packs the four
 * AES rows as four 16-bit lanes (one bit plane of all 16 state bytes),
 * so the per-row right rotations become a fixed bit permutation applied
 * identically to all eight words. Row 0 (low 16 bits) is unchanged;
 * rows 1..3 are rotated by 4, 8 and 12 bits within their lanes.
 */
static void
inv_shift_rows(uint64_t *q)
{
	int i;

	for (i = 0; i < 8; i ++) {
		uint64_t x;

		x = q[i];
		q[i] = (x & (uint64_t)0x000000000000FFFF)
			| ((x & (uint64_t)0x000000000FFF0000) << 4)
			| ((x & (uint64_t)0x00000000F0000000) >> 12)
			| ((x & (uint64_t)0x000000FF00000000) << 8)
			| ((x & (uint64_t)0x0000FF0000000000) >> 8)
			| ((x & (uint64_t)0x000F000000000000) << 12)
			| ((x & (uint64_t)0xFFF0000000000000) >> 4);
	}
}
/*
 * Rotate a 64-bit word by 32 bits (swap its two 32-bit halves).
 * Used by inv_mix_columns; since the rotation count is exactly half
 * the word width, left-by-32 and right-by-32 are the same operation.
 * (Name taken from the visible call sites in inv_mix_columns.)
 */
static inline uint64_t
rotr32(uint64_t x)
{
	return (x << 32) | (x >> 32);
}
/*
 * InvMixColumns on the bitsliced state. The multiplications by the
 * inverse MDS coefficients (0x0E, 0x0B, 0x0D, 0x09) reduce, in the
 * bitsliced representation, to the fixed XOR network below over the
 * state words q0..q7 and their 16-bit-rotated forms r0..r7 (rotation
 * by 16 within a 64-bit word moves each lane to the next row), plus a
 * 32-bit rotation handled by rotr32().
 *
 * NOTE(review): the initial q0..q7 loads were lost in extraction and
 * are reconstructed as straight reads of q[0..7] (they must be, since
 * r0..r7 below are computed from them before q[] is overwritten).
 */
static void
inv_mix_columns(uint64_t *q)
{
	uint64_t q0, q1, q2, q3, q4, q5, q6, q7;
	uint64_t r0, r1, r2, r3, r4, r5, r6, r7;

	q0 = q[0];
	q1 = q[1];
	q2 = q[2];
	q3 = q[3];
	q4 = q[4];
	q5 = q[5];
	q6 = q[6];
	q7 = q[7];
	r0 = (q0 >> 16) | (q0 << 48);
	r1 = (q1 >> 16) | (q1 << 48);
	r2 = (q2 >> 16) | (q2 << 48);
	r3 = (q3 >> 16) | (q3 << 48);
	r4 = (q4 >> 16) | (q4 << 48);
	r5 = (q5 >> 16) | (q5 << 48);
	r6 = (q6 >> 16) | (q6 << 48);
	r7 = (q7 >> 16) | (q7 << 48);

	q[0] = q5 ^ q6 ^ q7 ^ r0 ^ r5 ^ r7 ^ rotr32(q0 ^ q5 ^ q6 ^ r0 ^ r5);
	q[1] = q0 ^ q5 ^ r0 ^ r1 ^ r5 ^ r6 ^ r7 ^ rotr32(q1 ^ q5 ^ q7 ^ r1 ^ r5 ^ r6);
	q[2] = q0 ^ q1 ^ q6 ^ r1 ^ r2 ^ r6 ^ r7 ^ rotr32(q0 ^ q2 ^ q6 ^ r2 ^ r6 ^ r7);
	q[3] = q0 ^ q1 ^ q2 ^ q5 ^ q6 ^ r0 ^ r2 ^ r3 ^ r5
		^ rotr32(q0 ^ q1 ^ q3 ^ q5 ^ q6 ^ q7 ^ r0 ^ r3 ^ r5 ^ r7);
	q[4] = q1 ^ q2 ^ q3 ^ q5 ^ r1 ^ r3 ^ r4 ^ r5 ^ r6 ^ r7
		^ rotr32(q1 ^ q2 ^ q4 ^ q5 ^ q7 ^ r1 ^ r4 ^ r5 ^ r6);
	q[5] = q2 ^ q3 ^ q4 ^ q6 ^ r2 ^ r4 ^ r5 ^ r6 ^ r7
		^ rotr32(q2 ^ q3 ^ q5 ^ q6 ^ r2 ^ r5 ^ r6 ^ r7);
	q[6] = q3 ^ q4 ^ q5 ^ q7 ^ r3 ^ r5 ^ r6 ^ r7
		^ rotr32(q3 ^ q4 ^ q6 ^ q7 ^ r3 ^ r6 ^ r7);
	q[7] = q4 ^ q5 ^ q6 ^ r4 ^ r6 ^ r7 ^ rotr32(q4 ^ q5 ^ q7 ^ r4 ^ r7);
}
/*
 * Constant-time bitsliced AES decryption core: q[0..7] hold the
 * bitsliced state (four blocks in parallel), skey the expanded round
 * keys (8 words per round), num_rounds the round count (10/12/14).
 * The schedule is the textbook inverse cipher: initial AddRoundKey
 * with the last round key, then for each inner round InvShiftRows,
 * InvSubBytes, AddRoundKey, InvMixColumns, and a final round without
 * InvMixColumns.
 *
 * NOTE(review): the extraction dropped the inv_shift_rows() and
 * inv_mix_columns() call lines; they are reconstructed per the
 * standard inverse-cipher ordering — verify against upstream.
 */
void
br_aes_ct64_bitslice_decrypt(unsigned num_rounds,
	const uint64_t *skey, uint64_t *q)
{
	unsigned u;

	add_round_key(q, skey + (num_rounds << 3));
	for (u = num_rounds - 1; u > 0; u --) {
		inv_shift_rows(q);
		br_aes_ct64_bitslice_invSbox(q);
		add_round_key(q, skey + (u << 3));
		inv_mix_columns(q);
	}
	inv_shift_rows(q);
	br_aes_ct64_bitslice_invSbox(q);
	add_round_key(q, skey);
}