@@ -64,17 +64,17 @@ MODULE_LICENSE("GPL v2");

/* defined in aes-modes.S */
asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
-                               int rounds, int blocks, int first);
+                               int rounds, int blocks);
asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
-                               int rounds, int blocks, int first);
+                               int rounds, int blocks);

asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
-                               int rounds, int blocks, u8 iv[], int first);
+                               int rounds, int blocks, u8 iv[]);
asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
-                               int rounds, int blocks, u8 iv[], int first);
+                               int rounds, int blocks, u8 iv[]);

asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
-                               int rounds, int blocks, u8 ctr[], int first);
+                               int rounds, int blocks, u8 ctr[]);

asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
                                int rounds, int blocks, u8 const rk2[], u8 iv[],
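For reference, a minimal sketch of the calling pattern the trimmed prototypes imply, mirroring the ecb_encrypt() glue code in the next hunk. The variables (walk, ctx, rounds, blocks, err) are assumed to be set up by the surrounding skcipher glue; the fragment is illustrative only, not part of the commit:

        /* Per walk chunk: hold the NEON context only around the assembler
         * call, so the walk itself runs with NEON released (and, with the
         * non-atomic walk, may sleep between chunks).
         */
        while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
                kernel_neon_begin();
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks);
                kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }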
@@ -133,102 +133,98 @@ static int ecb_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int err, first, rounds = 6 + ctx->key_length / 4;
+       int err, rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
-       for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+       while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                               (u8 *)ctx->key_enc, rounds, blocks, first);
+                               (u8 *)ctx->key_enc, rounds, blocks);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
}

static int ecb_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int err, first, rounds = 6 + ctx->key_length / 4;
+       int err, rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
-       for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+       while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                               (u8 *)ctx->key_dec, rounds, blocks, first);
+                               (u8 *)ctx->key_dec, rounds, blocks);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
}

static int cbc_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int err, first, rounds = 6 + ctx->key_length / 4;
+       int err, rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
-       for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+       while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                               (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
-                               first);
+                               (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
}

static int cbc_decrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int err, first, rounds = 6 + ctx->key_length / 4;
+       int err, rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
-       for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+       while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                               (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
-                               first);
+                               (u8 *)ctx->key_dec, rounds, blocks, walk.iv);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();
        return err;
}

static int ctr_encrypt(struct skcipher_request *req)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int err, first, rounds = 6 + ctx->key_length / 4;
+       int err, rounds = 6 + ctx->key_length / 4;
        struct skcipher_walk walk;
        int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       first = 1;
-       kernel_neon_begin();
        while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+               kernel_neon_begin();
                aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
-                               (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
-                               first);
+                               (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
-               first = 0;
+               kernel_neon_end();
        }
        if (walk.nbytes) {
                u8 __aligned(8) tail[AES_BLOCK_SIZE];
@@ -241,12 +237,13 @@ static int ctr_encrypt(struct skcipher_request *req)
                 */
                blocks = -1;

+               kernel_neon_begin();
                aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
-                               blocks, walk.iv, first);
+                               blocks, walk.iv);
+               kernel_neon_end();
                crypto_xor_cpy(tdst, tsrc, tail, nbytes);
                err = skcipher_walk_done(&walk, 0);
        }
-       kernel_neon_end();

        return err;
}
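In the tail-block path above, the negative block count passed to aes_ctr_encrypt() selects the partial-block handling in the assembler routine, which (per the surrounding glue code) leaves a single block of keystream in tail; crypto_xor_cpy() then combines that keystream with the remaining nbytes of input. A rough byte-wise equivalent of that final XOR-copy step, shown only to illustrate the helper's semantics (the real helper may operate word-wise):

        /* dst[i] = src[i] ^ keystream[i] for the trailing partial block */
        static void ctr_xor_tail_sketch(u8 *dst, const u8 *src,
                                        const u8 *keystream, unsigned int len)
        {
                unsigned int i;

                for (i = 0; i < len; i++)
                        dst[i] = src[i] ^ keystream[i];
        }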
@@ -270,16 +267,16 @@ static int xts_encrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+               kernel_neon_begin();
                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_enc, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();

        return err;
}
@@ -292,16 +289,16 @@ static int xts_decrypt(struct skcipher_request *req)
        struct skcipher_walk walk;
        unsigned int blocks;

-       err = skcipher_walk_virt(&walk, req, true);
+       err = skcipher_walk_virt(&walk, req, false);

-       kernel_neon_begin();
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
+               kernel_neon_begin();
                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_dec, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
+               kernel_neon_end();
                err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
        }
-       kernel_neon_end();

        return err;
}
@@ -425,7 +422,7 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,

        /* encrypt the zero vector */
        kernel_neon_begin();
-       aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1, 1);
+       aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
        kernel_neon_end();

        cmac_gf128_mul_by_x(consts, consts);
@@ -454,8 +451,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
                return err;

        kernel_neon_begin();
-       aes_ecb_encrypt(key, ks[0], rk, rounds, 1, 1);
-       aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2, 0);
+       aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
+       aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
        kernel_neon_end();

        return cbcmac_setkey(tfm, key, sizeof(key));