@@ -27,6 +27,7 @@ size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
 
 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
 #define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
+#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */
 
 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam *tcam)
@@ -732,16 +733,26 @@ mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion
 
 static int
 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_acl_tcam_vregion *vregion);
+                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
+                                 int *credits);
 
 static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
 {
         struct mlxsw_sp_acl_tcam_vregion *vregion =
                 container_of(work, struct mlxsw_sp_acl_tcam_vregion,
                              rehash.dw.work);
+        int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
+        int err;
 
-        mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion);
-        mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
+        err = mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp,
+                                               vregion, &credits);
+        if (credits < 0)
+                /* Rehash gone out of credits so it was interrupted.
+                 * Schedule the work as soon as possible to continue.
+                 */
+                mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
+        else
+                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
 }
 
 static struct mlxsw_sp_acl_tcam_vregion *
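The hunk above caps how much migration a single work invocation may do: each run starts with MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS credits, and a run that exhausts them is rescheduled with zero delay rather than after the normal interval, so long rehashes are chopped into short, preemptible chunks. A minimal user-space sketch of that pattern; every name here is hypothetical and only the control flow mirrors the patch:

/* Hypothetical sketch of the credit-based interrupt/continue pattern. */
#include <stdbool.h>
#include <stdio.h>

#define REHASH_CREDITS 3        /* small so the demo gets interrupted */

/* Pretend each step migrates one entry at the cost of one credit. */
static bool migrate_one_entry(int *credits)
{
        if (--(*credits) < 0)
                return false;   /* out of budget, stop here */
        puts("migrated one entry");
        return true;
}

static void rehash_work(int total_entries)
{
        int credits = REHASH_CREDITS;

        for (int i = 0; i < total_entries; i++)
                if (!migrate_one_entry(&credits))
                        break;

        if (credits < 0)
                puts("interrupted: reschedule immediately to continue");
        else
                puts("done: reschedule at the normal interval");
}

int main(void)
{
        rehash_work(5); /* 5 entries, 3 credits: run is interrupted */
        return 0;
}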
@@ -1176,14 +1187,18 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_ventry *ventry,
-                                 struct mlxsw_sp_acl_tcam_chunk *chunk)
+                                 struct mlxsw_sp_acl_tcam_chunk *chunk,
+                                 int *credits)
 {
         struct mlxsw_sp_acl_tcam_entry *new_entry;
 
         /* First check if the entry is not already where we want it to be. */
         if (ventry->entry->chunk == chunk)
                 return 0;
 
+        if (--(*credits) < 0)
+                return 0;
+
         new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
         if (IS_ERR(new_entry))
                 return PTR_ERR(new_entry);
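Two details of this hunk are easy to miss: an entry already sitting in its target chunk returns before the decrement and so costs no credit, and the pre-decrement drives *credits to -1 exactly when the budget runs out, which is the negative marker the callers test. A hypothetical stand-alone illustration of just that accounting:

#include <assert.h>

/* Hypothetical model: in-place entries are free, and the decrement
 * past zero leaves credits at -1, the "interrupted" marker.
 */
static int migrate(int already_in_place, int *credits)
{
        if (already_in_place)
                return 0;               /* no credit consumed */
        if (--(*credits) < 0)
                return 0;               /* budget exhausted, skip quietly */
        /* a real implementation would move the entry here */
        return 0;
}

int main(void)
{
        int credits = 1;

        migrate(1, &credits);   /* in place: credits stays at 1 */
        assert(credits == 1);
        migrate(0, &credits);   /* consumes the last credit */
        assert(credits == 0);
        migrate(0, &credits);   /* out of budget: credits drops to -1 */
        assert(credits == -1);
        return 0;
}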
@@ -1223,7 +1238,8 @@ static int
 mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                                      struct mlxsw_sp_acl_tcam_region *region,
-                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+                                     int *credits)
 {
         struct mlxsw_sp_acl_tcam_ventry *ventry;
         int err;
@@ -1240,7 +1256,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 
         list_for_each_entry(ventry, &vchunk->ventry_list, list) {
                 err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
-                                                       vchunk->chunk);
+                                                       vchunk->chunk, credits);
                 if (err) {
                         if (ctx->this_is_rollback)
                                 return err;
@@ -1250,6 +1266,11 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
                          */
                         swap(vchunk->chunk, vchunk->chunk2);
                         return err;
+                } else if (*credits < 0) {
+                        /* We are out of credits, the rest of the ventries
+                         * will be migrated later.
+                         */
+                        return 0;
                 }
         }
 
@@ -1260,16 +1281,17 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
-                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+                                     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+                                     int *credits)
 {
         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
         int err;
 
         list_for_each_entry(vchunk, &vregion->vchunk_list, list) {
                 err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
                                                            vregion->region,
-                                                           ctx);
-                if (err)
+                                                           ctx, credits);
+                if (err || *credits < 0)
                         return err;
         }
         return 0;
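Note the reworked exit condition above: the walk stops both on a hard failure and on credit exhaustion, but in the latter case err is 0, so the interruption travels up the call chain as success and each caller inspects *credits itself to tell the two apart. A hypothetical condensed version of that contract:

#include <assert.h>

/* Hypothetical walker with the same exit contract as the hunk above:
 * non-zero return = hard failure; zero with *credits < 0 = interrupted;
 * zero with *credits >= 0 = the whole list was processed.
 */
static int migrate_all(int nitems, int *credits)
{
        for (int i = 0; i < nitems; i++) {
                int err = 0;    /* each step could fail for real */

                if (--(*credits) < 0)
                        return 0;       /* interrupted, not an error */
                if (err)
                        return err;     /* genuine failure aborts the walk */
        }
        return 0;
}

int main(void)
{
        int credits = 2;

        assert(migrate_all(5, &credits) == 0);  /* reports success... */
        assert(credits < 0);                    /* ...yet was interrupted */
        return 0;
}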
@@ -1278,21 +1300,24 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
 static int
 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_tcam_vregion *vregion,
-                                  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+                                  struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
+                                  int *credits)
 {
         int err, err2;
 
         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
         mutex_lock(&vregion->lock);
-        err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+        err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+                                                   ctx, credits);
         if (err) {
                 /* In case migration was not successful, we need to swap
                  * so the original region pointer is assigned again
                  * to vregion->region.
                  */
                 swap(vregion->region, vregion->region2);
                 ctx->this_is_rollback = true;
-                err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion, ctx);
+                err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
+                                                            ctx, credits);
                 if (err2)
                         vregion->failed_rollback = true;
         }
@@ -1301,6 +1326,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
         return err;
 }
 
+static bool
+mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
+{
+        return ctx->hints_priv;
+}
+
 static int
 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
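The new helper above relies on an existing invariant instead of a new flag: rehash_start allocates ctx->hints_priv and rehash_end frees and clears it, so a non-NULL pointer means a rehash was started but never finished. A hypothetical miniature of that marker scheme:

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical miniature: the context owns an allocation that exists
 * exactly while a rehash is under way, so a NULL check decides between
 * "continue the interrupted run" and "start a new one".
 */
struct rehash_ctx {
        void *hints_priv;
};

static bool rehash_in_progress(const struct rehash_ctx *ctx)
{
        return ctx->hints_priv;
}

static int rehash_start(struct rehash_ctx *ctx)
{
        ctx->hints_priv = malloc(16);   /* stands in for real hints */
        return ctx->hints_priv ? 0 : -1;
}

static void rehash_end(struct rehash_ctx *ctx)
{
        free(ctx->hints_priv);
        ctx->hints_priv = NULL; /* the next run starts fresh */
}

int main(void)
{
        struct rehash_ctx ctx = { .hints_priv = NULL };

        if (!rehash_in_progress(&ctx))
                rehash_start(&ctx);     /* first run: begin a rehash */
        /* an interrupted run would return here, keeping hints_priv */
        rehash_end(&ctx);               /* finished: clear the marker */
        return 0;
}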
@@ -1372,19 +1403,28 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
 
 static int
 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
-                                 struct mlxsw_sp_acl_tcam_vregion *vregion)
+                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
+                                 int *credits)
 {
         struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
         int err;
 
-        err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp, vregion, ctx);
-        if (err) {
-                if (err != -EAGAIN)
-                        dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
-                return err;
+        /* Check if the previous rehash work was interrupted
+         * which means we have to continue it now.
+         * If not, start a new rehash.
+         */
+        if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
+                err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
+                                                             vregion, ctx);
+                if (err) {
+                        if (err != -EAGAIN)
+                                dev_err(mlxsw_sp->bus_info->dev, "Failed get rehash hints\n");
+                        return err;
+                }
         }
 
-        err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion, ctx);
+        err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
+                                                ctx, credits);
         if (err) {
                 dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
                 if (vregion->failed_rollback) {
@@ -1394,7 +1434,9 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                 }
         }
 
-        mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+        if (*credits >= 0)
+                mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
+
         return err;
 }
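Together with the in-progress check, this final hunk closes the cycle: rehash_end runs only when the whole vregion was migrated within budget, while an out-of-credits run returns early with the context intact so the immediately rescheduled work resumes where it stopped. A hypothetical simulation of two consecutive work runs, all names illustrative:

#include <stdbool.h>
#include <stdio.h>

#define CREDITS_PER_RUN 4

static bool in_progress;        /* stands in for ctx->hints_priv != NULL */
static int entries_left = 6;    /* entries still waiting to move */

static void rehash_run(void)
{
        int credits = CREDITS_PER_RUN;

        if (!in_progress) {
                puts("start: collecting hints");
                in_progress = true;
        }
        while (entries_left > 0) {
                if (--credits < 0)
                        break;  /* interrupted, continue next run */
                entries_left--;
        }
        if (credits >= 0) {
                puts("end: migration complete, hints released");
                in_progress = false;
        } else {
                puts("interrupted: rescheduling with zero delay");
        }
}

int main(void)
{
        rehash_run();   /* moves 4 of 6 entries, runs out of credits */
        rehash_run();   /* moves the remaining 2 and finishes */
        return 0;
}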