 #include <linux/netdevice.h>
 #include "lag.h"
 
+enum {
+	MLX5_LAG_FT_LEVEL_DEFINER,
+};
+
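+/*
+ * Create a hash-split flow group spanning all MLX5_MAX_PORTS entries of
+ * the port selection table. Rather than matching on header values, the
+ * hardware hashes the packet fields selected by @definer and uses the
+ * result to pick one of the entries in the group.
+ */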
+static struct mlx5_flow_group *
+mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
+			    struct mlx5_flow_definer *definer)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *fg;
+	u32 *in;
+
+	in = kvzalloc(inlen, GFP_KERNEL);
+	if (!in)
+		return ERR_PTR(-ENOMEM);
+
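+	/* All entries share one definer; the HASH_SPLIT group type tells
+	 * the device to spread flows across the entries by hash.
+	 */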
+	MLX5_SET(create_flow_group_in, in, match_definer_id,
+		 mlx5_get_match_definer_id(definer));
+	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+	MLX5_SET(create_flow_group_in, in, group_type,
+		 MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
+
+	fg = mlx5_create_flow_group(ft, in);
+	kvfree(in);
+	return fg;
+}
+
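+/*
+ * Build the steering objects behind @lag_definer: a flow table with one
+ * entry per port in the PORT_SEL namespace, a hash flow group on top of
+ * it, and one rule per entry forwarding to the uplink of the port chosen
+ * by the port1/port2 affinity mapping.
+ */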
+static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+					  struct mlx5_lag_definer *lag_definer,
+					  u8 port1, u8 port2)
+{
+	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_flow_destination dest = {};
+	MLX5_DECLARE_FLOW_ACT(flow_act);
+	struct mlx5_flow_namespace *ns;
+	int err, i;
+
+	ft_attr.max_fte = MLX5_MAX_PORTS;
+	ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
+
+	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
+	if (!ns) {
+		mlx5_core_warn(dev, "Failed to get port selection namespace\n");
+		return -EOPNOTSUPP;
+	}
+
+	lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
+	if (IS_ERR(lag_definer->ft)) {
+		mlx5_core_warn(dev, "Failed to create port selection table\n");
+		return PTR_ERR(lag_definer->ft);
+	}
+
+	lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
+						      lag_definer->definer);
+	if (IS_ERR(lag_definer->fg)) {
+		err = PTR_ERR(lag_definer->fg);
+		goto destroy_ft;
+	}
+
+	dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+	flow_act.flags |= FLOW_ACT_NO_APPEND;
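+	/* Entry 0 steers to the uplink of port1, entry 1 to port2, so the
+	 * hash result maps directly to the configured port affinity.
+	 * Ports are 1-based, hence the "affinity - 1" PF array index.
+	 */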
+	for (i = 0; i < MLX5_MAX_PORTS; i++) {
+		u8 affinity = i == 0 ? port1 : port2;
+
+		dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+						  vhca_id);
+		lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
+							    NULL, &flow_act,
+							    &dest, 1);
+		if (IS_ERR(lag_definer->rules[i])) {
+			err = PTR_ERR(lag_definer->rules[i]);
+			while (i--)
+				mlx5_del_flow_rules(lag_definer->rules[i]);
+			goto destroy_fg;
+		}
+	}
+
+	return 0;
+
+destroy_fg:
+	mlx5_destroy_flow_group(lag_definer->fg);
+destroy_ft:
+	mlx5_destroy_flow_table(lag_definer->ft);
+	return err;
+}
+
 static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
 				      enum mlx5_traffic_types tt)
 {
@@ -186,6 +275,120 @@ static int mlx5_lag_set_definer(u32 *match_definer_mask,
 	return format_id;
 }
 
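+/*
+ * Allocate a lag definer for traffic type @tt: build the match mask for
+ * the requested hash policy (over outer or inner headers, depending on
+ * @tunnel), create the hardware match definer from it and then the port
+ * selection table that uses it.
+ */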
+static struct mlx5_lag_definer *
+mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
+			enum mlx5_traffic_types tt, bool tunnel, u8 port1,
+			u8 port2)
+{
+	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+	struct mlx5_lag_definer *lag_definer;
+	u32 *match_definer_mask;
+	int format_id, err;
+
+	lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
+	if (!lag_definer)
+		return ERR_PTR(-ENOMEM);
+
+	match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
+							match_mask),
+				      GFP_KERNEL);
+	if (!match_definer_mask) {
+		err = -ENOMEM;
+		goto free_lag_definer;
+	}
+
+	format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
+	lag_definer->definer =
+		mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
+					  format_id, match_definer_mask);
+	if (IS_ERR(lag_definer->definer)) {
+		err = PTR_ERR(lag_definer->definer);
+		goto free_mask;
+	}
+
+	err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+	if (err)
+		goto destroy_match_definer;
+
+	kvfree(match_definer_mask);
+
+	return lag_definer;
+
+destroy_match_definer:
+	mlx5_destroy_match_definer(dev, lag_definer->definer);
+free_mask:
+	kvfree(match_definer_mask);
+free_lag_definer:
+	kfree(lag_definer);
+	return ERR_PTR(err);
+}
+
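+/* Tear down everything mlx5_lag_create_definer() built, in reverse order. */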
+static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
+				     struct mlx5_lag_definer *lag_definer)
+{
+	struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+	int i;
+
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		mlx5_del_flow_rules(lag_definer->rules[i]);
+	mlx5_destroy_flow_group(lag_definer->fg);
+	mlx5_destroy_flow_table(lag_definer->ft);
+	mlx5_destroy_match_definer(dev, lag_definer->definer);
+	kfree(lag_definer);
+}
+
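+/* Destroy whichever outer/inner definers exist for each enabled traffic
+ * type; entries may still be NULL when called from a failed create.
+ */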
+static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
+{
+	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+	int tt;
+
+	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+		if (port_sel->outer.definers[tt])
+			mlx5_lag_destroy_definer(ldev,
+						 port_sel->outer.definers[tt]);
+		if (port_sel->inner.definers[tt])
+			mlx5_lag_destroy_definer(ldev,
+						 port_sel->inner.definers[tt]);
+	}
+}
+
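+/*
+ * Create an outer-header definer for every traffic type enabled in the
+ * tt_map, plus an inner-header one when tunnel offload is requested, so
+ * that encapsulated traffic is hashed on its inner headers.
+ */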
+static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
+				    enum netdev_lag_hash hash_type,
+				    u8 port1, u8 port2)
+{
+	struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+	struct mlx5_lag_definer *lag_definer;
+	int tt, err;
+
+	for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+						      false, port1, port2);
+		if (IS_ERR(lag_definer)) {
+			err = PTR_ERR(lag_definer);
+			goto destroy_definers;
+		}
+		port_sel->outer.definers[tt] = lag_definer;
+
+		if (!port_sel->tunnel)
+			continue;
+
+		lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+						      true, port1, port2);
+		if (IS_ERR(lag_definer)) {
+			err = PTR_ERR(lag_definer);
+			goto destroy_definers;
+		}
+		port_sel->inner.definers[tt] = lag_definer;
+	}
+
+	return 0;
+
+destroy_definers:
+	mlx5_lag_destroy_definers(ldev);
+	return err;
+}
+
 static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
 		       enum netdev_lag_hash hash)
 {