/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include <net/dst_metadata.h>
#include "en/tc/post_act.h"
#include "en/tc/act_stats.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_hairpin_params {
	struct mlx5_core_dev *mdev;
	u32 num_queues;
	u32 queue_size;
};

struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
	struct mlx5e_hairpin_params hairpin_params;
	struct dentry *dfs_root;

	/* tc action stats */
	struct mlx5e_tc_act_stats_handle *action_stats_handle;
};

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 16,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 16,
		.mlen = 16,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 8,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_MAPPED_OBJ_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 16,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
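
/* Each entry above describes one slice of a metadata register: mfield
 * selects the register for modify-header SET actions, moffset/mlen give
 * the bit offset and width of the slice, and soffset locates the matching
 * dword inside fte_match_param for flow-spec matching. For example,
 * MAPPED_OBJ_TO_REG and VPORT_TO_REG share reg_c_0 by splitting it into
 * two 16-bit slices (moffset 0 and 16).
 */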

struct mlx5e_tc_jump_state {
	u32 jump_count;
	bool jump_target;
	struct mlx5_flow_attr *jumping_attr;

	enum flow_action_id last_id;
	u32 last_index;
};

struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}

static struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}

/* To avoid false lock dependency warnings, give the tc_ht lock a class
 * different from the lock class of the ht being used when deleting the
 * last flow from a group and then deleting the group: that path reaches
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash
 * and takes its ht->mutex, a different mutex from the one here.
 */
static struct lock_class_key tc_ht_lock_key;
static struct lock_class_key tc_ht_wq_key;

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
static void mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr);
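
/* The match_to_reg helpers below all operate on the mapping table above:
 * mlx5e_tc_match_to_reg_match() and ..._get_match() write/read the match
 * value and mask for a register slice in a flow spec, while
 * ..._set_and_get_id() appends a modify-header SET action that writes the
 * slice at packet-processing time. In all of them the (data, mask) pair
 * is shifted by moffset so it lands in the mapped bits of the 32-bit
 * register dword.
 */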

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	//move to correct offset
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	data <<= moffset;
	max_mask <<= moffset;

	//clear to be overwritten
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	//add current to mask
	curr_mask |= mask;
	curr_val |= data;

	//back to be32 and write
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}

int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
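
/* A minimal usage sketch (illustrative, not taken from this file): the
 * returned index lets a caller rewrite the same SET action later, e.g.
 *
 *	int act_id;
 *
 *	act_id = mlx5e_tc_match_to_reg_set_and_get_id(mdev, acts, ns,
 *						      MAPPED_OBJ_TO_REG, id);
 *	if (act_id < 0)
 *		return act_id;
 *	...
 *	mlx5e_tc_match_to_reg_mod_hdr_change(mdev, acts, MAPPED_OBJ_TO_REG,
 *					     act_id, new_id);
 */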

static struct mlx5e_tc_act_stats_handle *
get_act_stats_handle(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->action_stats_handle;
	}

	return tc->action_stats_handle;
}

struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
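
/* The getters above share one pattern: in switchdev mode the per-feature
 * state (ct, sampling, meters, post actions, action stats) lives in the
 * uplink representor's uplink_priv; otherwise it lives in the NIC tc
 * table, or does not exist and NULL is returned.
 */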

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (((attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
		 (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)) ||
		attr->flags & MLX5_ATTR_FLAG_MTU);
}

static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;
	enum mlx5e_post_meter_type type;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	type = meter->params.mtu ? MLX5E_POST_METER_MTU : MLX5E_POST_METER_RATE;
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act,
					   type,
					   meter->act_counter, meter->drop_counter,
					   attr->branch_true, attr->branch_false);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

static void
mlx5e_tc_del_flow_meter(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(esw, attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}

struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
			&attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
					       spec, attr,
					       mod_hdr_acts);
	}

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
		return;
	}

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(esw, attr);
}

int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			      struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			      enum mlx5_flow_namespace_type ns,
			      enum mlx5e_tc_attr_to_reg type,
			      u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
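
/* mlx5e_tc_match_to_reg_set() is a thin wrapper over
 * mlx5e_tc_match_to_reg_set_and_get_id() for callers that do not need the
 * action index: it normalizes any non-negative index to 0.
 */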

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpe's that were not fully initialized when dead peer update event
	 * function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);
	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mlx5_core_dev *
get_flow_counter_dev(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}

int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_attr *attr)
{
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &attr->parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
	attr->mh = mh;

	return 0;
}

void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_attr *attr)
{
	/* flow wasn't fully initialized */
	if (!attr->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     attr->mh);
	attr->mh = NULL;
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	enum mlx5_traffic_types tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
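
/* Hairpin RSS plumbing, in order of construction: an indirect RQT spreads
 * packets across the hairpin RQs, one TIR per traffic type hashes into
 * that RQT, and (below) a TTC table steers each traffic type to its TIR,
 * with the direct TIR catching MLX5_TT_ANY.
 */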

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	enum mlx5_traffic_types tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
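
/* The hash key simply packs the peer vhca_id into the high 16 bits and
 * the PCP priority into the low bits, e.g. vhca_id 5, prio 3 yields
 * 0x00050003, so one hairpin pair is shared per (peer device, prio).
 */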

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int debugfs_hairpin_num_active_get(void *data, u64 *val)
{
	struct mlx5e_tc_table *tc = data;
	struct mlx5e_hairpin_entry *hpe;
	u32 cnt = 0;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		cnt++;
	mutex_unlock(&tc->hairpin_tbl_lock);

	*val = cnt;

	return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active,
			 debugfs_hairpin_num_active_get, NULL, "%llu\n");

static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv)
{
	struct mlx5e_tc_table *tc = file->private;
	struct mlx5e_hairpin_entry *hpe;
	u32 bkt;

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n",
			   hpe->peer_vhca_id, hpe->prio,
			   refcount_read(&hpe->refcnt));
	mutex_unlock(&tc->hairpin_tbl_lock);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump);

static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc,
				  struct dentry *dfs_root)
{
	if (IS_ERR_OR_NULL(dfs_root))
		return;

	tc->dfs_root = debugfs_create_dir("tc", dfs_root);

	debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc,
			    &fops_hairpin_num_active);
	debugfs_create_file("hairpin_table_dump", 0444, tc->dfs_root, tc,
			    &debugfs_hairpin_table_dump_fops);
}

static void
mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params,
			  struct mlx5_core_dev *mdev)
{
	u64 link_speed64;
	u32 link_speed;

	hairpin_params->mdev = mdev;
	/* allocate one hairpin queue per 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	hairpin_params->num_queues = link_speed64;

	hairpin_params->queue_size =
		BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev),
			  MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets)));
}
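
/* Worked example (illustrative): on a 200000 Mbps (200Gbps) link,
 * num_queues = 200000 / 50000 = 4. queue_size is capped both by the
 * device's log_max_hairpin_num_packets capability and by keeping the
 * packet count low enough that, at the minimum stride size, the queue
 * data fits in 2^16 bytes (the 16 - min-log-stride term).
 */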

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	params.log_num_packets = ilog2(tc->hairpin_params.queue_size);
	params.log_data_size =
		clamp_t(u32,
			params.log_num_packets +
				MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev),
			MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz),
			MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));

	params.q_counter = priv->q_counter;
	params.num_channels = tc->hairpin_params.num_queues;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
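
/* mlx5e_add_offloaded_nic_rule() resolves the forward destination in
 * strict precedence order: an explicit dest_ft, then a hairpin flow table
 * (RSS case), then a hairpin TIR (single channel), and finally, for plain
 * FWD rules, either the goto-chain table or the default VLAN table. A
 * counter, if any, is appended as a second destination.
 */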

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}

static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;
	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
		if (err)
			return err;
	}

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
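
/* Slow-path offload (below) forwards packets to software while, e.g., an
 * encap neighbour is unresolved. When the device can forward to a table
 * after a header rewrite (fdb_modify_header_fwd_to_table), the rule also
 * stores a chain mapping in MAPPED_OBJ_TO_REG so software can resume
 * processing from the right tc chain after the miss.
 */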

struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					MAPPED_OBJ_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->attr->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);
	return ERR_PTR(err);
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->attr->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		struct mlx5_devcom *devcom;
		int err;

		/* In the lag case we may get devices from different eswitch
		 * instances. If we fail to get the vport num, it most likely
		 * means we are querying the wrong eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		rcu_read_lock();
		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
		rcu_read_unlock();

		return err;
	}

	return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
}

static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool *vf_tun)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}

static int
verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
{
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return -EOPNOTSUPP;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return -EOPNOTSUPP;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
post_process_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  struct netlink_ext_ack *extack)
{
	bool vf_tun;
	int err = 0;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		goto err_out;

	err = set_encap_dests(flow->priv, flow, attr, extack, &vf_tun);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
		if (err)
			goto err_out;
	}

	if (attr->branch_true &&
	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
		if (err)
			goto err_out;
	}

	if (attr->branch_false &&
	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
		if (err)
			goto err_out;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
		if (err)
			goto err_out;
	}

err_out:
	return err;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = post_process_attr(flow, attr, extack);
	if (err)
		goto err_out;

	err = mlx5e_tc_act_stats_add_flow(get_act_stats_handle(priv), flow);
	if (err)
		goto err_out;

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void free_branch_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	if (!attr)
		return;

	mlx5_free_flow_attr(flow, attr);
	kvfree(attr->parse_attr);
	kfree(attr);
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	clean_encap_dests(priv, flow, attr, &vf_tun);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	mlx5e_tc_act_stats_del_flow(get_act_stats_handle(priv), flow);

	free_flow_post_acts(flow);
	free_branch_attr(flow, attr->branch_true);
	free_branch_attr(flow, attr->branch_false);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);

	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}
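
/* Only goto and sample actions require the tunnel match to be stashed in
 * the mapping table: they hand the packet to a later chain (or to
 * software), where the original tunnel match must be recoverable via
 * TUNNEL_TO_REG rather than matched inline.
 */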

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	*dont_care = true;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
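
/* tunnel_id packs both mapping ids into one register value: the high bits
 * carry the tunnel mapping id and the low ENC_OPTS_BITS carry the (optional)
 * tunnel-options mapping id. The helper below undoes that encoding when the
 * flow is deleted.
 */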
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
/* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
 * It changes the inner ip_ecn depending on the inner and outer ip_ecn
 * as follows:
 *
 * +---------+---------------------------------------+
 * |Arriving | Arriving Outer Header                 |
 * |  Inner  +---------+---------+---------+---------+
 * |  Header | Not-ECT | ECT(0)  | ECT(1)  |   CE    |
 * +---------+---------+---------+---------+---------+
 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop>  |
 * |  ECT(0) | ECT(0)  | ECT(0)  | ECT(1)  |  CE*    |
 * |  ECT(1) | ECT(1)  | ECT(1)  | ECT(1)* |  CE*    |
 * |   CE    |   CE    |   CE    |   CE    |  CE     |
 * +---------+---------+---------+---------+---------+
 *
 * Tc matches on the inner header after decapsulation on the tunnel device,
 * but hw offload matches the inner ip_ecn value before the hardware decap
 * action.
 *
 * Cells marked with an asterisk (*) are changed from the original inner
 * packet ip_ecn value during decap, so matching those values on the inner
 * ip_ecn before decap will fail.
 *
 * The following helper allows offload when the inner ip_ecn won't be changed
 * by the outer ip_ecn, except for outer ip_ecn = CE, where in all cases the
 * inner ip_ecn will be changed to CE, and as such we can drop the inner
 * ip_ecn = CE match.
 */
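/* For example (per the table above): with outer ip_ecn = ECT(1) and inner
 * ip_ecn = ECT(0), decap rewrites the inner field to ECT(1), so an offloaded
 * pre-decap match on inner ECT(0) would hit packets that software tc, which
 * matches post-decap, would not; hence such filters are rejected below.
 */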
static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
				      struct flow_cls_offload *f,
				      bool *match_inner_ecn)
{
	u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct flow_match_ip match;

	*match_inner_ecn = true;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		flow_rule_match_enc_ip(rule, &match);
		outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		flow_rule_match_ip(rule, &match);
		inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}

	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;

	/* Both inner and outer have full mask on ecn */

	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn might change by DECAP action */
		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in software, as packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE: as decap will change the inner ecn
	 * to CE in any case, drop the match on inner ecn
	 */
	*match_inner_ecn = false;

	return 0;
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow)) {
		NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
		return -EOPNOTSUPP;
	}

	needs_mapping = !!flow->attr->chain;
	sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}
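
/* Note: on chain > 0 the tunnel headers are parsed into a throwaway tmp_spec
 * above; the parsed match itself is discarded and only used to let
 * mlx5e_tc_set_attr_rx_tun() record the tunnel properties, since the actual
 * header match on later chains is restored from the tunnel id mapping.
 */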
static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields. However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				      misc_parameters_3);
	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				      misc_parameters_3);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	enum fs_flow_table_type fs_type;
	bool match_inner_ecn = true;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}
	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}

		err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
		if (err)
			return err;
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;

			if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
			    match.mask->vlan_eth_type &&
			    MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
						    ft_field_support.outer_second_vid,
						    fs_type)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				spec->match_criteria_enable |=
					MLX5_MATCH_MISC_PARAMETERS;
			}
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged of both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
						     fs_type)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Matching on CVLAN is not supported");
				return -EOPNOTSUPP;
			}

			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
			NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
			/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		if (match_inner_ecn) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
				 match.mask->tos & 0x3);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
				 match.key->tos & 0x3);
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;
		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(rule, &match);
		switch (ip_proto) {
		case IPPROTO_ICMP:
			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
			      MLX5_FLEX_PROTO_ICMP)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Match on Flex protocols for ICMP is not supported");
				return -EOPNOTSUPP;
			}
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
				 match.mask->type);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
				 match.key->type);
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
				 match.mask->code);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
				 match.key->code);
			break;
		case IPPROTO_ICMPV6:
			if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
			      MLX5_FLEX_PROTO_ICMPV6)) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Match on Flex protocols for ICMPV6 is not supported");
				return -EOPNOTSUPP;
			}
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
				 match.mask->type);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
				 match.key->type);
			MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
				 match.mask->code);
			MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
				 match.key->code);
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Code and type matching only with ICMP and ICMPv6");
			netdev_err(priv->netdev,
				   "Code and type matching only with ICMP and ICMPv6\n");
			return -EINVAL;
		}
		if (match.mask->code || match.mask->type) {
			*match_level = MLX5_MATCH_L4;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
		}
	}
	/* Currently supported only for MPLS over UDP */
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
	    !netif_is_bareudp(filter_dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on MPLS is supported only for MPLS over UDP");
		netdev_err(priv->netdev,
			   "Matching on MPLS is supported only for MPLS over UDP\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct flow_cls_offload *f,
			    struct net_device *filter_dev)
{
	u8 inner_match_level, outer_match_level, non_tunnel_match_level;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	bool is_eswitch_flow;
	int err;

	inner_match_level = MLX5_MATCH_NONE;
	outer_match_level = MLX5_MATCH_NONE;

	err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
				 &inner_match_level, &outer_match_level);
	non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
				 outer_match_level : inner_match_level;

	is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < non_tunnel_match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	flow->attr->inner_match_level = inner_match_level;
	flow->attr->outer_match_level = outer_match_level;

	return err;
}
struct mlx5_fields {
	u8  field;
	u8  field_bsize;
	u32 field_mask;
	u32 offset;
	u32 match_offset;
};

#define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
		 offsetof(struct pedit_headers, field) + (off), \
		 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
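
/* e.g. OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype) describes a
 * 16-bit rewrite of the firmware ETHERTYPE field, sourced from eth.h_proto in
 * the accumulated pedit values/masks and cross-checked against the ethertype
 * bits of the flow's own match (see cmp_val_mask() below).
 */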
/* To avoid rewriting a field to a value the rule already matches on, check
 * that the masked values are the same and that there are no rewrite bits
 * that do not have a corresponding match bit.
 */
#define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
	type matchmaskx = *(type *)(matchmaskp); \
	type matchvalx = *(type *)(matchvalp); \
	type maskx = *(type *)(maskp); \
	type valx = *(type *)(valp); \
	\
	(valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
								 matchmaskx)); \
})

static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
			 void *matchmaskp, u8 bsize)
{
	bool same = false;

	switch (bsize) {
	case 8:
		same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
		break;
	case 16:
		same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
		break;
	case 32:
		same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
		break;
	}

	return same;
}
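
/* The second SAME_VAL_MASK condition, !(maskx & (maskx ^ matchmaskx)), holds
 * only when every bit the pedit rewrites is also covered by the match mask;
 * only then is the rewrite provably a no-op for matching packets and safe
 * to skip.
 */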
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
	OFFLOAD(DMAC_15_0,  16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
	OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
	OFFLOAD(SMAC_15_0,  16, U16_MAX, eth.h_source[4], 0, smac_15_0),
	OFFLOAD(ETHERTYPE,  16, U16_MAX, eth.h_proto, 0, ethertype),
	OFFLOAD(FIRST_VID,  16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),

	OFFLOAD(IP_DSCP, 8,    0xfc, ip4.tos,   0, ip_dscp),
	OFFLOAD(IP_TTL,  8,  U8_MAX, ip4.ttl,   0, ttl_hoplimit),
	OFFLOAD(SIPV4,  32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
	OFFLOAD(DIPV4,  32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),

	OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(SIPV6_95_64,  32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(SIPV6_63_32,  32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(SIPV6_31_0,   32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
		src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
	OFFLOAD(DIPV6_95_64,  32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
	OFFLOAD(DIPV6_63_32,  32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
	OFFLOAD(DIPV6_31_0,   32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
		dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
	OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
	OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),

	OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
	OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest,   0, tcp_dport),
	/* in linux tcphdr the flags field is 8 bits long */
	OFFLOAD(TCP_FLAGS,  8,  U8_MAX, tcp.ack_seq, 5, tcp_flags),

	OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
	OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest,   0, udp_dport),
};
static unsigned long mask_to_le(unsigned long mask, int size)
{
	__be32 mask_be32;
	__be16 mask_be16;

	if (size == 32) {
		mask_be32 = (__force __be32)(mask);
		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
	} else if (size == 16) {
		mask_be32 = (__force __be32)(mask);
		mask_be16 = *(__be16 *)&mask_be32;
		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
	}

	return mask;
}
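
/* Pedit masks arrive in network byte order, while the find_*_bit() helpers
 * below scan a host unsigned long; mask_to_le() reorders the relevant 16/32
 * bit window so the first/last set-bit offsets computed on it correspond to
 * the field's natural bit positions.
 */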
static int offload_pedit_fields(struct mlx5e_priv *priv,
				int namespace,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				u32 *action_flags,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	struct pedit_headers_action *hdrs = parse_attr->hdrs;
	void *headers_c, *headers_v, *action, *vals_p;
	u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
	struct mlx5e_tc_mod_hdr_acts *mod_acts;
	unsigned long mask, field_mask;
	int i, first, last, next_z;
	struct mlx5_fields *f;
	u8 cmd;

	mod_acts = &parse_attr->mod_hdr_acts;
	headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
	headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		bool skip;

		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		s_mask = *s_masks_p & f->field_mask;
		a_mask = *a_masks_p & f->field_mask;

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			netdev_warn(priv->netdev,
				    "mlx5: can't set and add to the same HW field (%x)\n",
				    f->field);
			return -EOPNOTSUPP;
		}

		skip = false;
		if (s_mask) {
			void *match_mask = headers_c + f->match_offset;
			void *match_val = headers_v + f->match_offset;

			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* don't rewrite if we have a match on the same value */
			if (cmp_val_mask(vals_p, s_masks_p, match_val,
					 match_mask, f->field_bsize))
				skip = true;
			/* clear to denote we consumed this field */
			*s_masks_p &= ~f->field_mask;
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* add 0 is no change */
			if ((*(u32 *)vals_p & f->field_mask) == 0)
				skip = true;
			/* clear to denote we consumed this field */
			*a_masks_p &= ~f->field_mask;
		}
		if (skip)
			continue;

		mask = mask_to_le(mask, f->field_bsize);

		first = find_first_bit(&mask, f->field_bsize);
		next_z = find_next_zero_bit(&mask, f->field_bsize, first);
		last  = find_last_bit(&mask, f->field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			netdev_warn(priv->netdev,
				    "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
				    mask);
			return -EOPNOTSUPP;
		}

		action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
		if (IS_ERR(action)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			mlx5_core_warn(priv->mdev,
				       "mlx5: parsed %d pedit actions, can't do more\n",
				       mod_acts->num_actions);
			return PTR_ERR(action);
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			int start;

			field_mask = mask_to_le(f->field_mask, f->field_bsize);

			/* if field is bit sized it can start not from first bit */
			start = find_first_bit(&field_mask, f->field_bsize);

			MLX5_SET(set_action_in, action, offset, first - start);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (f->field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (f->field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (f->field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		++mod_acts->num_actions;
	}

	return 0;
}
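
/* After offload_pedit_fields() every mask it managed to offload has been
 * cleared from hdrs; anything still set below is a field the hardware cannot
 * rewrite, which fails the offload.
 */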
static const struct pedit_headers zero_masks = {};

static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
				       struct mlx5e_tc_flow_parse_attr *parse_attr,
				       struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	u8 cmd;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &parse_attr->hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 u32 *action_flags,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	err = verify_offload_pedit_fields(priv, parse_attr, extack);
	if (err)
		goto out_dealloc_parsed_actions;

	return 0;

out_dealloc_parsed_actions:
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	return err;
}
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};

static bool
is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
			 bool *modify_ip_header, bool *modify_tuple,
			 struct netlink_ext_ack *extack)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 header check 4 byte word,
	 * to determine that modified fields
	 * are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check)
			*modify_ip_header = true;

		if (offset >= offsetof(struct iphdr, saddr))
			*modify_tuple = true;

		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv4 address with action ct");
			return false;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr)
			*modify_ip_header = true;

		if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
			*modify_tuple = true;

		if (ct_flow && *modify_tuple) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of ipv6 address with action ct");
			return false;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
		   htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
		*modify_tuple = true;
		if (ct_flow) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't offload re-write of transport header ports with action ct");
			return false;
		}
	}

	return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
				   bool ct_flow, struct netlink_ext_ack *extack,
				   struct mlx5e_priv *priv,
				   struct mlx5_flow_spec *spec)
{
	if (!modify_tuple || ct_clear)
		return true;

	if (ct_flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload tuple modification with non-clear ct()");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with non-clear ct()");
		return false;
	}

	/* Add ct_state=-trk match so it will be offloaded for non ct flows
	 * (or after clear action), as otherwise, since the tuple is changed,
	 * we can't restore ct state
	 */
	if (mlx5_tc_ct_add_no_trk_match(spec)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload tuple modification with ct matches and no ct(clear) action");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with ct matches and no ct(clear) action");
		return false;
	}

	return true;
}
static bool modify_header_match_supported(struct mlx5e_priv *priv,
					  struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions, bool ct_flow,
					  bool ct_clear,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	bool modify_ip_header, modify_tuple;
	void *headers_c;
	void *headers_v;
	u16 ethertype;
	u8 ip_proto;
	int i;

	headers_c = mlx5e_get_match_headers_criteria(actions, spec);
	headers_v = mlx5e_get_match_headers_value(actions, spec);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
	    ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	modify_tuple = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		if (!is_action_keys_supported(act, ct_flow,
					      &modify_ip_header,
					      &modify_tuple, extack))
			return false;
	}

	if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
				    priv, spec))
		return false;

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
			    ip_proto);
		return false;
	}

out_ok:
	return true;
}
static bool
actions_match_supported_fdb(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct netlink_ext_ack *extack)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
	bool ct_flow, ct_clear;

	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
	ct_flow = flow_flag_test(flow, CT) && !ct_clear;

	if (esw_attr->split_count && ct_flow &&
	    !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
		/* All registers used by ct are cleared when using
		 * split rules.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
		return false;
	}

	if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev,
				 "current firmware doesn't support split rule for port mirroring\n");
		return false;
	}

	return true;
}
static bool
actions_match_supported(struct mlx5e_priv *priv,
			struct flow_action *flow_action,
			u32 actions,
			struct mlx5e_tc_flow_parse_attr *parse_attr,
			struct mlx5e_tc_flow *flow,
			struct netlink_ext_ack *extack)
{
	bool ct_flow, ct_clear;

	ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
	ct_flow = flow_flag_test(flow, CT) && !ct_clear;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
					   actions, ct_flow, ct_clear, extack))
		return false;

	if (mlx5e_is_eswitch_flow(flow) &&
	    !actions_match_supported_fdb(priv, flow, extack))
		return false;

	return true;
}
static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	return priv->mdev == peer_priv->mdev;
}

bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}
static int
actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5_flow_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	struct pedit_headers_action *hdrs = parse_attr->hdrs;
	enum mlx5_flow_namespace_type ns_type;
	int err;

	if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
	    !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
		return 0;

	ns_type = mlx5e_get_flow_namespace(flow);

	err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
	if (err)
		return err;

	if (parse_attr->mod_hdr_acts.num_actions > 0)
		return 0;

	/* In case all pedit actions are skipped, remove the MOD_HDR flag. */
	attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);

	if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
		return 0;

	if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
	      (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
		attr->esw_attr->split_count = 0;

	return 0;
}
static struct mlx5_flow_attr*
mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
				   enum mlx5_flow_namespace_type ns_type)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	u32 attr_sz = ns_to_attr_sz(ns_type);
	struct mlx5_flow_attr *attr2;

	attr2 = mlx5_alloc_flow_attr(ns_type);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!attr2 || !parse_attr) {
		kvfree(parse_attr);
		kfree(attr2);
		return NULL;
	}

	memcpy(attr2, attr, attr_sz);
	INIT_LIST_HEAD(&attr2->list);
	parse_attr->filter_dev = attr->parse_attr->filter_dev;
	attr2->action = 0;
	attr2->counter = NULL;
	attr2->tc_act_cookies_count = 0;
	attr2->flags = 0;
	attr2->parse_attr = parse_attr;
	attr2->dest_chain = 0;
	attr2->dest_ft = NULL;
	attr2->act_id_restore_rule = NULL;
	memset(&attr2->ct_attr, 0, sizeof(attr2->ct_attr));

	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
		attr2->esw_attr->out_count = 0;
		attr2->esw_attr->split_count = 0;
	}

	attr2->branch_true = NULL;
	attr2->branch_false = NULL;
	attr2->jumping_attr = NULL;
	return attr2;
}
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_flow_attr *attr;
	int i;

	list_for_each_entry(attr, &flow->attrs, list) {
		esw_attr = attr->esw_attr;
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
				return attr;
		}
	}

	return NULL;
}
void
mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr;

	list_for_each_entry(attr, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
	}
}
static void
free_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr, *tmp;

	list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		mlx5_free_flow_attr(flow, attr);
		free_branch_attr(flow, attr->branch_true);
		free_branch_attr(flow, attr->branch_false);

		list_del(&attr->list);
		kvfree(attr->parse_attr);
		kfree(attr);
	}
}
int
mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr;
	int err = 0;

	list_for_each_entry(attr, &flow->attrs, list) {
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
		if (err)
			break;
	}

	return err;
}
/* TC filter rule HW translation:
 *
 * +---------------------+
 * + ft prio (tc chain)  +
 * + original match      +
 * +---------------------+
 *           |
 *           | if multi table action
 *           |
 *           v
 * +---------------------+
 * + post act ft         |<----.
 * + match fte id        |     | split on multi table action
 * + do actions          |-----'
 * +---------------------+
 *           |
 *           |
 *           v
 * Do rest of the actions after last multi table action.
 */
static int
alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
{
	struct mlx5e_post_act *post_act = get_post_action(flow->priv);
	struct mlx5_flow_attr *attr, *next_attr = NULL;
	struct mlx5e_post_act_handle *handle;
	int err;

	/* This is going in reverse order as needed.
	 * The first entry is the last attribute.
	 */
	list_for_each_entry(attr, &flow->attrs, list) {
		if (!next_attr) {
			/* Set counter action on last post act rule. */
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		}

		if (next_attr && !(attr->flags & MLX5_ATTR_FLAG_TERMINATING)) {
			err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
			if (err)
				goto out_free;
		}

		/* Don't add post_act rule for first attr (last in the list).
		 * It's being handled by the caller.
		 */
		if (list_is_last(&attr->list, &flow->attrs))
			break;

		err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
		if (err)
			goto out_free;

		err = post_process_attr(flow, attr, extack);
		if (err)
			goto out_free;

		handle = mlx5e_tc_post_act_add(post_act, attr);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out_free;
		}

		attr->post_act_handle = handle;

		if (attr->jumping_attr) {
			err = mlx5e_tc_act_set_next_post_act(flow, attr->jumping_attr, attr);
			if (err)
				goto out_free;
		}

		next_attr = attr;
	}

	if (flow_flag_test(flow, SLOW))
		goto out;

	err = mlx5e_tc_offload_flow_post_acts(flow);
	if (err)
		goto out_free;

out:
	return 0;

out_free:
	free_flow_post_acts(flow);
	return err;
}
static int
alloc_branch_attr(struct mlx5e_tc_flow *flow,
		  struct mlx5e_tc_act_branch_ctrl *cond,
		  struct mlx5_flow_attr **cond_attr,
		  u32 *jump_count,
		  struct netlink_ext_ack *extack)
{
	struct mlx5_flow_attr *attr;
	int err = 0;

	*cond_attr = mlx5e_clone_flow_attr_for_post_act(flow->attr,
							mlx5e_get_flow_namespace(flow));
	if (!(*cond_attr))
		return -ENOMEM;

	attr = *cond_attr;

	switch (cond->act_id) {
	case FLOW_ACTION_DROP:
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
		break;
	case FLOW_ACTION_ACCEPT:
	case FLOW_ACTION_PIPE:
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
		break;
	case FLOW_ACTION_JUMP:
		if (*jump_count) {
			NL_SET_ERR_MSG_MOD(extack, "Cannot offload flows with nested jumps");
			err = -EOPNOTSUPP;
			goto out_err;
		}
		*jump_count = cond->extval;
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
		attr->dest_ft = mlx5e_tc_post_act_get_ft(get_post_action(flow->priv));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out_err;
	}

	return err;
out_err:
	kfree(*cond_attr);
	*cond_attr = NULL;
	return err;
}
static void
dec_jump_count(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
	       struct mlx5_flow_attr *attr, struct mlx5e_priv *priv,
	       struct mlx5e_tc_jump_state *jump_state)
{
	if (!jump_state->jump_count)
		return;

	/* Single tc action can instantiate multiple offload actions (e.g. pedit)
	 * Jump only over a tc action
	 */
	if (act->id == jump_state->last_id && act->hw_index == jump_state->last_index)
		return;

	jump_state->last_id = act->id;
	jump_state->last_index = act->hw_index;

	/* nothing to do for intermediate actions */
	if (--jump_state->jump_count > 1)
		return;

	if (jump_state->jump_count == 1) { /* last action in the jump action list */

		/* create a new attribute after this action */
		jump_state->jump_target = true;

		if (tc_act->is_terminating_action) { /* the branch ends here */
			attr->flags |= MLX5_ATTR_FLAG_TERMINATING;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
		} else { /* the branch continues executing the rest of the actions */
			struct mlx5e_post_act *post_act;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			post_act = get_post_action(priv);
			attr->dest_ft = mlx5e_tc_post_act_get_ft(post_act);
		}
	} else if (jump_state->jump_count == 0) { /* first attr after the jump action list */
		/* This is the post action for the jumping attribute (either red or green)
		 * Use the stored jumping_attr to set the post act id on the jumping attribute
		 */
		attr->jumping_attr = jump_state->jumping_attr;
	}
}
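
/* parse_branch_ctrl() below builds the green/red branch attributes for a
 * branching action (e.g. police conform-exceed): jump_count tracks how many
 * tc actions the taken branch skips, and jumping_attr remembers which branch
 * attribute must later be wired to the attribute that follows the skipped
 * actions.
 */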
static int
parse_branch_ctrl(struct flow_action_entry *act, struct mlx5e_tc_act *tc_act,
		  struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr,
		  struct mlx5e_tc_jump_state *jump_state,
		  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_branch_ctrl cond_true, cond_false;
	u32 jump_count = jump_state->jump_count;
	int err;

	if (!tc_act->get_branch_ctrl)
		return 0;

	tc_act->get_branch_ctrl(act, &cond_true, &cond_false);

	err = alloc_branch_attr(flow, &cond_true,
				&attr->branch_true, &jump_count, extack);
	if (err)
		goto out_err;

	if (jump_count)
		jump_state->jumping_attr = attr->branch_true;

	err = alloc_branch_attr(flow, &cond_false,
				&attr->branch_false, &jump_count, extack);
	if (err)
		goto err_branch_false;

	if (jump_count && !jump_state->jumping_attr)
		jump_state->jumping_attr = attr->branch_false;

	jump_state->jump_count = jump_count;

	/* branching action requires its own counter */
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
	flow_flag_set(flow, USE_ACT_STATS);

	return 0;

err_branch_false:
	free_branch_attr(flow, attr->branch_true);
out_err:
	return err;
}
static int
parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
		 struct flow_action *flow_action)
{
	struct netlink_ext_ack *extack = parse_state->extack;
	struct mlx5e_tc_flow_action flow_action_reorder;
	struct mlx5e_tc_flow *flow = parse_state->flow;
	struct mlx5e_tc_jump_state jump_state = {};
	struct mlx5_flow_attr *attr = flow->attr;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_priv *priv = flow->priv;
	struct flow_action_entry *act, **_act;
	struct mlx5e_tc_act *tc_act;
	int err, i;

	flow_action_reorder.num_entries = flow_action->num_entries;
	flow_action_reorder.entries = kcalloc(flow_action->num_entries,
					      sizeof(flow_action), GFP_KERNEL);
	if (!flow_action_reorder.entries)
		return -ENOMEM;

	mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);

	ns_type = mlx5e_get_flow_namespace(flow);
	list_add(&attr->list, &flow->attrs);

	flow_action_for_each(i, _act, &flow_action_reorder) {
		jump_state.jump_target = false;
		act = *_act;
		tc_act = mlx5e_tc_act_get(act->id, ns_type);
		if (!tc_act) {
			NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
			err = -EOPNOTSUPP;
			goto out_free;
		}

		if (!tc_act->can_offload(parse_state, act, i, attr)) {
			err = -EOPNOTSUPP;
			goto out_free;
		}

		err = tc_act->parse_action(parse_state, act, priv, attr);
		if (err)
			goto out_free;

		dec_jump_count(act, tc_act, attr, priv, &jump_state);

		err = parse_branch_ctrl(act, tc_act, flow, attr, &jump_state, extack);
		if (err)
			goto out_free;

		parse_state->actions |= attr->action;
		if (!tc_act->stats_action)
			attr->tc_act_cookies[attr->tc_act_cookies_count++] = act->cookie;

		/* Split attr for multi table act if not the last act. */
		if (jump_state.jump_target ||
		    (tc_act->is_multi_table_act &&
		    tc_act->is_multi_table_act(priv, act, attr) &&
		    i < flow_action_reorder.num_entries - 1)) {
			err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
			if (err)
				goto out_free;

			attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
			if (!attr) {
				err = -ENOMEM;
				goto out_free;
			}

			list_add(&attr->list, &flow->attrs);
		}
	}

	kfree(flow_action_reorder.entries);

	err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
	if (err)
		goto out_free_post_acts;

	err = alloc_flow_post_acts(flow, extack);
	if (err)
		goto out_free_post_acts;

	return 0;

out_free:
	kfree(flow_action_reorder.entries);
out_free_post_acts:
	free_flow_post_acts(flow);

	return err;
}
static int
flow_action_supported(struct flow_action *flow_action,
		      struct netlink_ext_ack *extack)
{
	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
		return -EINVAL;
	}

	if (!flow_action_hw_stats_check(flow_action, extack,
					FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int
parse_tc_nic_actions(struct mlx5e_priv *priv,
		     struct flow_action *flow_action,
		     struct mlx5e_tc_flow *flow,
		     struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_parse_state *parse_state;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	int err;

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;

	attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	parse_attr = attr->parse_attr;
	parse_state = &parse_attr->parse_state;
	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
	parse_state->ct_priv = get_ct_priv(priv);

	err = parse_tc_actions(parse_state, flow_action);
	if (err)
		return err;

	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
	if (err)
		return err;

	err = verify_attr_actions(attr->action, extack);
	if (err)
		return err;

	if (!actions_match_supported(priv, flow_action, parse_state->actions,
				     parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_vf_rep(priv->netdev) &&
		mlx5e_eswitch_vf_rep(peer_netdev) &&
		mlx5e_same_hw_devs(priv, peer_priv));
}

static bool same_hw_reps(struct mlx5e_priv *priv,
			 struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return mlx5e_eswitch_rep(priv->netdev) &&
	       mlx5e_eswitch_rep(peer_netdev) &&
	       mlx5e_same_hw_devs(priv, peer_priv);
}

static bool is_lag_dev(struct mlx5e_priv *priv,
		       struct net_device *peer_netdev)
{
	return ((mlx5_lag_is_sriov(priv->mdev) ||
		 mlx5_lag_is_multipath(priv->mdev)) &&
		same_hw_reps(priv, peer_netdev));
}

static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
{
	return same_hw_reps(priv, out_dev) && mlx5_lag_is_mpesw(priv->mdev);
}

bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
				    struct net_device *out_dev)
{
	if (is_merged_eswitch_vfs(priv, out_dev))
		return true;

	if (is_multiport_eligible(priv, out_dev))
		return true;

	if (is_lag_dev(priv, out_dev))
		return true;

	return mlx5e_eswitch_rep(out_dev) &&
	       same_port_devs(priv, netdev_priv(out_dev));
}
int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
				      struct mlx5_flow_attr *attr,
				      int ifindex,
				      enum mlx5e_tc_int_port_type type,
				      u32 *action,
				      int out_index)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5e_tc_int_port_priv *int_port_priv;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_int_port *dest_int_port;
	int err;

	parse_attr = attr->parse_attr;
	int_port_priv = mlx5e_get_int_port_priv(priv);

	dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
	if (IS_ERR(dest_int_port))
		return PTR_ERR(dest_int_port);

	err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
					MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
					mlx5e_tc_int_port_get_metadata(dest_int_port));
	if (err) {
		mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
		return err;
	}

	*action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;

	esw_attr->dest_int_port = dest_int_port;
	esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
	esw_attr->split_count = out_index;

	/* Forward to root fdb for matching against the new source vport */
	attr->dest_chain = 0;

	return 0;
}
static int
parse_tc_fdb_actions(struct mlx5e_priv *priv,
		     struct flow_action *flow_action,
		     struct mlx5e_tc_flow *flow,
		     struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_act_parse_state *parse_state;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *filter_dev;
	int err;

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;

	esw_attr = attr->esw_attr;
	parse_attr = attr->parse_attr;
	filter_dev = parse_attr->filter_dev;
	parse_state = &parse_attr->parse_state;
	mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
	parse_state->ct_priv = get_ct_priv(priv);

	err = parse_tc_actions(parse_state, flow_action);
	if (err)
		return err;

	/* Forward to/from internal port can only have 1 dest */
	if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
	    esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Rules with internal port can have only one destination");
		return -EOPNOTSUPP;
	}

	/* Forward from tunnel/internal port to internal port is not supported */
	if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
	    esw_attr->dest_int_port) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Forwarding from tunnel/internal port to internal port is not supported");
		return -EOPNOTSUPP;
	}

	err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
	if (err)
		return err;

	if (!actions_match_supported(priv, flow_action, parse_state->actions,
				     parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
static void get_flags(int flags, unsigned long *flow_flags)
{
	unsigned long __flow_flags = 0;

	if (flags & MLX5_TC_FLAG(INGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
	if (flags & MLX5_TC_FLAG(EGRESS))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
		__flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);

	*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
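/* Flows are hashed by the TC flower cookie, which identifies a filter
 * instance to the stack; configure/delete/stats below all look a flow up
 * with rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params).
 */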
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
				    unsigned long flags)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_rep_priv *rpriv;

	if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
		rpriv = priv->ppriv;
		return &rpriv->tc_ht;
	} else /* NIC offload */
		return &tc->ht;
}
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
		flow_flag_test(flow, INGRESS);
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
	     mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	if (mlx5_lag_is_mpesw(esw_attr->in_mdev))
		return true;

	return false;
}
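/* Summary of the conditions above: a duplicate ("peer") rule on the other
 * eswitch is needed only when the two eswitches are devcom-paired and
 * either (a) SR-IOV LAG or multipath is active and the rule ingresses on a
 * non-uplink rep or performs encap, or (b) the device runs in multiport
 * eswitch (MPESW) mode.
 */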
struct mlx5_flow_attr *
mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
{
	u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
				sizeof(struct mlx5_esw_flow_attr) :
				sizeof(struct mlx5_nic_flow_attr);
	struct mlx5_flow_attr *attr;

	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
	if (!attr)
		return attr;

	INIT_LIST_HEAD(&attr->list);

	return attr;
}
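/* The namespace-specific attribute (esw or nic) lives in the tail of the
 * same allocation, which is why a single kzalloc of
 * sizeof(*attr) + ex_attr_size suffices: attr->esw_attr / attr->nic_attr
 * resolve into that trailing space.
 */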
static void
mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
	struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
	bool vf_tun;

	if (!attr)
		return;

	if (attr->post_act_handle)
		mlx5e_tc_post_act_del(get_post_action(flow->priv), attr->post_act_handle);

	clean_encap_dests(flow->priv, flow, attr, &vf_tun);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(counter_dev, attr->counter);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
	}
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct flow_cls_offload *f, unsigned long flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr;
	struct mlx5e_tc_flow *flow;
	int err = -ENOMEM;
	int out_index;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow)
		goto err_free;

	flow->flags = flow_flags;
	flow->cookie = f->cookie;
	flow->priv = priv;

	attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
	if (!attr)
		goto err_free;

	flow->attr = attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		INIT_LIST_HEAD(&flow->encaps[out_index].list);
	INIT_LIST_HEAD(&flow->hairpin);
	INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
	INIT_LIST_HEAD(&flow->attrs);
	refcount_set(&flow->refcnt, 1);
	init_completion(&flow->init_done);
	init_completion(&flow->del_hw_done);

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static void
mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
		     struct mlx5e_tc_flow_parse_attr *parse_attr,
		     struct flow_cls_offload *f)
{
	attr->parse_attr = parse_attr;
	attr->chain = f->common.chain_index;
	attr->prio = f->common.prio;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct flow_cls_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	mlx5e_flow_attr_init(attr, parse_attr, f);

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size  = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned the mdev of the port the packet originated
	 * from. So packets redirected to the uplink use the same mdev as the
	 * original flow, and packets redirected from the uplink use the
	 * peer mdev.
	 * Multiport eswitch is the special case in which we need to keep
	 * the original mdev.
	 */
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK && !mlx5_lag_is_mpesw(priv->mdev))
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size  = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule is allowed to be duplicated on a non-uplink
	 * representor sharing a tc block with other slaves of a LAG device.
	 * rpriv can be NULL if this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just ignore its whole lifecycle.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev) {
			rcu_read_unlock();
			goto out;
		}

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		rcu_read_unlock();
		goto out;
	}
	rcu_read_unlock();

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
int mlx5e_tc_fill_action_stats(struct mlx5e_priv *priv,
			       struct flow_offload_action *fl_act)
{
	return mlx5e_tc_act_stats_fill_stats(get_act_stats_handle(priv), fl_act);
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow);
			if (!counter)
				goto errout;

			mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		}
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		if (flow_flag_test(flow, USE_ACT_STATS)) {
			f->use_act_stats = true;
		} else {
			counter = mlx5e_tc_get_counter(flow->peer_flow);
			if (!counter)
				goto no_peer_counter;
			mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

			bytes += bytes2;
			packets += packets2;
			lastuse = max_t(u64, lastuse, lastuse2);
		}
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec,
	 * where mbit means million bits.
	 * Moreover, if rate is non-zero we choose to configure to a minimum of
	 * 1 mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
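/* Worked example of the rounding above (illustrative numbers): a rate of
 * 1,562,500 bytes/sec is 12,500,000 bits/sec; adding 500,000 and dividing
 * by 1,000,000 rounds 12.5 up to rate_mbps = 13. Any non-zero rate below
 * 62,500 bytes/sec (0.5 mbit/sec) divides down to 0 and is clamped to the
 * 1 mbit/sec minimum by the max_t().
 */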
static int
tc_matchall_police_validate(const struct flow_action *action,
			    const struct flow_action_entry *act,
			    struct netlink_ext_ack *extack)
{
	if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not continue");
		return -EOPNOTSUPP;
	}

	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing support only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			err = tc_matchall_police_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}
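/* The two-phase walk above is deliberate: entries are grabbed under
 * hairpin_tbl_lock with refcount_inc_not_zero() so the table may keep
 * changing, and only after the lock is dropped do we block in
 * wait_for_completion() before clearing the dead peer and releasing the
 * temporary reference.
 */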
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			   MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}
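/* Illustrative sizing (hypothetical capability values): if the device
 * exposes 64K flow counters and log_max_ft_size = 17, then
 * tc_grp_size = min(64K, MLX5E_TC_TABLE_MAX_GROUP_SIZE) = 64K and
 * tc_tbl_size = min(64K * MLX5E_TC_TABLE_NUM_GROUPS, 2^17) = 128K entries.
 */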
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			     MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	mlx5e_hairpin_params_init(&tc->hairpin_params, dev);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs));

	tc->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(tc->action_stats_handle)) {
		err = PTR_ERR(tc->action_stats_handle);
		goto err_act_stats;
	}

	return 0;

err_act_stats:
	unregister_netdevice_notifier_dev_net(priv->netdev,
					      &tc->netdevice_nb,
					      &tc->netdevice_nn);
err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	mlx5e_flow_put(priv, flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	debugfs_remove_recursive(tc->dfs_root);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
	mlx5e_tc_act_stats_free(tc->action_stats_handle);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
	lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);

	return 0;
}
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Two last values are reserved for stack devices slow path table mark
	 * and bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create();
	if (IS_ERR(uplink_priv->action_stats_handle)) {
		err = PTR_ERR(uplink_priv->action_stats_handle);
		goto err_action_counter;
	}

	mlx5_esw_offloads_devcom_init(esw);

	return 0;

err_action_counter:
	mlx5e_tc_tun_cleanup(uplink_priv->encap);
err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	mlx5_esw_offloads_devcom_cleanup(esw);

	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	mlx5e_tc_act_stats_free(uplink_priv->action_stats_handle);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
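/* The tunnel_id handled below is a packed value: the low ENC_OPTS_BITS
 * carry the enc-opts mapping id and the bits above them carry the tunnel
 * mapping id, i.e. roughly
 * tunnel_id = (tun_id << ENC_OPTS_BITS) | enc_opts_id;
 * a zero tun_id means there is no tunnel metadata to restore.
 */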
static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
				    struct mlx5e_tc_update_priv *tc_priv,
				    u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct tunnel_match_enc_opts enc_opts = {};
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct metadata_dst *tun_dst;
	struct tunnel_match_key key;
	u32 tun_id, enc_opts_id;
	struct net_device *dev;
	int err;

	enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
	tun_id = tunnel_id >> ENC_OPTS_BITS;

	if (!tun_id)
		return true;

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	err = mapping_find(uplink_priv->tunnel_mapping, tun_id, &key);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel for tun_id: %d, err: %d\n",
			   tun_id, err);
		return false;
	}

	if (enc_opts_id) {
		err = mapping_find(uplink_priv->tunnel_enc_opts_mapping,
				   enc_opts_id, &enc_opts);
		if (err) {
			netdev_dbg(priv->netdev,
				   "Couldn't find tunnel (opts) for tun_id: %d, err: %d\n",
				   enc_opts_id, err);
			return false;
		}
	}

	switch (key.enc_control.addr_type) {
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
					   key.enc_ip.tos, key.enc_ip.ttl,
					   key.enc_tp.dst, TUNNEL_KEY,
					   key32_to_tunnel_id(key.enc_key_id.keyid),
					   enc_opts.key.len);
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
					     key.enc_ip.tos, key.enc_ip.ttl,
					     key.enc_tp.dst, 0, TUNNEL_KEY,
					     key32_to_tunnel_id(key.enc_key_id.keyid),
					     enc_opts.key.len);
		break;
	default:
		netdev_dbg(priv->netdev,
			   "Couldn't restore tunnel, unsupported addr_type: %d\n",
			   key.enc_control.addr_type);
		return false;
	}

	if (!tun_dst) {
		netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
		return false;
	}

	tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;

	if (enc_opts.key.len)
		ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
					enc_opts.key.data,
					enc_opts.key.len,
					enc_opts.key.dst_opt_type);

	skb_dst_set(skb, (struct dst_entry *)tun_dst);
	dev = dev_get_by_index(&init_net, key.filter_ifindex);
	if (!dev) {
		netdev_dbg(priv->netdev,
			   "Couldn't find tunnel device with ifindex: %d\n",
			   key.filter_ifindex);
		return false;
	}

	/* Set fwd_dev so we do dev_put() after datapath */
	tc_priv->fwd_dev = dev;

	skb->dev = dev;

	return true;
}
static bool mlx5e_tc_restore_skb_tc_meta(struct sk_buff *skb, struct mlx5_tc_ct_priv *ct_priv,
					 struct mlx5_mapped_obj *mapped_obj, u32 zone_restore_id,
					 u32 tunnel_id, struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct tc_skb_ext *tc_skb_ext;
	u64 act_miss_cookie;
	u32 chain;

	chain = mapped_obj->type == MLX5_MAPPED_OBJ_CHAIN ? mapped_obj->chain : 0;
	act_miss_cookie = mapped_obj->type == MLX5_MAPPED_OBJ_ACT_MISS ?
			  mapped_obj->act_miss_cookie : 0;
	if (chain || act_miss_cookie) {
		if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id))
			return false;

		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (!tc_skb_ext)
			return false;

		if (act_miss_cookie) {
			tc_skb_ext->act_miss_cookie = act_miss_cookie;
			tc_skb_ext->act_miss = 1;
		} else {
			tc_skb_ext->chain = chain;
		}
	}

	if (tc_priv)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	return true;
}
static void mlx5e_tc_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
					struct mlx5_mapped_obj *mapped_obj,
					struct mlx5e_tc_update_priv *tc_priv)
{
	if (!mlx5e_tc_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) {
		netdev_dbg(priv->netdev,
			   "Failed to restore tunnel info for sampled packet\n");
		return;
	}
	mlx5e_tc_sample_skb(skb, mapped_obj);
}
static bool mlx5e_tc_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
					  struct mlx5_mapped_obj *mapped_obj,
					  struct mlx5e_tc_update_priv *tc_priv,
					  u32 tunnel_id)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	bool forward_tx = false;

	/* Tunnel restore takes precedence over int port restore */
	if (tunnel_id)
		return mlx5e_tc_restore_tunnel(priv, skb, tc_priv, tunnel_id);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
				      mapped_obj->int_port_metadata, &forward_tx)) {
		/* Set fwd_dev for future dev_put */
		tc_priv->fwd_dev = skb->dev;
		tc_priv->forward_tx = forward_tx;

		return true;
	}

	return false;
}
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb,
			 struct mapping_ctx *mapping_ctx, u32 mapped_obj_id,
			 struct mlx5_tc_ct_priv *ct_priv,
			 u32 zone_restore_id, u32 tunnel_id,
			 struct mlx5e_tc_update_priv *tc_priv)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	int err;

	err = mapping_find(mapping_ctx, mapped_obj_id, &mapped_obj);
	if (err) {
		netdev_dbg(skb->dev,
			   "Couldn't find mapped object for mapped_obj_id: %d, err: %d\n",
			   mapped_obj_id, err);
		return false;
	}

	switch (mapped_obj.type) {
	case MLX5_MAPPED_OBJ_CHAIN:
	case MLX5_MAPPED_OBJ_ACT_MISS:
		return mlx5e_tc_restore_skb_tc_meta(skb, ct_priv, &mapped_obj, zone_restore_id,
						    tunnel_id, tc_priv);
	case MLX5_MAPPED_OBJ_SAMPLE:
		mlx5e_tc_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
		tc_priv->skb_done = true;
		return true;
	case MLX5_MAPPED_OBJ_INT_PORT_METADATA:
		return mlx5e_tc_restore_skb_int_port(priv, skb, &mapped_obj, tc_priv, tunnel_id);
	default:
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}

	return true;
}
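/* For NIC flows, the CQE ft_metadata (reg_b) is decoded by the helper
 * below: the low bits covered by MLX5E_TC_TABLE_CHAIN_TAG_MASK hold the
 * chain-mapping object id, and the zone restore id sits at the
 * NIC_ZONE_RESTORE_TO_REG offset. NIC flows carry no tunnel metadata,
 * hence the 0/NULL tail arguments to mlx5e_tc_update_skb().
 */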
bool mlx5e_tc_update_skb_nic(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
{
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	u32 mapped_obj_id, reg_b, zone_restore_id;
	struct mlx5_tc_ct_priv *ct_priv;
	struct mapping_ctx *mapping_ctx;
	struct mlx5e_tc_table *tc;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	mapped_obj_id = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
	zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
			  ESW_ZONE_ID_MASK;
	ct_priv = tc->ct;
	mapping_ctx = tc->mapping;

	return mlx5e_tc_update_skb(cqe, skb, mapping_ctx, mapped_obj_id, ct_priv, zone_restore_id,
				   0, NULL);
}
int mlx5e_tc_action_miss_mapping_get(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				     u64 act_miss_cookie, u32 *act_miss_mapping)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_mapped_obj mapped_obj = {};
	struct mapping_ctx *ctx;
	int err;

	ctx = esw->offloads.reg_c0_obj_pool;

	mapped_obj.type = MLX5_MAPPED_OBJ_ACT_MISS;
	mapped_obj.act_miss_cookie = act_miss_cookie;
	err = mapping_add(ctx, &mapped_obj, act_miss_mapping);
	if (err)
		return err;

	attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
	if (IS_ERR(attr->act_id_restore_rule)) {
		err = PTR_ERR(attr->act_id_restore_rule);
		goto err_rule;
	}

	return 0;

err_rule:
	mapping_remove(ctx, *act_miss_mapping);
	return err;
}
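/* mlx5e_tc_action_miss_mapping_get() and the _put() below are strictly
 * paired: get allocates the reg_c0 mapping and installs the restore rule,
 * while put deletes the restore rule before releasing the mapping, in the
 * reverse order of the error path above.
 */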
void mlx5e_tc_action_miss_mapping_put(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr,
				      u32 act_miss_mapping)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mapping_ctx *ctx;

	ctx = esw->offloads.reg_c0_obj_pool;
	mlx5_del_flow_rules(attr->act_id_restore_rule);
	mapping_remove(ctx, act_miss_mapping);
}