/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#include "devlink.h"
#include "lag/lag.h"
#include "en/tc/post_meter.h"

#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep)	\
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4

#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)

static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}

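/* Hint the HW about the origin of the packet (uplink, local vport or
 * internal port) so steering can apply flow_source based optimizations.
 */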
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}

/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
 * are not used later in the pipeline either, so clear them all for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}

static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (!attr->chain && esw_attr && esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}

static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}

static int
esw_setup_mtu_dest(struct mlx5_flow_destination *dest,
		   struct mlx5e_meter_attr *meter,
		   int i)
{
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_RANGE;
	dest[i].range.field = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN;
	dest[i].range.min = 0;
	dest[i].range.max = meter->params.mtu;
	dest[i].range.hit_ft = mlx5e_post_meter_get_mtu_true_ft(meter->post_meter);
	dest[i].range.miss_ft = mlx5e_post_meter_get_mtu_false_ft(meter->post_meter);

	return 0;
}

static void
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       u32 sampler_id,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = sampler_id;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr);
	return 0;
}

static void
esw_setup_accept_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_fs_chains *chains, int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
			 struct mlx5_eswitch *esw, int i)
{
	if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_eswitch_get_slow_fdb(esw);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}

static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}

static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	/* flow steering cannot handle more than one dest with the same ft
	 * in a single flow
	 */
	if (esw_attr->out_count - esw_attr->split_count > 1)
		return -EOPNOTSUPP;

	err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
	if (err)
		return err;

	if (esw_attr->dests[esw_attr->split_count].pkt_reformat) {
		flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
		flow_act->pkt_reformat = esw_attr->dests[esw_attr->split_count].pkt_reformat;
	}
	(*i)++;

	return 0;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}

static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	bool result = false;
	int i;

	/* Indirect table is supported only for flows with in_port uplink
	 * and the destination is vport on the same eswitch as the uplink,
	 * return false in case at least one of destinations doesn't meet
	 * this criterion.
	 */
	for (i = esw_attr->split_count; i < esw_attr->out_count; i++) {
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev)) {
			result = true;
		} else {
			result = false;
			break;
		}
	}

	return result;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}

static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
		if (dest[dest_idx].vport.num == MLX5_VPORT_UPLINK &&
		    mlx5_lag_is_mpesw(esw->dev))
			dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP_VALID) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}

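/* Fill the dest array for a rule. Mutually exclusive attr flags (slow path,
 * sample, accept, MTU check) are handled first; otherwise the destinations
 * are the output vports, optionally followed by a forward-to-table or
 * goto-chain destination.
 */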
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH) {
		esw_setup_slow_path_dest(dest, flow_act, esw, *i);
		(*i)++;
		goto out;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr->sample_attr.sampler_id, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_ACCEPT) {
		esw_setup_accept_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->flags & MLX5_ATTR_FLAG_MTU) {
		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

		if (attr->dest_ft) {
			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
			(*i)++;
		} else if (attr->dest_chain) {
			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
						   1, 0, *i);
			(*i)++;
		}
	}

out:
	return err;
}

static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}

static void
esw_setup_meter(struct mlx5_flow_attr *attr, struct mlx5_flow_act *flow_act)
{
	struct mlx5e_flow_meter_handle *meter;

	meter = attr->meter_attr.meter;
	flow_act->exe_aso.type = attr->exe_aso_type;
	flow_act->exe_aso.object_id = meter->obj_id;
	flow_act->exe_aso.flow_meter.meter_idx = meter->idx;
	flow_act->exe_aso.flow_meter.init_color = MLX5_FLOW_METER_COLOR_GREEN;
	/* use metadata reg 5 for packet color */
	flow_act->exe_aso.return_reg_id = 5;
}

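/* Add an offloaded rule. For split (mirror) rules the rule is placed in the
 * per-vport table, otherwise in the chain/prio table (or attr->ft), with the
 * source port match set unless MLX5_ATTR_FLAG_NO_IN_PORT is given.
 */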
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if ((flow_act.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	    attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER)
		esw_setup_meter(attr, &flow_act);

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (!i) {
		kfree(dest);
		dest = NULL;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}

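/* The fwd rule implements the first half of a split (mirror) action: the
 * mirror destinations are written in the fast path FDB together with a
 * forward to the per-vport table, where the rest of the rule is applied.
 */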
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			/* Source port rewrite (forward to ovs internal port or stack device) isn't
			 * supported in the rule of split action.
			 */
			err = -EOPNOTSUPP;
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err)
			goto err_chain_src_rewrite;
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}

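/* Common rule teardown: drop the term table references and release the
 * per-vport or chain table that rule creation took.
 */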
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5e_tc_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}

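/* Add a rule to the slow path FDB that matches traffic sent from the given
 * send queue (SQN) of the eswitch manager and forwards it to the rep's vport.
 */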
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
	    rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

void mlx5_eswitch_del_send_to_vport_meta_rule(struct mlx5_flow_handle *rule)
{
	if (rule)
		mlx5_del_flow_rules(rule);
}

struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_meta_rule(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
	dest.vport.num = vport_num;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule vport %d, err %ld\n",
			 vport_num, PTR_ERR(flow_rule));

	kvfree(spec);
	return flow_rule;
}

static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}

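/* Enable/disable passing reg_c0 (and reg_c1 when loopback is supported) from
 * the FDB on to the target vport, so the metadata survives into NIC RX.
 */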
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}

static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}

static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}

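/* With a merged eswitch, traffic that misses in this FDB may have originated
 * from a port of the peer device; install per-vport rules that send such
 * packets back to the peer's eswitch manager vport.
 */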
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}

static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}

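/* Install the two catch-all miss rules (unicast and multicast dmac) that
 * forward unmatched traffic to the eswitch manager vport.
 */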
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw),
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
	kvfree(spec);
	return err;
}

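/* Add a rule to the restore table that matches the given tag in reg_c0,
 * sets it as the flow tag and copies reg_c1 into reg_b before forwarding
 * to the offloads table.
 */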
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}

#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}

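/* When chains/prios are not supported, pre-create the per-vport tables for
 * all vports so split rules always have a table to forward to.
 */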
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}

#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g.
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif

static int
esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int count, err = 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment at table_size calculation */
	count = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, *ix + count - 1);
	*ix += count;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

out:
	return err;
}

static int
esw_create_meta_send_to_vport_group(struct mlx5_eswitch *esw,
				    struct mlx5_flow_table *fdb,
				    u32 *flow_group_in,
				    int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!esw_src_port_rewrite_supported(esw))
		return 0;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_0,
		 mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in,
		 end_flow_index, *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev,
			 "Failed to create send-to-vport meta flow group err(%d)\n", err);
		goto send_vport_meta_err;
	}
	esw->fdb_table.offloads.send_to_vport_meta_grp = g;

	return 0;

send_vport_meta_err:
	return err;
}

static int
esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
			       struct mlx5_flow_table *fdb,
			       u32 *flow_group_in,
			       int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return 0;

	memset(flow_group_in, 0, inlen);

	esw_set_flow_group_source_port(esw, flow_group_in);

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					      flow_group_in,
					      match_criteria);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);

		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + esw->total_vports - 1);
	*ix += esw->total_vports;

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create peer miss flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.offloads.peer_miss_grp = g;

out:
	return err;
}

static int
esw_create_miss_group(struct mlx5_eswitch *esw,
		      struct mlx5_flow_table *fdb,
		      u32 *flow_group_in,
		      int *ix)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	void *match_criteria;
	int err = 0;
	u8 *dmac;

	memset(flow_group_in, 0, inlen);

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 *ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(esw->dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	return err;
}

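/* The slow path FDB is laid out as: send-to-vport group, send-to-vport
 * metadata group, peer miss group and finally the miss group, sized
 * according to the table_size calculation below.
 */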
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix = 0, err = 0;
	u32 flags = 0, *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If it needs to be changed in the future note
	 * the peer miss group should also be created based on the number of
	 * total vports of the peer (currently it also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	err = esw_create_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_err;

	err = esw_create_meta_send_to_vport_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto send_vport_meta_err;

	err = esw_create_peer_esw_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto peer_miss_err;

	err = esw_create_miss_group(esw, fdb, flow_group_in, &ix);
	if (err)
		goto miss_err;

	kvfree(flow_group_in);
	return 0;

miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!mlx5_eswitch_get_slow_fdb(esw))
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(mlx5_eswitch_get_slow_fdb(esw));
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw_get_nr_ft_offloads_steering_src_ports(esw) +
			  MLX5_ESW_FT_OFFLOADS_DROP_RULE;
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw_get_nr_ft_offloads_steering_src_ports(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

static int esw_create_vport_rx_drop_rule_index(struct mlx5_eswitch *esw)
{
	/* ft_offloads table is enlarged by MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
	 * for the drop rule, which is placed at the end of the table.
	 * So return the total of vport and int_port entries as the rule index.
	 */
	return esw_get_nr_ft_offloads_steering_src_ports(esw);
}

static int esw_create_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int flow_index;
	int err = 0;

	flow_index = esw_create_vport_rx_drop_rule_index(esw);

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx drop group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_drop_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_group)
		mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}

struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

static int esw_create_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, NULL,
					&flow_act, NULL, 0);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "fs offloads: Failed to add vport rx drop rule err %ld\n",
			 PTR_ERR(flow_rule));
		return PTR_ERR(flow_rule);
	}

	esw->offloads.vport_rx_drop_rule = flow_rule;

	return 0;
}

static void esw_destroy_vport_rx_drop_rule(struct mlx5_eswitch *esw)
{
	if (esw->offloads.vport_rx_drop_rule)
		mlx5_del_flow_rules(esw->offloads.vport_rx_drop_rule);
}

static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_is_fdb_created(esw))
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}

static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}

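/* The restore table maps the tag carried in reg_c0 back to a flow tag
 * delivered to software; a single flow group covers the whole tag space.
 */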
static int esw_create_restore_table(struct mlx5_eswitch *esw)
{
	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *ns;
	struct mlx5_modify_hdr *mod_hdr;
	void *match_criteria, *misc;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int err = 0;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in) {
		err = -ENOMEM;
		goto out_free;
	}

	ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		esw_warn(esw->dev, "Failed to create restore table, err %d\n",
			 err);
		goto out_free;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    misc_parameters_2);

	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ft_attr.max_fte - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS_2);
	g = mlx5_create_flow_group(ft, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create restore flow group, err: %d\n",
			 err);
		goto err_group;
	}

	MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
	MLX5_SET(copy_action_in, modact, src_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
	MLX5_SET(copy_action_in, modact, dst_field,
		 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
	mod_hdr = mlx5_modify_header_alloc(esw->dev,
					   MLX5_FLOW_NAMESPACE_KERNEL, 1,
					   modact);
	if (IS_ERR(mod_hdr)) {
		err = PTR_ERR(mod_hdr);
		esw_warn(dev, "Failed to create restore mod header, err: %d\n",
			 err);
		goto err_mod_hdr;
	}

	esw->offloads.ft_offloads_restore = ft;
	esw->offloads.restore_group = g;
	esw->offloads.restore_copy_hdr_id = mod_hdr;

	kvfree(flow_group_in);

	return 0;

err_mod_hdr:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(ft);
out_free:
	kvfree(flow_group_in);

	return err;
}

static int esw_offloads_start(struct mlx5_eswitch *esw,
			      struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_OFFLOADS;
	err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed setting eswitch to offloads");
		esw->mode = MLX5_ESWITCH_LEGACY;
		mlx5_rescan_drivers(esw->dev);
		return err;
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			NL_SET_ERR_MSG_MOD(extack,
					   "Inline mode is different between vports");
		}
	}
	return 0;
}
static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
					   struct mlx5_eswitch_rep *rep,
					   xa_mark_t mark)
{
	bool mark_set;

	/* Copy the mark from vport to its rep */
	mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
	if (mark_set)
		xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
}

static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep)
		return -ENOMEM;

	rep->vport = vport->vport;
	rep->vport_index = vport->index;
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);

	err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
	if (err)
		goto insert_err;

	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
	mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
	return 0;

insert_err:
	kfree(rep);
	return err;
}

static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
					  struct mlx5_eswitch_rep *rep)
{
	xa_erase(&esw->offloads.vport_reps, rep->vport);
	kfree(rep);
}

static void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_rep(esw, i, rep)
		mlx5_esw_offloads_rep_cleanup(esw, rep);
	xa_destroy(&esw->offloads.vport_reps);
}

static int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	xa_init(&esw->offloads.vport_reps);

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = mlx5_esw_offloads_rep_init(esw, vport);
		if (err)
			goto err;
	}
	return 0;

err:
	esw_offloads_cleanup_reps(esw);
	return err;
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (ctx->val.vbool)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}

static int esw_port_metadata_get(struct devlink *devlink, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch);
	return 0;
}

static int esw_port_metadata_validate(struct devlink *devlink, u32 id,
				      union devlink_param_value val,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u8 esw_mode;

	esw_mode = mlx5_eswitch_mode(dev);
	if (esw_mode == MLX5_ESWITCH_OFFLOADS) {
		NL_SET_ERR_MSG_MOD(extack,
				   "E-Switch must be disabled or in a non-switchdev mode");
		return -EBUSY;
	}
	return 0;
}

static const struct devlink_param esw_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_PORT_METADATA,
			     "esw_port_metadata", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     esw_port_metadata_get,
			     esw_port_metadata_set,
			     esw_port_metadata_validate),
};
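/* The parameter registered above is toggled from userspace through devlink,
 * for example (device address is illustrative):
 *
 *   devlink dev param set pci/0000:06:00.0 \
 *           name esw_port_metadata value false cmode runtime
 *
 * The validate/set callbacks reject the change once the FDB has been
 * created, so it must be issued while the E-Switch is still in legacy mode.
 */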
int esw_offloads_init(struct mlx5_eswitch *esw)
{
	int err;

	err = esw_offloads_init_reps(esw);
	if (err)
		return err;

	err = devl_params_register(priv_to_devlink(esw->dev),
				   esw_devlink_params,
				   ARRAY_SIZE(esw_devlink_params));
	if (err)
		goto err_params;

	return 0;

err_params:
	esw_offloads_cleanup_reps(esw);
	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw)
{
	devl_params_unregister(priv_to_devlink(esw->dev),
			       esw_devlink_params,
			       ARRAY_SIZE(esw_devlink_params));
	esw_offloads_cleanup_reps(esw);
}
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
				      struct mlx5_eswitch_rep *rep, u8 rep_type)
{
	if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
			   REP_LOADED, REP_REGISTERED) == REP_LOADED)
		esw->offloads.rep_ops[rep_type]->unload(rep);
}

static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	mlx5_esw_for_each_sf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	__unload_reps_sf_vport(esw, rep_type);

	mlx5_esw_for_each_vf_rep(esw, i, rep)
		__esw_offloads_unload_rep(esw, rep, rep_type);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
		__esw_offloads_unload_rep(esw, rep, rep_type);
	}

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	__esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;
	int err;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
		if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
				   REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
			err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
			if (err)
				goto err_reps;
		}

	return 0;

err_reps:
	atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
	for (--rep_type; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
	return err;
}

void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_eswitch_rep *rep;
	int rep_type;

	rep = mlx5_eswitch_get_rep(esw, vport_num);
	for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
		__esw_offloads_unload_rep(esw, rep, rep_type);
}

int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	int err;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	if (vport_num != MLX5_VPORT_UPLINK) {
		err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
		if (err)
			return err;
	}

	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto load_err;
	return err;

load_err:
	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
	return err;
}

void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	mlx5_esw_offloads_rep_unload(esw, vport_num);

	if (vport_num != MLX5_VPORT_UPLINK)
		mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
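/* Shared-FDB (LAG) support: the functions below re-root the slave device's
 * FDB namespace onto the master's root table, and install a "bounce" rule
 * in the master manager-vport egress ACL so that uplink traffic belonging
 * to the slave eswitch is forwarded back to it.
 */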
static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
				  struct mlx5_core_dev *slave)
{
	u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
	u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
	struct mlx5_flow_root_namespace *root;
	struct mlx5_flow_namespace *ns;
	int err;

	MLX5_SET(set_flow_table_root_in, in, opcode,
		 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
	MLX5_SET(set_flow_table_root_in, in, table_type,
		 FS_FT_FDB);

	if (master) {
		ns = mlx5_get_flow_namespace(master,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id_valid, 1);
		MLX5_SET(set_flow_table_root_in, in,
			 table_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(master, vhca_id));
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	} else {
		ns = mlx5_get_flow_namespace(slave,
					     MLX5_FLOW_NAMESPACE_FDB);
		root = find_root(&ns->node);
		mutex_lock(&root->chain_lock);
		MLX5_SET(set_flow_table_root_in, in, table_id,
			 root->root_ft->id);
	}

	err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
	mutex_unlock(&root->chain_lock);

	return err;
}
static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
					struct mlx5_core_dev *slave,
					struct mlx5_vport *vport,
					struct mlx5_flow_table *acl)
{
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
	MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
		 MLX5_CAP_GEN(slave, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc,
			 source_eswitch_owner_vhca_id);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = slave->priv.eswitch->manager_vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;

	flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
					&dest, 1);
	if (IS_ERR(flow_rule))
		err = PTR_ERR(flow_rule);
	else
		vport->egress.offloads.bounce_rule = flow_rule;

	kvfree(spec);
	return err;
}
static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
				      struct mlx5_core_dev *slave)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_eswitch *esw = master->priv.eswitch;
	struct mlx5_flow_table_attr ft_attr = {
		.max_fte = 1, .prio = 0, .level = 0,
		.flags = MLX5_FLOW_TABLE_OTHER_VPORT,
	};
	struct mlx5_flow_namespace *egress_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	struct mlx5_vport *vport;
	void *match_criteria;
	u32 *flow_group_in;
	int err;

	vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	egress_ns = mlx5_get_flow_vport_acl_namespace(master,
						      MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						      vport->index);
	if (!egress_ns)
		return -EINVAL;

	if (vport->egress.acl)
		return 0;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
	if (IS_ERR(acl)) {
		err = PTR_ERR(acl);
		goto out;
	}

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_port);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria,
			 misc_parameters.source_eswitch_owner_vhca_id);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET(create_flow_group_in, flow_group_in,
		 source_eswitch_owner_vhca_id_valid, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		goto err_group;
	}

	err = __esw_set_master_egress_rule(master, slave, vport, acl);
	if (err)
		goto err_rule;

	vport->egress.acl = acl;
	vport->egress.offloads.bounce_grp = g;

	kvfree(flow_group_in);

	return 0;

err_rule:
	mlx5_destroy_flow_group(g);
err_group:
	mlx5_destroy_flow_table(acl);
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
				       dev->priv.eswitch->manager_vport);

	esw_acl_egress_ofld_cleanup(vport);
}

int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
					    struct mlx5_eswitch *slave_esw)
{
	int err;

	err = esw_set_slave_root_fdb(master_esw->dev,
				     slave_esw->dev);
	if (err)
		return err;

	err = esw_set_master_egress_rule(master_esw->dev,
					 slave_esw->dev);
	if (err)
		goto err_acl;

	return err;

err_acl:
	esw_set_slave_root_fdb(NULL, slave_esw->dev);

	return err;
}

void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
					      struct mlx5_eswitch *slave_esw)
{
	esw_unset_master_egress_rule(master_esw->dev);
	esw_set_slave_root_fdb(NULL, slave_esw->dev);
}
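/* Devcom glue: when two PFs of the same LAG come up in switchdev mode,
 * each eswitch broadcasts a PAIR event, and the handler below wires the
 * two sides together (peer namespaces, peer miss rules, rep pair events)
 * in both directions before marking the pair as established.
 */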
#define ESW_OFFLOADS_DEVCOM_PAIR	(0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR	(1)

static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;

	mlx5_esw_for_each_rep(esw, i, rep) {
		rep_type = NUM_REP_TYPES;
		while (rep_type--) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event)
				ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
		}
	}
}

static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
	mlx5_esw_offloads_rep_event_unpair(esw);
	esw_del_fdb_peer_miss_rules(esw);
}
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
				  struct mlx5_eswitch *peer_esw)
{
	const struct mlx5_eswitch_rep_ops *ops;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	u8 rep_type;
	int err;

	err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
	if (err)
		return err;

	mlx5_esw_for_each_rep(esw, i, rep) {
		for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
			ops = esw->offloads.rep_ops[rep_type];
			if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
			    ops->event) {
				err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
				if (err)
					goto err_out;
			}
		}
	}

	return 0;

err_out:
	mlx5_esw_offloads_unpair(esw);
	return err;
}

static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
					 struct mlx5_eswitch *peer_esw,
					 bool pair)
{
	struct mlx5_flow_root_namespace *peer_ns;
	struct mlx5_flow_root_namespace *ns;
	int err;

	peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
	ns = esw->dev->priv.steering->fdb_root_ns;

	if (pair) {
		err = mlx5_flow_namespace_set_peer(ns, peer_ns);
		if (err)
			return err;

		err = mlx5_flow_namespace_set_peer(peer_ns, ns);
		if (err) {
			mlx5_flow_namespace_set_peer(ns, NULL);
			return err;
		}
	} else {
		mlx5_flow_namespace_set_peer(ns, NULL);
		mlx5_flow_namespace_set_peer(peer_ns, NULL);
	}

	return 0;
}
static int mlx5_esw_offloads_devcom_event(int event,
					  void *my_data,
					  void *event_data)
{
	struct mlx5_eswitch *esw = my_data;
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;
	struct mlx5_eswitch *peer_esw = event_data;
	int err;

	switch (event) {
	case ESW_OFFLOADS_DEVCOM_PAIR:
		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
			break;

		err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
		if (err)
			goto err_out;
		err = mlx5_esw_offloads_pair(esw, peer_esw);
		if (err)
			goto err_peer;

		err = mlx5_esw_offloads_pair(peer_esw, esw);
		if (err)
			goto err_pair;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
		break;

	case ESW_OFFLOADS_DEVCOM_UNPAIR:
		if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
			break;

		mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
		mlx5_esw_offloads_unpair(peer_esw);
		mlx5_esw_offloads_unpair(esw);
		mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
		break;
	}

	return 0;

err_pair:
	mlx5_esw_offloads_unpair(esw);
err_peer:
	mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
	mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d",
		      event, err);
	return err;
}
static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	INIT_LIST_HEAD(&esw->offloads.peer_flows);
	mutex_init(&esw->offloads.peer_mutex);

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_register_component(devcom,
				       MLX5_DEVCOM_ESW_OFFLOADS,
				       mlx5_esw_offloads_devcom_event,
				       esw);

	mlx5_devcom_send_event(devcom,
			       MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_PAIR, esw);
}

static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
{
	struct mlx5_devcom *devcom = esw->dev->priv.devcom;

	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
		return;

	if (!mlx5_is_lag_supported(esw->dev))
		return;

	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);

	mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
{
	if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
		return false;

	if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	      MLX5_FDB_TO_VPORT_REG_C_0))
		return false;

	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
		return false;

	return true;
}

#define MLX5_ESW_METADATA_RSVD_UPLINK 1

/* Share the same metadata for both uplinks. This is fine because:
 * (a) In shared FDB mode (LAG) both uplinks are treated the
 *     same and tagged with the same metadata.
 * (b) In non-shared FDB mode, packets from physical port0
 *     cannot hit the eswitch of PF1 and vice versa.
 */
static u32 mlx5_esw_match_metadata_reserved(struct mlx5_eswitch *esw)
{
	return MLX5_ESW_METADATA_RSVD_UPLINK;
}
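/* Layout produced below (ESW_PFNUM_BITS == 4, ESW_VPORT_BITS == 12):
 *
 *   metadata = pf_num << 12 | ida_id
 *
 * so, illustratively, device index 1 with ida id 5 yields 0x1005.  Value 0
 * is never handed out and therefore doubles as the allocation-failure
 * indicator.
 */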
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
	u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
	/* Reserve 0xf for internal port offload */
	u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
	u32 pf_num;
	int id;

	/* Only 4 bits of pf_num */
	pf_num = mlx5_get_dev_index(esw->dev);
	if (pf_num > max_pf_num)
		return 0;

	/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
	/* Use only non-zero vport_id (2-4095) for all PFs */
	id = ida_alloc_range(&esw->offloads.vport_metadata_ida,
			     MLX5_ESW_METADATA_RSVD_UPLINK + 1,
			     vport_end_ida, GFP_KERNEL);
	if (id < 0)
		return 0;
	id = (pf_num << ESW_VPORT_BITS) | id;
	return id;
}

void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
	u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;

	/* Metadata contains only 12 bits of actual ida id */
	ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
					     struct mlx5_vport *vport)
{
	if (vport->vport == MLX5_VPORT_UPLINK)
		vport->default_metadata = mlx5_esw_match_metadata_reserved(esw);
	else
		vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);

	vport->metadata = vport->default_metadata;
	return vport->metadata ? 0 : -ENOSPC;
}

static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (!vport->default_metadata)
		return;

	if (vport->vport == MLX5_VPORT_UPLINK)
		return;

	WARN_ON(vport->metadata != vport->default_metadata);
	mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}

static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return;

	mlx5_esw_for_each_vport(esw, i, vport)
		esw_offloads_vport_metadata_cleanup(esw, vport);
}

static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	unsigned long i;
	int err;

	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	mlx5_esw_for_each_vport(esw, i, vport) {
		err = esw_offloads_vport_metadata_setup(esw, vport);
		if (err)
			goto metadata_err;
	}

	return 0;

metadata_err:
	esw_offloads_metadata_uninit(esw);
	return err;
}
int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
{
	int err = 0;

	down_write(&esw->mode_lock);
	if (mlx5_esw_is_fdb_created(esw)) {
		err = -EBUSY;
		goto done;
	}
	if (!mlx5_esw_vport_match_metadata_supported(esw)) {
		err = -EOPNOTSUPP;
		goto done;
	}
	if (enable)
		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
	else
		esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
done:
	up_write(&esw->mode_lock);
	return err;
}
static int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int err;

	err = esw_acl_ingress_ofld_setup(esw, vport);
	if (err)
		return err;

	err = esw_acl_egress_ofld_setup(esw, vport);
	if (err)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_ofld_cleanup(esw, vport);
	return err;
}

static void
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport)
{
	esw_acl_egress_ofld_cleanup(vport);
	esw_acl_ingress_ofld_cleanup(esw, vport);
}

static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return PTR_ERR(vport);

	return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
	if (IS_ERR(vport))
		return;

	esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}
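/* Steering objects are created below in dependency order: the indirection
 * table, uplink ACLs, the offloads (rx) table, the restore table, the FDB,
 * then the rx group and the catch-all drop objects.  The error unwind runs
 * the same list strictly in reverse, mirroring
 * esw_offloads_steering_cleanup().
 */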
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_indir_table *indir;
	int err;

	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
	mutex_init(&esw->fdb_table.offloads.vports.lock);
	hash_init(esw->fdb_table.offloads.vports.table);
	atomic64_set(&esw->user_count, 0);

	indir = mlx5_esw_indir_table_init();
	if (IS_ERR(indir)) {
		err = PTR_ERR(indir);
		goto create_indir_err;
	}
	esw->fdb_table.offloads.indir = indir;

	err = esw_create_uplink_offloads_acl_tables(esw);
	if (err)
		goto create_acl_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_offloads_err;

	err = esw_create_restore_table(esw);
	if (err)
		goto create_restore_err;

	err = esw_create_offloads_fdb_tables(esw);
	if (err)
		goto create_fdb_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_create_vport_rx_drop_group(esw);
	if (err)
		goto create_rx_drop_fg_err;

	err = esw_create_vport_rx_drop_rule(esw);
	if (err)
		goto create_rx_drop_rule_err;

	return 0;

create_rx_drop_rule_err:
	esw_destroy_vport_rx_drop_group(esw);
create_rx_drop_fg_err:
	esw_destroy_vport_rx_group(esw);
create_fg_err:
	esw_destroy_offloads_fdb_tables(esw);
create_fdb_err:
	esw_destroy_restore_table(esw);
create_restore_err:
	esw_destroy_offloads_table(esw);
create_offloads_err:
	esw_destroy_uplink_offloads_acl_tables(esw);
create_acl_err:
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
	return err;
}
static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
{
	esw_destroy_vport_rx_drop_rule(esw);
	esw_destroy_vport_rx_drop_group(esw);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_fdb_tables(esw);
	esw_destroy_restore_table(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_uplink_offloads_acl_tables(esw);
	mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
	mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
static void
esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
{
	struct devlink *devlink;
	bool host_pf_disabled;
	u16 new_num_vfs;
	int err;

	new_num_vfs = MLX5_GET(query_esw_functions_out, out,
			       host_params_context.host_num_of_vfs);
	host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
				    host_params_context.host_pf_disabled);

	if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
		return;

	devlink = priv_to_devlink(esw->dev);
	devl_lock(devlink);
	/* Number of VFs can only change from "0 to x" or "x to 0". */
	if (esw->esw_funcs.num_vfs > 0) {
		mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
	} else {
		err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
						  MLX5_VPORT_UC_ADDR_CHANGE);
		if (err) {
			devl_unlock(devlink);
			return;
		}
	}
	esw->esw_funcs.num_vfs = new_num_vfs;
	devl_unlock(devlink);
}

static void esw_functions_changed_event_handler(struct work_struct *work)
{
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;
	const u32 *out;

	host_work = container_of(work, struct mlx5_host_work, work);
	esw = host_work->esw;

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		goto out;

	esw_vfs_changed_event_handler(esw, out);
	kvfree(out);
out:
	kfree(host_work);
}

int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
{
	struct mlx5_esw_functions *esw_funcs;
	struct mlx5_host_work *host_work;
	struct mlx5_eswitch *esw;

	host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
	if (!host_work)
		return NOTIFY_DONE;

	esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
	esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);

	host_work->esw = esw;

	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
	queue_work(esw->work_queue, &host_work->work);

	return NOTIFY_OK;
}
static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
{
	const u32 *query_host_out;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return 0;

	query_host_out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(query_host_out))
		return PTR_ERR(query_host_out);

	/* Mark non local controller with non zero controller number. */
	esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
					     host_params_context.host_number);
	kvfree(query_host_out);
	return 0;
}

bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
{
	/* Local controller is always valid */
	if (controller == 0)
		return true;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	/* External host number starts with zero in device */
	return (controller == esw->offloads.host_number + 1);
}
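/* Enable sequence: RoCE and metadata bookkeeping come first, then the
 * reg_c0 object pool and steering objects, and only then the representors,
 * uplink rep first since the other reps depend on uplink resources.
 * esw_offloads_disable() further below unwinds in the opposite order.
 */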
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
	struct mapping_ctx *reg_c0_obj_pool;
	struct mlx5_vport *vport;
	unsigned long i;
	u64 mapping_id;
	int err;

	mutex_init(&esw->offloads.termtbl_mutex);
	mlx5_rdma_enable_roce(esw->dev);

	err = mlx5_esw_host_number_init(esw);
	if (err)
		goto err_metadata;

	err = esw_offloads_metadata_init(esw);
	if (err)
		goto err_metadata;

	err = esw_set_passing_vport_metadata(esw, true);
	if (err)
		goto err_vport_metadata;

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
						sizeof(struct mlx5_mapped_obj),
						ESW_REG_C0_USER_DATA_METADATA_MASK,
						true);

	if (IS_ERR(reg_c0_obj_pool)) {
		err = PTR_ERR(reg_c0_obj_pool);
		goto err_pool;
	}
	esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;

	err = esw_offloads_steering_init(esw);
	if (err)
		goto err_steering_init;

	/* Representor will control the vport link state */
	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;

	/* Uplink vport rep must load first. */
	err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
	if (err)
		goto err_uplink;

	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		goto err_vports;

	esw_offloads_devcom_init(esw);

	return 0;

err_vports:
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
err_uplink:
	esw_offloads_steering_cleanup(esw);
err_steering_init:
	mapping_destroy(reg_c0_obj_pool);
err_pool:
	esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
	esw_offloads_metadata_uninit(esw);
err_metadata:
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw,
			     struct netlink_ext_ack *extack)
{
	int err;

	esw->mode = MLX5_ESWITCH_LEGACY;

	/* If changing from switchdev to legacy mode without sriov enabled,
	 * no need to create legacy fdb.
	 */
	if (!mlx5_sriov_is_enabled(esw->dev))
		return 0;

	err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");

	return err;
}

void esw_offloads_disable(struct mlx5_eswitch *esw)
{
	esw_offloads_devcom_cleanup(esw);
	mlx5_eswitch_disable_pf_vf_vports(esw);
	esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
	esw_set_passing_vport_metadata(esw, false);
	esw_offloads_steering_cleanup(esw);
	mapping_destroy(esw->offloads.reg_c0_obj_pool);
	esw_offloads_metadata_uninit(esw);
	mlx5_rdma_disable_roce(esw->dev);
	mutex_destroy(&esw->offloads.termtbl_mutex);
}
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = MLX5_ESWITCH_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = MLX5_ESWITCH_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case MLX5_ESWITCH_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case MLX5_ESWITCH_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool esw_offloads_devlink_ns_eq_netdev_ns(struct devlink *devlink)
{
	struct net *devl_net, *netdev_net;
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	netdev_net = dev_net(esw->dev->mlx5e_res.uplink_netdev);
	devl_net = devlink_net(devlink);

	return net_eq(devl_net, netdev_net);
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
				  struct netlink_ext_ack *extack)
{
	u16 cur_mlx5_mode, mlx5_mode = 0;
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
	    !esw_offloads_devlink_ns_eq_netdev_ns(devlink)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
		return -EPERM;
	}

	mlx5_lag_disable_change(esw->dev);
	err = mlx5_esw_try_lock(esw);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
		goto enable_lag;
	}
	cur_mlx5_mode = err;
	err = 0;

	if (cur_mlx5_mode == mlx5_mode)
		goto unlock;

	mlx5_eswitch_disable_locked(esw);
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
		if (mlx5_devlink_trap_get_num_active(esw->dev)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Can't change mode while devlink traps are active");
			err = -EOPNOTSUPP;
			goto unlock;
		}
		err = esw_offloads_start(esw, extack);
	} else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
		err = esw_offloads_stop(esw, extack);
		mlx5_rescan_drivers(esw->dev);
	} else {
		err = -EINVAL;
	}

unlock:
	mlx5_esw_unlock(esw);
enable_lag:
	mlx5_lag_enable_change(esw->dev);
	return err;
}
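/* The transition above is what userspace triggers with, e.g. (device
 * address is illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 *
 * Note the LAG-change lock is taken before mode_lock so that a concurrent
 * bond creation cannot race with the FDB teardown and rebuild.
 */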
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	err = esw_mode_to_devlink(esw->mode, mode);
	up_read(&esw->mode_lock);
	return err;
}
static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
				      struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	u16 err_vport_num = 0;
	unsigned long i;
	int err = 0;

	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
		if (err) {
			err_vport_num = vport->vport;
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set min inline on vport");
			goto revert_inline_mode;
		}
	}
	return 0;

revert_inline_mode:
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		if (vport->vport == err_vport_num)
			break;
		mlx5_modify_nic_vport_min_inline(dev,
						 vport->vport,
						 esw->offloads.inline_mode);
	}
	return err;
}
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
					 struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	u8 mlx5_mode;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
			err = 0;
			goto out;
		}
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
		err = -EOPNOTSUPP;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set inline mode when flows are configured");
		err = -EOPNOTSUPP;
		goto out;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
	if (err)
		goto out;

	esw->offloads.inline_mode = mlx5_mode;
	up_write(&esw->mode_lock);
	return 0;

out:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_eswitch *esw;
	int err;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
	up_read(&esw->mode_lock);
	return err;
}
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
					enum devlink_eswitch_encap_mode encap,
					struct netlink_ext_ack *extack)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw;
	int err = 0;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_write(&esw->mode_lock);

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
		err = -EOPNOTSUPP;
		goto unlock;
	}

	if (esw->mode == MLX5_ESWITCH_LEGACY) {
		esw->offloads.encap = encap;
		goto unlock;
	}

	if (esw->offloads.encap == encap)
		goto unlock;

	if (atomic64_read(&esw->offloads.num_flows) > 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't set encapsulation when flows are configured");
		err = -EOPNOTSUPP;
		goto unlock;
	}

	esw_destroy_offloads_fdb_tables(esw);

	esw->offloads.encap = encap;

	err = esw_create_offloads_fdb_tables(esw);

	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Failed re-creating fast FDB table");
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fdb_tables(esw);
	}

unlock:
	up_write(&esw->mode_lock);
	return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
					enum devlink_eswitch_encap_mode *encap)
{
	struct mlx5_eswitch *esw;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	down_read(&esw->mode_lock);
	*encap = esw->offloads.encap;
	up_read(&esw->mode_lock);
	return 0;
}
static bool
mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
{
	/* Currently, only ECPF based device has representor for host PF. */
	if (vport_num == MLX5_VPORT_PF &&
	    !mlx5_core_is_ecpf_esw_manager(esw->dev))
		return false;

	if (vport_num == MLX5_VPORT_ECPF &&
	    !mlx5_ecpf_vport_exists(esw->dev))
		return false;

	return true;
}

void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
				      const struct mlx5_eswitch_rep_ops *ops,
				      u8 rep_type)
{
	struct mlx5_eswitch_rep_data *rep_data;
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	esw->offloads.rep_ops[rep_type] = ops;
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
			rep->esw = esw;
			rep_data = &rep->rep_data[rep_type];
			atomic_set(&rep_data->state, REP_REGISTERED);
		}
	}
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);

void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;

	if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		__unload_reps_all_vport(esw, rep_type);

	mlx5_esw_for_each_rep(esw, i, rep)
		atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	return rep->rep_data[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 u16 vport,
				 u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;

	rep = mlx5_eswitch_get_rep(esw, vport);

	if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
	    esw->offloads.rep_ops[rep_type]->get_proto_dev)
		return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						u16 vport)
{
	return mlx5_eswitch_get_rep(esw, vport);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);

bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
}
EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);

bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
{
	return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
}
EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);

u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
					      u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
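/* SF (sub-function) port lifecycle: enable the vport, register its devlink
 * port and debugfs entry, then load the rep.  Teardown below is the mirror
 * image, so a failure at any stage leaves nothing half-constructed.
 */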
int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
				      u16 vport_num, u32 controller, u32 sfnum)
{
	int err;

	err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
	if (err)
		return err;

	err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
	if (err)
		goto devlink_err;

	mlx5_esw_vport_debugfs_create(esw, vport_num, true, sfnum);
	err = mlx5_esw_offloads_rep_load(esw, vport_num);
	if (err)
		goto rep_err;
	return 0;

rep_err:
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
devlink_err:
	mlx5_esw_vport_disable(esw, vport_num);
	return err;
}

void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
{
	mlx5_esw_offloads_rep_unload(esw, vport_num);
	mlx5_esw_vport_debugfs_destroy(esw, vport_num);
	mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
	mlx5_esw_vport_disable(esw, vport_num);
}
static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *query_ctx;
	void *hca_caps;
	int err;

	*vhca_id = 0;
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return -EPERM;

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(esw->dev, vport_num, query_ctx);
	if (err)
		goto out_free;

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	*vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);

out_free:
	kfree(query_ctx);
	return err;
}
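/* vhca_map caches the vhca_id -> vport_num relation so that callers such
 * as the TC offload path can resolve the vport behind a vhca_id without
 * re-querying HCA caps on every lookup.
 */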
int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *old_entry, *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err) {
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
			 vport_num, err);
		return err;
	}

	vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
	if (!vhca_map_entry)
		return -ENOMEM;

	*vhca_map_entry = vport_num;
	old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
	if (xa_is_err(old_entry)) {
		kfree(vhca_map_entry);
		return xa_err(old_entry);
	}
	kfree(old_entry);
	return 0;
}

void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 *vhca_map_entry, vhca_id;
	int err;

	err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
	if (err)
		esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%hu,err=%d)\n",
			 vport_num, err);

	vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
	kfree(vhca_map_entry);
}

int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
{
	u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);

	if (!res)
		return -ENOENT;

	*vport_num = *res;
	return 0;
}
u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
					    u16 vport_num)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);

	if (WARN_ON_ONCE(IS_ERR(vport)))
		return 0;

	return vport->metadata;
}
EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);

static bool
is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_esw_is_sf_vport(esw, vport_num);
}
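/* The callbacks below back the devlink "port function" interface, e.g.
 * (port handle is illustrative):
 *
 *   devlink port function set pci/0000:06:00.0/1 hw_addr 00:11:22:33:44:55
 *
 * They are only offered for PF, VF and SF vports, per the helper above.
 */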
int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	ether_addr_copy(hw_addr, vport->info.mac);
	*hw_addr_len = ETH_ALEN;
	mutex_unlock(&esw->state_lock);
	return 0;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}

	return mlx5_eswitch_set_vport_mac(esw, vport_num, hw_addr);
}
static struct mlx5_vport *
mlx5_devlink_port_fn_get_vport(struct devlink_port *port, struct mlx5_eswitch *esw)
{
	u16 vport_num;

	if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
		return ERR_PTR(-EOPNOTSUPP);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return ERR_PTR(-EOPNOTSUPP);

	return mlx5_eswitch_get_vport(esw, vport_num);
}

int mlx5_devlink_port_fn_migratable_get(struct devlink_port *port, bool *is_enabled,
					struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return err;
	}

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		*is_enabled = vport->info.mig_enabled;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	void *query_ctx;
	void *hca_caps;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	if (!MLX5_CAP_GEN(esw->dev, migration)) {
		NL_SET_ERR_MSG_MOD(extack, "Device doesn't support migration");
		return err;
	}

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto out;
	}

	if (vport->info.mig_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
					    MLX5_CAP_GENERAL_2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);

	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA migratable cap");
		goto out_free;
	}

	vport->info.mig_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
				  struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		*is_enabled = vport->info.roce_enabled;
		err = 0;
	}
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
				  struct netlink_ext_ack *extack)
{
	int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	void *query_ctx;
	void *hca_caps;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(port->devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport = mlx5_devlink_port_fn_get_vport(port, esw);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}
	vport_num = vport->vport;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
		goto out;
	}

	if (vport->info.roce_enabled == enable) {
		err = 0;
		goto out;
	}

	query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
	if (!query_ctx) {
		err = -ENOMEM;
		goto out;
	}

	err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
					    MLX5_CAP_GENERAL);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
		goto out_free;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
	MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);

	err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
					    MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA roce cap");
		goto out_free;
	}

	vport->info.roce_enabled = enable;

out_free:
	kfree(query_ctx);
out:
	mutex_unlock(&esw->state_lock);
	return err;
}