1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
/* Query the E-Switch vport context of a given vport and read back the
 * SW-steering per-vport ICM base addresses (RX and TX directions).
 * NOTE(review): several original lines (remaining parameters, local 'err',
 * error handling, return) are elided in this view.
 */
6 int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
12 u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
13 u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
16 MLX5_SET(query_esw_vport_context_in, in, opcode,
17 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
/* other_vport selects whether vport_number refers to a vport other than
 * the caller's own.
 */
18 MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
19 MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
21 err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
/* Extract the 64-bit SW-steering ICM addresses from the returned context. */
26 MLX5_GET64(query_esw_vport_context_out, out,
27 esw_vport_context.sw_steering_vport_icm_address_rx);
29 MLX5_GET64(query_esw_vport_context_out, out,
30 esw_vport_context.sw_steering_vport_icm_address_tx);
/* Query the GVMI (vhca_id) of a function via QUERY_HCA_CAP.
 * When other_vport is set, vport_number identifies the other function to
 * query; the result is written to *gvmi.
 * NOTE(review): 'out'/'out_size' declarations, allocation-failure check,
 * error handling, kfree and return are elided in this view.
 */
34 int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
35 u16 vport_number, u16 *gvmi)
37 u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
/* The HCA-cap output is large, so it is heap-allocated rather than
 * placed on the stack like the other commands in this file.
 */
42 out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
43 out = kzalloc(out_size, GFP_KERNEL);
47 MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
48 MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
49 MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
/* op_mod: general-device capability group, current (not max) values. */
50 MLX5_SET(query_hca_cap_in, in, op_mod,
51 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
52 HCA_CAP_OPMOD_GET_CUR)
54 err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
60 *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
/* Fill *caps with the E-Switch FDB flow-table capabilities needed by
 * SW steering: drop/uplink ICM addresses and the sw_owner(_v2) flags.
 * Reads cached device capabilities only; no firmware command is issued.
 */
66 int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
67 struct mlx5dr_esw_caps *caps)
69 caps->drop_icm_address_rx =
70 MLX5_CAP64_ESW_FLOWTABLE(mdev,
71 sw_steering_fdb_action_drop_icm_address_rx);
72 caps->drop_icm_address_tx =
73 MLX5_CAP64_ESW_FLOWTABLE(mdev,
74 sw_steering_fdb_action_drop_icm_address_tx);
75 caps->uplink_icm_address_rx =
76 MLX5_CAP64_ESW_FLOWTABLE(mdev,
77 sw_steering_uplink_icm_address_rx);
78 caps->uplink_icm_address_tx =
79 MLX5_CAP64_ESW_FLOWTABLE(mdev,
80 sw_steering_uplink_icm_address_tx);
/* sw_owner_v2 supersedes sw_owner; only fall back to the v1 flag when
 * v2 is not supported.
 */
81 caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
82 if (!caps->sw_owner_v2)
83 caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
/* Query the NIC vport context of 'vport' and report whether RoCE is
 * enabled on it via *roce_en.
 * NOTE(review): 'err' declaration, error handling and return are elided
 * in this view.
 */
88 static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
89 u16 vport, bool *roce_en)
91 u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
92 u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
95 MLX5_SET(query_nic_vport_context_in, in, opcode,
96 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
97 MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
/* other_vport must be set when querying any vport other than our own
 * (vport 0 is the caller's own vport).
 */
98 MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
100 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
104 *roce_en = MLX5_GET(query_nic_vport_context_out, out,
105 nic_vport_context.roce_en);
/* Populate *caps with every device capability SW steering cares about:
 * general HCA caps, RoCE state, flex-parser IDs, NIC RX/TX steering ICM
 * addresses and sw_owner flags, ICM sizing and ECPF role.
 * NOTE(review): local declarations ('err', 'roce_en'), some error paths
 * and the return are elided in this view.
 */
109 int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
110 struct mlx5dr_cmd_caps *caps)
/* General HCA capabilities. */
115 caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
116 caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
117 caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
118 caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
119 caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
120 caps->roce_caps.fl_rc_qp_when_roce_disabled =
121 MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
/* When the device supports RoCE, refine the RoCE-related caps with the
 * per-vport enable state (queried from FW) and the RoCE cap group.
 */
123 if (MLX5_CAP_GEN(mdev, roce)) {
124 err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
128 caps->roce_caps.roce_en = roce_en;
/* OR with the general cap read above: either source may assert it. */
129 caps->roce_caps.fl_rc_qp_when_roce_disabled |=
130 MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
131 caps->roce_caps.fl_rc_qp_when_roce_enabled =
132 MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
135 caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
137 /* geneve_tlv_option_0_exist is the indication of
138 * STE support for lookup type flex_parser_ok
140 caps->flex_parser_ok_bits_supp =
141 MLX5_CAP_FLOWTABLE(mdev,
142 flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
/* Flex-parser IDs are only meaningful when the corresponding protocol
 * bit is set in flex_protocols, hence the per-protocol guards below.
 */
144 if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
145 caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
146 caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
149 if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
150 caps->flex_parser_id_icmpv6_dw0 =
151 MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
152 caps->flex_parser_id_icmpv6_dw1 =
153 MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
156 if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
157 caps->flex_parser_id_geneve_tlv_option_0 =
158 MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
160 if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
161 caps->flex_parser_id_mpls_over_gre =
162 MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
164 if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
165 caps->flex_parser_id_mpls_over_udp =
166 MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
168 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
169 caps->flex_parser_id_gtpu_dw_0 =
170 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
172 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
173 caps->flex_parser_id_gtpu_teid =
174 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
176 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
177 caps->flex_parser_id_gtpu_dw_2 =
178 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
180 if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
181 caps->flex_parser_id_gtpu_first_ext_dw_0 =
182 MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
/* NIC RX/TX SW-steering action ICM addresses. */
184 caps->nic_rx_drop_address =
185 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
186 caps->nic_tx_drop_address =
187 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
188 caps->nic_tx_allow_address =
189 MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
/* Per-direction sw_owner: v2 supersedes v1, same pattern as the FDB
 * caps in mlx5dr_cmd_query_esw_caps().
 */
191 caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
192 caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
194 if (!caps->rx_sw_owner_v2)
195 caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
196 if (!caps->tx_sw_owner_v2)
197 caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
199 caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
/* Device-memory (ICM) sizing and modify-header ICM base. */
201 caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
202 caps->hdr_modify_icm_addr =
203 MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
205 caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
207 caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
/* Query an existing flow table by type/id and fill *output with its
 * status, level and SW-owner ICM roots.
 * NOTE(review): 'table_id' parameter, 'err' declaration, error handling
 * and return are elided in this view.
 */
212 int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
213 enum fs_flow_table_type type,
215 struct mlx5dr_cmd_query_flow_table_details *output)
217 u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
218 u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
221 MLX5_SET(query_flow_table_in, in, opcode,
222 MLX5_CMD_OP_QUERY_FLOW_TABLE);
224 MLX5_SET(query_flow_table_in, in, table_type, type);
225 MLX5_SET(query_flow_table_in, in, table_id, table_id);
227 err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
231 output->status = MLX5_GET(query_flow_table_out, out, status);
232 output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
/* icm_root_1 carries the FDB TX root, icm_root_0 the RX/NIC root. */
234 output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
235 flow_table_context.sw_owner_icm_root_1);
236 output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
237 flow_table_context.sw_owner_icm_root_0);
/* Query a flow-sampler general object by id and return its SW-steering
 * RX/TX ICM addresses through the output pointers.
 * NOTE(review): remaining parameters ('sampler_id', address out-params),
 * 'attr'/'ret' declarations, error handling and return are elided in
 * this view.
 */
242 int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
247 u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
248 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
/* Samplers are firmware "general objects"; queried via the generic
 * general-object command header.
 */
252 MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
253 MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
254 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
255 MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
256 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
258 ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
262 attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
264 *rx_icm_addr = MLX5_GET64(sampler_obj, attr,
265 sw_steering_icm_address_rx);
266 *tx_icm_addr = MLX5_GET64(sampler_obj, attr,
267 sw_steering_icm_address_tx);
/* Issue SYNC_STEERING so firmware/hardware observe all prior SW-steering
 * ICM writes. Skipped (treated as success) when the device is in
 * internal-error state, where the command could not complete anyway.
 */
272 int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
274 u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
276 /* Skip SYNC in case the device is internal error state.
277 * Besides a device error, this also happens when we're
280 if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
283 MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
285 return mlx5_cmd_exec_in(mdev, sync_steering, in);
/* Insert a flow-table entry whose action is "apply modify-header, then
 * forward to a single vport destination" (FWD_DEST | MOD_HDR).
 * NOTE(review): remaining parameters (table_type/table_id/group_id/vport),
 * 'in'/'inlen'/'err'/'in_dests' declarations, allocation-failure check,
 * kvfree and return are elided in this view.
 */
288 int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
292 u32 modify_header_id,
295 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
296 void *in_flow_context;
/* Input is variable-length: base struct plus exactly one destination. */
302 inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
303 1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
305 in = kvzalloc(inlen, GFP_KERNEL);
309 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
310 MLX5_SET(set_fte_in, in, table_type, table_type);
311 MLX5_SET(set_fte_in, in, table_id, table_id);
313 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
314 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
315 MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
316 MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
317 MLX5_SET(flow_context, in_flow_context, action,
318 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
319 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
/* Single destination: the requested vport. */
321 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
322 MLX5_SET(dest_format_struct, in_dests, destination_type,
323 MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
324 MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
326 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
/* Delete a flow-table entry. NOTE(review): the table_type/table_id
 * parameters on the signature are elided in this view.
 */
332 int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
336 u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
338 MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
339 MLX5_SET(delete_fte_in, in, table_type, table_type);
340 MLX5_SET(delete_fte_in, in, table_id, table_id);
342 return mlx5_cmd_exec_in(mdev, delete_fte, in);
/* Allocate a firmware modify-header context from an array of 8-byte
 * actions; the resulting id is returned via *modify_header_id.
 * NOTE(review): remaining parameters (table_type, num_of_actions,
 * actions), 'in'/'inlen'/'err'/'p_actions' declarations, error handling,
 * kvfree and return are elided in this view.
 */
345 int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
349 u32 *modify_header_id)
351 u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
/* Variable-length input: one u64 per modify action appended to the
 * base command layout.
 */
357 inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
358 num_of_actions * sizeof(u64);
359 in = kvzalloc(inlen, GFP_KERNEL);
363 MLX5_SET(alloc_modify_header_context_in, in, opcode,
364 MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
365 MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
366 MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
367 p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
368 memcpy(p_actions, actions, num_of_actions * sizeof(u64));
370 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
374 *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
/* Release a modify-header context previously allocated by
 * mlx5dr_cmd_alloc_modify_header().
 */
381 int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
382 u32 modify_header_id)
384 u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
386 MLX5_SET(dealloc_modify_header_context_in, in, opcode,
387 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
388 MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
391 return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
/* Create a flow group with no match criteria (matches everything) in the
 * given table; the new group id is returned via *group_id.
 * NOTE(review): remaining parameters (table_type/table_id/group_id),
 * 'in'/'err' declarations, error handling, kvfree and return are elided
 * in this view.
 */
394 int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
399 u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
400 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
/* kvzalloc keeps the (zeroed => no match criteria) input off the stack. */
404 in = kvzalloc(inlen, GFP_KERNEL);
408 MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
409 MLX5_SET(create_flow_group_in, in, table_type, table_type);
410 MLX5_SET(create_flow_group_in, in, table_id, table_id);
412 err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
416 *group_id = MLX5_GET(create_flow_group_out, out, group_id);
/* Destroy a flow group by table type/id and group id.
 * NOTE(review): the parameters on the signature are elided in this view.
 */
423 int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
428 u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
430 MLX5_SET(destroy_flow_group_in, in, opcode,
431 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
432 MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
433 MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
434 MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
436 return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
/* Create a flow table per *attr. For SW-owned tables the caller-supplied
 * ICM roots are programmed; for FW-owned FDB tables the FW-chosen RX ICM
 * address is read back (split across three output fields).
 * NOTE(review): remaining parameters ('table_id' out-param), 'ft_mdev'/
 * 'err' declarations, decap/reformat condition operands, error handling
 * and return are elided in this view.
 */
439 int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
440 struct mlx5dr_cmd_create_flow_table_attr *attr,
441 u64 *fdb_rx_icm_addr,
444 u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
445 u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
449 MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
450 MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
451 MLX5_SET(create_flow_table_in, in, uid, attr->uid);
453 ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
454 MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
455 MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
456 MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
458 if (attr->sw_owner) {
459 /* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
460 * icm_addr_1 used for FDB TX
462 if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
463 MLX5_SET64(flow_table_context, ft_mdev,
464 sw_owner_icm_root_0, attr->icm_addr_rx);
465 } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
466 MLX5_SET64(flow_table_context, ft_mdev,
467 sw_owner_icm_root_0, attr->icm_addr_tx);
468 } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
/* FDB uses both roots: RX in root_0, TX in root_1. */
469 MLX5_SET64(flow_table_context, ft_mdev,
470 sw_owner_icm_root_0, attr->icm_addr_rx);
471 MLX5_SET64(flow_table_context, ft_mdev,
472 sw_owner_icm_root_1, attr->icm_addr_tx);
476 MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
478 MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
481 err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
485 *table_id = MLX5_GET(create_flow_table_out, out, table_id);
/* For FW-owned FDB tables, reassemble the 64-bit FDB RX ICM address from
 * the 32/8/24-bit output chunks.
 */
486 if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
489 (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
490 (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
491 (u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
/* Destroy a flow table by type and id.
 * NOTE(review): the parameters on the signature are elided in this view.
 */
496 int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
500 u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
502 MLX5_SET(destroy_flow_table_in, in, opcode,
503 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
504 MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
505 MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
507 return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
/* Allocate a packet-reformat (encap/decap) context of type 'rt' carrying
 * 'reformat_size' bytes of reformat data; the id is returned via
 * *reformat_id.
 * NOTE(review): remaining parameters (reformat_param_0/1, reformat_data,
 * reformat_id), 'in'/'prctx'/'pdata'/'err' declarations, error handling,
 * kvfree and return are elided in this view.
 */
510 int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
511 enum mlx5_reformat_ctx_type rt,
514 size_t reformat_size,
518 u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
519 size_t inlen, cmd_data_sz, cmd_total_sz;
/* Input size = base command size, with the fixed-size reformat_data
 * field replaced by the actual payload length, rounded up to 4 bytes.
 */
525 cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
526 cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
527 packet_reformat_context.reformat_data);
528 inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
529 in = kvzalloc(inlen, GFP_KERNEL);
533 MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
534 MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
536 prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
537 pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
539 MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
540 MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
541 MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
542 MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
/* Data is optional: some reformat types are fully described by the
 * type/params alone.
 */
543 if (reformat_data && reformat_size)
544 memcpy(pdata, reformat_data, reformat_size);
546 err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
550 *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
/* Free a packet-reformat context. Returns void: the command status is
 * intentionally ignored on this teardown path.
 * NOTE(review): the reformat-id parameter on the signature is elided in
 * this view.
 */
556 void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
559 u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
561 MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
562 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
563 MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
566 mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
/* Program the dword/byte selector arrays of a match-definer object.
 * Only meaningful for the "format select" definer format; for any other
 * format_id the selectors are left untouched.
 * NOTE(review): the selector-array parameters on the signature and the
 * early return are elided in this view.
 */
569 static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
573 if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
/* 9 dword selectors ... */
576 MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
577 MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
578 MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
579 MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
580 MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
581 MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
582 MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
583 MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
584 MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
/* ... and 8 byte selectors. */
586 MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
587 MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
588 MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
589 MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
590 MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
591 MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
592 MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
593 MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
/* Create a match-definer general object: program its format id, optional
 * dword/byte selectors, and match mask; the object id is returned via
 * *definer_id.
 * NOTE(review): remaining parameters (format_id, selectors, match_mask,
 * definer_id), 'ptr'/'err' declarations, error handling and return are
 * elided in this view.
 */
596 int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
603 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
604 u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
/* Fill the generic general-object command header first. */
608 ptr = MLX5_ADDR_OF(create_match_definer_in, in,
609 general_obj_in_cmd_hdr);
610 MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
611 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
612 MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
613 MLX5_OBJ_TYPE_MATCH_DEFINER);
/* Then the definer-specific object context. */
615 ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
616 MLX5_SET(match_definer, ptr, format_id, format_id);
/* Selectors only take effect for the format-select definer format. */
618 dr_cmd_set_definer_format(ptr, format_id,
619 dw_selectors, byte_selectors);
/* 'ptr' is repointed at the match_mask field inside the definer. */
621 ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
622 memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
624 err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
628 *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
/* Destroy a match-definer general object; the command status is ignored
 * on this teardown path.
 * NOTE(review): the return type on the preceding line is elided in this
 * view.
 */
634 mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
636 u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
637 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
639 MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
640 MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
641 MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
643 mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
/* Query a RoCE address-table entry (GID) and fill *attr with the L3
 * address, source MAC and RoCE version.
 * NOTE(review): 'err' declaration, the memcpy destinations/lengths for
 * the address fields, error handling and return are elided in this view.
 */
646 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
647 u16 index, struct mlx5dr_cmd_gid_attr *attr)
649 u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
650 u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
653 MLX5_SET(query_roce_address_in, in, opcode,
654 MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
656 MLX5_SET(query_roce_address_in, in, roce_address_index, index);
657 MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
659 err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
/* Copy out the L3 (GID) address ... */
664 MLX5_ADDR_OF(query_roce_address_out,
665 out, roce_address.source_l3_address),
/* ... and the 48-bit source MAC (upper 16 bits field shown here). */
668 MLX5_ADDR_OF(query_roce_address_out, out,
669 roce_address.source_mac_47_32),
/* Normalize the reported version to the two values SW steering uses. */
672 if (MLX5_GET(query_roce_address_out, out,
673 roce_address.roce_version) == MLX5_ROCE_VERSION_2)
674 attr->roce_ver = MLX5_ROCE_VERSION_2;
676 attr->roce_ver = MLX5_ROCE_VERSION_1;
/* Decide whether an FTE needs the extended destination format: set
 * *extended_dest when there is more than one forwarding destination and
 * at least one vport/uplink destination carries a reformat id. Also
 * validates the encap count against the FW limit.
 * NOTE(review): 'num_encap'/'i' declarations, loop-body continue/counter
 * statements, error return values and the final return are elided in
 * this view.
 */
681 static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
682 struct mlx5dr_cmd_fte_info *fte,
685 int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
686 int num_fwd_destinations = 0;
690 *extended_dest = false;
/* Only FWD_DEST actions carry forwarding destinations at all. */
691 if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
693 for (i = 0; i < fte->dests_size; i++) {
/* Counters and NONE entries are not forwarding destinations. */
694 if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
695 fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
/* A vport/uplink destination with a per-destination reformat id is
 * what forces the extended format (counted as an encap).
 */
697 if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
698 fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
699 fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
701 num_fwd_destinations++;
704 if (num_fwd_destinations > 1 && num_encap > 0)
705 *extended_dest = true;
707 if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
708 mlx5_core_warn(dev, "FW does not support extended destination");
/* FW caps the number of encap destinations at 2^log_max_fdb_encap_uplink. */
711 if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
712 mlx5_core_warn(dev, "FW does not support more than %d encaps",
713 1 << fw_log_max_fdb_encap_uplink);
/* Build and execute SET_FLOW_TABLE_ENTRY for an FTE described by *fte:
 * flow context (tag/source/actions/vlans/match value), the forwarding
 * destination list (regular or extended format) and the flow-counter
 * list. Mirrors mlx5_cmd_set_fte() in the core flow-steering code.
 * NOTE(review): this view elides the 'group_id' parameter, several local
 * declarations ('in', 'inlen', 'err', 'in_dests', 'dst_cnt_size',
 * 'action', 'list_size', 'i', 'id'), error paths, kvfree, some braces
 * and everything after the final mlx5_cmd_exec().
 */
720 int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
721 int opmod, int modify_mask,
722 struct mlx5dr_cmd_ft_info *ft,
724 struct mlx5dr_cmd_fte_info *fte)
726 u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
727 void *in_flow_context, *vlan;
728 bool extended_dest = false;
729 void *in_match_value;
/* Decide destination entry format; also validates encap limits. */
737 if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
741 dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
743 dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
/* Variable-length input: one destination entry per fte->dest_arr slot. */
745 inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
746 in = kvzalloc(inlen, GFP_KERNEL);
750 MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
751 MLX5_SET(set_fte_in, in, op_mod, opmod);
752 MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
753 MLX5_SET(set_fte_in, in, table_type, ft->type);
754 MLX5_SET(set_fte_in, in, table_id, ft->id);
755 MLX5_SET(set_fte_in, in, flow_index, fte->index);
756 MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
/* Table may belong to another vport (guard condition elided here). */
758 MLX5_SET(set_fte_in, in, vport_number, ft->vport);
759 MLX5_SET(set_fte_in, in, other_vport, 1);
762 in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
763 MLX5_SET(flow_context, in_flow_context, group_id, group_id);
765 MLX5_SET(flow_context, in_flow_context, flow_tag,
766 fte->flow_context.flow_tag);
767 MLX5_SET(flow_context, in_flow_context, flow_source,
768 fte->flow_context.flow_source);
770 MLX5_SET(flow_context, in_flow_context, extended_destination,
/* In extended-destination mode the packet-reformat action moves into the
 * per-destination entries, so it is masked out of the context action.
 */
775 action = fte->action.action &
776 ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
777 MLX5_SET(flow_context, in_flow_context, action, action);
779 MLX5_SET(flow_context, in_flow_context, action,
781 if (fte->action.pkt_reformat)
782 MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
783 fte->action.pkt_reformat->id);
785 if (fte->action.modify_hdr)
786 MLX5_SET(flow_context, in_flow_context, modify_header_id,
787 fte->action.modify_hdr->id);
/* Up to two pushed VLAN headers (guard conditions elided here). */
789 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
791 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
792 MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
793 MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
795 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
797 MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
798 MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
799 MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
801 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
803 memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
/* Forwarding destination list: translate each mlx5dr destination into
 * the IFC destination type + id and append one entry per destination.
 */
805 in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
806 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
809 for (i = 0; i < fte->dests_size; i++) {
810 enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
811 enum mlx5_ifc_flow_destination_type ifc_type;
/* Counters are emitted in the separate counter list below. */
814 if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
818 case MLX5_FLOW_DESTINATION_TYPE_NONE:
820 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
821 id = fte->dest_arr[i].ft_num;
822 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
824 case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
825 id = fte->dest_arr[i].ft_id;
826 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
829 case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
830 case MLX5_FLOW_DESTINATION_TYPE_VPORT:
831 if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
832 id = fte->dest_arr[i].vport.num;
833 MLX5_SET(dest_format_struct, in_dests,
834 destination_eswitch_owner_vhca_id_valid,
835 !!(fte->dest_arr[i].vport.flags &
836 MLX5_FLOW_DEST_VPORT_VHCA_ID));
837 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
/* Uplink destinations always carry a valid eswitch-owner vhca id. */
840 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
841 MLX5_SET(dest_format_struct, in_dests,
842 destination_eswitch_owner_vhca_id_valid, 1);
844 MLX5_SET(dest_format_struct, in_dests,
845 destination_eswitch_owner_vhca_id,
846 fte->dest_arr[i].vport.vhca_id);
/* Per-destination reformat is only expressible in extended format. */
847 if (extended_dest && (fte->dest_arr[i].vport.flags &
848 MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
849 MLX5_SET(dest_format_struct, in_dests,
851 !!(fte->dest_arr[i].vport.flags &
852 MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
853 MLX5_SET(extended_dest_format, in_dests,
855 fte->dest_arr[i].vport.reformat_id);
858 case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
859 id = fte->dest_arr[i].sampler_id;
860 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
863 id = fte->dest_arr[i].tir_num;
864 ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
867 MLX5_SET(dest_format_struct, in_dests, destination_type,
869 MLX5_SET(dest_format_struct, in_dests, destination_id, id);
/* Advance by the chosen entry size (regular or extended). */
870 in_dests += dst_cnt_size;
874 MLX5_SET(flow_context, in_flow_context, destination_list_size,
/* Flow-counter list follows the forwarding destinations; its length is
 * bounded by 2^log_max_flow_counter for this table type.
 */
878 if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
879 int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
880 log_max_flow_counter,
884 for (i = 0; i < fte->dests_size; i++) {
885 if (fte->dest_arr[i].type !=
886 MLX5_FLOW_DESTINATION_TYPE_COUNTER)
889 MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
890 fte->dest_arr[i].counter_id);
891 in_dests += dst_cnt_size;
894 if (list_size > max_list_size) {
899 MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
903 err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));